diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000000..5014bfa48ac1 --- /dev/null +++ b/.gitignore @@ -0,0 +1,30 @@ +# +# NOTE! Don't add files that are generated in specific +# subdirectories here. Add them in the ".gitignore" file +# in that subdirectory instead. +# +# Normal rules +# +.* +*.o +*.a +*.s +*.ko +*.mod.c + +# +# Top-level generic files +# +vmlinux* +System.map +Module.symvers + +# +# Generated include files +# +include/asm +include/config +include/linux/autoconf.h +include/linux/compile.h +include/linux/version.h + diff --git a/Documentation/Changes b/Documentation/Changes index 5eaab0441d76..27232be26e1a 100644 --- a/Documentation/Changes +++ b/Documentation/Changes @@ -237,6 +237,12 @@ udev udev is a userspace application for populating /dev dynamically with only entries for devices actually present. udev replaces devfs. +FUSE +---- + +Needs libfuse 2.4.0 or later. Absolute minimum is 2.3.0 but mount +options 'direct_io' and 'kernel_cache' won't work. + Networking ========== @@ -390,6 +396,10 @@ udev ---- o +FUSE +---- +o + Networking ********** diff --git a/Documentation/DocBook/libata.tmpl b/Documentation/DocBook/libata.tmpl index 375ae760dc1e..d260d92089ad 100644 --- a/Documentation/DocBook/libata.tmpl +++ b/Documentation/DocBook/libata.tmpl @@ -415,6 +415,362 @@ and other resources, etc. + + Error handling + + + This chapter describes how errors are handled under libata. + Readers are advised to read SCSI EH + (Documentation/scsi/scsi_eh.txt) and ATA exceptions doc first. + + + Origins of commands + + In libata, a command is represented with struct ata_queued_cmd + or qc. qc's are preallocated during port initialization and + repetitively used for command executions. Currently only one + qc is allocated per port but yet-to-be-merged NCQ branch + allocates one for each tag and maps each qc to NCQ tag 1-to-1. + + + libata commands can originate from two sources - libata itself + and SCSI midlayer. libata internal commands are used for + initialization and error handling. All normal blk requests + and commands for SCSI emulation are passed as SCSI commands + through queuecommand callback of SCSI host template. + + + + How commands are issued + + + + Internal commands + + + First, qc is allocated and initialized using + ata_qc_new_init(). Although ata_qc_new_init() doesn't + implement any wait or retry mechanism when qc is not + available, internal commands are currently issued only during + initialization and error recovery, so no other command is + active and allocation is guaranteed to succeed. + + + Once allocated qc's taskfile is initialized for the command to + be executed. qc currently has two mechanisms to notify + completion. One is via qc->complete_fn() callback and the + other is completion qc->waiting. qc->complete_fn() callback + is the asynchronous path used by normal SCSI translated + commands and qc->waiting is the synchronous (issuer sleeps in + process context) path used by internal commands. + + + Once initialization is complete, host_set lock is acquired + and the qc is issued. + + + + + SCSI commands + + + All libata drivers use ata_scsi_queuecmd() as + hostt->queuecommand callback. scmds can either be simulated + or translated. No qc is involved in processing a simulated + scmd. The result is computed right away and the scmd is + completed. + + + For a translated scmd, ata_qc_new_init() is invoked to + allocate a qc and the scmd is translated into the qc. 
SCSI + midlayer's completion notification function pointer is stored + into qc->scsidone. + + + qc->complete_fn() callback is used for completion + notification. ATA commands use ata_scsi_qc_complete() while + ATAPI commands use atapi_qc_complete(). Both functions end up + calling qc->scsidone to notify upper layer when the qc is + finished. After translation is completed, the qc is issued + with ata_qc_issue(). + + + Note that SCSI midlayer invokes hostt->queuecommand while + holding host_set lock, so all above occur while holding + host_set lock. + + + + + + + + How commands are processed + + Depending on which protocol and which controller are used, + commands are processed differently. For the purpose of + discussion, a controller which uses taskfile interface and all + standard callbacks is assumed. + + + Currently 6 ATA command protocols are used. They can be + sorted into the following four categories according to how + they are processed. + + + + ATA NO DATA or DMA + + + ATA_PROT_NODATA and ATA_PROT_DMA fall into this category. + These types of commands don't require any software + intervention once issued. Device will raise interrupt on + completion. + + + + + ATA PIO + + + ATA_PROT_PIO is in this category. libata currently + implements PIO with polling. ATA_NIEN bit is set to turn + off interrupt and pio_task on ata_wq performs polling and + IO. + + + + + ATAPI NODATA or DMA + + + ATA_PROT_ATAPI_NODATA and ATA_PROT_ATAPI_DMA are in this + category. packet_task is used to poll BSY bit after + issuing PACKET command. Once BSY is turned off by the + device, packet_task transfers CDB and hands off processing + to interrupt handler. + + + + + ATAPI PIO + + + ATA_PROT_ATAPI is in this category. ATA_NIEN bit is set + and, as in ATAPI NODATA or DMA, packet_task submits cdb. + However, after submitting cdb, further processing (data + transfer) is handed off to pio_task. + + + + + + + How commands are completed + + Once issued, all qc's are either completed with + ata_qc_complete() or time out. For commands which are handled + by interrupts, ata_host_intr() invokes ata_qc_complete(), and, + for PIO tasks, pio_task invokes ata_qc_complete(). In error + cases, packet_task may also complete commands. + + + ata_qc_complete() does the following. + + + + + + + DMA memory is unmapped. + + + + + + ATA_QCFLAG_ACTIVE is clared from qc->flags. + + + + + + qc->complete_fn() callback is invoked. If the return value of + the callback is not zero. Completion is short circuited and + ata_qc_complete() returns. + + + + + + __ata_qc_complete() is called, which does + + + + + qc->flags is cleared to zero. + + + + + + ap->active_tag and qc->tag are poisoned. + + + + + + qc->waiting is claread & completed (in that order). + + + + + + qc is deallocated by clearing appropriate bit in ap->qactive. + + + + + + + + + + + So, it basically notifies upper layer and deallocates qc. One + exception is short-circuit path in #3 which is used by + atapi_qc_complete(). + + + For all non-ATAPI commands, whether it fails or not, almost + the same code path is taken and very little error handling + takes place. A qc is completed with success status if it + succeeded, with failed status otherwise. + + + However, failed ATAPI commands require more handling as + REQUEST SENSE is needed to acquire sense data. If an ATAPI + command fails, ata_qc_complete() is invoked with error status, + which in turn invokes atapi_qc_complete() via + qc->complete_fn() callback. 
+ + + This makes atapi_qc_complete() set scmd->result to + SAM_STAT_CHECK_CONDITION, complete the scmd and return 1. As + the sense data is empty but scmd->result is CHECK CONDITION, + SCSI midlayer will invoke EH for the scmd, and returning 1 + makes ata_qc_complete() to return without deallocating the qc. + This leads us to ata_scsi_error() with partially completed qc. + + + + + ata_scsi_error() + + ata_scsi_error() is the current hostt->eh_strategy_handler() + for libata. As discussed above, this will be entered in two + cases - timeout and ATAPI error completion. This function + calls low level libata driver's eng_timeout() callback, the + standard callback for which is ata_eng_timeout(). It checks + if a qc is active and calls ata_qc_timeout() on the qc if so. + Actual error handling occurs in ata_qc_timeout(). + + + If EH is invoked for timeout, ata_qc_timeout() stops BMDMA and + completes the qc. Note that as we're currently in EH, we + cannot call scsi_done. As described in SCSI EH doc, a + recovered scmd should be either retried with + scsi_queue_insert() or finished with scsi_finish_command(). + Here, we override qc->scsidone with scsi_finish_command() and + calls ata_qc_complete(). + + + If EH is invoked due to a failed ATAPI qc, the qc here is + completed but not deallocated. The purpose of this + half-completion is to use the qc as place holder to make EH + code reach this place. This is a bit hackish, but it works. + + + Once control reaches here, the qc is deallocated by invoking + __ata_qc_complete() explicitly. Then, internal qc for REQUEST + SENSE is issued. Once sense data is acquired, scmd is + finished by directly invoking scsi_finish_command() on the + scmd. Note that as we already have completed and deallocated + the qc which was associated with the scmd, we don't need + to/cannot call ata_qc_complete() again. + + + + + Problems with the current EH + + + + + + Error representation is too crude. Currently any and all + error conditions are represented with ATA STATUS and ERROR + registers. Errors which aren't ATA device errors are treated + as ATA device errors by setting ATA_ERR bit. Better error + descriptor which can properly represent ATA and other + errors/exceptions is needed. + + + + + + When handling timeouts, no action is taken to make device + forget about the timed out command and ready for new commands. + + + + + + EH handling via ata_scsi_error() is not properly protected + from usual command processing. On EH entrance, the device is + not in quiescent state. Timed out commands may succeed or + fail any time. pio_task and atapi_task may still be running. + + + + + + Too weak error recovery. Devices / controllers causing HSM + mismatch errors and other errors quite often require reset to + return to known state. Also, advanced error handling is + necessary to support features like NCQ and hotplug. + + + + + + ATA errors are directly handled in the interrupt handler and + PIO errors in pio_task. This is problematic for advanced + error handling for the following reasons. + + + First, advanced error handling often requires context and + internal qc execution. + + + Second, even a simple failure (say, CRC error) needs + information gathering and could trigger complex error handling + (say, resetting & reconfiguring). Having multiple code + paths to gather information, enter EH and trigger actions + makes life painful. + + + Third, scattered EH code makes implementing low level drivers + difficult. Low level drivers override libata callbacks. 
If + EH is scattered over several places, each affected callbacks + should perform its part of error handling. This can be error + prone and painful. + + + + + + + libata Library !Edrivers/scsi/libata-core.c @@ -431,6 +787,722 @@ and other resources, etc. !Idrivers/scsi/libata-scsi.c + + ATA errors & exceptions + + + This chapter tries to identify what error/exception conditions exist + for ATA/ATAPI devices and describe how they should be handled in + implementation-neutral way. + + + + The term 'error' is used to describe conditions where either an + explicit error condition is reported from device or a command has + timed out. + + + + The term 'exception' is either used to describe exceptional + conditions which are not errors (say, power or hotplug events), or + to describe both errors and non-error exceptional conditions. Where + explicit distinction between error and exception is necessary, the + term 'non-error exception' is used. + + + + Exception categories + + Exceptions are described primarily with respect to legacy + taskfile + bus master IDE interface. If a controller provides + other better mechanism for error reporting, mapping those into + categories described below shouldn't be difficult. + + + + In the following sections, two recovery actions - reset and + reconfiguring transport - are mentioned. These are described + further in . + + + + HSM violation + + This error is indicated when STATUS value doesn't match HSM + requirement during issuing or excution any ATA/ATAPI command. + + + + Examples + + + + ATA_STATUS doesn't contain !BSY && DRDY && !DRQ while trying + to issue a command. + + + + + + !BSY && !DRQ during PIO data transfer. + + + + + + DRQ on command completion. + + + + + + !BSY && ERR after CDB tranfer starts but before the + last byte of CDB is transferred. ATA/ATAPI standard states + that "The device shall not terminate the PACKET command + with an error before the last byte of the command packet has + been written" in the error outputs description of PACKET + command and the state diagram doesn't include such + transitions. + + + + + + + In these cases, HSM is violated and not much information + regarding the error can be acquired from STATUS or ERROR + register. IOW, this error can be anything - driver bug, + faulty device, controller and/or cable. + + + + As HSM is violated, reset is necessary to restore known state. + Reconfiguring transport for lower speed might be helpful too + as transmission errors sometimes cause this kind of errors. + + + + + ATA/ATAPI device error (non-NCQ / non-CHECK CONDITION) + + + These are errors detected and reported by ATA/ATAPI devices + indicating device problems. For this type of errors, STATUS + and ERROR register values are valid and describe error + condition. Note that some of ATA bus errors are detected by + ATA/ATAPI devices and reported using the same mechanism as + device errors. Those cases are described later in this + section. + + + + For ATA commands, this type of errors are indicated by !BSY + && ERR during command execution and on completion. + + + For ATAPI commands, + + + + + + !BSY && ERR && ABRT right after issuing PACKET + indicates that PACKET command is not supported and falls in + this category. + + + + + + !BSY && ERR(==CHK) && !ABRT after the last + byte of CDB is transferred indicates CHECK CONDITION and + doesn't fall in this category. + + + + + + !BSY && ERR(==CHK) && ABRT after the last byte + of CDB is transferred *probably* indicates CHECK CONDITION and + doesn't fall in this category. 
+ + + + + + + Of errors detected as above, the followings are not ATA/ATAPI + device errors but ATA bus errors and should be handled + according to . + + + + + + CRC error during data transfer + + + This is indicated by ICRC bit in the ERROR register and + means that corruption occurred during data transfer. Upto + ATA/ATAPI-7, the standard specifies that this bit is only + applicable to UDMA transfers but ATA/ATAPI-8 draft revision + 1f says that the bit may be applicable to multiword DMA and + PIO. + + + + + + ABRT error during data transfer or on completion + + + Upto ATA/ATAPI-7, the standard specifies that ABRT could be + set on ICRC errors and on cases where a device is not able + to complete a command. Combined with the fact that MWDMA + and PIO transfer errors aren't allowed to use ICRC bit upto + ATA/ATAPI-7, it seems to imply that ABRT bit alone could + indicate tranfer errors. + + + However, ATA/ATAPI-8 draft revision 1f removes the part + that ICRC errors can turn on ABRT. So, this is kind of + gray area. Some heuristics are needed here. + + + + + + + + ATA/ATAPI device errors can be further categorized as follows. + + + + + + Media errors + + + This is indicated by UNC bit in the ERROR register. ATA + devices reports UNC error only after certain number of + retries cannot recover the data, so there's nothing much + else to do other than notifying upper layer. + + + READ and WRITE commands report CHS or LBA of the first + failed sector but ATA/ATAPI standard specifies that the + amount of transferred data on error completion is + indeterminate, so we cannot assume that sectors preceding + the failed sector have been transferred and thus cannot + complete those sectors successfully as SCSI does. + + + + + + Media changed / media change requested error + + + <<TODO: fill here>> + + + + + Address error + + + This is indicated by IDNF bit in the ERROR register. + Report to upper layer. + + + + + Other errors + + + This can be invalid command or parameter indicated by ABRT + ERROR bit or some other error condition. Note that ABRT + bit can indicate a lot of things including ICRC and Address + errors. Heuristics needed. + + + + + + + + Depending on commands, not all STATUS/ERROR bits are + applicable. These non-applicable bits are marked with + "na" in the output descriptions but upto ATA/ATAPI-7 + no definition of "na" can be found. However, + ATA/ATAPI-8 draft revision 1f describes "N/A" as + follows. + + +
+ + 3.2.3.3a N/A + + + A keyword that indicates a field has no defined value in + this standard and should not be checked by the host or + device. N/A fields should be cleared to zero. + + +
+ + + So, it seems reasonable to assume that "na" bits are + cleared to zero by devices and thus need no explicit masking. + + +
+ + + ATAPI device CHECK CONDITION + + + ATAPI device CHECK CONDITION error is indicated by set CHK bit + (ERR bit) in the STATUS register after the last byte of CDB is + transferred for a PACKET command. For this kind of errors, + sense data should be acquired to gather information regarding + the errors. REQUEST SENSE packet command should be used to + acquire sense data. + + + + Once sense data is acquired, this type of errors can be + handled similary to other SCSI errors. Note that sense data + may indicate ATA bus error (e.g. Sense Key 04h HARDWARE ERROR + && ASC/ASCQ 47h/00h SCSI PARITY ERROR). In such + cases, the error should be considered as an ATA bus error and + handled according to . + + + + + + ATA device error (NCQ) + + + NCQ command error is indicated by cleared BSY and set ERR bit + during NCQ command phase (one or more NCQ commands + outstanding). Although STATUS and ERROR registers will + contain valid values describing the error, READ LOG EXT is + required to clear the error condition, determine which command + has failed and acquire more information. + + + + READ LOG EXT Log Page 10h reports which tag has failed and + taskfile register values describing the error. With this + information the failed command can be handled as a normal ATA + command error as in and all + other in-flight commands must be retried. Note that this + retry should not be counted - it's likely that commands + retried this way would have completed normally if it were not + for the failed command. + + + + Note that ATA bus errors can be reported as ATA device NCQ + errors. This should be handled as described in . + + + + If READ LOG EXT Log Page 10h fails or reports NQ, we're + thoroughly screwed. This condition should be treated + according to . + + + + + + ATA bus error + + + ATA bus error means that data corruption occurred during + transmission over ATA bus (SATA or PATA). This type of errors + can be indicated by + + + + + + + ICRC or ABRT error as described in . + + + + + + Controller-specific error completion with error information + indicating transmission error. + + + + + + On some controllers, command timeout. In this case, there may + be a mechanism to determine that the timeout is due to + transmission error. + + + + + + Unknown/random errors, timeouts and all sorts of weirdities. + + + + + + + As described above, transmission errors can cause wide variety + of symptoms ranging from device ICRC error to random device + lockup, and, for many cases, there is no way to tell if an + error condition is due to transmission error or not; + therefore, it's necessary to employ some kind of heuristic + when dealing with errors and timeouts. For example, + encountering repetitive ABRT errors for known supported + command is likely to indicate ATA bus error. + + + + Once it's determined that ATA bus errors have possibly + occurred, lowering ATA bus transmission speed is one of + actions which may alleviate the problem. See for more information. + + + + + + PCI bus error + + + Data corruption or other failures during transmission over PCI + (or other system bus). For standard BMDMA, this is indicated + by Error bit in the BMDMA Status register. This type of + errors must be logged as it indicates something is very wrong + with the system. Resetting host controller is recommended. + + + + + + Late completion + + + This occurs when timeout occurs and the timeout handler finds + out that the timed out command has completed successfully or + with error. This is usually caused by lost interrupts. 
This + type of error must be logged. Resetting the host controller is + recommended. + + + + + + Unknown error (timeout) + + + This is when a timeout occurs and the command is still + being processed, or the host and device are in an unknown state. + When this occurs, the HSM could be in any valid or invalid state. + To bring the device to a known state and make it forget about the + timed out command, resetting is necessary. The timed out + command may be retried. + + + + Timeouts can also be caused by transmission errors. Refer to + for more details. + + + + + + Hotplug and power management exceptions + + + <<TODO: fill here>> + + + + +
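Taken together, the exception categories described above can be summarized in a single enumeration. The following C fragment is purely illustrative -- none of these names exist in libata; it is only a compact restatement of the categories in this section.

enum ata_exception_category {			/* hypothetical, illustration only */
	ATA_EXC_HSM_VIOLATION,			/* STATUS inconsistent with the HSM        */
	ATA_EXC_DEV_ERROR,			/* device error, non-NCQ / non-CHECK COND. */
	ATA_EXC_ATAPI_CHECK_CONDITION,		/* CHK set, REQUEST SENSE needed           */
	ATA_EXC_DEV_NCQ_ERROR,			/* needs READ LOG EXT log page 10h         */
	ATA_EXC_ATA_BUS_ERROR,			/* ICRC/ABRT or other transmission problem */
	ATA_EXC_PCI_BUS_ERROR,			/* Error bit in the BMDMA Status register  */
	ATA_EXC_LATE_COMPLETION,		/* command already done when timeout fires */
	ATA_EXC_UNKNOWN_TIMEOUT,		/* command still in flight, state unknown  */
	ATA_EXC_HOTPLUG_PM,			/* hotplug / power management event        */
};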
+ + + EH recovery actions + + + This section discusses several important recovery actions. + + + + Clearing error condition + + + Many controllers require its error registers to be cleared by + error handler. Different controllers may have different + requirements. + + + + For SATA, it's strongly recommended to clear at least SError + register during error handling. + + + + + Reset + + + During EH, resetting is necessary in the following cases. + + + + + + + HSM is in unknown or invalid state + + + + + + HBA is in unknown or invalid state + + + + + + EH needs to make HBA/device forget about in-flight commands + + + + + + HBA/device behaves weirdly + + + + + + + Resetting during EH might be a good idea regardless of error + condition to improve EH robustness. Whether to reset both or + either one of HBA and device depends on situation but the + following scheme is recommended. + + + + + + + When it's known that HBA is in ready state but ATA/ATAPI + device in in unknown state, reset only device. + + + + + + If HBA is in unknown state, reset both HBA and device. + + + + + + + HBA resetting is implementation specific. For a controller + complying to taskfile/BMDMA PCI IDE, stopping active DMA + transaction may be sufficient iff BMDMA state is the only HBA + context. But even mostly taskfile/BMDMA PCI IDE complying + controllers may have implementation specific requirements and + mechanism to reset themselves. This must be addressed by + specific drivers. + + + + OTOH, ATA/ATAPI standard describes in detail ways to reset + ATA/ATAPI devices. + + + + + PATA hardware reset + + + This is hardware initiated device reset signalled with + asserted PATA RESET- signal. There is no standard way to + initiate hardware reset from software although some + hardware provides registers that allow driver to directly + tweak the RESET- signal. + + + + + Software reset + + + This is achieved by turning CONTROL SRST bit on for at + least 5us. Both PATA and SATA support it but, in case of + SATA, this may require controller-specific support as the + second Register FIS to clear SRST should be transmitted + while BSY bit is still set. Note that on PATA, this resets + both master and slave devices on a channel. + + + + + EXECUTE DEVICE DIAGNOSTIC command + + + Although ATA/ATAPI standard doesn't describe exactly, EDD + implies some level of resetting, possibly similar level + with software reset. Host-side EDD protocol can be handled + with normal command processing and most SATA controllers + should be able to handle EDD's just like other commands. + As in software reset, EDD affects both devices on a PATA + bus. + + + Although EDD does reset devices, this doesn't suit error + handling as EDD cannot be issued while BSY is set and it's + unclear how it will act when device is in unknown/weird + state. + + + + + ATAPI DEVICE RESET command + + + This is very similar to software reset except that reset + can be restricted to the selected device without affecting + the other device sharing the cable. + + + + + SATA phy reset + + + This is the preferred way of resetting a SATA device. In + effect, it's identical to PATA hardware reset. Note that + this can be done with the standard SCR Control register. + As such, it's usually easier to implement than software + reset. + + + + + + + + One more thing to consider when resetting devices is that + resetting clears certain configuration parameters and they + need to be set to their previous or newly adjusted values + after reset. + + + + Parameters affected are. 
+ + + + + + CHS set up with INITIALIZE DEVICE PARAMETERS (seldom used) + + + + + + Parameters set with SET FEATURES, including transfer mode settings + + + + + + Block count set with SET MULTIPLE MODE + + + + + + Other parameters (SET MAX, MEDIA LOCK...) + + + + + + + The ATA/ATAPI standard specifies that some parameters must be + maintained across hardware or software reset, but doesn't + strictly specify all of them. Always reconfiguring the needed + parameters after reset is required for robustness. Note that + this also applies when resuming from deep sleep (power-off). + + + + Also, the ATA/ATAPI standard requires that IDENTIFY DEVICE / + IDENTIFY PACKET DEVICE be issued after any configuration + parameter is updated or after a hardware reset, and that the result + be used for further operation. The OS driver is required to implement + a revalidation mechanism to support this. + + + + + + Reconfigure transport + + + For both PATA and SATA, a lot of corners are cut for cheap + connectors, cables or controllers, and it's quite common to see + high transmission error rates. This can be mitigated by + lowering the transmission speed. + + + + The following is a possible scheme suggested by Jeff Garzik. + +
+ + If more than $N (3?) transmission errors happen in 15 minutes, + + + + + if SATA, decrease SATA PHY speed. if speed cannot be decreased, + + + + + decrease UDMA xfer speed. if at UDMA0, switch to PIO4, + + + + + decrease PIO xfer speed. if at PIO3, complain, but continue + + + +
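To make the stepping order concrete, the following is a minimal C sketch of the scheme above. Everything in it (struct hypo_port, hypo_handle_xfer_error(), the field names) is hypothetical and not part of libata, and the 15-minute window is omitted for brevity.

#include <stdbool.h>
#include <stdio.h>

#define XFER_ERR_LIMIT	3	/* the "$N" in the scheme above */

struct hypo_port {			/* hypothetical port state */
	bool	is_sata;
	int	sata_spd;		/* SATA PHY speed step, 0 = lowest */
	bool	using_udma;
	int	udma_mode;		/* current UDMA mode (0..6) */
	int	pio_mode;		/* current PIO mode (0..4) */
	int	xfer_errors;		/* transmission errors seen so far */
};

static void hypo_handle_xfer_error(struct hypo_port *ap)
{
	if (++ap->xfer_errors < XFER_ERR_LIMIT)
		return;				/* not persistent enough yet */
	ap->xfer_errors = 0;

	if (ap->is_sata && ap->sata_spd > 0)
		ap->sata_spd--;			/* 1. lower SATA PHY speed */
	else if (ap->using_udma && ap->udma_mode > 0)
		ap->udma_mode--;		/* 2. lower UDMA transfer mode */
	else if (ap->using_udma) {
		ap->using_udma = false;		/* 3. at UDMA0: switch to PIO4 */
		ap->pio_mode = 4;
	} else if (ap->pio_mode > 3)
		ap->pio_mode--;			/* 4. lower PIO, stop at PIO3 */
	else
		fprintf(stderr, "persistent transmission errors at PIO3\n");
}

int main(void)
{
	struct hypo_port ap = { .is_sata = true, .sata_spd = 2,
				.using_udma = true, .udma_mode = 5,
				.pio_mode = 4 };
	int i;

	/* simulate a burst of transmission errors and watch the speed drop */
	for (i = 0; i < 12; i++)
		hypo_handle_xfer_error(&ap);

	printf("sata_spd=%d udma_mode=%d pio_mode=%d\n",
	       ap.sata_spd, ap.using_udma ? ap.udma_mode : -1, ap.pio_mode);
	return 0;
}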
+ +
+ +
+ +
+ ata_piix Internals !Idrivers/scsi/ata_piix.c diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches index 7f43b040311e..237d54c44bc5 100644 --- a/Documentation/SubmittingPatches +++ b/Documentation/SubmittingPatches @@ -301,8 +301,84 @@ now, but you can do this to mark internal company procedures or just point out some special detail about the sign-off. +12) The canonical patch format -12) More references for submitting patches +The canonical patch subject line is: + + Subject: [PATCH 001/123] subsystem: summary phrase + +The canonical patch message body contains the following: + + - A "from" line specifying the patch author. + + - An empty line. + + - The body of the explanation, which will be copied to the + permanent changelog to describe this patch. + + - The "Signed-off-by:" lines, described above, which will + also go in the changelog. + + - A marker line containing simply "---". + + - Any additional comments not suitable for the changelog. + + - The actual patch (diff output). + +The Subject line format makes it very easy to sort the emails +alphabetically by subject line - pretty much any email reader will +support that - since because the sequence number is zero-padded, +the numerical and alphabetic sort is the same. + +The "subsystem" in the email's Subject should identify which +area or subsystem of the kernel is being patched. + +The "summary phrase" in the email's Subject should concisely +describe the patch which that email contains. The "summary +phrase" should not be a filename. Do not use the same "summary +phrase" for every patch in a whole patch series. + +Bear in mind that the "summary phrase" of your email becomes +a globally-unique identifier for that patch. It propagates +all the way into the git changelog. The "summary phrase" may +later be used in developer discussions which refer to the patch. +People will want to google for the "summary phrase" to read +discussion regarding that patch. + +A couple of example Subjects: + + Subject: [patch 2/5] ext2: improve scalability of bitmap searching + Subject: [PATCHv2 001/207] x86: fix eflags tracking + +The "from" line must be the very first line in the message body, +and has the form: + + From: Original Author + +The "from" line specifies who will be credited as the author of the +patch in the permanent changelog. If the "from" line is missing, +then the "From:" line from the email header will be used to determine +the patch author in the changelog. + +The explanation body will be committed to the permanent source +changelog, so should make sense to a competent reader who has long +since forgotten the immediate details of the discussion that might +have led to this patch. + +The "---" marker line serves the essential purpose of marking for patch +handling tools where the changelog message ends. + +One good use for the additional comments after the "---" marker is for +a diffstat, to show what files have changed, and the number of inserted +and deleted lines per file. A diffstat is especially useful on bigger +patches. Other comments relevant only to the moment or the maintainer, +not suitable for the permanent changelog, should also go here. + +See more details on the proper patch format in the following +references. + + +13) More references for submitting patches Andrew Morton, "The perfect patch" (tpp). @@ -310,6 +386,14 @@ Andrew Morton, "The perfect patch" (tpp). Jeff Garzik, "Linux kernel patch submission format." 
+Greg KH, "How to piss off a kernel subsystem maintainer" + + +Kernel Documentation/CodingStyle + + +Linus Torvald's mail on the canonical patch format: + ----------------------------------- diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt index 6dd274d7e1cf..2d65c2182161 100644 --- a/Documentation/block/biodoc.txt +++ b/Documentation/block/biodoc.txt @@ -906,9 +906,20 @@ Aside: 4. The I/O scheduler -I/O schedulers are now per queue. They should be runtime switchable and modular -but aren't yet. Jens has most bits to do this, but the sysfs implementation is -missing. +I/O scheduler, a.k.a. elevator, is implemented in two layers. Generic dispatch +queue and specific I/O schedulers. Unless stated otherwise, elevator is used +to refer to both parts and I/O scheduler to specific I/O schedulers. + +Block layer implements generic dispatch queue in ll_rw_blk.c and elevator.c. +The generic dispatch queue is responsible for properly ordering barrier +requests, requeueing, handling non-fs requests and all other subtleties. + +Specific I/O schedulers are responsible for ordering normal filesystem +requests. They can also choose to delay certain requests to improve +throughput or whatever purpose. As the plural form indicates, there are +multiple I/O schedulers. They can be built as modules but at least one should +be built inside the kernel. Each queue can choose different one and can also +change to another one dynamically. A block layer call to the i/o scheduler follows the convention elv_xxx(). This calls elevator_xxx_fn in the elevator switch (drivers/block/elevator.c). Oh, @@ -921,44 +932,36 @@ keeping work. The functions an elevator may implement are: (* are mandatory) elevator_merge_fn called to query requests for merge with a bio -elevator_merge_req_fn " " " with another request +elevator_merge_req_fn called when two requests get merged. the one + which gets merged into the other one will be + never seen by I/O scheduler again. IOW, after + being merged, the request is gone. elevator_merged_fn called when a request in the scheduler has been involved in a merge. It is used in the deadline scheduler for example, to reposition the request if its sorting order has changed. -*elevator_next_req_fn returns the next scheduled request, or NULL - if there are none (or none are ready). +elevator_dispatch_fn fills the dispatch queue with ready requests. + I/O schedulers are free to postpone requests by + not filling the dispatch queue unless @force + is non-zero. Once dispatched, I/O schedulers + are not allowed to manipulate the requests - + they belong to generic dispatch queue. -*elevator_add_req_fn called to add a new request into the scheduler +elevator_add_req_fn called to add a new request into the scheduler elevator_queue_empty_fn returns true if the merge queue is empty. Drivers shouldn't use this, but rather check if elv_next_request is NULL (without losing the request if one exists!) -elevator_remove_req_fn This is called when a driver claims ownership of - the target request - it now belongs to the - driver. It must not be modified or merged. - Drivers must not lose the request! A subsequent - call of elevator_next_req_fn must return the - _next_ request. - -elevator_requeue_req_fn called to add a request to the scheduler. This - is used when the request has alrnadebeen - returned by elv_next_request, but hasn't - completed. If this is not implemented then - elevator_add_req_fn is called instead. 
- elevator_former_req_fn elevator_latter_req_fn These return the request before or after the one specified in disk sort order. Used by the block layer to find merge possibilities. -elevator_completed_req_fn called when a request is completed. This might - come about due to being merged with another or - when the device completes the request. +elevator_completed_req_fn called when a request is completed. elevator_may_queue_fn returns true if the scheduler wants to allow the current context to queue a new request even if @@ -967,13 +970,33 @@ elevator_may_queue_fn returns true if the scheduler wants to allow the elevator_set_req_fn elevator_put_req_fn Must be used to allocate and free any elevator - specific storate for a request. + specific storage for a request. + +elevator_activate_req_fn Called when device driver first sees a request. + I/O schedulers can use this callback to + determine when actual execution of a request + starts. +elevator_deactivate_req_fn Called when device driver decides to delay + a request by requeueing it. elevator_init_fn elevator_exit_fn Allocate and free any elevator specific storage for a queue. -4.2 I/O scheduler implementation +4.2 Request flows seen by I/O schedulers +All requests seens by I/O schedulers strictly follow one of the following three +flows. + + set_req_fn -> + + i. add_req_fn -> (merged_fn ->)* -> dispatch_fn -> activate_req_fn -> + (deactivate_req_fn -> activate_req_fn ->)* -> completed_req_fn + ii. add_req_fn -> (merged_fn ->)* -> merge_req_fn + iii. [none] + + -> put_req_fn + +4.3 I/O scheduler implementation The generic i/o scheduler algorithm attempts to sort/merge/batch requests for optimal disk scan and request servicing performance (based on generic principles and device capabilities), optimized for: @@ -993,18 +1016,7 @@ request in sort order to prevent binary tree lookups. This arrangement is not a generic block layer characteristic however, so elevators may implement queues as they please. -ii. Last merge hint -The last merge hint is part of the generic queue layer. I/O schedulers must do -some management on it. For the most part, the most important thing is to make -sure q->last_merge is cleared (set to NULL) when the request on it is no longer -a candidate for merging (for example if it has been sent to the driver). - -The last merge performed is cached as a hint for the subsequent request. If -sequential data is being submitted, the hint is used to perform merges without -any scanning. This is not sufficient when there are multiple processes doing -I/O though, so a "merge hash" is used by some schedulers. - -iii. Merge hash +ii. Merge hash AS and deadline use a hash table indexed by the last sector of a request. This enables merging code to quickly look up "back merge" candidates, even when multiple I/O streams are being performed at once on one disk. @@ -1013,29 +1025,8 @@ multiple I/O streams are being performed at once on one disk. are far less common than "back merges" due to the nature of most I/O patterns. Front merges are handled by the binary trees in AS and deadline schedulers. -iv. Handling barrier cases -A request with flags REQ_HARDBARRIER or REQ_SOFTBARRIER must not be ordered -around. That is, they must be processed after all older requests, and before -any newer ones. This includes merges! - -In AS and deadline schedulers, barriers have the effect of flushing the reorder -queue. The performance cost of this will vary from nothing to a lot depending -on i/o patterns and device characteristics. 
Obviously they won't improve -performance, so their use should be kept to a minimum. - -v. Handling insertion position directives -A request may be inserted with a position directive. The directives are one of -ELEVATOR_INSERT_BACK, ELEVATOR_INSERT_FRONT, ELEVATOR_INSERT_SORT. - -ELEVATOR_INSERT_SORT is a general directive for non-barrier requests. -ELEVATOR_INSERT_BACK is used to insert a barrier to the back of the queue. -ELEVATOR_INSERT_FRONT is used to insert a barrier to the front of the queue, and -overrides the ordering requested by any previous barriers. In practice this is -harmless and required, because it is used for SCSI requeueing. This does not -require flushing the reorder queue, so does not impose a performance penalty. - -vi. Plugging the queue to batch requests in anticipation of opportunities for - merge/sort optimizations +iii. Plugging the queue to batch requests in anticipation of opportunities for + merge/sort optimizations This is just the same as in 2.4 so far, though per-device unplugging support is anticipated for 2.5. Also with a priority-based i/o scheduler, @@ -1069,7 +1060,7 @@ Aside: blk_kick_queue() to unplug a specific queue (right away ?) or optionally, all queues, is in the plan. -4.3 I/O contexts +4.4 I/O contexts I/O contexts provide a dynamically allocated per process data area. They may be used in I/O schedulers, and in the block layer (could be used for IO statis, priorities for example). See *io_context in drivers/block/ll_rw_blk.c, and diff --git a/Documentation/connector/connector.txt b/Documentation/connector/connector.txt index 54a0a14bfbe3..57a314b14cf8 100644 --- a/Documentation/connector/connector.txt +++ b/Documentation/connector/connector.txt @@ -131,3 +131,47 @@ Netlink itself is not reliable protocol, that means that messages can be lost due to memory pressure or process' receiving queue overflowed, so caller is warned must be prepared. That is why struct cn_msg [main connector's message header] contains u32 seq and u32 ack fields. + +/*****************************************/ +Userspace usage. +/*****************************************/ +2.6.14 has a new netlink socket implementation, which by default does not +allow to send data to netlink groups other than 1. +So, if to use netlink socket (for example using connector) +with different group number userspace application must subscribe to +that group. It can be achieved by following pseudocode: + +s = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR); + +l_local.nl_family = AF_NETLINK; +l_local.nl_groups = 12345; +l_local.nl_pid = 0; + +if (bind(s, (struct sockaddr *)&l_local, sizeof(struct sockaddr_nl)) == -1) { + perror("bind"); + close(s); + return -1; +} + +{ + int on = l_local.nl_groups; + setsockopt(s, 270, 1, &on, sizeof(on)); +} + +Where 270 above is SOL_NETLINK, and 1 is a NETLINK_ADD_MEMBERSHIP socket +option. To drop multicast subscription one should call above socket option +with NETLINK_DROP_MEMBERSHIP parameter which is defined as 0. + +2.6.14 netlink code only allows to select a group which is less or equal to +the maximum group number, which is used at netlink_kernel_create() time. +In case of connector it is CN_NETLINK_USERS + 0xf, so if you want to use +group number 12345, you must increment CN_NETLINK_USERS to that number. +Additional 0xf numbers are allocated to be used by non-in-kernel users. 
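For reference, the pseudocode above can be fleshed out into a complete, self-contained program. This is only a sketch with minimal error handling; the fallback defines use the values quoted in this text (SOL_NETLINK == 270, NETLINK_ADD_MEMBERSHIP == 1), and NETLINK_CONNECTOR comes from <linux/netlink.h>.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270			/* value cited in the text above */
#endif
#ifndef NETLINK_ADD_MEMBERSHIP
#define NETLINK_ADD_MEMBERSHIP 1	/* value cited in the text above */
#endif

int main(void)
{
	struct sockaddr_nl l_local;
	int s, group = 12345;		/* example group number from the text */

	s = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
	if (s == -1) {
		perror("socket");
		return 1;
	}

	memset(&l_local, 0, sizeof(l_local));
	l_local.nl_family = AF_NETLINK;
	l_local.nl_groups = group;
	l_local.nl_pid = 0;

	if (bind(s, (struct sockaddr *)&l_local, sizeof(l_local)) == -1) {
		perror("bind");
		close(s);
		return 1;
	}

	/* explicitly join the multicast group (required since 2.6.14) */
	if (setsockopt(s, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		       &group, sizeof(group)) == -1) {
		perror("setsockopt");
		close(s);
		return 1;
	}

	/* ... read connector messages with recv() here ... */

	close(s);
	return 0;
}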
+ +Due to this limitation, group 0xffffffff does not work now, so one can +not use add/remove connector's group notifications, but as far as I know, +only cn_test.c test module used it. + +Some work in netlink area is still being done, so things can be changed in +2.6.15 timeframe, if it will happen, documentation will be updated for that +kernel. diff --git a/Documentation/dell_rbu.txt b/Documentation/dell_rbu.txt index 95d7f62e4dbc..941343a7a265 100644 --- a/Documentation/dell_rbu.txt +++ b/Documentation/dell_rbu.txt @@ -35,6 +35,7 @@ The driver load creates the following directories under the /sys file system. /sys/class/firmware/dell_rbu/data /sys/devices/platform/dell_rbu/image_type /sys/devices/platform/dell_rbu/data +/sys/devices/platform/dell_rbu/packet_size The driver supports two types of update mechanism; monolithic and packetized. These update mechanism depends upon the BIOS currently running on the system. @@ -47,8 +48,26 @@ By default the driver uses monolithic memory for the update type. This can be changed to packets during the driver load time by specifying the load parameter image_type=packet. This can also be changed later as below echo packet > /sys/devices/platform/dell_rbu/image_type -Also echoing either mono ,packet or init in to image_type will free up the -memory allocated by the driver. + +In packet update mode the packet size has to be given before any packets can +be downloaded. It is done as below +echo XXXX > /sys/devices/platform/dell_rbu/packet_size +In the packet update mechanism, the user neesd to create a new file having +packets of data arranged back to back. It can be done as follows +The user creates packets header, gets the chunk of the BIOS image and +placs it next to the packetheader; now, the packetheader + BIOS image chunk +added to geather should match the specified packet_size. This makes one +packet, the user needs to create more such packets out of the entire BIOS +image file and then arrange all these packets back to back in to one single +file. +This file is then copied to /sys/class/firmware/dell_rbu/data. +Once this file gets to the driver, the driver extracts packet_size data from +the file and spreads it accross the physical memory in contiguous packet_sized +space. +This method makes sure that all the packets get to the driver in a single operation. + +In monolithic update the user simply get the BIOS image (.hdr file) and copies +to the data file as is without any change to the BIOS image itself. Do the steps below to download the BIOS image. 1) echo 1 > /sys/class/firmware/dell_rbu/loading @@ -58,7 +77,10 @@ Do the steps below to download the BIOS image. The /sys/class/firmware/dell_rbu/ entries will remain till the following is done. echo -1 > /sys/class/firmware/dell_rbu/loading. -Until this step is completed the drivr cannot be unloaded. +Until this step is completed the driver cannot be unloaded. +Also echoing either mono ,packet or init in to image_type will free up the +memory allocated by the driver. + If an user by accident executes steps 1 and 3 above without executing step 2; it will make the /sys/class/firmware/dell_rbu/ entries to disappear. The entries can be recreated by doing the following @@ -66,15 +88,11 @@ echo init > /sys/devices/platform/dell_rbu/image_type NOTE: echoing init in image_type does not change it original value. Also the driver provides /sys/devices/platform/dell_rbu/data readonly file to -read back the image downloaded. 
This is useful in case of packet update -mechanism where the above steps 1,2,3 will repeated for every packet. -By reading the /sys/devices/platform/dell_rbu/data file all packet data -downloaded can be verified in a single file. -The packets are arranged in this file one after the other in a FIFO order. +read back the image downloaded. NOTE: -This driver requires a patch for firmware_class.c which has the addition -of request_firmware_nowait_nohotplug function to wortk +This driver requires a patch for firmware_class.c which has the modified +request_firmware_nowait function. Also after updating the BIOS image an user mdoe application neeeds to execute code which message the BIOS update request to the BIOS. So on the next reboot the BIOS knows about the new image downloaded and it updates it self. diff --git a/Documentation/device-mapper/snapshot.txt b/Documentation/device-mapper/snapshot.txt new file mode 100644 index 000000000000..dca274ff4005 --- /dev/null +++ b/Documentation/device-mapper/snapshot.txt @@ -0,0 +1,73 @@ +Device-mapper snapshot support +============================== + +Device-mapper allows you, without massive data copying: + +*) To create snapshots of any block device i.e. mountable, saved states of +the block device which are also writable without interfering with the +original content; +*) To create device "forks", i.e. multiple different versions of the +same data stream. + + +In both cases, dm copies only the chunks of data that get changed and +uses a separate copy-on-write (COW) block device for storage. + + +There are two dm targets available: snapshot and snapshot-origin. + +*) snapshot-origin + +which will normally have one or more snapshots based on it. +You must create the snapshot-origin device before you can create snapshots. +Reads will be mapped directly to the backing device. For each write, the +original data will be saved in the of each snapshot to keep +its visible content unchanged, at least until the fills up. + + +*) snapshot + +A snapshot is created of the block device. Changed chunks of + sectors will be stored on the . Writes will +only go to the . Reads will come from the or +from for unchanged data. will often be +smaller than the origin and if it fills up the snapshot will become +useless and be disabled, returning errors. So it is important to monitor +the amount of free space and expand the before it fills up. + + is P (Persistent) or N (Not persistent - will not survive +after reboot). + + +How this is used by LVM2 +======================== +When you create the first LVM2 snapshot of a volume, four dm devices are used: + +1) a device containing the original mapping table of the source volume; +2) a device used as the ; +3) a "snapshot" device, combining #1 and #2, which is the visible snapshot + volume; +4) the "original" volume (which uses the device number used by the original + source volume), whose table is replaced by a "snapshot-origin" mapping + from device #1. 
+ +A fixed naming scheme is used, so with the following commands: + +lvcreate -L 1G -n base volumeGroup +lvcreate -L 100M --snapshot -n snap volumeGroup/base + +we'll have this situation (with volumes in above order): + +# dmsetup table|grep volumeGroup + +volumeGroup-base-real: 0 2097152 linear 8:19 384 +volumeGroup-snap-cow: 0 204800 linear 8:19 2097536 +volumeGroup-snap: 0 2097152 snapshot 254:11 254:12 P 16 +volumeGroup-base: 0 2097152 snapshot-origin 254:11 + +# ls -lL /dev/mapper/volumeGroup-* +brw------- 1 root root 254, 11 29 ago 18:15 /dev/mapper/volumeGroup-base-real +brw------- 1 root root 254, 12 29 ago 18:15 /dev/mapper/volumeGroup-snap-cow +brw------- 1 root root 254, 13 29 ago 18:15 /dev/mapper/volumeGroup-snap +brw------- 1 root root 254, 10 29 ago 18:14 /dev/mapper/volumeGroup-base + diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 7086f0a90d14..971589a9752d 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -17,7 +17,7 @@ are specified on the kernel command line with the module name plus usbcore.blinkenlights=1 -The text in square brackets at the beginning of the description state the +The text in square brackets at the beginning of the description states the restrictions on the kernel for the said kernel parameter to be valid. The restrictions referred to are that the relevant option is valid if: @@ -27,8 +27,8 @@ restrictions referred to are that the relevant option is valid if: APM Advanced Power Management support is enabled. AX25 Appropriate AX.25 support is enabled. CD Appropriate CD support is enabled. - DEVFS devfs support is enabled. - DRM Direct Rendering Management support is enabled. + DEVFS devfs support is enabled. + DRM Direct Rendering Management support is enabled. EDD BIOS Enhanced Disk Drive Services (EDD) is enabled EFI EFI Partitioning (GPT) is enabled EIDE EIDE/ATAPI support is enabled. @@ -71,7 +71,7 @@ restrictions referred to are that the relevant option is valid if: SERIAL Serial support is enabled. SMP The kernel is an SMP kernel. SPARC Sparc architecture is enabled. - SWSUSP Software suspension is enabled. + SWSUSP Software suspend is enabled. TS Appropriate touchscreen support is enabled. USB USB support is enabled. USBHID USB Human Interface Device support is enabled. @@ -105,13 +105,13 @@ running once the system is up. See header of drivers/scsi/53c7xx.c. See also Documentation/scsi/ncr53c7xx.txt. - acpi= [HW,ACPI] Advanced Configuration and Power Interface - Format: { force | off | ht | strict } + acpi= [HW,ACPI] Advanced Configuration and Power Interface + Format: { force | off | ht | strict | noirq } force -- enable ACPI if default was off off -- disable ACPI if default was on noirq -- do not use ACPI for IRQ routing ht -- run only enough ACPI to enable Hyper Threading - strict -- Be less tolerant of platforms that are not + strict -- Be less tolerant of platforms that are not strictly ACPI specification compliant. See also Documentation/pm.txt, pci=noacpi @@ -119,20 +119,23 @@ running once the system is up. 
acpi_sleep= [HW,ACPI] Sleep options Format: { s3_bios, s3_mode } See Documentation/power/video.txt - + acpi_sci= [HW,ACPI] ACPI System Control Interrupt trigger mode - Format: { level | edge | high | low } + Format: { level | edge | high | low } - acpi_irq_balance [HW,ACPI] ACPI will balance active IRQs - default in APIC mode + acpi_irq_balance [HW,ACPI] + ACPI will balance active IRQs + default in APIC mode - acpi_irq_nobalance [HW,ACPI] ACPI will not move active IRQs (default) - default in PIC mode + acpi_irq_nobalance [HW,ACPI] + ACPI will not move active IRQs (default) + default in PIC mode - acpi_irq_pci= [HW,ACPI] If irq_balance, Clear listed IRQs for use by PCI + acpi_irq_pci= [HW,ACPI] If irq_balance, clear listed IRQs for + use by PCI Format: ,... - acpi_irq_isa= [HW,ACPI] If irq_balance, Mark listed IRQs used by ISA + acpi_irq_isa= [HW,ACPI] If irq_balance, mark listed IRQs used by ISA Format: ,... acpi_osi= [HW,ACPI] empty param disables _OSI @@ -145,14 +148,14 @@ running once the system is up. acpi_dbg_layer= [HW,ACPI] Format: - Each bit of the indicates an acpi debug layer, + Each bit of the indicates an ACPI debug layer, 1: enable, 0: disable. It is useful for boot time debugging. After system has booted up, it can be set via /proc/acpi/debug_layer. acpi_dbg_level= [HW,ACPI] Format: - Each bit of the indicates an acpi debug level, + Each bit of the indicates an ACPI debug level, 1: enable, 0: disable. It is useful for boot time debugging. After system has booted up, it can be set via /proc/acpi/debug_level. @@ -161,12 +164,13 @@ running once the system is up. acpi_generic_hotkey [HW,ACPI] Allow consolidated generic hotkey driver to - over-ride platform specific driver. + override platform specific driver. See also Documentation/acpi-hotkey.txt. enable_timer_pin_1 [i386,x86-64] Enable PIN 1 of APIC timer - Can be useful to work around chipset bugs (in particular on some ATI chipsets) + Can be useful to work around chipset bugs + (in particular on some ATI chipsets). The kernel tries to set a reasonable default. disable_timer_pin_1 [i386,x86-64] @@ -182,7 +186,7 @@ running once the system is up. adlib= [HW,OSS] Format: - + advansys= [HW,SCSI] See header of drivers/scsi/advansys.c. @@ -192,7 +196,7 @@ running once the system is up. aedsp16= [HW,OSS] Audio Excel DSP 16 Format: ,,,,, See also header of sound/oss/aedsp16.c. - + aha152x= [HW,SCSI] See Documentation/scsi/aha152x.txt. @@ -205,10 +209,6 @@ running once the system is up. aic79xx= [HW,SCSI] See Documentation/scsi/aic79xx.txt. - AM53C974= [HW,SCSI] - Format: ,,, - See also header of drivers/scsi/AM53C974.c. - amijoy.map= [HW,JOY] Amiga joystick support Map of devices attached to JOY0DAT and JOY1DAT Format: , @@ -219,23 +219,24 @@ running once the system is up. connected to one of 16 gameports Format: ,,.. - apc= [HW,SPARC] Power management functions (SPARCstation-4/5 + deriv.) + apc= [HW,SPARC] + Power management functions (SPARCstation-4/5 + deriv.) Format: noidle Disable APC CPU standby support. SPARCstation-Fox does not play well with APC CPU idle - disable it if you have APC and your system crashes randomly. - apic= [APIC,i386] Change the output verbosity whilst booting + apic= [APIC,i386] Change the output verbosity whilst booting Format: { quiet (default) | verbose | debug } Change the amount of debugging information output when initialising the APIC and IO-APIC components. - + apm= [APM] Advanced Power Management See header of arch/i386/kernel/apm.c. 
applicom= [HW] Format: , - + arcrimi= [HW,NET] ARCnet - "RIM I" (entirely mem-mapped) cards Format: ,, @@ -250,38 +251,40 @@ running once the system is up. atkbd.reset= [HW] Reset keyboard during initialization - atkbd.set= [HW] Select keyboard code set - Format: (2 = AT (default) 3 = PS/2) + atkbd.set= [HW] Select keyboard code set + Format: (2 = AT (default), 3 = PS/2) atkbd.scroll= [HW] Enable scroll wheel on MS Office and similar keyboards atkbd.softraw= [HW] Choose between synthetic and real raw mode Format: (0 = real, 1 = synthetic (default)) - - atkbd.softrepeat= - [HW] Use software keyboard repeat + + atkbd.softrepeat= [HW] + Use software keyboard repeat autotest [IA64] awe= [HW,OSS] AWE32/SB32/AWE64 wave table synth Format: ,, - + aztcd= [HW,CD] Aztech CD268 CDROM driver Format: ,0x79 (?) baycom_epp= [HW,AX25] Format: , - + baycom_par= [HW,AX25] BayCom Parallel Port AX.25 Modem Format: , See header of drivers/net/hamradio/baycom_par.c. - baycom_ser_fdx= [HW,AX25] BayCom Serial Port AX.25 Modem (Full Duplex Mode) + baycom_ser_fdx= [HW,AX25] + BayCom Serial Port AX.25 Modem (Full Duplex Mode) Format: ,,[,] See header of drivers/net/hamradio/baycom_ser_fdx.c. - baycom_ser_hdx= [HW,AX25] BayCom Serial Port AX.25 Modem (Half Duplex Mode) + baycom_ser_hdx= [HW,AX25] + BayCom Serial Port AX.25 Modem (Half Duplex Mode) Format: ,, See header of drivers/net/hamradio/baycom_ser_hdx.c. @@ -292,7 +295,8 @@ running once the system is up. blkmtd_count= bttv.card= [HW,V4L] bttv (bt848 + bt878 based grabber cards) - bttv.radio= Most important insmod options are available as kernel args too. + bttv.radio= Most important insmod options are available as + kernel args too. bttv.pll= See Documentation/video4linux/bttv/Insmod-options bttv.tuner= and Documentation/video4linux/bttv/CARDLIST @@ -318,15 +322,17 @@ running once the system is up. checkreqprot [SELINUX] Set initial checkreqprot flag value. Format: { "0" | "1" } See security/selinux/Kconfig help text. - 0 -- check protection applied by kernel (includes any implied execute protection). + 0 -- check protection applied by kernel (includes + any implied execute protection). 1 -- check protection requested by application. Default value is set via a kernel config option. - Value can be changed at runtime via /selinux/checkreqprot. - - clock= [BUGS=IA-32, HW] gettimeofday timesource override. + Value can be changed at runtime via + /selinux/checkreqprot. + + clock= [BUGS=IA-32,HW] gettimeofday timesource override. Forces specified timesource (if avaliable) to be used - when calculating gettimeofday(). If specicified timesource - is not avalible, it defaults to PIT. + when calculating gettimeofday(). If specicified + timesource is not avalible, it defaults to PIT. Format: { pit | tsc | cyclone | pmtmr } hpet= [IA-32,HPET] option to disable HPET and use PIT. @@ -336,17 +342,19 @@ running once the system is up. Format: { auto | [,][] } com20020= [HW,NET] ARCnet - COM20020 chipset - Format: [,[,[,[,[,]]]]] + Format: + [,[,[,[,[,]]]]] com90io= [HW,NET] ARCnet - COM90xx chipset (IO-mapped buffers) Format: [,] - com90xx= [HW,NET] ARCnet - COM90xx chipset (memory-mapped buffers) + com90xx= [HW,NET] + ARCnet - COM90xx chipset (memory-mapped buffers) Format: [,[,]] condev= [HW,S390] console device conmode= - + console= [KNL] Output console device and options. tty Use the virtual console device . @@ -367,7 +375,8 @@ running once the system is up. options are the same as for ttyS, above. 
cpcihp_generic= [HW,PCI] Generic port I/O CompactPCI driver - Format: ,,,[,] + Format: + ,,,[,] cpia_pp= [HW,PPT] Format: { parport | auto | none } @@ -384,10 +393,10 @@ running once the system is up. cs89x0_media= [HW,NET] Format: { rj45 | aui | bnc } - + cyclades= [HW,SERIAL] Cyclades multi-serial port adapter. - - dasd= [HW,NET] + + dasd= [HW,NET] See header of drivers/s390/block/dasd_devmap.c. db9.dev[2|3]= [HW,JOY] Multisystem joystick support via parallel port @@ -406,7 +415,7 @@ running once the system is up. dhash_entries= [KNL] Set number of hash buckets for dentry cache. - + digi= [HW,SERIAL] IO parameters + enable/disable command. @@ -424,11 +433,11 @@ running once the system is up. dtc3181e= [HW,SCSI] - earlyprintk= [IA-32, X86-64] + earlyprintk= [IA-32,X86-64] earlyprintk=vga earlyprintk=serial[,ttySn[,baudrate]] - Append ,keep to not disable it when the real console + Append ",keep" to not disable it when the real console takes over. Only vga or serial at a time, not both. @@ -451,7 +460,7 @@ running once the system is up. Format: {"of[f]" | "sk[ipmbr]"} See comment in arch/i386/boot/edd.S - eicon= [HW,ISDN] + eicon= [HW,ISDN] Format: ,, eisa_irq_edge= [PARISC,HW] @@ -462,12 +471,13 @@ running once the system is up. arch/i386/kernel/cpu/cpufreq/elanfreq.c. elevator= [IOSCHED] - Format: {"as"|"cfq"|"deadline"|"noop"} - See Documentation/block/as-iosched.txt - and Documentation/block/deadline-iosched.txt for details. + Format: {"as" | "cfq" | "deadline" | "noop"} + See Documentation/block/as-iosched.txt and + Documentation/block/deadline-iosched.txt for details. + elfcorehdr= [IA-32] - Specifies physical address of start of kernel core image - elf header. + Specifies physical address of start of kernel core + image elf header. See Documentation/kdump.txt for details. enforcing [SELINUX] Set initial enforcing status. @@ -485,7 +495,7 @@ running once the system is up. es1371= [HW,OSS] Format: ,[,[]] See also header of sound/oss/es1371.c. - + ether= [HW,NET] Ethernet cards parameters This option is obsoleted by the "netdev=" option, which has equivalent usage. See its documentation for details. @@ -526,12 +536,13 @@ running once the system is up. gus= [HW,OSS] Format: ,,, - + gvp11= [HW,SCSI] hashdist= [KNL,NUMA] Large hashes allocated during boot are distributed across NUMA nodes. Defaults on for IA-64, off otherwise. + Format: 0 | 1 (for off | on) hcl= [IA-64] SGI's Hardware Graph compatibility layer @@ -595,13 +606,13 @@ running once the system is up. ide?= [HW] (E)IDE subsystem Format: ide?=noprobe or chipset specific parameters. See Documentation/ide.txt. - + idebus= [HW] (E)IDE subsystem - VLB/PCI bus speed See Documentation/ide.txt. idle= [HW] Format: idle=poll or idle=halt - + ihash_entries= [KNL] Set number of hash buckets for inode cache. @@ -649,7 +660,7 @@ running once the system is up. firmware running. isapnp= [ISAPNP] - Format: , , , + Format: ,,, isolcpus= [KNL,SMP] Isolate CPUs from the general scheduler. Format: ,..., @@ -661,32 +672,33 @@ running once the system is up. "number of CPUs in system - 1". This option is the preferred way to isolate CPUs. The - alternative - manually setting the CPU mask of all tasks - in the system can cause problems and suboptimal load - balancer performance. + alternative -- manually setting the CPU mask of all + tasks in the system -- can cause problems and + suboptimal load balancer performance. isp16= [HW,CD] Format: ,,, - iucv= [HW,NET] + iucv= [HW,NET] js= [HW,JOY] Analog joystick See Documentation/input/joystick.txt. 
keepinitrd [HW,ARM] - kstack=N [IA-32, X86-64] Print N words from the kernel stack + kstack=N [IA-32,X86-64] Print N words from the kernel stack in oops dumps. l2cr= [PPC] - lapic [IA-32,APIC] Enable the local APIC even if BIOS disabled it. + lapic [IA-32,APIC] Enable the local APIC even if BIOS + disabled it. lasi= [HW,SCSI] PARISC LASI driver for the 53c700 chip Format: addr:,irq: - llsc*= [IA64] - See function print_params() in arch/ia64/sn/kernel/llsc4.c. + llsc*= [IA64] See function print_params() in + arch/ia64/sn/kernel/llsc4.c. load_ramdisk= [RAM] List of ramdisks to load from floppy See Documentation/ramdisk.txt. @@ -713,8 +725,9 @@ running once the system is up. 7 (KERN_DEBUG) debug-level messages log_buf_len=n Sets the size of the printk ring buffer, in bytes. - Format is n, nk, nM. n must be a power of two. The - default is set in kernel config. + Format: { n | nk | nM } + n must be a power of two. The default size + is set in the kernel config file. lp=0 [LP] Specify parallel ports to use, e.g, lp=port[,port...] lp=none,parport0 (lp0 not configured, lp1 uses @@ -750,23 +763,23 @@ running once the system is up. ltpc= [NET] Format: ,, - mac5380= [HW,SCSI] - Format: ,,,, + mac5380= [HW,SCSI] Format: + ,,,, - mac53c9x= [HW,SCSI] - Format: ,,,,,,, + mac53c9x= [HW,SCSI] Format: + ,,,,,,, - machvec= [IA64] - Force the use of a particular machine-vector (machvec) in a generic - kernel. Example: machvec=hpzx1_swiotlb + machvec= [IA64] Force the use of a particular machine-vector + (machvec) in a generic kernel. + Example: machvec=hpzx1_swiotlb - mad16= [HW,OSS] - Format: ,,,,,, + mad16= [HW,OSS] Format: + ,,,,,, maui= [HW,OSS] Format: , - - max_loop= [LOOP] Maximum number of loopback devices that can + + max_loop= [LOOP] Maximum number of loopback devices that can be mounted Format: <1-256> @@ -776,11 +789,11 @@ running once the system is up. max_addr=[KMG] [KNL,BOOT,ia64] All physical memory greater than or equal to this physical address is ignored. - max_luns= [SCSI] Maximum number of LUNs to probe + max_luns= [SCSI] Maximum number of LUNs to probe. Should be between 1 and 2^32-1. max_report_luns= - [SCSI] Maximum number of LUNs received + [SCSI] Maximum number of LUNs received. Should be between 1 and 16384. mca-pentium [BUGS=IA-32] @@ -796,11 +809,11 @@ running once the system is up. md= [HW] RAID subsystems devices and level See Documentation/md.txt. - + mdacon= [MDA] Format: , Specifies range of consoles to be captured by the MDA. - + mem=nn[KMG] [KNL,BOOT] Force usage of a specific amount of memory Amount of memory to be used when the kernel is not able to see the whole system memory or for test. @@ -851,15 +864,15 @@ running once the system is up. MTD_Partition= [MTD] Format: ,,, - MTD_Region= [MTD] - Format: ,[,,,,] + MTD_Region= [MTD] Format: + ,[,,,,] mtdparts= [MTD] See drivers/mtd/cmdline.c. mtouchusb.raw_coordinates= - [HW] Make the MicroTouch USB driver use raw coordinates ('y', default) - or cooked coordinates ('n') + [HW] Make the MicroTouch USB driver use raw coordinates + ('y', default) or cooked coordinates ('n') n2= [NET] SDL Inc. RISCom/N2 synchronous serial card @@ -880,7 +893,9 @@ running once the system is up. Format: ,,,, Note that mem_start is often overloaded to mean something different and driver-specific. - + This usage is only documented in each driver source + file if at all. + nfsaddrs= [NFS] See Documentation/nfsroot.txt. @@ -893,8 +908,8 @@ running once the system is up. emulation library even if a 387 maths coprocessor is present. 
- noalign [KNL,ARM] - + noalign [KNL,ARM] + noapic [SMP,APIC] Tells the kernel to not make use of any IOAPICs that may be present in the system. @@ -905,19 +920,19 @@ running once the system is up. on "Classic" PPC cores. nocache [ARM] - + nodisconnect [HW,SCSI,M68K] Disables SCSI disconnects. noexec [IA-64] - noexec [IA-32, X86-64] + noexec [IA-32,X86-64] noexec=on: enable non-executable mappings (default) noexec=off: disable nn-executable mappings nofxsr [BUGS=IA-32] nohlt [BUGS=ARM] - + no-hlt [BUGS=IA-32] Tells the kernel that the hlt instruction doesn't work correctly and not to use it. @@ -948,8 +963,9 @@ running once the system is up. noresidual [PPC] Don't use residual data on PReP machines. - noresume [SWSUSP] Disables resume and restore original swap space. - + noresume [SWSUSP] Disables resume and restores original swap + space. + no-scroll [VGA] Disables scrollback. This is required for the Braillex ib80-piezo Braille reader made by F.H. Papenmeier (Germany). @@ -965,16 +981,16 @@ running once the system is up. nousb [USB] Disable the USB subsystem nowb [ARM] - + opl3= [HW,OSS] Format: opl3sa= [HW,OSS] Format: ,,,,, - opl3sa2= [HW,OSS] - Format: ,,,,,,,[,,,,,,,,,[,, parkbd.mode= [HW] Parallel port keyboard adapter mode of operation, 0 for XT, 1 for AT (default is AT). - Format: + Format: - parport=0 [HW,PPT] Specify parallel ports. 0 disables. - parport=auto Use 'auto' to force the driver to use - parport=0xBBB[,IRQ[,DMA]] any IRQ/DMA settings detected (the - default is to ignore detected IRQ/DMA - settings because of possible - conflicts). You can specify the base - address, IRQ, and DMA settings; IRQ and - DMA should be numbers, or 'auto' (for - using detected settings on that - particular port), or 'nofifo' (to avoid - using a FIFO even if it is detected). - Parallel ports are assigned in the - order they are specified on the command - line, starting with parport0. + parport= [HW,PPT] Specify parallel ports. 0 disables. + Format: { 0 | auto | 0xBBB[,IRQ[,DMA]] } + Use 'auto' to force the driver to use any + IRQ/DMA settings detected (the default is to + ignore detected IRQ/DMA settings because of + possible conflicts). You can specify the base + address, IRQ, and DMA settings; IRQ and DMA + should be numbers, or 'auto' (for using detected + settings on that particular port), or 'nofifo' + (to avoid using a FIFO even if it is detected). + Parallel ports are assigned in the order they + are specified on the command line, starting + with parport0. - parport_init_mode= - [HW,PPT] Configure VIA parallel port to - operate in specific mode. This is - necessary on Pegasos computer where - firmware has no options for setting up - parallel port mode and sets it to - spp. Currently this function knows - 686a and 8231 chips. + parport_init_mode= [HW,PPT] + Configure VIA parallel port to operate in + a specific mode. This is necessary on Pegasos + computer where firmware has no options for setting + up parallel port mode and sets it to spp. + Currently this function knows 686a and 8231 chips. Format: [spp|ps2|epp|ecp|ecpepp] - pas2= [HW,OSS] - Format: ,,,,,,, - + pas2= [HW,OSS] Format: + ,,,,,,, + pas16= [HW,SCSI] See header of drivers/scsi/pas16.c. @@ -1032,64 +1045,67 @@ running once the system is up. See header of drivers/block/paride/pcd.c. See also Documentation/paride.txt. - pci=option[,option...] [PCI] various PCI subsystem options: - off [IA-32] don't probe for the PCI bus - bios [IA-32] force use of PCI BIOS, don't access - the hardware directly. 
Use this if your machine - has a non-standard PCI host bridge. - nobios [IA-32] disallow use of PCI BIOS, only direct - hardware access methods are allowed. Use this - if you experience crashes upon bootup and you - suspect they are caused by the BIOS. - conf1 [IA-32] Force use of PCI Configuration Mechanism 1. - conf2 [IA-32] Force use of PCI Configuration Mechanism 2. - nosort [IA-32] Don't sort PCI devices according to - order given by the PCI BIOS. This sorting is done - to get a device order compatible with older kernels. - biosirq [IA-32] Use PCI BIOS calls to get the interrupt - routing table. These calls are known to be buggy - on several machines and they hang the machine when used, - but on other computers it's the only way to get the - interrupt routing table. Try this option if the kernel - is unable to allocate IRQs or discover secondary PCI - buses on your motherboard. - rom [IA-32] Assign address space to expansion ROMs. - Use with caution as certain devices share address - decoders between ROMs and other resources. - irqmask=0xMMMM [IA-32] Set a bit mask of IRQs allowed to be assigned - automatically to PCI devices. You can make the kernel - exclude IRQs of your ISA cards this way. + pci=option[,option...] [PCI] various PCI subsystem options: + off [IA-32] don't probe for the PCI bus + bios [IA-32] force use of PCI BIOS, don't access + the hardware directly. Use this if your machine + has a non-standard PCI host bridge. + nobios [IA-32] disallow use of PCI BIOS, only direct + hardware access methods are allowed. Use this + if you experience crashes upon bootup and you + suspect they are caused by the BIOS. + conf1 [IA-32] Force use of PCI Configuration + Mechanism 1. + conf2 [IA-32] Force use of PCI Configuration + Mechanism 2. + nosort [IA-32] Don't sort PCI devices according to + order given by the PCI BIOS. This sorting is + done to get a device order compatible with + older kernels. + biosirq [IA-32] Use PCI BIOS calls to get the interrupt + routing table. These calls are known to be buggy + on several machines and they hang the machine + when used, but on other computers it's the only + way to get the interrupt routing table. Try + this option if the kernel is unable to allocate + IRQs or discover secondary PCI buses on your + motherboard. + rom [IA-32] Assign address space to expansion ROMs. + Use with caution as certain devices share + address decoders between ROMs and other + resources. + irqmask=0xMMMM [IA-32] Set a bit mask of IRQs allowed to be + assigned automatically to PCI devices. You can + make the kernel exclude IRQs of your ISA cards + this way. pirqaddr=0xAAAAA [IA-32] Specify the physical address - of the PIRQ table (normally generated - by the BIOS) if it is outside the - F0000h-100000h range. - lastbus=N [IA-32] Scan all buses till bus #N. Can be useful - if the kernel is unable to find your secondary buses - and you want to tell it explicitly which ones they are. - assign-busses [IA-32] Always assign all PCI bus - numbers ourselves, overriding - whatever the firmware may have - done. - usepirqmask [IA-32] Honor the possible IRQ mask - stored in the BIOS $PIR table. This is - needed on some systems with broken - BIOSes, notably some HP Pavilion N5400 - and Omnibook XE3 notebooks. This will - have no effect if ACPI IRQ routing is - enabled. - noacpi [IA-32] Do not use ACPI for IRQ routing - or for PCI scanning. - routeirq Do IRQ routing for all PCI devices. 
- This is normally done in pci_enable_device(), - so this option is a temporary workaround - for broken drivers that don't call it. - - firmware [ARM] Do not re-enumerate the bus but - instead just use the configuration - from the bootloader. This is currently - used on IXP2000 systems where the - bus has to be configured a certain way - for adjunct CPUs. + of the PIRQ table (normally generated + by the BIOS) if it is outside the + F0000h-100000h range. + lastbus=N [IA-32] Scan all buses thru bus #N. Can be + useful if the kernel is unable to find your + secondary buses and you want to tell it + explicitly which ones they are. + assign-busses [IA-32] Always assign all PCI bus + numbers ourselves, overriding + whatever the firmware may have done. + usepirqmask [IA-32] Honor the possible IRQ mask stored + in the BIOS $PIR table. This is needed on + some systems with broken BIOSes, notably + some HP Pavilion N5400 and Omnibook XE3 + notebooks. This will have no effect if ACPI + IRQ routing is enabled. + noacpi [IA-32] Do not use ACPI for IRQ routing + or for PCI scanning. + routeirq Do IRQ routing for all PCI devices. + This is normally done in pci_enable_device(), + so this option is a temporary workaround + for broken drivers that don't call it. + firmware [ARM] Do not re-enumerate the bus but instead + just use the configuration from the + bootloader. This is currently used on + IXP2000 systems where the bus has to be + configured a certain way for adjunct CPUs. pcmv= [HW,PCMCIA] BadgePAD 4 @@ -1127,19 +1143,20 @@ running once the system is up. [ISAPNP] Exclude DMAs for the autoconfiguration pnp_reserve_io= [ISAPNP] Exclude I/O ports for the autoconfiguration - Ranges are in pairs (I/O port base and size). + Ranges are in pairs (I/O port base and size). pnp_reserve_mem= - [ISAPNP] Exclude memory regions for the autoconfiguration + [ISAPNP] Exclude memory regions for the + autoconfiguration. Ranges are in pairs (memory base and size). profile= [KNL] Enable kernel profiling via /proc/profile - { schedule | } - (param: schedule - profile schedule points} - (param: profile step/bucket size as a power of 2 for - statistical time based profiling) + Format: [schedule,] + Param: "schedule" - profile schedule points. + Param: - step/bucket size as a power of 2 for + statistical time based profiling. - processor.max_cstate= [HW, ACPI] + processor.max_cstate= [HW,ACPI] Limit processor to maximum C-state max_cstate=9 overrides any DMI blacklist limit. @@ -1147,27 +1164,28 @@ running once the system is up. before loading. See Documentation/ramdisk.txt. - psmouse.proto= [HW,MOUSE] Highest PS2 mouse protocol extension to - probe for (bare|imps|exps|lifebook|any). + psmouse.proto= [HW,MOUSE] Highest PS2 mouse protocol extension to + probe for; one of (bare|imps|exps|lifebook|any). psmouse.rate= [HW,MOUSE] Set desired mouse report rate, in reports per second. - psmouse.resetafter= - [HW,MOUSE] Try to reset the device after so many bad packets + psmouse.resetafter= [HW,MOUSE] + Try to reset the device after so many bad packets (0 = never). psmouse.resolution= [HW,MOUSE] Set desired mouse resolution, in dpi. psmouse.smartscroll= - [HW,MOUSE] Controls Logitech smartscroll autorepeat, + [HW,MOUSE] Controls Logitech smartscroll autorepeat. 0 = disabled, 1 = enabled (default). pss= [HW,OSS] Personal Sound System (ECHO ESC614) - Format: ,,,,, + Format: + ,,,,, pt. [PARIDE] See Documentation/paride.txt. 
quiet= [KNL] Disable log messages - + r128= [HW,DRM] raid= [HW,RAID] @@ -1176,10 +1194,9 @@ running once the system is up. ramdisk= [RAM] Sizes of RAM disks in kilobytes [deprecated] See Documentation/ramdisk.txt. - ramdisk_blocksize= - [RAM] + ramdisk_blocksize= [RAM] See Documentation/ramdisk.txt. - + ramdisk_size= [RAM] Sizes of RAM disks in kilobytes New name for the ramdisk parameter. See Documentation/ramdisk.txt. @@ -1195,7 +1212,8 @@ running once the system is up. reserve= [KNL,BUGS] Force the kernel to ignore some iomem area - resume= [SWSUSP] Specify the partition device for software suspension + resume= [SWSUSP] + Specify the partition device for software suspend rhash_entries= [KNL,NET] Set number of hash buckets for route cache @@ -1225,7 +1243,7 @@ running once the system is up. Format: ,,, sbni= [NET] Granch SBNI12 leased line adapter - + sbpcd= [HW,CD] Soundblaster CD adapter Format: , See a comment before function sbpcd_setup() in @@ -1258,21 +1276,20 @@ running once the system is up. serialnumber [BUGS=IA-32] - sg_def_reserved_size= - [SCSI] - + sg_def_reserved_size= [SCSI] + sgalaxy= [HW,OSS] Format: ,,,, shapers= [NET] Maximal number of shapers. - + sim710= [SCSI,HW] See header of drivers/scsi/sim710.c. simeth= [IA-64] simscsi= - + sjcd= [HW,CD] Format: ,, See header of drivers/cdrom/sjcd.c. @@ -1403,10 +1420,10 @@ running once the system is up. snd-wavefront= [HW,ALSA] snd-ymfpci= [HW,ALSA] - + sonicvibes= [HW,OSS] Format: - + sonycd535= [HW,CD] Format: [,] @@ -1423,7 +1440,7 @@ running once the system is up. sscape= [HW,OSS] Format: ,,,, - + st= [HW,SCSI] SCSI tape parameters (buffers, etc.) See Documentation/scsi/st.txt. @@ -1446,7 +1463,7 @@ running once the system is up. stram_swap= [HW,M68k] swiotlb= [IA-64] Number of I/O TLB slabs - + switches= [HW,M68k] sym53c416= [HW,SCSI] @@ -1479,14 +1496,16 @@ running once the system is up. tp720= [HW,PS2] trix= [HW,OSS] MediaTrix AudioTrix Pro - Format: ,,,,,,,, - + Format: + ,,,,,,,, + tsdev.xres= [TS] Horizontal screen resolution. tsdev.yres= [TS] Vertical screen resolution. - turbografx.map[2|3]= - [HW,JOY] TurboGraFX parallel port interface - Format: ,,,,,,, + turbografx.map[2|3]= [HW,JOY] + TurboGraFX parallel port interface + Format: + ,,,,,,, See also Documentation/input/joystick-parport.txt u14-34f= [HW,SCSI] UltraStor 14F/34F SCSI host adapter @@ -1502,17 +1521,18 @@ running once the system is up. usbhid.mousepoll= [USBHID] The interval which mice are to be polled at. - + video= [FB] Frame buffer configuration See Documentation/fb/modedb.txt. vga= [BOOT,IA-32] Select a particular video mode - See Documentation/i386/boot.txt and Documentation/svga.txt. + See Documentation/i386/boot.txt and + Documentation/svga.txt. Use vga=ask for menu. This is actually a boot loader parameter; the value is passed to the kernel using a special protocol. - vmalloc=nn[KMG] [KNL,BOOT] forces the vmalloc area to have an exact + vmalloc=nn[KMG] [KNL,BOOT] Forces the vmalloc area to have an exact size of . This can be used to increase the minimum size (128MB on x86). It can also be used to decrease the size and leave more room for directly @@ -1520,11 +1540,11 @@ running once the system is up. vmhalt= [KNL,S390] - vmpoff= [KNL,S390] - + vmpoff= [KNL,S390] + waveartist= [HW,OSS] Format: ,,, - + wd33c93= [HW,SCSI] See header of drivers/scsi/wd33c93.c. @@ -1538,21 +1558,25 @@ running once the system is up. xd_geo= See header of drivers/block/xd.c. 
xirc2ps_cs= [NET,PCMCIA] - Format: ,,,,,[,[,[,]]] - + Format: + ,,,,,[,[,[,]]] +______________________________________________________________________ Changelog: +2000-06-?? Mr. Unknown The last known update (for 2.4.0) - the changelog was not kept before. - 2000-06-?? Mr. Unknown +2002-11-24 Petr Baudis + Randy Dunlap Update for 2.5.49, description for most of the options introduced, references to other documentation (C files, READMEs, ..), added S390, PPC, SPARC, MTD, ALSA and OSS category. Minor corrections and reformatting. - 2002-11-24 Petr Baudis - Randy Dunlap + +2005-10-19 Randy Dunlap + Lots of typos, whitespace, some reformatting. TODO: diff --git a/Documentation/keys-request-key.txt b/Documentation/keys-request-key.txt new file mode 100644 index 000000000000..5f2b9c5edbb5 --- /dev/null +++ b/Documentation/keys-request-key.txt @@ -0,0 +1,161 @@ + =================== + KEY REQUEST SERVICE + =================== + +The key request service is part of the key retention service (refer to +Documentation/keys.txt). This document explains more fully how that the +requesting algorithm works. + +The process starts by either the kernel requesting a service by calling +request_key(): + + struct key *request_key(const struct key_type *type, + const char *description, + const char *callout_string); + +Or by userspace invoking the request_key system call: + + key_serial_t request_key(const char *type, + const char *description, + const char *callout_info, + key_serial_t dest_keyring); + +The main difference between the two access points is that the in-kernel +interface does not need to link the key to a keyring to prevent it from being +immediately destroyed. The kernel interface returns a pointer directly to the +key, and it's up to the caller to destroy the key. + +The userspace interface links the key to a keyring associated with the process +to prevent the key from going away, and returns the serial number of the key to +the caller. + + +=========== +THE PROCESS +=========== + +A request proceeds in the following manner: + + (1) Process A calls request_key() [the userspace syscall calls the kernel + interface]. + + (2) request_key() searches the process's subscribed keyrings to see if there's + a suitable key there. If there is, it returns the key. If there isn't, and + callout_info is not set, an error is returned. Otherwise the process + proceeds to the next step. + + (3) request_key() sees that A doesn't have the desired key yet, so it creates + two things: + + (a) An uninstantiated key U of requested type and description. + + (b) An authorisation key V that refers to key U and notes that process A + is the context in which key U should be instantiated and secured, and + from which associated key requests may be satisfied. + + (4) request_key() then forks and executes /sbin/request-key with a new session + keyring that contains a link to auth key V. + + (5) /sbin/request-key execs an appropriate program to perform the actual + instantiation. + + (6) The program may want to access another key from A's context (say a + Kerberos TGT key). It just requests the appropriate key, and the keyring + search notes that the session keyring has auth key V in its bottom level. + + This will permit it to then search the keyrings of process A with the + UID, GID, groups and security info of process A as if it was process A, + and come up with key W. 
+ + (7) The program then does what it must to get the data with which to + instantiate key U, using key W as a reference (perhaps it contacts a + Kerberos server using the TGT) and then instantiates key U. + + (8) Upon instantiating key U, auth key V is automatically revoked so that it + may not be used again. + + (9) The program then exits 0 and request_key() deletes key V and returns key + U to the caller. + +This also extends further. If key W (step 5 above) didn't exist, key W would be +created uninstantiated, another auth key (X) would be created [as per step 3] +and another copy of /sbin/request-key spawned [as per step 4]; but the context +specified by auth key X will still be process A, as it was in auth key V. + +This is because process A's keyrings can't simply be attached to +/sbin/request-key at the appropriate places because (a) execve will discard two +of them, and (b) it requires the same UID/GID/Groups all the way through. + + +====================== +NEGATIVE INSTANTIATION +====================== + +Rather than instantiating a key, it is possible for the possessor of an +authorisation key to negatively instantiate a key that's under construction. +This is a short duration placeholder that causes any attempt at re-requesting +the key whilst it exists to fail with error ENOKEY. + +This is provided to prevent excessive repeated spawning of /sbin/request-key +processes for a key that will never be obtainable. + +Should the /sbin/request-key process exit anything other than 0 or die on a +signal, the key under construction will be automatically negatively +instantiated for a short amount of time. + + +==================== +THE SEARCH ALGORITHM +==================== + +A search of any particular keyring proceeds in the following fashion: + + (1) When the key management code searches for a key (keyring_search_aux) it + firstly calls key_permission(SEARCH) on the keyring it's starting with, + if this denies permission, it doesn't search further. + + (2) It considers all the non-keyring keys within that keyring and, if any key + matches the criteria specified, calls key_permission(SEARCH) on it to see + if the key is allowed to be found. If it is, that key is returned; if + not, the search continues, and the error code is retained if of higher + priority than the one currently set. + + (3) It then considers all the keyring-type keys in the keyring it's currently + searching. It calls key_permission(SEARCH) on each keyring, and if this + grants permission, it recurses, executing steps (2) and (3) on that + keyring. + +The process stops immediately a valid key is found with permission granted to +use it. Any error from a previous match attempt is discarded and the key is +returned. + +When search_process_keyrings() is invoked, it performs the following searches +until one succeeds: + + (1) If extant, the process's thread keyring is searched. + + (2) If extant, the process's process keyring is searched. + + (3) The process's session keyring is searched. + + (4) If the process has a request_key() authorisation key in its session + keyring then: + + (a) If extant, the calling process's thread keyring is searched. + + (b) If extant, the calling process's process keyring is searched. + + (c) The calling process's session keyring is searched. + +The moment one succeeds, all pending errors are discarded and the found key is +returned. + +Only if all these fail does the whole thing fail with the highest priority +error. Note that several errors may have come from LSM. 
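A minimal sketch of driving this from kernel code, using the in-kernel interface shown at the top of this document (the "user" key type and the "metal:gold" description are illustrative only, borrowed from the examples in Documentation/keys.txt; the function name is hypothetical):

	#include <linux/key.h>
	#include <linux/err.h>

	extern struct key_type key_type_user;	/* the "user" key type */

	static int example_fetch_key(void)
	{
		struct key *key;

		/* NULL callout_info: fail immediately rather than upcalling
		 * to /sbin/request-key when no matching key is found */
		key = request_key(&key_type_user, "metal:gold", NULL);
		if (IS_ERR(key))
			/* the highest priority error from the search,
			 * e.g. -ENOKEY, -EKEYEXPIRED or -EKEYREVOKED */
			return PTR_ERR(key);

		/* ... use key->payload under the key type's locking rules ... */

		key_put(key);	/* in-kernel callers must drop the reference */
		return 0;
	}

Because the in-kernel interface does not link the new key to any keyring, the caller keeps the key alive only by holding this reference and drops it with key_put() when finished.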
+ +The error priority is: + + EKEYREVOKED > EKEYEXPIRED > ENOKEY + +EACCES/EPERM are only returned on a direct search of a specific keyring where +the basal keyring does not grant Search permission. diff --git a/Documentation/keys.txt b/Documentation/keys.txt index 0321ded4b9ae..4afe03a58c5b 100644 --- a/Documentation/keys.txt +++ b/Documentation/keys.txt @@ -195,8 +195,8 @@ KEY ACCESS PERMISSIONS ====================== Keys have an owner user ID, a group access ID, and a permissions mask. The mask -has up to eight bits each for user, group and other access. Only five of each -set of eight bits are defined. These permissions granted are: +has up to eight bits each for possessor, user, group and other access. Only +five of each set of eight bits are defined. These permissions granted are: (*) View @@ -241,16 +241,16 @@ about the status of the key service: type, description and permissions. The payload of the key is not available this way: - SERIAL FLAGS USAGE EXPY PERM UID GID TYPE DESCRIPTION: SUMMARY - 00000001 I----- 39 perm 1f0000 0 0 keyring _uid_ses.0: 1/4 - 00000002 I----- 2 perm 1f0000 0 0 keyring _uid.0: empty - 00000007 I----- 1 perm 1f0000 0 0 keyring _pid.1: empty - 0000018d I----- 1 perm 1f0000 0 0 keyring _pid.412: empty - 000004d2 I--Q-- 1 perm 1f0000 32 -1 keyring _uid.32: 1/4 - 000004d3 I--Q-- 3 perm 1f0000 32 -1 keyring _uid_ses.32: empty - 00000892 I--QU- 1 perm 1f0000 0 0 user metal:copper: 0 - 00000893 I--Q-N 1 35s 1f0000 0 0 user metal:silver: 0 - 00000894 I--Q-- 1 10h 1f0000 0 0 user metal:gold: 0 + SERIAL FLAGS USAGE EXPY PERM UID GID TYPE DESCRIPTION: SUMMARY + 00000001 I----- 39 perm 1f1f0000 0 0 keyring _uid_ses.0: 1/4 + 00000002 I----- 2 perm 1f1f0000 0 0 keyring _uid.0: empty + 00000007 I----- 1 perm 1f1f0000 0 0 keyring _pid.1: empty + 0000018d I----- 1 perm 1f1f0000 0 0 keyring _pid.412: empty + 000004d2 I--Q-- 1 perm 1f1f0000 32 -1 keyring _uid.32: 1/4 + 000004d3 I--Q-- 3 perm 1f1f0000 32 -1 keyring _uid_ses.32: empty + 00000892 I--QU- 1 perm 1f000000 0 0 user metal:copper: 0 + 00000893 I--Q-N 1 35s 1f1f0000 0 0 user metal:silver: 0 + 00000894 I--Q-- 1 10h 001f0000 0 0 user metal:gold: 0 The flags are: @@ -361,6 +361,8 @@ The main syscalls are: /sbin/request-key will be invoked in an attempt to obtain a key. The callout_info string will be passed as an argument to the program. + See also Documentation/keys-request-key.txt. + The keyctl syscall functions are: @@ -533,8 +535,8 @@ The keyctl syscall functions are: (*) Read the payload data from a key: - key_serial_t keyctl(KEYCTL_READ, key_serial_t keyring, char *buffer, - size_t buflen); + long keyctl(KEYCTL_READ, key_serial_t keyring, char *buffer, + size_t buflen); This function attempts to read the payload data from the specified key into the buffer. The process must have read permission on the key to @@ -555,9 +557,9 @@ The keyctl syscall functions are: (*) Instantiate a partially constructed key. - key_serial_t keyctl(KEYCTL_INSTANTIATE, key_serial_t key, - const void *payload, size_t plen, - key_serial_t keyring); + long keyctl(KEYCTL_INSTANTIATE, key_serial_t key, + const void *payload, size_t plen, + key_serial_t keyring); If the kernel calls back to userspace to complete the instantiation of a key, userspace should use this call to supply data for the key before the @@ -576,8 +578,8 @@ The keyctl syscall functions are: (*) Negatively instantiate a partially constructed key. 
- key_serial_t keyctl(KEYCTL_NEGATE, key_serial_t key, - unsigned timeout, key_serial_t keyring); + long keyctl(KEYCTL_NEGATE, key_serial_t key, + unsigned timeout, key_serial_t keyring); If the kernel calls back to userspace to complete the instantiation of a key, userspace should use this call mark the key as negative before the @@ -637,6 +639,34 @@ call, and the key released upon close. How to deal with conflicting keys due to two different users opening the same file is left to the filesystem author to solve. +Note that there are two different types of pointers to keys that may be +encountered: + + (*) struct key * + + This simply points to the key structure itself. Key structures will be at + least four-byte aligned. + + (*) key_ref_t + + This is equivalent to a struct key *, but the least significant bit is set + if the caller "possesses" the key. By "possession" it is meant that the + calling processes has a searchable link to the key from one of its + keyrings. There are three functions for dealing with these: + + key_ref_t make_key_ref(const struct key *key, + unsigned long possession); + + struct key *key_ref_to_ptr(const key_ref_t key_ref); + + unsigned long is_key_possessed(const key_ref_t key_ref); + + The first function constructs a key reference from a key pointer and + possession information (which must be 0 or 1 and not any other value). + + The second function retrieves the key pointer from a reference and the + third retrieves the possession flag. + When accessing a key's payload contents, certain precautions must be taken to prevent access vs modification races. See the section "Notes on accessing payload contents" for more information. @@ -660,12 +690,18 @@ payload contents" for more information. If successful, the key will have been attached to the default keyring for implicitly obtained request-key keys, as set by KEYCTL_SET_REQKEY_KEYRING. + See also Documentation/keys-request-key.txt. + (*) When it is no longer required, the key should be released using: void key_put(struct key *key); - This can be called from interrupt context. If CONFIG_KEYS is not set then + Or: + + void key_ref_put(key_ref_t key_ref); + + These can be called from interrupt context. If CONFIG_KEYS is not set then the argument will not be parsed. @@ -689,13 +725,17 @@ payload contents" for more information. (*) If a keyring was found in the search, this can be further searched by: - struct key *keyring_search(struct key *keyring, - const struct key_type *type, - const char *description) + key_ref_t keyring_search(key_ref_t keyring_ref, + const struct key_type *type, + const char *description) This searches the keyring tree specified for a matching key. Error ENOKEY - is returned upon failure. If successful, the returned key will need to be - released. + is returned upon failure (use IS_ERR/PTR_ERR to determine). If successful, + the returned key will need to be released. + + The possession attribute from the keyring reference is used to control + access through the permissions mask and is propagated to the returned key + reference pointer if successful. (*) To check the validity of a key, this function can be called: @@ -732,7 +772,7 @@ More complex payload contents must be allocated and a pointer to them set in key->payload.data. One of the following ways must be selected to access the data: - (1) Unmodifyable key type. + (1) Unmodifiable key type. 
If the key type does not have a modify method, then the key's payload can be accessed without any form of locking, provided that it's known to be diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt index a55f0f95b171..b0fe41da007b 100644 --- a/Documentation/networking/bonding.txt +++ b/Documentation/networking/bonding.txt @@ -777,7 +777,7 @@ doing so is the same as described in the "Configuring Multiple Bonds Manually" section, below. NOTE: It has been observed that some Red Hat supplied kernels -are apparently unable to rename modules at load time (the "-obonding1" +are apparently unable to rename modules at load time (the "-o bond1" part). Attempts to pass that option to modprobe will produce an "Operation not permitted" error. This has been reported on some Fedora Core kernels, and has been seen on RHEL 4 as well. On kernels @@ -883,7 +883,8 @@ the above does not work, and the second bonding instance never sees its options. In that case, the second options line can be substituted as follows: -install bonding1 /sbin/modprobe bonding -obond1 mode=balance-alb miimon=50 +install bond1 /sbin/modprobe --ignore-install bonding -o bond1 \ + mode=balance-alb miimon=50 This may be repeated any number of times, specifying a new and unique name in place of bond1 for each subsequent instance. diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index ab65714d95fc..b433c8a27e2d 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -355,10 +355,14 @@ ip_dynaddr - BOOLEAN Default: 0 icmp_echo_ignore_all - BOOLEAN + If set non-zero, then the kernel will ignore all ICMP ECHO + requests sent to it. + Default: 0 + icmp_echo_ignore_broadcasts - BOOLEAN - If either is set to true, then the kernel will ignore either all - ICMP ECHO requests sent to it or just those to broadcast/multicast - addresses, respectively. + If set non-zero, then the kernel will ignore all ICMP ECHO and + TIMESTAMP requests sent to it via broadcast/multicast. + Default: 1 icmp_ratelimit - INTEGER Limit the maximal rates for sending ICMP packets whose type matches diff --git a/Documentation/sparse.txt b/Documentation/sparse.txt index 5df44dc894e5..1829009db771 100644 --- a/Documentation/sparse.txt +++ b/Documentation/sparse.txt @@ -51,9 +51,9 @@ or you don't get any checking at all. Where to get sparse ~~~~~~~~~~~~~~~~~~~ -With BK, you can just get it from +With git, you can just get it from - bk://sparse.bkbits.net/sparse + rsync://rsync.kernel.org/pub/scm/devel/sparse/sparse.git and DaveJ has tar-balls at diff --git a/Documentation/usb/URB.txt b/Documentation/usb/URB.txt index d59b95cc6f1b..a49e5f2c2b46 100644 --- a/Documentation/usb/URB.txt +++ b/Documentation/usb/URB.txt @@ -1,5 +1,6 @@ Revised: 2000-Dec-05. Again: 2002-Jul-06 +Again: 2005-Sep-19 NOTE: @@ -18,8 +19,8 @@ called USB Request Block, or URB for short. and deliver the data and status back. - Execution of an URB is inherently an asynchronous operation, i.e. the - usb_submit_urb(urb) call returns immediately after it has successfully queued - the requested action. + usb_submit_urb(urb) call returns immediately after it has successfully + queued the requested action. - Transfers for one URB can be canceled with usb_unlink_urb(urb) at any time. @@ -94,8 +95,9 @@ To free an URB, use void usb_free_urb(struct urb *urb) -You may not free an urb that you've submitted, but which hasn't yet been -returned to you in a completion callback. 
+You may free an urb that you've submitted, but which hasn't yet been +returned to you in a completion callback. It will automatically be +deallocated when it is no longer in use. 1.4. What has to be filled in? @@ -145,30 +147,36 @@ to get seamless ISO streaming. 1.6. How to cancel an already running URB? -For an URB which you've submitted, but which hasn't been returned to -your driver by the host controller, call +There are two ways to cancel an URB you've submitted but which hasn't +been returned to your driver yet. For an asynchronous cancel, call int usb_unlink_urb(struct urb *urb) It removes the urb from the internal list and frees all allocated -HW descriptors. The status is changed to reflect unlinking. After -usb_unlink_urb() returns with that status code, you can free the URB -with usb_free_urb(). +HW descriptors. The status is changed to reflect unlinking. Note +that the URB will not normally have finished when usb_unlink_urb() +returns; you must still wait for the completion handler to be called. -There is also an asynchronous unlink mode. To use this, set the -the URB_ASYNC_UNLINK flag in urb->transfer flags before calling -usb_unlink_urb(). When using async unlinking, the URB will not -normally be unlinked when usb_unlink_urb() returns. Instead, wait -for the completion handler to be called. +To cancel an URB synchronously, call + + void usb_kill_urb(struct urb *urb) + +It does everything usb_unlink_urb does, and in addition it waits +until after the URB has been returned and the completion handler +has finished. It also marks the URB as temporarily unusable, so +that if the completion handler or anyone else tries to resubmit it +they will get a -EPERM error. Thus you can be sure that when +usb_kill_urb() returns, the URB is totally idle. 1.7. What about the completion handler? The handler is of the following type: - typedef void (*usb_complete_t)(struct urb *); + typedef void (*usb_complete_t)(struct urb *, struct pt_regs *) -i.e. it gets just the URB that caused the completion call. +I.e., it gets the URB that caused the completion call, plus the +register values at the time of the corresponding interrupt (if any). In the completion handler, you should have a look at urb->status to detect any USB errors. Since the context parameter is included in the URB, you can pass information to the completion handler. @@ -176,17 +184,11 @@ you can pass information to the completion handler. Note that even when an error (or unlink) is reported, data may have been transferred. That's because USB transfers are packetized; it might take sixteen packets to transfer your 1KByte buffer, and ten of them might -have transferred succesfully before the completion is called. +have transferred succesfully before the completion was called. NOTE: ***** WARNING ***** -Don't use urb->dev field in your completion handler; it's cleared -as part of giving urbs back to drivers. (Addressing an issue with -ownership of periodic URBs, which was otherwise ambiguous.) Instead, -use urb->context to hold all the data your driver needs. - -NOTE: ***** WARNING ***** -Also, NEVER SLEEP IN A COMPLETION HANDLER. These are normally called +NEVER SLEEP IN A COMPLETION HANDLER. These are normally called during hardware interrupt processing. If you can, defer substantial work to a tasklet (bottom half) to keep system latencies low. You'll probably need to use spinlocks to protect data structures you manipulate @@ -229,24 +231,10 @@ ISO data with some other event stream. 
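To make the completion rules above concrete, here is a sketch of a handler using the 2.6 prototype; the handler name, the per-device structure and its tasklet are hypothetical, not taken from any real driver:

	#include <linux/kernel.h>
	#include <linux/ptrace.h>
	#include <linux/interrupt.h>
	#include <linux/usb.h>

	struct my_dev {				/* hypothetical per-device state */
		struct tasklet_struct	bh;	/* bottom half doing the real work */
	};

	static void my_complete(struct urb *urb, struct pt_regs *regs)
	{
		struct my_dev *dev = urb->context;	/* set when the URB was filled in */

		if (urb->status)
			/* error or unlink; some packets may already have
			 * transferred before the failure was seen */
			printk(KERN_DEBUG "urb finished, status %d\n",
			       urb->status);

		/* never sleep here: defer substantial work to a tasklet */
		tasklet_schedule(&dev->bh);
	}

Everything the handler needs is reached through urb->context, and anything that might sleep is deferred to the tasklet, so the handler stays safe to run during hardware interrupt processing.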
Interrupt transfers, like isochronous transfers, are periodic, and happen in intervals that are powers of two (1, 2, 4 etc) units. Units are frames for full and low speed devices, and microframes for high speed ones. - -Currently, after you submit one interrupt URB, that urb is owned by the -host controller driver until you cancel it with usb_unlink_urb(). You -may unlink interrupt urbs in their completion handlers, if you need to. - -After a transfer completion is called, the URB is automagically resubmitted. -THIS BEHAVIOR IS EXPECTED TO BE REMOVED!! - -Interrupt transfers may only send (or receive) the "maxpacket" value for -the given interrupt endpoint; if you need more data, you will need to -copy that data out of (or into) another buffer. Similarly, you can't -queue interrupt transfers. -THESE RESTRICTIONS ARE EXPECTED TO BE REMOVED!! - -Note that this automagic resubmission model does make it awkward to use -interrupt OUT transfers. The portable solution involves unlinking those -OUT urbs after the data is transferred, and perhaps submitting a final -URB for a short packet. - The usb_submit_urb() call modifies urb->interval to the implemented interval value that is less than or equal to the requested interval value. + +In Linux 2.6, unlike earlier versions, interrupt URBs are not automagically +restarted when they complete. They end when the completion handler is +called, just like other URBs. If you want an interrupt URB to be restarted, +your completion handler must resubmit it. diff --git a/MAINTAINERS b/MAINTAINERS index ade7415d2467..767fb610963e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -604,6 +604,15 @@ P: H. Peter Anvin M: hpa@zytor.com S: Maintained +CPUSETS +P: Paul Jackson +P: Simon Derr +M: pj@sgi.com +M: simon.derr@bull.net +L: linux-kernel@vger.kernel.org +W: http://www.bullopensource.org/cpuset/ +S: Supported + CRAMFS FILESYSTEM W: http://sourceforge.net/projects/cramfs/ S: Orphan @@ -1063,8 +1072,6 @@ M: wli@holomorphy.com S: Maintained I2C SUBSYSTEM -P: Greg Kroah-Hartman -M: greg@kroah.com P: Jean Delvare M: khali@linux-fr.org L: lm-sensors@lm-sensors.org @@ -1161,11 +1168,6 @@ L: linux1394-devel@lists.sourceforge.net W: http://www.linux1394.org/ S: Orphan -IEEE 1394 SBP2 -L: linux1394-devel@lists.sourceforge.net -W: http://www.linux1394.org/ -S: Orphan - IEEE 1394 SUBSYSTEM P: Ben Collins M: bcollins@debian.org @@ -1200,6 +1202,15 @@ L: linux1394-devel@lists.sourceforge.net W: http://www.linux1394.org/ S: Maintained +IEEE 1394 SBP2 +P: Ben Collins +M: bcollins@debian.org +P: Stefan Richter +M: stefanr@s5r6.in-berlin.de +L: linux1394-devel@lists.sourceforge.net +W: http://www.linux1394.org/ +S: Maintained + IMS TWINTURBO FRAMEBUFFER DRIVER P: Paul Mundt M: lethal@chaoticdreams.org @@ -1404,6 +1415,18 @@ L: linux-kernel@vger.kernel.org L: fastboot@osdl.org S: Maintained +KPROBES +P: Prasanna S Panchamukhi +M: prasanna@in.ibm.com +P: Ananth N Mavinakayanahalli +M: ananth@in.ibm.com +P: Anil S Keshavamurthy +M: anil.s.keshavamurthy@intel.com +P: David S. 
Miller +M: davem@davemloft.net +L: linux-kernel@vger.kernel.org +S: Maintained + LANMEDIA WAN CARD DRIVER P: Andrew Stanley-Jones M: asj@lanmedia.com @@ -1595,6 +1618,13 @@ M: vandrove@vc.cvut.cz L: linux-fbdev-devel@lists.sourceforge.net S: Maintained +MEGARAID SCSI DRIVERS +P: Neela Syam Kolli +M: Neela.Kolli@engenio.com +S: linux-scsi@vger.kernel.org +W: http://megaraid.lsilogic.com +S: Maintained + MEMORY TECHNOLOGY DEVICES P: David Woodhouse M: dwmw2@infradead.org @@ -1724,8 +1754,11 @@ S: Maintained IPVS P: Wensong Zhang M: wensong@linux-vs.org +P: Simon Horman +M: horms@verge.net.au P: Julian Anastasov M: ja@ssi.bg +L: netdev@vger.kernel.org S: Maintained NFS CLIENT @@ -1896,6 +1929,13 @@ M: joern@wh.fh-wedel.de L: linux-mtd@lists.infradead.org S: Maintained +PKTCDVD DRIVER +P: Peter Osterlund +M: petero2@telia.com +L: linux-kernel@vger.kernel.org +L: packet-writing@suse.com +S: Maintained + POSIX CLOCKS and TIMERS P: George Anzinger M: george@mvista.com diff --git a/Makefile b/Makefile index 8cf6becf68dc..f1d121f23025 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 14 -EXTRAVERSION =-rc2 +EXTRAVERSION = NAME=Affluent Albatross # *DOCUMENTATION* @@ -334,7 +334,7 @@ KALLSYMS = scripts/kallsyms PERL = perl CHECK = sparse -CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ $(CF) +CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ -Wbitwise $(CF) MODFLAGS = -DMODULE CFLAGS_MODULE = $(MODFLAGS) AFLAGS_MODULE = $(MODFLAGS) @@ -372,7 +372,7 @@ export MODVERDIR := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/).tmp_ve # Files to ignore in find ... statements RCS_FIND_IGNORE := \( -name SCCS -o -name BitKeeper -o -name .svn -o -name CVS -o -name .pc -o -name .hg \) -prune -o -RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exclude CVS --exclude .pc --exclude .hg +export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exclude CVS --exclude .pc --exclude .hg # =========================================================================== # Rules shared between *config targets and build targets @@ -660,8 +660,10 @@ quiet_cmd_sysmap = SYSMAP # Link of vmlinux # If CONFIG_KALLSYMS is set .version is already updated # Generate System.map and verify that the content is consistent - +# Use + in front of the vmlinux_version rule to silent warning with make -j2 +# First command is ':' to allow us to use + in front of the rule define rule_vmlinux__ + : $(if $(CONFIG_KALLSYMS),,+$(call cmd,vmlinux_version)) $(call cmd,vmlinux__) diff --git a/README b/README index 2b5844d8cfa0..d1edcc7adabe 100644 --- a/README +++ b/README @@ -151,7 +151,7 @@ CONFIGURING the kernel: your existing ./.config file. "make silentoldconfig" Like above, but avoids cluttering the screen - with question already answered. + with questions already answered. NOTES on "make config": - having unnecessary drivers will make the kernel bigger, and can @@ -199,9 +199,9 @@ COMPILING the kernel: are installing a new kernel with the same version number as your working kernel, make a backup of your modules directory before you do a "make modules_install". - In alternative, before compiling, edit your Makefile and change the - "EXTRAVERSION" line - its content is appended to the regular kernel - version. + Alternatively, before compiling, use the kernel config option + "LOCALVERSION" to append a unique suffix to the regular kernel version. + LOCALVERSION can be set in the "General Setup" menu. 
- In order to boot your new kernel, you'll need to copy the kernel image (e.g. .../linux/arch/i386/boot/bzImage after compilation) diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S index 76cc0cb5fc2e..e38671c922bc 100644 --- a/arch/alpha/kernel/entry.S +++ b/arch/alpha/kernel/entry.S @@ -196,6 +196,7 @@ entUna: stq $26, 208($sp) stq $27, 216($sp) stq $28, 224($sp) + mov $sp, $19 stq $gp, 232($sp) lda $8, 0x3fff stq $31, 248($sp) diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c index 582a3519fb28..9903e3a79102 100644 --- a/arch/alpha/kernel/pci-noop.c +++ b/arch/alpha/kernel/pci-noop.c @@ -154,7 +154,7 @@ pci_dma_supported(struct pci_dev *hwdev, dma_addr_t mask) void * dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, int gfp) + dma_addr_t *dma_handle, gfp_t gfp) { void *ret; diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c index 7cb23f12ecbd..c468e312e5f8 100644 --- a/arch/alpha/kernel/pci_iommu.c +++ b/arch/alpha/kernel/pci_iommu.c @@ -397,7 +397,7 @@ pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp) { void *cpu_addr; long order = get_order(size); - int gfp = GFP_ATOMIC; + gfp_t gfp = GFP_ATOMIC; try_again: cpu_addr = (void *)__get_free_pages(gfp, order); diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index fa98dae3cd98..eb20c3afff58 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -127,6 +127,10 @@ common_shutdown_1(void *generic_ptr) /* If booted from SRM, reset some of the original environment. */ if (alpha_using_srm) { #ifdef CONFIG_DUMMY_CONSOLE + /* If we've gotten here after SysRq-b, leave interrupt + context before taking over the console. */ + if (in_interrupt()) + irq_exit(); /* This has the effect of resetting the VGA video origin. */ take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1); #endif diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index 6f509a644bdd..f9d12319e0fb 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -446,16 +446,15 @@ struct unaligned_stat { /* Macro for exception fixup code to access integer registers. */ -#define una_reg(r) (regs.regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)]) +#define una_reg(r) (regs->regs[(r) >= 16 && (r) <= 18 ? 
(r)+19 : (r)]) asmlinkage void do_entUna(void * va, unsigned long opcode, unsigned long reg, - unsigned long a3, unsigned long a4, unsigned long a5, - struct allregs regs) + struct allregs *regs) { long error, tmp1, tmp2, tmp3, tmp4; - unsigned long pc = regs.pc - 4; + unsigned long pc = regs->pc - 4; const struct exception_table_entry *fixup; unaligned[0].count++; @@ -636,7 +635,7 @@ got_exception: printk("Forwarding unaligned exception at %lx (%lx)\n", pc, newpc); - (®s)->pc = newpc; + regs->pc = newpc; return; } @@ -650,7 +649,7 @@ got_exception: current->comm, current->pid); printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx\n", - pc, una_reg(26), regs.ps); + pc, una_reg(26), regs->ps); printk("r0 = %016lx r1 = %016lx r2 = %016lx\n", una_reg(0), una_reg(1), una_reg(2)); printk("r3 = %016lx r4 = %016lx r5 = %016lx\n", @@ -670,10 +669,10 @@ got_exception: una_reg(22), una_reg(23), una_reg(24)); printk("r25= %016lx r27= %016lx r28= %016lx\n", una_reg(25), una_reg(27), una_reg(28)); - printk("gp = %016lx sp = %p\n", regs.gp, ®s+1); + printk("gp = %016lx sp = %p\n", regs->gp, regs+1); dik_show_code((unsigned int *)pc); - dik_show_trace((unsigned long *)(®s+1)); + dik_show_trace((unsigned long *)(regs+1)); if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) { printk("die_if_kernel recursion detected.\n"); diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 130e6228b587..299bc0468702 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -53,7 +53,7 @@ tune-$(CONFIG_CPU_ARM926T) :=-mtune=arm9tdmi tune-$(CONFIG_CPU_SA110) :=-mtune=strongarm110 tune-$(CONFIG_CPU_SA1100) :=-mtune=strongarm1100 tune-$(CONFIG_CPU_XSCALE) :=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale -tune-$(CONFIG_CPU_V6) :=-mtune=strongarm +tune-$(CONFIG_CPU_V6) :=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm) # Need -Uarm for gcc < 3.x CFLAGS_ABI :=$(call cc-option,-mapcs-32,-mabi=apcs-gnu) $(call cc-option,-mno-thumb-interwork,) @@ -175,10 +175,10 @@ else endif @touch $@ -archprepare: maketools include/asm-arm/.arch +archprepare: maketools .PHONY: maketools FORCE -maketools: include/linux/version.h FORCE +maketools: include/linux/version.h include/asm-arm/.arch FORCE $(Q)$(MAKE) $(build)=arch/arm/tools include/asm-arm/mach-types.h # Convert bzImage to zImage diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index d74990717559..c02dc8116a18 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c @@ -68,6 +68,7 @@ static void gic_unmask_irq(unsigned int irq) writel(mask, gic_dist_base + GIC_DIST_ENABLE_SET + (irq / 32) * 4); } +#ifdef CONFIG_SMP static void gic_set_cpu(struct irqdesc *desc, unsigned int irq, unsigned int cpu) { void __iomem *reg = gic_dist_base + GIC_DIST_TARGET + (irq & ~3); @@ -78,6 +79,7 @@ static void gic_set_cpu(struct irqdesc *desc, unsigned int irq, unsigned int cpu val |= 1 << (cpu + shift); writel(val, reg); } +#endif static struct irqchip gic_chip = { .ack = gic_ack_irq, diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c index a7bd85700152..e8053d16829b 100644 --- a/arch/arm/common/locomo.c +++ b/arch/arm/common/locomo.c @@ -27,7 +27,6 @@ #include #include -#include #include #include #include diff --git a/arch/arm/common/scoop.c b/arch/arm/common/scoop.c index d3a04c2a2c85..9e5245c702de 100644 --- a/arch/arm/common/scoop.c +++ b/arch/arm/common/scoop.c @@ -26,6 +26,8 @@ struct scoop_pcmcia_dev *scoop_devs; struct scoop_dev { void *base; spinlock_t scoop_lock; + unsigned short suspend_clr; + unsigned short suspend_set; u32 scoop_gpwr; }; 
@@ -90,14 +92,24 @@ EXPORT_SYMBOL(reset_scoop); EXPORT_SYMBOL(read_scoop_reg); EXPORT_SYMBOL(write_scoop_reg); +static void check_scoop_reg(struct scoop_dev *sdev) +{ + unsigned short mcr; + + mcr = SCOOP_REG(sdev->base, SCOOP_MCR); + if ((mcr & 0x100) == 0) + SCOOP_REG(sdev->base, SCOOP_MCR) = 0x0101; +} + #ifdef CONFIG_PM static int scoop_suspend(struct device *dev, pm_message_t state, uint32_t level) { if (level == SUSPEND_POWER_DOWN) { struct scoop_dev *sdev = dev_get_drvdata(dev); - sdev->scoop_gpwr = SCOOP_REG(sdev->base,SCOOP_GPWR); - SCOOP_REG(sdev->base,SCOOP_GPWR) = 0; + check_scoop_reg(sdev); + sdev->scoop_gpwr = SCOOP_REG(sdev->base, SCOOP_GPWR); + SCOOP_REG(sdev->base, SCOOP_GPWR) = (sdev->scoop_gpwr & ~sdev->suspend_clr) | sdev->suspend_set; } return 0; } @@ -107,6 +119,7 @@ static int scoop_resume(struct device *dev, uint32_t level) if (level == RESUME_POWER_ON) { struct scoop_dev *sdev = dev_get_drvdata(dev); + check_scoop_reg(sdev); SCOOP_REG(sdev->base,SCOOP_GPWR) = sdev->scoop_gpwr; } return 0; @@ -151,6 +164,9 @@ int __init scoop_probe(struct device *dev) SCOOP_REG(devptr->base, SCOOP_GPCR) = inf->io_dir & 0xffff; SCOOP_REG(devptr->base, SCOOP_GPWR) = inf->io_out & 0xffff; + devptr->suspend_clr = inf->suspend_clr; + devptr->suspend_set = inf->suspend_set; + return 0; } diff --git a/arch/arm/configs/collie_defconfig b/arch/arm/configs/collie_defconfig new file mode 100644 index 000000000000..40dfe07a8bce --- /dev/null +++ b/arch/arm/configs/collie_defconfig @@ -0,0 +1,888 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.14-rc3 +# Sun Oct 9 16:55:14 2005 +# +CONFIG_ARM=y +CONFIG_MMU=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_GENERIC_CALIBRATE_DELAY=y + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +# CONFIG_CLEAN_COMPILE is not set +CONFIG_BROKEN=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_LOCK_KERNEL=y +CONFIG_INIT_ENV_ARG_LIMIT=32 + +# +# General setup +# +CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_POSIX_MQUEUE is not set +CONFIG_BSD_PROCESS_ACCT=y +# CONFIG_BSD_PROCESS_ACCT_V3 is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_HOTPLUG=y +CONFIG_KOBJECT_UEVENT=y +# CONFIG_IKCONFIG is not set +CONFIG_INITRAMFS_SOURCE="" +CONFIG_EMBEDDED=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SHMEM=y +CONFIG_CC_ALIGN_FUNCTIONS=0 +CONFIG_CC_ALIGN_LABELS=0 +CONFIG_CC_ALIGN_LOOPS=0 +CONFIG_CC_ALIGN_JUMPS=0 +# CONFIG_TINY_SHMEM is not set +CONFIG_BASE_SMALL=0 + +# +# Loadable module support +# +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_OBSOLETE_MODPARM=y +CONFIG_MODVERSIONS=y +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_KMOD=y + +# +# System Type +# +# CONFIG_ARCH_CLPS7500 is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CO285 is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_CAMELOT is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_IOP3XX is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_IXP2000 is not set +# CONFIG_ARCH_L7200 is not set +# CONFIG_ARCH_PXA is not set +# CONFIG_ARCH_RPC is not set +CONFIG_ARCH_SA1100=y +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_OMAP is not set +# 
CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_IMX is not set +# CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_AAEC2000 is not set + +# +# SA11x0 Implementations +# +# CONFIG_SA1100_ASSABET is not set +# CONFIG_SA1100_CERF is not set +CONFIG_SA1100_COLLIE=y +# CONFIG_SA1100_H3100 is not set +# CONFIG_SA1100_H3600 is not set +# CONFIG_SA1100_H3800 is not set +# CONFIG_SA1100_BADGE4 is not set +# CONFIG_SA1100_JORNADA720 is not set +# CONFIG_SA1100_HACKKIT is not set +# CONFIG_SA1100_LART is not set +# CONFIG_SA1100_PLEB is not set +# CONFIG_SA1100_SHANNON is not set +# CONFIG_SA1100_SIMPAD is not set +# CONFIG_SA1100_SSP is not set + +# +# Processor Type +# +CONFIG_CPU_32=y +CONFIG_CPU_SA1100=y +CONFIG_CPU_32v4=y +CONFIG_CPU_ABRT_EV4=y +CONFIG_CPU_CACHE_V4WB=y +CONFIG_CPU_CACHE_VIVT=y +CONFIG_CPU_TLB_V4WB=y + +# +# Processor Features +# +CONFIG_SHARP_LOCOMO=y +CONFIG_SHARP_PARAM=y +CONFIG_SHARP_SCOOP=y + +# +# Bus support +# +CONFIG_ISA=y +CONFIG_ISA_DMA_API=y + +# +# PCCARD (PCMCIA/CardBus) support +# +# CONFIG_PCCARD is not set + +# +# Kernel Features +# +# CONFIG_SMP is not set +CONFIG_PREEMPT=y +# CONFIG_NO_IDLE_HZ is not set +CONFIG_ARCH_DISCONTIGMEM_ENABLE=y +CONFIG_SELECT_MEMORY_MODEL=y +# CONFIG_FLATMEM_MANUAL is not set +CONFIG_DISCONTIGMEM_MANUAL=y +# CONFIG_SPARSEMEM_MANUAL is not set +CONFIG_DISCONTIGMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_NEED_MULTIPLE_NODES=y +# CONFIG_SPARSEMEM_STATIC is not set +# CONFIG_LEDS is not set +CONFIG_ALIGNMENT_TRAP=y + +# +# Boot options +# +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_CMDLINE="console=ttyS0,115200n8 console=tty1 noinitrd root=/dev/mtdblock2 rootfstype=jffs2 debug" +# CONFIG_XIP_KERNEL is not set + +# +# CPU Frequency scaling +# +# CONFIG_CPU_FREQ is not set + +# +# Floating point emulation +# + +# +# At least one emulation must be selected +# +CONFIG_FPE_NWFPE=y +# CONFIG_FPE_NWFPE_XP is not set +# CONFIG_FPE_FASTFPE is not set + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_BINFMT_AOUT=m +CONFIG_BINFMT_MISC=m +# CONFIG_ARTHUR is not set + +# +# Power management options +# +CONFIG_PM=y +CONFIG_APM=y + +# +# Networking +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_MMAP=y +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +# CONFIG_IP_PNP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +CONFIG_SYN_COOKIES=y +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_TUNNEL is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_BIC=y +# CONFIG_IPV6 is not set +# CONFIG_NETFILTER is not set + +# +# DCCP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_DCCP is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_SCHED is not set +# CONFIG_NET_CLS_ROUTE is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +# CONFIG_IEEE80211 is not set + +# +# Device Drivers 
+# + +# +# Generic Driver Options +# +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=m +# CONFIG_DEBUG_DRIVER is not set + +# +# Memory Technology Devices (MTD) +# +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AFS_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set +CONFIG_MTD_OBSOLETE_CHIPS=y +# CONFIG_MTD_AMDSTD is not set +CONFIG_MTD_SHARP=y +# CONFIG_MTD_JEDEC is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLKMTD is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set + +# +# NAND Flash Device Drivers +# +# CONFIG_MTD_NAND is not set + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Plug and Play support +# +# CONFIG_PNP is not set + +# +# Block devices +# +# CONFIG_BLK_DEV_XD is not set +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_NBD is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=1024 +CONFIG_BLK_DEV_INITRD=y +# CONFIG_CDROM_PKTCDVD is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_ATA_OVER_ETH=m + +# +# ATA/ATAPI/MFM/RLL support +# +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Fusion MPT device support +# +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + +# +# I2O device support +# + +# +# Network device support +# +# CONFIG_NETDEVICES is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_TSDEV=y +CONFIG_INPUT_TSDEV_SCREEN_X=240 +CONFIG_INPUT_TSDEV_SCREEN_Y=320 +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_EVBUG=y + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_LKKBD is not set +CONFIG_KEYBOARD_LOCOMO=y +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not 
set +# CONFIG_INPUT_MISC is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +# CONFIG_SERIO_SERPORT is not set +# CONFIG_SERIO_LIBPS2 is not set +# CONFIG_SERIO_RAW is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_SA1100=y +CONFIG_SERIAL_SA1100_CONSOLE=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_NVRAM is not set +# CONFIG_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_RAW_DRIVER is not set + +# +# TPM devices +# + +# +# I2C support +# +CONFIG_I2C=m +# CONFIG_I2C_CHARDEV is not set + +# +# I2C Algorithms +# +CONFIG_I2C_ALGOBIT=m +# CONFIG_I2C_ALGOPCF is not set +# CONFIG_I2C_ALGOPCA is not set + +# +# I2C Hardware Bus support +# +# CONFIG_I2C_ELEKTOR is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_PCA_ISA is not set + +# +# Miscellaneous I2C Chip support +# +# CONFIG_SENSORS_DS1337 is not set +# CONFIG_SENSORS_DS1374 is not set +# CONFIG_SENSORS_EEPROM is not set +# CONFIG_SENSORS_PCF8574 is not set +# CONFIG_SENSORS_PCA9539 is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_SENSORS_RTC8564 is not set +# CONFIG_SENSORS_MAX6875 is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_I2C_DEBUG_CHIP is not set + +# +# Hardware Monitoring support +# +CONFIG_HWMON=y +# CONFIG_HWMON_VID is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ASB100 is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_FSCHER is not set +# CONFIG_SENSORS_FSCPOS is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Misc devices +# + +# +# Multimedia Capabilities Port drivers +# +# CONFIG_MCP_SA11X0 is not set + +# +# Multimedia devices +# +CONFIG_VIDEO_DEV=m + +# +# Video For Linux +# + +# +# Video Adapters +# +# CONFIG_VIDEO_PMS is not set +# CONFIG_VIDEO_CPIA is not set +# CONFIG_VIDEO_SAA5246A is not set +# CONFIG_VIDEO_SAA5249 is not set +# CONFIG_TUNER_3036 is not set +# CONFIG_VIDEO_OVCAMCHIP is not set + +# +# Radio Adapters +# +# CONFIG_RADIO_CADET is not set +# CONFIG_RADIO_RTRACK is not set +# CONFIG_RADIO_RTRACK2 is 
not set +# CONFIG_RADIO_AZTECH is not set +# CONFIG_RADIO_GEMTEK is not set +# CONFIG_RADIO_MAESTRO is not set +# CONFIG_RADIO_SF16FMI is not set +# CONFIG_RADIO_SF16FMR2 is not set +# CONFIG_RADIO_TERRATEC is not set +# CONFIG_RADIO_TRUST is not set +# CONFIG_RADIO_TYPHOON is not set +# CONFIG_RADIO_ZOLTRIX is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# Graphics support +# +CONFIG_FB=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SOFT_CURSOR=y +# CONFIG_FB_MACMODES is not set +CONFIG_FB_MODE_HELPERS=y +# CONFIG_FB_TILEBLITTING is not set +CONFIG_FB_SA1100=y +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +# CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FONTS=y +CONFIG_FONT_8x8=y +# CONFIG_FONT_8x16 is not set +# CONFIG_FONT_6x11 is not set +# CONFIG_FONT_7x14 is not set +# CONFIG_FONT_PEARL_8x8 is not set +# CONFIG_FONT_ACORN_8x8 is not set +# CONFIG_FONT_MINI_4x6 is not set +# CONFIG_FONT_SUN8x16 is not set +# CONFIG_FONT_SUN12x22 is not set +# CONFIG_FONT_10x18 is not set + +# +# Logo configuration +# +# CONFIG_LOGO is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# USB support +# +CONFIG_USB_ARCH_HAS_HCD=y +# CONFIG_USB_ARCH_HAS_OHCI is not set +# CONFIG_USB is not set + +# +# USB Gadget Support +# +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_NET2280 is not set +# CONFIG_USB_GADGET_PXA2XX is not set +# CONFIG_USB_GADGET_GOKU is not set +# CONFIG_USB_GADGET_LH7A40X is not set +# CONFIG_USB_GADGET_OMAP is not set +# CONFIG_USB_GADGET_DUMMY_HCD is not set +# CONFIG_USB_GADGET_DUALSPEED is not set + +# +# MMC/SD Card support +# +# CONFIG_MMC is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +# CONFIG_EXT2_FS_XIP is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_FS_POSIX_ACL=y +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +CONFIG_ROMFS_FS=y +CONFIG_INOTIFY=y +# CONFIG_QUOTA is not set +# CONFIG_DNOTIFY is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_HUGETLBFS is not set +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y +# CONFIG_RELAYFS_FS is not set + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS_FS is not set +CONFIG_JFFS2_FS=y +CONFIG_JFFS2_FS_DEBUG=0 +CONFIG_JFFS2_FS_WRITEBUFFER=y +# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set +CONFIG_JFFS2_ZLIB=y +CONFIG_JFFS2_RTIME=y +# CONFIG_JFFS2_RUBIN is not set +CONFIG_CRAMFS=y +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS 
is not set + +# +# Network File Systems +# +# CONFIG_NFS_FS is not set +# CONFIG_NFSD is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +# CONFIG_9P_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y + +# +# Native Language Support +# +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="cp437" +CONFIG_NLS_CODEPAGE_437=m +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=m +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=m + +# +# Profiling support +# +# CONFIG_PROFILING is not set + +# +# Kernel hacking +# +# CONFIG_PRINTK_TIME is not set +CONFIG_DEBUG_KERNEL=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_DETECT_SOFTLOCKUP=y +# CONFIG_SCHEDSTATS is not set +# CONFIG_DEBUG_SLAB is not set +CONFIG_DEBUG_PREEMPT=y +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_KOBJECT is not set +# CONFIG_DEBUG_BUGVERBOSE is not set +# CONFIG_DEBUG_INFO is not set +# CONFIG_DEBUG_FS is not set +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_USER is not set +# CONFIG_DEBUG_WAITQ is not set +CONFIG_DEBUG_ERRORS=y +# CONFIG_DEBUG_LL is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Hardware crypto devices +# + +# +# Library routines +# +# CONFIG_CRC_CCITT is not set +# CONFIG_CRC16 is not set +CONFIG_CRC32=y +# CONFIG_LIBCRC32C is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y diff --git a/arch/arm/configs/corgi_defconfig b/arch/arm/configs/corgi_defconfig new file mode 100644 index 000000000000..24987c89609a --- /dev/null +++ b/arch/arm/configs/corgi_defconfig @@ -0,0 +1,1523 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.14-rc3 +# Sun Oct 9 15:46:42 2005 +# +CONFIG_ARM=y +CONFIG_MMU=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_GENERIC_CALIBRATE_DELAY=y + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_LOCK_KERNEL=y +CONFIG_INIT_ENV_ARG_LIMIT=32 + +# +# General setup +# +CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_POSIX_MQUEUE is not set 
+CONFIG_BSD_PROCESS_ACCT=y +# CONFIG_BSD_PROCESS_ACCT_V3 is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_HOTPLUG=y +CONFIG_KOBJECT_UEVENT=y +# CONFIG_IKCONFIG is not set +CONFIG_INITRAMFS_SOURCE="" +CONFIG_EMBEDDED=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SHMEM=y +CONFIG_CC_ALIGN_FUNCTIONS=0 +CONFIG_CC_ALIGN_LABELS=0 +CONFIG_CC_ALIGN_LOOPS=0 +CONFIG_CC_ALIGN_JUMPS=0 +# CONFIG_TINY_SHMEM is not set +CONFIG_BASE_SMALL=0 + +# +# Loadable module support +# +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_OBSOLETE_MODPARM=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_KMOD=y + +# +# System Type +# +# CONFIG_ARCH_CLPS7500 is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CO285 is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_CAMELOT is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_IOP3XX is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_IXP2000 is not set +# CONFIG_ARCH_L7200 is not set +CONFIG_ARCH_PXA=y +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_IMX is not set +# CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_AAEC2000 is not set + +# +# Intel PXA2xx Implementations +# +# CONFIG_ARCH_LUBBOCK is not set +# CONFIG_MACH_MAINSTONE is not set +# CONFIG_ARCH_PXA_IDP is not set +CONFIG_PXA_SHARPSL=y +CONFIG_PXA_SHARPSL_25x=y +# CONFIG_PXA_SHARPSL_27x is not set +# CONFIG_MACH_POODLE is not set +CONFIG_MACH_CORGI=y +CONFIG_MACH_SHEPHERD=y +CONFIG_MACH_HUSKY=y +CONFIG_PXA25x=y +CONFIG_PXA_SHARP_C7xx=y + +# +# Processor Type +# +CONFIG_CPU_32=y +CONFIG_CPU_XSCALE=y +CONFIG_CPU_32v5=y +CONFIG_CPU_ABRT_EV5T=y +CONFIG_CPU_CACHE_VIVT=y +CONFIG_CPU_TLB_V4WBI=y + +# +# Processor Features +# +CONFIG_ARM_THUMB=y +CONFIG_XSCALE_PMU=y +CONFIG_SHARP_PARAM=y +CONFIG_SHARP_SCOOP=y + +# +# Bus support +# +CONFIG_ISA_DMA_API=y + +# +# PCCARD (PCMCIA/CardBus) support +# +CONFIG_PCCARD=y +# CONFIG_PCMCIA_DEBUG is not set +CONFIG_PCMCIA=y +CONFIG_PCMCIA_LOAD_CIS=y +CONFIG_PCMCIA_IOCTL=y + +# +# PC-card bridges +# +CONFIG_PCMCIA_PXA2XX=y + +# +# Kernel Features +# +CONFIG_PREEMPT=y +# CONFIG_NO_IDLE_HZ is not set +# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +# CONFIG_DISCONTIGMEM_MANUAL is not set +# CONFIG_SPARSEMEM_MANUAL is not set +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +# CONFIG_SPARSEMEM_STATIC is not set +CONFIG_ALIGNMENT_TRAP=y + +# +# Boot options +# +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_CMDLINE="console=ttyS0,115200n8 console=tty1 noinitrd root=/dev/mtdblock2 rootfstype=jffs2 debug" +# CONFIG_XIP_KERNEL is not set + +# +# Floating point emulation +# + +# +# At least one emulation must be selected +# +CONFIG_FPE_NWFPE=y +# CONFIG_FPE_NWFPE_XP is not set +# CONFIG_FPE_FASTFPE is not set + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_BINFMT_AOUT=m +CONFIG_BINFMT_MISC=m +# CONFIG_ARTHUR is not set + +# +# Power management options +# +CONFIG_PM=y +CONFIG_APM=y + +# +# Networking +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_MMAP=y 
+CONFIG_UNIX=y +CONFIG_XFRM=y +CONFIG_XFRM_USER=m +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +# CONFIG_IP_PNP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +CONFIG_SYN_COOKIES=y +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_TUNNEL is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_BIC=y + +# +# IP: Virtual Server Configuration +# +# CONFIG_IP_VS is not set +CONFIG_IPV6=m +# CONFIG_IPV6_PRIVACY is not set +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_INET6_TUNNEL=m +CONFIG_IPV6_TUNNEL=m +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +# CONFIG_NETFILTER_NETLINK is not set + +# +# IP: Netfilter Configuration +# +CONFIG_IP_NF_CONNTRACK=m +# CONFIG_IP_NF_CT_ACCT is not set +# CONFIG_IP_NF_CONNTRACK_MARK is not set +# CONFIG_IP_NF_CONNTRACK_EVENTS is not set +CONFIG_IP_NF_CT_PROTO_SCTP=m +CONFIG_IP_NF_FTP=m +CONFIG_IP_NF_IRC=m +# CONFIG_IP_NF_NETBIOS_NS is not set +CONFIG_IP_NF_TFTP=m +CONFIG_IP_NF_AMANDA=m +# CONFIG_IP_NF_PPTP is not set +CONFIG_IP_NF_QUEUE=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_LIMIT=m +CONFIG_IP_NF_MATCH_IPRANGE=m +CONFIG_IP_NF_MATCH_MAC=m +CONFIG_IP_NF_MATCH_PKTTYPE=m +CONFIG_IP_NF_MATCH_MARK=m +CONFIG_IP_NF_MATCH_MULTIPORT=m +CONFIG_IP_NF_MATCH_TOS=m +CONFIG_IP_NF_MATCH_RECENT=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_DSCP=m +CONFIG_IP_NF_MATCH_AH_ESP=m +CONFIG_IP_NF_MATCH_LENGTH=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_MATCH_TCPMSS=m +CONFIG_IP_NF_MATCH_HELPER=m +CONFIG_IP_NF_MATCH_STATE=m +CONFIG_IP_NF_MATCH_CONNTRACK=m +CONFIG_IP_NF_MATCH_OWNER=m +CONFIG_IP_NF_MATCH_ADDRTYPE=m +CONFIG_IP_NF_MATCH_REALM=m +CONFIG_IP_NF_MATCH_SCTP=m +# CONFIG_IP_NF_MATCH_DCCP is not set +CONFIG_IP_NF_MATCH_COMMENT=m +CONFIG_IP_NF_MATCH_HASHLIMIT=m +# CONFIG_IP_NF_MATCH_STRING is not set +CONFIG_IP_NF_FILTER=m +# CONFIG_IP_NF_TARGET_REJECT is not set +CONFIG_IP_NF_TARGET_LOG=m +CONFIG_IP_NF_TARGET_ULOG=m +CONFIG_IP_NF_TARGET_TCPMSS=m +# CONFIG_IP_NF_TARGET_NFQUEUE is not set +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_NAT_NEEDED=y +# CONFIG_IP_NF_TARGET_MASQUERADE is not set +# CONFIG_IP_NF_TARGET_REDIRECT is not set +# CONFIG_IP_NF_TARGET_NETMAP is not set +# CONFIG_IP_NF_TARGET_SAME is not set +# CONFIG_IP_NF_NAT_SNMP_BASIC is not set +CONFIG_IP_NF_NAT_IRC=m +CONFIG_IP_NF_NAT_FTP=m +CONFIG_IP_NF_NAT_TFTP=m +CONFIG_IP_NF_NAT_AMANDA=m +CONFIG_IP_NF_MANGLE=m +# CONFIG_IP_NF_TARGET_TOS is not set +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_DSCP is not set +# CONFIG_IP_NF_TARGET_MARK is not set +# CONFIG_IP_NF_TARGET_CLASSIFY is not set +# CONFIG_IP_NF_TARGET_TTL is not set +CONFIG_IP_NF_RAW=m +# CONFIG_IP_NF_TARGET_NOTRACK is not set +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m + +# +# IPv6: Netfilter Configuration (EXPERIMENTAL) +# +CONFIG_IP6_NF_QUEUE=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_LIMIT=m +CONFIG_IP6_NF_MATCH_MAC=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_MULTIPORT=m +CONFIG_IP6_NF_MATCH_OWNER=m +CONFIG_IP6_NF_MATCH_MARK=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_AHESP=m +CONFIG_IP6_NF_MATCH_LENGTH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_FILTER=m +# CONFIG_IP6_NF_TARGET_LOG is not set +# 
CONFIG_IP6_NF_TARGET_REJECT is not set +# CONFIG_IP6_NF_TARGET_NFQUEUE is not set +CONFIG_IP6_NF_MANGLE=m +# CONFIG_IP6_NF_TARGET_MARK is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_RAW=m + +# +# DCCP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_DCCP is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_SCHED is not set +CONFIG_NET_CLS_ROUTE=y + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +CONFIG_IRDA=m + +# +# IrDA protocols +# +CONFIG_IRLAN=m +CONFIG_IRNET=m +CONFIG_IRCOMM=m +# CONFIG_IRDA_ULTRA is not set + +# +# IrDA options +# +# CONFIG_IRDA_CACHE_LAST_LSAP is not set +# CONFIG_IRDA_FAST_RR is not set +# CONFIG_IRDA_DEBUG is not set + +# +# Infrared-port device drivers +# + +# +# SIR device drivers +# +# CONFIG_IRTTY_SIR is not set + +# +# Dongle support +# + +# +# Old SIR device drivers +# +# CONFIG_IRPORT_SIR is not set + +# +# Old Serial dongle support +# + +# +# FIR device drivers +# +# CONFIG_USB_IRDA is not set +# CONFIG_SIGMATEL_FIR is not set +# CONFIG_NSC_FIR is not set +# CONFIG_WINBOND_FIR is not set +# CONFIG_SMC_IRCC_FIR is not set +# CONFIG_ALI_FIR is not set +# CONFIG_VIA_FIR is not set +CONFIG_BT=m +CONFIG_BT_L2CAP=m +CONFIG_BT_SCO=m +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_HIDP=m + +# +# Bluetooth device drivers +# +CONFIG_BT_HCIUSB=m +# CONFIG_BT_HCIUSB_SCO is not set +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_BCSP_TXCRC=y +CONFIG_BT_HCIBCM203X=m +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIDTL1=m +CONFIG_BT_HCIBT3C=m +CONFIG_BT_HCIBLUECARD=m +CONFIG_BT_HCIBTUART=m +CONFIG_BT_HCIVHCI=m +CONFIG_IEEE80211=m +# CONFIG_IEEE80211_DEBUG is not set +CONFIG_IEEE80211_CRYPT_WEP=m +# CONFIG_IEEE80211_CRYPT_CCMP is not set +# CONFIG_IEEE80211_CRYPT_TKIP is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_DEBUG_DRIVER is not set + +# +# Memory Technology Devices (MTD) +# +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AFS_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_RAM is not set +CONFIG_MTD_ROM=y +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_SHARP_SL=y +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device 
drivers +# +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLKMTD is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set + +# +# NAND Flash Device Drivers +# +CONFIG_MTD_NAND=y +CONFIG_MTD_NAND_VERIFY_WRITE=y +# CONFIG_MTD_NAND_H1900 is not set +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND_DISKONCHIP is not set +CONFIG_MTD_NAND_SHARPSL=y +# CONFIG_MTD_NAND_NANDSIM is not set + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Plug and Play support +# + +# +# Block devices +# +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_UB is not set +# CONFIG_BLK_DEV_RAM is not set +CONFIG_BLK_DEV_RAM_COUNT=16 +# CONFIG_CDROM_PKTCDVD is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_ATA_OVER_ETH is not set + +# +# ATA/ATAPI/MFM/RLL support +# +CONFIG_IDE=y +CONFIG_BLK_DEV_IDE=y + +# +# Please see Documentation/ide.txt for help/info on IDE drives +# +# CONFIG_BLK_DEV_IDE_SATA is not set +CONFIG_BLK_DEV_IDEDISK=y +# CONFIG_IDEDISK_MULTI_MODE is not set +CONFIG_BLK_DEV_IDECS=y +# CONFIG_BLK_DEV_IDECD is not set +# CONFIG_BLK_DEV_IDETAPE is not set +# CONFIG_BLK_DEV_IDEFLOPPY is not set +# CONFIG_BLK_DEV_IDESCSI is not set +# CONFIG_IDE_TASK_IOCTL is not set + +# +# IDE chipset support/bugfixes +# +CONFIG_IDE_GENERIC=y +# CONFIG_IDE_ARM is not set +# CONFIG_BLK_DEV_IDEDMA is not set +# CONFIG_IDEDMA_AUTO is not set +# CONFIG_BLK_DEV_HD is not set + +# +# SCSI device support +# +# CONFIG_RAID_ATTRS is not set +CONFIG_SCSI=m +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +CONFIG_CHR_DEV_OSST=m +CONFIG_BLK_DEV_SR=m +# CONFIG_BLK_DEV_SR_VENDOR is not set +CONFIG_CHR_DEV_SG=m +# CONFIG_CHR_DEV_SCH is not set + +# +# Some SCSI devices (e.g. 
CD jukebox) support multiple LUNs +# +CONFIG_SCSI_MULTI_LUN=y +# CONFIG_SCSI_CONSTANTS is not set +# CONFIG_SCSI_LOGGING is not set + +# +# SCSI Transport Attributes +# +# CONFIG_SCSI_SPI_ATTRS is not set +# CONFIG_SCSI_FC_ATTRS is not set +# CONFIG_SCSI_ISCSI_ATTRS is not set +# CONFIG_SCSI_SAS_ATTRS is not set + +# +# SCSI low-level drivers +# +# CONFIG_SCSI_SATA is not set +# CONFIG_SCSI_DEBUG is not set + +# +# PCMCIA SCSI adapter support +# +# CONFIG_PCMCIA_AHA152X is not set +# CONFIG_PCMCIA_FDOMAIN is not set +# CONFIG_PCMCIA_NINJA_SCSI is not set +# CONFIG_PCMCIA_QLOGIC is not set +# CONFIG_PCMCIA_SYM53C500 is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Fusion MPT device support +# +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# + +# +# I2O device support +# + +# +# Network device support +# +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# PHY device support +# +# CONFIG_PHYLIB is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +CONFIG_MII=m +# CONFIG_SMC91X is not set +# CONFIG_DM9000 is not set + +# +# Ethernet (1000 Mbit) +# + +# +# Ethernet (10000 Mbit) +# + +# +# Token Ring devices +# + +# +# Wireless LAN (non-hamradio) +# +CONFIG_NET_RADIO=y + +# +# Obsolete Wireless cards support (pre-802.11) +# +# CONFIG_STRIP is not set +# CONFIG_PCMCIA_WAVELAN is not set +# CONFIG_PCMCIA_NETWAVE is not set + +# +# Wireless 802.11 Frequency Hopping cards support +# +# CONFIG_PCMCIA_RAYCS is not set + +# +# Wireless 802.11b ISA/PCI cards support +# +CONFIG_HERMES=m +# CONFIG_ATMEL is not set + +# +# Wireless 802.11b Pcmcia/Cardbus cards support +# +CONFIG_PCMCIA_HERMES=m +CONFIG_PCMCIA_SPECTRUM=m +# CONFIG_AIRO_CS is not set +# CONFIG_PCMCIA_WL3501 is not set +CONFIG_HOSTAP=m +CONFIG_HOSTAP_FIRMWARE=y +CONFIG_HOSTAP_CS=m +CONFIG_NET_WIRELESS=y + +# +# PCMCIA network device support +# +CONFIG_NET_PCMCIA=y +# CONFIG_PCMCIA_3C589 is not set +# CONFIG_PCMCIA_3C574 is not set +# CONFIG_PCMCIA_FMVJ18X is not set +CONFIG_PCMCIA_PCNET=m +# CONFIG_PCMCIA_NMCLAN is not set +# CONFIG_PCMCIA_SMC91C92 is not set +# CONFIG_PCMCIA_XIRC2PS is not set +# CONFIG_PCMCIA_AXNET is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +CONFIG_PPP=m +# CONFIG_PPP_MULTILINK is not set +# CONFIG_PPP_FILTER is not set +CONFIG_PPP_ASYNC=m +# CONFIG_PPP_SYNC_TTY is not set +# CONFIG_PPP_DEFLATE is not set +CONFIG_PPP_BSDCOMP=m +# CONFIG_PPPOE is not set +# CONFIG_SLIP is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_NEWTON is not set +CONFIG_KEYBOARD_CORGI=y +CONFIG_KEYBOARD_SPITZ=y +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_CORGI=y +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_MK712 is 
not set +CONFIG_INPUT_MISC=y +CONFIG_INPUT_UINPUT=m + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +CONFIG_SERIAL_8250=m +CONFIG_SERIAL_8250_CS=m +CONFIG_SERIAL_8250_NR_UARTS=4 +# CONFIG_SERIAL_8250_EXTENDED is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_PXA=y +CONFIG_SERIAL_PXA_CONSOLE=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_NVRAM is not set +# CONFIG_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set + +# +# Ftape, the floppy tape device driver +# + +# +# PCMCIA character devices +# +# CONFIG_SYNCLINK_CS is not set +# CONFIG_RAW_DRIVER is not set + +# +# TPM devices +# + +# +# I2C support +# +CONFIG_I2C=y +# CONFIG_I2C_CHARDEV is not set + +# +# I2C Algorithms +# +CONFIG_I2C_ALGOBIT=y +# CONFIG_I2C_ALGOPCF is not set +# CONFIG_I2C_ALGOPCA is not set + +# +# I2C Hardware Bus support +# +CONFIG_I2C_PXA=y +# CONFIG_I2C_PXA_SLAVE is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_PCA_ISA is not set + +# +# Miscellaneous I2C Chip support +# +# CONFIG_SENSORS_DS1337 is not set +# CONFIG_SENSORS_DS1374 is not set +# CONFIG_SENSORS_EEPROM is not set +# CONFIG_SENSORS_PCF8574 is not set +# CONFIG_SENSORS_PCA9539 is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_SENSORS_RTC8564 is not set +# CONFIG_SENSORS_MAX6875 is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_I2C_DEBUG_CHIP is not set + +# +# Hardware Monitoring support +# +CONFIG_HWMON=y +# CONFIG_HWMON_VID is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ASB100 is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_FSCHER is not set +# CONFIG_SENSORS_FSCPOS is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Misc devices +# + +# +# Multimedia Capabilities Port drivers +# + +# +# Multimedia devices +# +CONFIG_VIDEO_DEV=m + +# +# Video For Linux +# + +# +# Video Adapters +# +# CONFIG_VIDEO_CPIA is not set +# CONFIG_VIDEO_SAA5246A is not set +# CONFIG_VIDEO_SAA5249 is not set +# CONFIG_TUNER_3036 is not set +# CONFIG_VIDEO_OVCAMCHIP is not set + +# +# Radio Adapters +# +# CONFIG_RADIO_MAESTRO is not set + +# +# Digital Video 
Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# Graphics support +# +CONFIG_FB=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SOFT_CURSOR=y +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set +# CONFIG_FB_PXA is not set +CONFIG_FB_W100=y +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FONTS=y +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +# CONFIG_FONT_6x11 is not set +# CONFIG_FONT_7x14 is not set +# CONFIG_FONT_PEARL_8x8 is not set +# CONFIG_FONT_ACORN_8x8 is not set +# CONFIG_FONT_MINI_4x6 is not set +# CONFIG_FONT_SUN8x16 is not set +# CONFIG_FONT_SUN12x22 is not set +# CONFIG_FONT_10x18 is not set + +# +# Logo configuration +# +# CONFIG_LOGO is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_BACKLIGHT_DEVICE=y +# CONFIG_LCD_CLASS_DEVICE is not set +CONFIG_BACKLIGHT_CORGI=y + +# +# Sound +# +CONFIG_SOUND=y + +# +# Advanced Linux Sound Architecture +# +# CONFIG_SND is not set + +# +# Open Sound System +# +CONFIG_SOUND_PRIME=y +# CONFIG_SOUND_MSNDCLAS is not set +# CONFIG_SOUND_MSNDPIN is not set +CONFIG_SOUND_OSS=y +# CONFIG_SOUND_TRACEINIT is not set +# CONFIG_SOUND_DMAP is not set +# CONFIG_SOUND_AD1816 is not set +# CONFIG_SOUND_SGALAXY is not set +# CONFIG_SOUND_ADLIB is not set +# CONFIG_SOUND_ACI_MIXER is not set +# CONFIG_SOUND_CS4232 is not set +# CONFIG_SOUND_SSCAPE is not set +# CONFIG_SOUND_GUS is not set +# CONFIG_SOUND_VMIDI is not set +# CONFIG_SOUND_TRIX is not set +# CONFIG_SOUND_MSS is not set +# CONFIG_SOUND_MPU401 is not set +# CONFIG_SOUND_NM256 is not set +# CONFIG_SOUND_MAD16 is not set +# CONFIG_SOUND_PAS is not set +# CONFIG_SOUND_PSS is not set +# CONFIG_SOUND_SB is not set +# CONFIG_SOUND_AWE32_SYNTH is not set +# CONFIG_SOUND_WAVEFRONT is not set +# CONFIG_SOUND_MAUI is not set +# CONFIG_SOUND_YM3812 is not set +# CONFIG_SOUND_OPL3SA1 is not set +# CONFIG_SOUND_OPL3SA2 is not set +# CONFIG_SOUND_UART6850 is not set +# CONFIG_SOUND_AEDSP16 is not set +# CONFIG_SOUND_TVMIXER is not set +# CONFIG_SOUND_AD1980 is not set + +# +# USB support +# +CONFIG_USB_ARCH_HAS_HCD=y +# CONFIG_USB_ARCH_HAS_OHCI is not set +CONFIG_USB=m +# CONFIG_USB_DEBUG is not set + +# +# Miscellaneous USB options +# +CONFIG_USB_DEVICEFS=y +# CONFIG_USB_BANDWIDTH is not set +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_SUSPEND is not set +# CONFIG_USB_OTG is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_ISP116X_HCD is not set +CONFIG_USB_SL811_HCD=m +CONFIG_USB_SL811_CS=m + +# +# USB Device Class drivers +# +# CONFIG_OBSOLETE_OSS_USB_DRIVER is not set + +# +# USB Bluetooth TTY can only be used with disabled Bluetooth subsystem +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m + +# +# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +# CONFIG_USB_STORAGE_DATAFAB is not set +# CONFIG_USB_STORAGE_FREECOM is not set +# CONFIG_USB_STORAGE_ISD200 is not set +# CONFIG_USB_STORAGE_DPCM is not set +# CONFIG_USB_STORAGE_USBAT is not set +# CONFIG_USB_STORAGE_SDDR09 is not set +# CONFIG_USB_STORAGE_SDDR55 is not set +# CONFIG_USB_STORAGE_JUMPSHOT is not set +# CONFIG_USB_STORAGE_ONETOUCH is not set + +# +# USB Input Devices +# +CONFIG_USB_HID=m +CONFIG_USB_HIDINPUT=y +# 
CONFIG_HID_FF is not set +# CONFIG_USB_HIDDEV is not set + +# +# USB HID Boot Protocol drivers +# +CONFIG_USB_KBD=m +CONFIG_USB_MOUSE=m +CONFIG_USB_AIPTEK=m +CONFIG_USB_WACOM=m +# CONFIG_USB_ACECAD is not set +CONFIG_USB_KBTAB=m +CONFIG_USB_POWERMATE=m +CONFIG_USB_MTOUCH=m +# CONFIG_USB_ITMTOUCH is not set +CONFIG_USB_EGALAX=m +# CONFIG_USB_YEALINK is not set +CONFIG_USB_XPAD=m +CONFIG_USB_ATI_REMOTE=m +# CONFIG_USB_KEYSPAN_REMOTE is not set +# CONFIG_USB_APPLETOUCH is not set + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m + +# +# USB Multimedia devices +# +CONFIG_USB_DABUSB=m +CONFIG_USB_VICAM=m +CONFIG_USB_DSBR=m +CONFIG_USB_IBMCAM=m +CONFIG_USB_KONICAWC=m +CONFIG_USB_OV511=m +CONFIG_USB_SE401=m +CONFIG_USB_SN9C102=m +CONFIG_USB_STV680=m +# CONFIG_USB_PWC is not set + +# +# USB Network Adapters +# +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_CDCETHER=m +# CONFIG_USB_NET_GL620A is not set +CONFIG_USB_NET_NET1080=m +# CONFIG_USB_NET_PLUSB is not set +# CONFIG_USB_NET_RNDIS_HOST is not set +# CONFIG_USB_NET_CDC_SUBSET is not set +CONFIG_USB_NET_ZAURUS=m +# CONFIG_USB_ZD1201 is not set +CONFIG_USB_MON=y + +# +# USB port drivers +# + +# +# USB Serial Converter support +# +CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_GENERIC=y +# CONFIG_USB_SERIAL_AIRPRIME is not set +CONFIG_USB_SERIAL_BELKIN=m +# CONFIG_USB_SERIAL_WHITEHEAT is not set +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +# CONFIG_USB_SERIAL_CP2101 is not set +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +# CONFIG_USB_SERIAL_KEYSPAN_MPR is not set +# CONFIG_USB_SERIAL_KEYSPAN_USA28 is not set +# CONFIG_USB_SERIAL_KEYSPAN_USA28X is not set +# CONFIG_USB_SERIAL_KEYSPAN_USA28XA is not set +# CONFIG_USB_SERIAL_KEYSPAN_USA28XB is not set +# CONFIG_USB_SERIAL_KEYSPAN_USA19 is not set +# CONFIG_USB_SERIAL_KEYSPAN_USA18X is not set +# CONFIG_USB_SERIAL_KEYSPAN_USA19W is not set +# CONFIG_USB_SERIAL_KEYSPAN_USA19QW is not set +# CONFIG_USB_SERIAL_KEYSPAN_USA19QI is not set +# CONFIG_USB_SERIAL_KEYSPAN_USA49W is not set +# CONFIG_USB_SERIAL_KEYSPAN_USA49WLC is not set +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +CONFIG_USB_SERIAL_PL2303=m +# CONFIG_USB_SERIAL_HP4X is not set +CONFIG_USB_SERIAL_SAFE=m +# CONFIG_USB_SERIAL_SAFE_PADDED is not set +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_XIRCOM=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_EZUSB=y + +# +# USB Miscellaneous drivers +# +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_AUERSWALD=m +CONFIG_USB_RIO500=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +CONFIG_USB_LED=m +CONFIG_USB_CYTHERM=m +CONFIG_USB_PHIDGETKIT=m +CONFIG_USB_PHIDGETSERVO=m +CONFIG_USB_IDMOUSE=m +# CONFIG_USB_LD is not set +# CONFIG_USB_TEST is not set + +# +# USB DSL modem support +# + +# +# USB Gadget Support +# +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG_FILES is not set +CONFIG_USB_GADGET_SELECTED=y +# CONFIG_USB_GADGET_NET2280 is not set +CONFIG_USB_GADGET_PXA2XX=y +CONFIG_USB_PXA2XX=y +# CONFIG_USB_PXA2XX_SMALL is not set +# CONFIG_USB_GADGET_GOKU is not set +# CONFIG_USB_GADGET_LH7A40X is not set +# CONFIG_USB_GADGET_OMAP is not set +# 
CONFIG_USB_GADGET_DUMMY_HCD is not set +# CONFIG_USB_GADGET_DUALSPEED is not set +CONFIG_USB_ZERO=m +CONFIG_USB_ETH=m +CONFIG_USB_ETH_RNDIS=y +CONFIG_USB_GADGETFS=m +CONFIG_USB_FILE_STORAGE=m +# CONFIG_USB_FILE_STORAGE_TEST is not set +CONFIG_USB_G_SERIAL=m + +# +# MMC/SD Card support +# +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_BLOCK=y +CONFIG_MMC_PXA=y +# CONFIG_MMC_WBSD is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT2_FS_XIP is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_FS_POSIX_ACL is not set +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_INOTIFY=y +# CONFIG_QUOTA is not set +CONFIG_DNOTIFY=y +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y +# CONFIG_RELAYFS_FS is not set + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS_FS is not set +CONFIG_JFFS2_FS=y +CONFIG_JFFS2_FS_DEBUG=0 +CONFIG_JFFS2_FS_WRITEBUFFER=y +CONFIG_JFFS2_COMPRESSION_OPTIONS=y +CONFIG_JFFS2_ZLIB=y +CONFIG_JFFS2_RTIME=y +CONFIG_JFFS2_RUBIN=y +# CONFIG_JFFS2_CMODE_NONE is not set +CONFIG_JFFS2_CMODE_PRIORITY=y +# CONFIG_JFFS2_CMODE_SIZE is not set +CONFIG_CRAMFS=m +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=m +CONFIG_NFS_V3=y +# CONFIG_NFS_V3_ACL is not set +CONFIG_NFS_V4=y +# CONFIG_NFS_DIRECTIO is not set +# CONFIG_NFSD is not set +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_RPCSEC_GSS_KRB5=m +# CONFIG_RPCSEC_GSS_SPKM3 is not set +CONFIG_SMB_FS=m +CONFIG_SMB_NLS_DEFAULT=y +CONFIG_SMB_NLS_REMOTE="cp437" +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +# CONFIG_9P_FS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_EFI_PARTITION is not set + +# +# Native Language Support +# +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="cp437" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# 
CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=y + +# +# Profiling support +# +CONFIG_PROFILING=y +CONFIG_OPROFILE=m + +# +# Kernel hacking +# +# CONFIG_PRINTK_TIME is not set +CONFIG_DEBUG_KERNEL=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_DETECT_SOFTLOCKUP=y +# CONFIG_SCHEDSTATS is not set +# CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_INFO is not set +# CONFIG_DEBUG_FS is not set +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_USER is not set +# CONFIG_DEBUG_WAITQ is not set +CONFIG_DEBUG_ERRORS=y +CONFIG_DEBUG_LL=y +# CONFIG_DEBUG_ICEDCC is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +CONFIG_CRYPTO=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=m +CONFIG_CRYPTO_SHA1=m +CONFIG_CRYPTO_SHA256=m +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_WP512=m +# CONFIG_CRYPTO_TGR192 is not set +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_AES=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_DEFLATE=m +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_CRC32C=m +CONFIG_CRYPTO_TEST=m + +# +# Hardware crypto devices +# + +# +# Library routines +# +CONFIG_CRC_CCITT=y +# CONFIG_CRC16 is not set +CONFIG_CRC32=y +CONFIG_LIBCRC32C=m +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y diff --git a/arch/arm/configs/ixp4xx_defconfig b/arch/arm/configs/ixp4xx_defconfig index 94aafec5fb46..c279e41ed10e 100644 --- a/arch/arm/configs/ixp4xx_defconfig +++ b/arch/arm/configs/ixp4xx_defconfig @@ -1,14 +1,13 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.12-rc1-bk2 -# Sun Mar 27 22:53:40 2005 +# Linux kernel version: 2.6.14-rc1-git5 +# Tue Sep 20 17:26:28 2005 # CONFIG_ARM=y CONFIG_MMU=y CONFIG_UID16=y CONFIG_RWSEM_GENERIC_SPINLOCK=y CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_GENERIC_IOMAP=y # # Code maturity level options @@ -16,11 +15,13 @@ CONFIG_GENERIC_IOMAP=y CONFIG_EXPERIMENTAL=y CONFIG_CLEAN_COMPILE=y CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 # # General setup # CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y CONFIG_SWAP=y CONFIG_SYSVIPC=y # CONFIG_POSIX_MQUEUE is not set @@ -31,10 +32,13 @@ 
CONFIG_SYSCTL=y # CONFIG_HOTPLUG is not set CONFIG_KOBJECT_UEVENT=y # CONFIG_IKCONFIG is not set +CONFIG_INITRAMFS_SOURCE="" CONFIG_EMBEDDED=y CONFIG_KALLSYMS=y # CONFIG_KALLSYMS_ALL is not set # CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_PRINTK=y +CONFIG_BUG=y CONFIG_BASE_FULL=y CONFIG_FUTEX=y CONFIG_EPOLL=y @@ -81,6 +85,7 @@ CONFIG_ARCH_IXP4XX=y # CONFIG_ARCH_VERSATILE is not set # CONFIG_ARCH_IMX is not set # CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_AAEC2000 is not set CONFIG_ARCH_SUPPORTS_BIG_ENDIAN=y # @@ -90,15 +95,16 @@ CONFIG_ARCH_SUPPORTS_BIG_ENDIAN=y # # IXP4xx Platforms # -# CONFIG_ARCH_AVILA is not set +CONFIG_ARCH_AVILA=y CONFIG_ARCH_ADI_COYOTE=y CONFIG_ARCH_IXDP425=y -# CONFIG_MACH_IXDPG425 is not set -# CONFIG_MACH_IXDP465 is not set +CONFIG_MACH_IXDPG425=y +CONFIG_MACH_IXDP465=y CONFIG_ARCH_IXCDP1100=y CONFIG_ARCH_PRPMC1100=y CONFIG_ARCH_IXDP4XX=y -# CONFIG_MACH_GTWX5715 is not set +CONFIG_CPU_IXP46X=y +CONFIG_MACH_GTWX5715=y # # IXP4xx Options @@ -114,7 +120,6 @@ CONFIG_CPU_32v5=y CONFIG_CPU_ABRT_EV5T=y CONFIG_CPU_CACHE_VIVT=y CONFIG_CPU_TLB_V4WBI=y -CONFIG_CPU_MINICACHE=y # # Processor Features @@ -127,9 +132,10 @@ CONFIG_DMABOUNCE=y # # Bus support # +CONFIG_ISA_DMA_API=y CONFIG_PCI=y CONFIG_PCI_LEGACY_PROC=y -CONFIG_PCI_NAMES=y +# CONFIG_PCI_DEBUG is not set # # PCCARD (PCMCIA/CardBus) support @@ -140,6 +146,15 @@ CONFIG_PCI_NAMES=y # Kernel Features # # CONFIG_PREEMPT is not set +# CONFIG_NO_IDLE_HZ is not set +# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +# CONFIG_DISCONTIGMEM_MANUAL is not set +# CONFIG_SPARSEMEM_MANUAL is not set +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +# CONFIG_SPARSEMEM_STATIC is not set CONFIG_ALIGNMENT_TRAP=y # @@ -174,6 +189,241 @@ CONFIG_BINFMT_ELF=y CONFIG_PM=y CONFIG_APM=y +# +# Networking +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=m +CONFIG_PACKET_MMAP=y +CONFIG_UNIX=y +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_NET_KEY is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_ASK_IP_FIB_HASH=y +# CONFIG_IP_FIB_TRIE is not set +CONFIG_IP_FIB_HASH=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_FWMARK=y +CONFIG_IP_ROUTE_MULTIPATH=y +# CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +# CONFIG_ARPD is not set +CONFIG_SYN_COOKIES=y +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_BIC=y + +# +# IP: Virtual Server Configuration +# +CONFIG_IP_VS=m +CONFIG_IP_VS_DEBUG=y +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +# CONFIG_IP_VS_PROTO_TCP is not set +# CONFIG_IP_VS_PROTO_UDP is not set +# CONFIG_IP_VS_PROTO_ESP is not set +# CONFIG_IP_VS_PROTO_AH is not set + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +# CONFIG_IP_VS_SED is not set +# CONFIG_IP_VS_NQ is not set + +# +# IPVS application helper +# +# CONFIG_IPV6 is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_BRIDGE_NETFILTER=y +# 
CONFIG_NETFILTER_NETLINK is not set + +# +# IP: Netfilter Configuration +# +CONFIG_IP_NF_CONNTRACK=m +# CONFIG_IP_NF_CT_ACCT is not set +# CONFIG_IP_NF_CONNTRACK_MARK is not set +# CONFIG_IP_NF_CONNTRACK_EVENTS is not set +# CONFIG_IP_NF_CT_PROTO_SCTP is not set +CONFIG_IP_NF_FTP=m +CONFIG_IP_NF_IRC=m +# CONFIG_IP_NF_NETBIOS_NS is not set +# CONFIG_IP_NF_TFTP is not set +# CONFIG_IP_NF_AMANDA is not set +CONFIG_IP_NF_QUEUE=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_LIMIT=m +# CONFIG_IP_NF_MATCH_IPRANGE is not set +CONFIG_IP_NF_MATCH_MAC=m +# CONFIG_IP_NF_MATCH_PKTTYPE is not set +CONFIG_IP_NF_MATCH_MARK=m +CONFIG_IP_NF_MATCH_MULTIPORT=m +CONFIG_IP_NF_MATCH_TOS=m +# CONFIG_IP_NF_MATCH_RECENT is not set +# CONFIG_IP_NF_MATCH_ECN is not set +# CONFIG_IP_NF_MATCH_DSCP is not set +CONFIG_IP_NF_MATCH_AH_ESP=m +CONFIG_IP_NF_MATCH_LENGTH=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_MATCH_TCPMSS=m +# CONFIG_IP_NF_MATCH_HELPER is not set +CONFIG_IP_NF_MATCH_STATE=m +# CONFIG_IP_NF_MATCH_CONNTRACK is not set +CONFIG_IP_NF_MATCH_OWNER=m +# CONFIG_IP_NF_MATCH_PHYSDEV is not set +# CONFIG_IP_NF_MATCH_ADDRTYPE is not set +# CONFIG_IP_NF_MATCH_REALM is not set +# CONFIG_IP_NF_MATCH_SCTP is not set +# CONFIG_IP_NF_MATCH_DCCP is not set +# CONFIG_IP_NF_MATCH_COMMENT is not set +# CONFIG_IP_NF_MATCH_HASHLIMIT is not set +# CONFIG_IP_NF_MATCH_STRING is not set +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_LOG=m +CONFIG_IP_NF_TARGET_ULOG=m +CONFIG_IP_NF_TARGET_TCPMSS=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_REDIRECT=m +# CONFIG_IP_NF_TARGET_NETMAP is not set +# CONFIG_IP_NF_TARGET_SAME is not set +CONFIG_IP_NF_NAT_SNMP_BASIC=m +CONFIG_IP_NF_NAT_IRC=m +CONFIG_IP_NF_NAT_FTP=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_TOS=m +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_DSCP is not set +CONFIG_IP_NF_TARGET_MARK=m +# CONFIG_IP_NF_TARGET_CLASSIFY is not set +# CONFIG_IP_NF_TARGET_TTL is not set +# CONFIG_IP_NF_RAW is not set +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +# CONFIG_IP_NF_ARP_MANGLE is not set + +# +# Bridge: Netfilter Configuration +# +# CONFIG_BRIDGE_NF_EBTABLES is not set + +# +# DCCP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_DCCP is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +CONFIG_ATM=y +CONFIG_ATM_CLIP=y +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +# CONFIG_DECNET is not set +CONFIG_LLC=m +# CONFIG_LLC2 is not set +CONFIG_IPX=m +# CONFIG_IPX_INTERN is not set +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=y +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_IPDDP_DECAP=y +CONFIG_X25=m +CONFIG_LAPB=m +# CONFIG_NET_DIVERT is not set +CONFIG_ECONET=m +CONFIG_ECONET_AUNUDP=y +CONFIG_ECONET_NATIVE=y +CONFIG_WAN_ROUTER=m +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CLK_JIFFIES=y +# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set +# CONFIG_NET_SCH_CLK_CPU is not set +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +# CONFIG_NET_SCH_HFSC is not set +# CONFIG_NET_SCH_ATM is not set +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +# CONFIG_NET_SCH_NETEM is not set +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_QOS=y +CONFIG_NET_ESTIMATOR=y +CONFIG_NET_CLS=y +# CONFIG_NET_CLS_BASIC is not set +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m 
+CONFIG_NET_CLS_ROUTE=y +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +# CONFIG_CLS_U32_PERF is not set +# CONFIG_NET_CLS_IND is not set +# CONFIG_CLS_U32_MARK is not set +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +# CONFIG_NET_EMATCH is not set +# CONFIG_NET_CLS_ACT is not set +CONFIG_NET_CLS_POLICE=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +# CONFIG_IEEE80211 is not set + # # Device Drivers # @@ -244,6 +494,7 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y CONFIG_MTD_IXP4XX=y # CONFIG_MTD_EDB7312 is not set # CONFIG_MTD_PCI is not set +# CONFIG_MTD_PLATRAM is not set # # Self-contained MTD device drivers @@ -283,7 +534,6 @@ CONFIG_MTD_NAND_IDS=m # # Block devices # -# CONFIG_BLK_DEV_FD is not set # CONFIG_BLK_CPQ_DA is not set # CONFIG_BLK_CPQ_CISS_DA is not set # CONFIG_BLK_DEV_DAC960 is not set @@ -297,7 +547,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=8192 CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" # CONFIG_CDROM_PKTCDVD is not set # @@ -351,6 +600,7 @@ CONFIG_BLK_DEV_CMD64X=y CONFIG_BLK_DEV_HPT366=y # CONFIG_BLK_DEV_SC1200 is not set # CONFIG_BLK_DEV_PIIX is not set +# CONFIG_BLK_DEV_IT821X is not set # CONFIG_BLK_DEV_NS87415 is not set # CONFIG_BLK_DEV_PDC202XX_OLD is not set CONFIG_BLK_DEV_PDC202XX_NEW=y @@ -369,6 +619,7 @@ CONFIG_BLK_DEV_IDEDMA=y # # SCSI device support # +# CONFIG_RAID_ATTRS is not set # CONFIG_SCSI is not set # @@ -379,6 +630,7 @@ CONFIG_BLK_DEV_IDEDMA=y # # Fusion MPT device support # +# CONFIG_FUSION is not set # # IEEE 1394 (FireWire) support @@ -391,241 +643,24 @@ CONFIG_BLK_DEV_IDEDMA=y # CONFIG_I2O is not set # -# Networking support +# Network device support # -CONFIG_NET=y - -# -# Networking options -# -CONFIG_PACKET=m -CONFIG_PACKET_MMAP=y -CONFIG_NETLINK_DEV=m -CONFIG_UNIX=y -# CONFIG_NET_KEY is not set -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_FWMARK=y -CONFIG_IP_ROUTE_MULTIPATH=y -# CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -# CONFIG_IP_PNP_RARP is not set -# CONFIG_NET_IPIP is not set -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y -CONFIG_IP_MROUTE=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -# CONFIG_ARPD is not set -CONFIG_SYN_COOKIES=y -# CONFIG_INET_AH is not set -# CONFIG_INET_ESP is not set -# CONFIG_INET_IPCOMP is not set -CONFIG_INET_TUNNEL=m -# CONFIG_IP_TCPDIAG is not set -# CONFIG_IP_TCPDIAG_IPV6 is not set - -# -# IP: Virtual Server Configuration -# -CONFIG_IP_VS=m -CONFIG_IP_VS_DEBUG=y -CONFIG_IP_VS_TAB_BITS=12 - -# -# IPVS transport protocol load balancing support -# -# CONFIG_IP_VS_PROTO_TCP is not set -# CONFIG_IP_VS_PROTO_UDP is not set -# CONFIG_IP_VS_PROTO_ESP is not set -# CONFIG_IP_VS_PROTO_AH is not set - -# -# IPVS scheduler -# -CONFIG_IP_VS_RR=m -CONFIG_IP_VS_WRR=m -CONFIG_IP_VS_LC=m -CONFIG_IP_VS_WLC=m -CONFIG_IP_VS_LBLC=m -CONFIG_IP_VS_LBLCR=m -CONFIG_IP_VS_DH=m -CONFIG_IP_VS_SH=m -# CONFIG_IP_VS_SED is not set -# CONFIG_IP_VS_NQ is not set - -# -# IPVS application helper -# -# CONFIG_IPV6 is not set -CONFIG_NETFILTER=y -# CONFIG_NETFILTER_DEBUG is not set -CONFIG_BRIDGE_NETFILTER=y - -# -# IP: Netfilter Configuration -# -CONFIG_IP_NF_CONNTRACK=m -# CONFIG_IP_NF_CT_ACCT is not set -# CONFIG_IP_NF_CONNTRACK_MARK is not set -# CONFIG_IP_NF_CT_PROTO_SCTP is not set -CONFIG_IP_NF_FTP=m -CONFIG_IP_NF_IRC=m -# CONFIG_IP_NF_TFTP is not set -# 
CONFIG_IP_NF_AMANDA is not set -CONFIG_IP_NF_QUEUE=m -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_LIMIT=m -# CONFIG_IP_NF_MATCH_IPRANGE is not set -CONFIG_IP_NF_MATCH_MAC=m -# CONFIG_IP_NF_MATCH_PKTTYPE is not set -CONFIG_IP_NF_MATCH_MARK=m -CONFIG_IP_NF_MATCH_MULTIPORT=m -CONFIG_IP_NF_MATCH_TOS=m -# CONFIG_IP_NF_MATCH_RECENT is not set -# CONFIG_IP_NF_MATCH_ECN is not set -# CONFIG_IP_NF_MATCH_DSCP is not set -CONFIG_IP_NF_MATCH_AH_ESP=m -CONFIG_IP_NF_MATCH_LENGTH=m -CONFIG_IP_NF_MATCH_TTL=m -CONFIG_IP_NF_MATCH_TCPMSS=m -# CONFIG_IP_NF_MATCH_HELPER is not set -CONFIG_IP_NF_MATCH_STATE=m -# CONFIG_IP_NF_MATCH_CONNTRACK is not set -CONFIG_IP_NF_MATCH_OWNER=m -# CONFIG_IP_NF_MATCH_PHYSDEV is not set -# CONFIG_IP_NF_MATCH_ADDRTYPE is not set -# CONFIG_IP_NF_MATCH_REALM is not set -# CONFIG_IP_NF_MATCH_SCTP is not set -# CONFIG_IP_NF_MATCH_COMMENT is not set -# CONFIG_IP_NF_MATCH_HASHLIMIT is not set -CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_LOG=m -CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_IP_NF_TARGET_TCPMSS=m -CONFIG_IP_NF_NAT=m -CONFIG_IP_NF_NAT_NEEDED=y -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_REDIRECT=m -# CONFIG_IP_NF_TARGET_NETMAP is not set -# CONFIG_IP_NF_TARGET_SAME is not set -CONFIG_IP_NF_NAT_SNMP_BASIC=m -CONFIG_IP_NF_NAT_IRC=m -CONFIG_IP_NF_NAT_FTP=m -CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_TOS=m -# CONFIG_IP_NF_TARGET_ECN is not set -# CONFIG_IP_NF_TARGET_DSCP is not set -CONFIG_IP_NF_TARGET_MARK=m -# CONFIG_IP_NF_TARGET_CLASSIFY is not set -# CONFIG_IP_NF_RAW is not set -CONFIG_IP_NF_ARPTABLES=m -CONFIG_IP_NF_ARPFILTER=m -# CONFIG_IP_NF_ARP_MANGLE is not set - -# -# Bridge: Netfilter Configuration -# -# CONFIG_BRIDGE_NF_EBTABLES is not set -CONFIG_XFRM=y -# CONFIG_XFRM_USER is not set - -# -# SCTP Configuration (EXPERIMENTAL) -# -# CONFIG_IP_SCTP is not set -CONFIG_ATM=y -CONFIG_ATM_CLIP=y -# CONFIG_ATM_CLIP_NO_ICMP is not set -CONFIG_ATM_LANE=m -CONFIG_ATM_MPOA=m -CONFIG_ATM_BR2684=m -# CONFIG_ATM_BR2684_IPFILTER is not set -CONFIG_BRIDGE=m -CONFIG_VLAN_8021Q=m -# CONFIG_DECNET is not set -CONFIG_LLC=m -# CONFIG_LLC2 is not set -CONFIG_IPX=m -# CONFIG_IPX_INTERN is not set -CONFIG_ATALK=m -CONFIG_DEV_APPLETALK=y -CONFIG_IPDDP=m -CONFIG_IPDDP_ENCAP=y -CONFIG_IPDDP_DECAP=y -CONFIG_X25=m -CONFIG_LAPB=m -# CONFIG_NET_DIVERT is not set -CONFIG_ECONET=m -CONFIG_ECONET_AUNUDP=y -CONFIG_ECONET_NATIVE=y -CONFIG_WAN_ROUTER=m - -# -# QoS and/or fair queueing -# -CONFIG_NET_SCHED=y -CONFIG_NET_SCH_CLK_JIFFIES=y -# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set -# CONFIG_NET_SCH_CLK_CPU is not set -CONFIG_NET_SCH_CBQ=m -CONFIG_NET_SCH_HTB=m -# CONFIG_NET_SCH_HFSC is not set -# CONFIG_NET_SCH_ATM is not set -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_RED=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TEQL=m -CONFIG_NET_SCH_TBF=m -CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_DSMARK=m -# CONFIG_NET_SCH_NETEM is not set -CONFIG_NET_SCH_INGRESS=m -CONFIG_NET_QOS=y -CONFIG_NET_ESTIMATOR=y -CONFIG_NET_CLS=y -# CONFIG_NET_CLS_BASIC is not set -CONFIG_NET_CLS_TCINDEX=m -CONFIG_NET_CLS_ROUTE4=m -CONFIG_NET_CLS_ROUTE=y -CONFIG_NET_CLS_FW=m -CONFIG_NET_CLS_U32=m -# CONFIG_CLS_U32_PERF is not set -# CONFIG_NET_CLS_IND is not set -# CONFIG_CLS_U32_MARK is not set -CONFIG_NET_CLS_RSVP=m -CONFIG_NET_CLS_RSVP6=m -# CONFIG_NET_EMATCH is not set -# CONFIG_NET_CLS_ACT is not set -CONFIG_NET_CLS_POLICE=y - -# -# Network testing -# -CONFIG_NET_PKTGEN=m -# CONFIG_NETPOLL is not set -# CONFIG_NET_POLL_CONTROLLER is not set -# CONFIG_HAMRADIO is not set -# CONFIG_IRDA is not set -# CONFIG_BT is 
not set CONFIG_NETDEVICES=y CONFIG_DUMMY=y # CONFIG_BONDING is not set # CONFIG_EQUALIZER is not set # CONFIG_TUN is not set -# CONFIG_ETHERTAP is not set # # ARCnet devices # # CONFIG_ARCNET is not set +# +# PHY device support +# +# CONFIG_PHYLIB is not set + # # Ethernet (10 or 100Mbit) # @@ -635,6 +670,7 @@ CONFIG_MII=y # CONFIG_SUNGEM is not set # CONFIG_NET_VENDOR_3COM is not set # CONFIG_SMC91X is not set +# CONFIG_DM9000 is not set # # Tulip family network device support @@ -671,13 +707,17 @@ CONFIG_EEPRO100=y # CONFIG_HAMACHI is not set # CONFIG_YELLOWFIN is not set # CONFIG_R8169 is not set +# CONFIG_SIS190 is not set +# CONFIG_SKGE is not set # CONFIG_SK98LIN is not set # CONFIG_VIA_VELOCITY is not set # CONFIG_TIGON3 is not set +# CONFIG_BNX2 is not set # # Ethernet (10000 Mbit) # +# CONFIG_CHELSIO_T1 is not set # CONFIG_IXGB is not set # CONFIG_S2IO is not set @@ -702,6 +742,7 @@ CONFIG_NET_RADIO=y CONFIG_HERMES=y # CONFIG_PLX_HERMES is not set # CONFIG_TMD_HERMES is not set +# CONFIG_NORTEL_HERMES is not set CONFIG_PCI_HERMES=y # CONFIG_ATMEL is not set @@ -709,6 +750,7 @@ CONFIG_PCI_HERMES=y # Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support # # CONFIG_PRISM54 is not set +# CONFIG_HOSTAP is not set CONFIG_NET_WIRELESS=y # @@ -758,6 +800,8 @@ CONFIG_ATM_TCP=m # CONFIG_SLIP is not set # CONFIG_SHAPER is not set # CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set # # ISDN subsystem @@ -795,7 +839,6 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 # # CONFIG_SERIO is not set # CONFIG_GAMEPORT is not set -CONFIG_SOUND_GAMEPORT=y # # Character devices @@ -816,6 +859,7 @@ CONFIG_SERIAL_8250_NR_UARTS=2 # CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_SERIAL_JSM is not set CONFIG_UNIX98_PTYS=y CONFIG_LEGACY_PTYS=y CONFIG_LEGACY_PTY_COUNT=256 @@ -882,12 +926,11 @@ CONFIG_I2C_ALGOBIT=y # CONFIG_I2C_AMD8111 is not set # CONFIG_I2C_I801 is not set # CONFIG_I2C_I810 is not set +# CONFIG_I2C_PIIX4 is not set # CONFIG_I2C_IOP3XX is not set -# CONFIG_I2C_ISA is not set CONFIG_I2C_IXP4XX=y # CONFIG_I2C_NFORCE2 is not set # CONFIG_I2C_PARPORT_LIGHT is not set -# CONFIG_I2C_PIIX4 is not set # CONFIG_I2C_PROSAVAGE is not set # CONFIG_I2C_SAVAGE4 is not set # CONFIG_SCx200_ACB is not set @@ -901,14 +944,33 @@ CONFIG_I2C_IXP4XX=y # CONFIG_I2C_PCA_ISA is not set # -# Hardware Sensors Chip support +# Miscellaneous I2C Chip support # -CONFIG_I2C_SENSOR=y +# CONFIG_SENSORS_DS1337 is not set +# CONFIG_SENSORS_DS1374 is not set +CONFIG_SENSORS_EEPROM=y +# CONFIG_SENSORS_PCF8574 is not set +# CONFIG_SENSORS_PCA9539 is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_SENSORS_RTC8564 is not set +# CONFIG_SENSORS_MAX6875 is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_I2C_DEBUG_CHIP is not set + +# +# Hardware Monitoring support +# +CONFIG_HWMON=y +# CONFIG_HWMON_VID is not set # CONFIG_SENSORS_ADM1021 is not set # CONFIG_SENSORS_ADM1025 is not set # CONFIG_SENSORS_ADM1026 is not set # CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set # CONFIG_SENSORS_ASB100 is not set +# CONFIG_SENSORS_ATXP1 is not set # CONFIG_SENSORS_DS1621 is not set # CONFIG_SENSORS_FSCHER is not set # CONFIG_SENSORS_FSCPOS is not set @@ -924,32 +986,28 @@ CONFIG_I2C_SENSOR=y # CONFIG_SENSORS_LM85 is not set # CONFIG_SENSORS_LM87 is not set # CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set # CONFIG_SENSORS_MAX1619 is not set # CONFIG_SENSORS_PC87360 is not set -# 
CONFIG_SENSORS_SMSC47B397 is not set # CONFIG_SENSORS_SIS5595 is not set # CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47B397 is not set # CONFIG_SENSORS_VIA686A is not set # CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83792D is not set # CONFIG_SENSORS_W83L785TS is not set # CONFIG_SENSORS_W83627HF is not set - -# -# Other I2C Chip support -# -CONFIG_SENSORS_EEPROM=y -# CONFIG_SENSORS_PCF8574 is not set -# CONFIG_SENSORS_PCF8591 is not set -# CONFIG_SENSORS_RTC8564 is not set -# CONFIG_I2C_DEBUG_CORE is not set -# CONFIG_I2C_DEBUG_ALGO is not set -# CONFIG_I2C_DEBUG_BUS is not set -# CONFIG_I2C_DEBUG_CHIP is not set +# CONFIG_SENSORS_W83627EHF is not set +# CONFIG_HWMON_DEBUG_CHIP is not set # # Misc devices # +# +# Multimedia Capabilities Port drivers +# + # # Multimedia devices # @@ -994,6 +1052,7 @@ CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y # CONFIG_EXT2_FS_SECURITY is not set +# CONFIG_EXT2_FS_XIP is not set CONFIG_EXT3_FS=y CONFIG_EXT3_FS_XATTR=y CONFIG_EXT3_FS_POSIX_ACL=y @@ -1004,17 +1063,15 @@ CONFIG_FS_MBCACHE=y # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set CONFIG_FS_POSIX_ACL=y - -# -# XFS support -# # CONFIG_XFS_FS is not set # CONFIG_MINIX_FS is not set # CONFIG_ROMFS_FS is not set +CONFIG_INOTIFY=y # CONFIG_QUOTA is not set CONFIG_DNOTIFY=y # CONFIG_AUTOFS_FS is not set # CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set # # CD-ROM/DVD Filesystems @@ -1034,12 +1091,10 @@ CONFIG_DNOTIFY=y # CONFIG_PROC_FS=y CONFIG_SYSFS=y -# CONFIG_DEVFS_FS is not set -# CONFIG_DEVPTS_FS_XATTR is not set CONFIG_TMPFS=y -# CONFIG_TMPFS_XATTR is not set # CONFIG_HUGETLB_PAGE is not set CONFIG_RAMFS=y +# CONFIG_RELAYFS_FS is not set # # Miscellaneous filesystems @@ -1054,8 +1109,7 @@ CONFIG_RAMFS=y # CONFIG_JFFS_FS is not set CONFIG_JFFS2_FS=y CONFIG_JFFS2_FS_DEBUG=0 -# CONFIG_JFFS2_FS_NAND is not set -# CONFIG_JFFS2_FS_NOR_ECC is not set +CONFIG_JFFS2_FS_WRITEBUFFER=y # CONFIG_JFFS2_COMPRESSION_OPTIONS is not set CONFIG_JFFS2_ZLIB=y CONFIG_JFFS2_RTIME=y @@ -1072,12 +1126,14 @@ CONFIG_JFFS2_RTIME=y # CONFIG_NFS_FS=y CONFIG_NFS_V3=y +# CONFIG_NFS_V3_ACL is not set # CONFIG_NFS_V4 is not set # CONFIG_NFS_DIRECTIO is not set # CONFIG_NFSD is not set CONFIG_ROOT_NFS=y CONFIG_LOCKD=y CONFIG_LOCKD_V4=y +CONFIG_NFS_COMMON=y CONFIG_SUNRPC=y # CONFIG_RPCSEC_GSS_KRB5 is not set # CONFIG_RPCSEC_GSS_SPKM3 is not set @@ -1086,6 +1142,7 @@ CONFIG_SUNRPC=y # CONFIG_NCP_FS is not set # CONFIG_CODA_FS is not set # CONFIG_AFS_FS is not set +# CONFIG_9P_FS is not set # # Partition Types @@ -1124,6 +1181,7 @@ CONFIG_MSDOS_PARTITION=y CONFIG_DEBUG_KERNEL=y CONFIG_MAGIC_SYSRQ=y CONFIG_LOG_BUF_SHIFT=14 +CONFIG_DETECT_SOFTLOCKUP=y # CONFIG_SCHEDSTATS is not set # CONFIG_DEBUG_SLAB is not set # CONFIG_DEBUG_SPINLOCK is not set @@ -1158,6 +1216,7 @@ CONFIG_DEBUG_LL=y # Library routines # # CONFIG_CRC_CCITT is not set +# CONFIG_CRC16 is not set CONFIG_CRC32=y # CONFIG_LIBCRC32C is not set CONFIG_ZLIB_INFLATE=y diff --git a/arch/arm/configs/poodle_defconfig b/arch/arm/configs/poodle_defconfig new file mode 100644 index 000000000000..72822907759f --- /dev/null +++ b/arch/arm/configs/poodle_defconfig @@ -0,0 +1,1015 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.14-rc3 +# Sun Oct 9 17:04:29 2005 +# +CONFIG_ARM=y +CONFIG_MMU=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_GENERIC_CALIBRATE_DELAY=y + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_BROKEN_ON_SMP=y 
+CONFIG_LOCK_KERNEL=y +CONFIG_INIT_ENV_ARG_LIMIT=32 + +# +# General setup +# +CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_POSIX_MQUEUE is not set +CONFIG_BSD_PROCESS_ACCT=y +# CONFIG_BSD_PROCESS_ACCT_V3 is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_HOTPLUG=y +CONFIG_KOBJECT_UEVENT=y +# CONFIG_IKCONFIG is not set +CONFIG_INITRAMFS_SOURCE="" +CONFIG_EMBEDDED=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SHMEM=y +CONFIG_CC_ALIGN_FUNCTIONS=0 +CONFIG_CC_ALIGN_LABELS=0 +CONFIG_CC_ALIGN_LOOPS=0 +CONFIG_CC_ALIGN_JUMPS=0 +# CONFIG_TINY_SHMEM is not set +CONFIG_BASE_SMALL=0 + +# +# Loadable module support +# +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_OBSOLETE_MODPARM=y +CONFIG_MODVERSIONS=y +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_KMOD=y + +# +# System Type +# +# CONFIG_ARCH_CLPS7500 is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CO285 is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_CAMELOT is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_IOP3XX is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_IXP2000 is not set +# CONFIG_ARCH_L7200 is not set +CONFIG_ARCH_PXA=y +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_IMX is not set +# CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_AAEC2000 is not set + +# +# Intel PXA2xx Implementations +# +# CONFIG_ARCH_LUBBOCK is not set +# CONFIG_MACH_MAINSTONE is not set +# CONFIG_ARCH_PXA_IDP is not set +CONFIG_PXA_SHARPSL=y +CONFIG_PXA_SHARPSL_25x=y +# CONFIG_PXA_SHARPSL_27x is not set +CONFIG_MACH_POODLE=y +# CONFIG_MACH_CORGI is not set +# CONFIG_MACH_SHEPHERD is not set +# CONFIG_MACH_HUSKY is not set +CONFIG_PXA25x=y + +# +# Processor Type +# +CONFIG_CPU_32=y +CONFIG_CPU_XSCALE=y +CONFIG_CPU_32v5=y +CONFIG_CPU_ABRT_EV5T=y +CONFIG_CPU_CACHE_VIVT=y +CONFIG_CPU_TLB_V4WBI=y + +# +# Processor Features +# +CONFIG_ARM_THUMB=y +CONFIG_XSCALE_PMU=y +CONFIG_SHARP_LOCOMO=y +CONFIG_SHARP_PARAM=y +CONFIG_SHARP_SCOOP=y + +# +# Bus support +# +CONFIG_ISA_DMA_API=y + +# +# PCCARD (PCMCIA/CardBus) support +# +CONFIG_PCCARD=y +# CONFIG_PCMCIA_DEBUG is not set +CONFIG_PCMCIA=y +CONFIG_PCMCIA_LOAD_CIS=y +CONFIG_PCMCIA_IOCTL=y + +# +# PC-card bridges +# +CONFIG_PCMCIA_PXA2XX=y + +# +# Kernel Features +# +CONFIG_PREEMPT=y +# CONFIG_NO_IDLE_HZ is not set +# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +# CONFIG_DISCONTIGMEM_MANUAL is not set +# CONFIG_SPARSEMEM_MANUAL is not set +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +# CONFIG_SPARSEMEM_STATIC is not set +CONFIG_ALIGNMENT_TRAP=y + +# +# Boot options +# +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_CMDLINE="console=ttyS0,115200n8 console=tty1 noinitrd root=/dev/mtdblock2 rootfstype=jffs2 debug" +# CONFIG_XIP_KERNEL is not set + +# +# Floating point emulation +# + +# +# At least one emulation must be selected +# +CONFIG_FPE_NWFPE=y +# CONFIG_FPE_NWFPE_XP is not set +# CONFIG_FPE_FASTFPE is not set + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_BINFMT_AOUT=m +CONFIG_BINFMT_MISC=m 
+# CONFIG_ARTHUR is not set + +# +# Power management options +# +CONFIG_PM=y +CONFIG_APM=y + +# +# Networking +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_MMAP=y +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +# CONFIG_IP_PNP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +CONFIG_SYN_COOKIES=y +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_TUNNEL is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_BIC=y +# CONFIG_IPV6 is not set +# CONFIG_NETFILTER is not set + +# +# DCCP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_DCCP is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_SCHED is not set +# CONFIG_NET_CLS_ROUTE is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +# CONFIG_IEEE80211 is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_DEBUG_DRIVER is not set + +# +# Memory Technology Devices (MTD) +# +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AFS_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_SHARP_SL=y +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLKMTD is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set + +# +# NAND Flash Device Drivers +# +CONFIG_MTD_NAND=y +CONFIG_MTD_NAND_VERIFY_WRITE=y +# CONFIG_MTD_NAND_H1900 is not set +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND_DISKONCHIP is not set +CONFIG_MTD_NAND_SHARPSL=y +# CONFIG_MTD_NAND_NANDSIM is not set + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Plug and Play support +# + +# +# Block devices +# +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +# CONFIG_BLK_DEV_CRYPTOLOOP is not 
set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set +CONFIG_BLK_DEV_RAM_COUNT=16 +# CONFIG_CDROM_PKTCDVD is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_ATA_OVER_ETH is not set + +# +# ATA/ATAPI/MFM/RLL support +# +CONFIG_IDE=y +CONFIG_BLK_DEV_IDE=y + +# +# Please see Documentation/ide.txt for help/info on IDE drives +# +# CONFIG_BLK_DEV_IDE_SATA is not set +CONFIG_BLK_DEV_IDEDISK=y +# CONFIG_IDEDISK_MULTI_MODE is not set +CONFIG_BLK_DEV_IDECS=y +# CONFIG_BLK_DEV_IDECD is not set +# CONFIG_BLK_DEV_IDETAPE is not set +# CONFIG_BLK_DEV_IDEFLOPPY is not set +# CONFIG_IDE_TASK_IOCTL is not set + +# +# IDE chipset support/bugfixes +# +CONFIG_IDE_GENERIC=y +# CONFIG_IDE_ARM is not set +# CONFIG_BLK_DEV_IDEDMA is not set +# CONFIG_IDEDMA_AUTO is not set +# CONFIG_BLK_DEV_HD is not set + +# +# SCSI device support +# +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Fusion MPT device support +# +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# + +# +# I2O device support +# + +# +# Network device support +# +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# PHY device support +# +# CONFIG_PHYLIB is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +# CONFIG_MII is not set +# CONFIG_SMC91X is not set +# CONFIG_DM9000 is not set + +# +# Ethernet (1000 Mbit) +# + +# +# Ethernet (10000 Mbit) +# + +# +# Token Ring devices +# + +# +# Wireless LAN (non-hamradio) +# +CONFIG_NET_RADIO=y + +# +# Obsolete Wireless cards support (pre-802.11) +# +# CONFIG_STRIP is not set +# CONFIG_PCMCIA_WAVELAN is not set +# CONFIG_PCMCIA_NETWAVE is not set + +# +# Wireless 802.11 Frequency Hopping cards support +# +# CONFIG_PCMCIA_RAYCS is not set + +# +# Wireless 802.11b ISA/PCI cards support +# +# CONFIG_HERMES is not set +# CONFIG_ATMEL is not set + +# +# Wireless 802.11b Pcmcia/Cardbus cards support +# +# CONFIG_AIRO_CS is not set +# CONFIG_PCMCIA_WL3501 is not set +# CONFIG_HOSTAP is not set +CONFIG_NET_WIRELESS=y + +# +# PCMCIA network device support +# +CONFIG_NET_PCMCIA=y +# CONFIG_PCMCIA_3C589 is not set +# CONFIG_PCMCIA_3C574 is not set +# CONFIG_PCMCIA_FMVJ18X is not set +CONFIG_PCMCIA_PCNET=y +# CONFIG_PCMCIA_NMCLAN is not set +# CONFIG_PCMCIA_SMC91C92 is not set +# CONFIG_PCMCIA_XIRC2PS is not set +# CONFIG_PCMCIA_AXNET is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +CONFIG_PPP=m +# CONFIG_PPP_MULTILINK is not set +# CONFIG_PPP_FILTER is not set +CONFIG_PPP_ASYNC=m +# CONFIG_PPP_SYNC_TTY is not set +# CONFIG_PPP_DEFLATE is not set +CONFIG_PPP_BSDCOMP=m +# CONFIG_PPPOE is not set +# CONFIG_SLIP is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_TSDEV=y +CONFIG_INPUT_TSDEV_SCREEN_X=240 +CONFIG_INPUT_TSDEV_SCREEN_Y=320 +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_EVBUG=y + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_LKKBD is not set +CONFIG_KEYBOARD_LOCOMO=y +# CONFIG_KEYBOARD_XTKBD is not set 
+# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_CORGI is not set +CONFIG_KEYBOARD_SPITZ=y +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_PXA=y +CONFIG_SERIAL_PXA_CONSOLE=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_NVRAM is not set +# CONFIG_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set + +# +# Ftape, the floppy tape device driver +# + +# +# PCMCIA character devices +# +# CONFIG_SYNCLINK_CS is not set +# CONFIG_RAW_DRIVER is not set + +# +# TPM devices +# + +# +# I2C support +# +CONFIG_I2C=y +# CONFIG_I2C_CHARDEV is not set + +# +# I2C Algorithms +# +CONFIG_I2C_ALGOBIT=y +# CONFIG_I2C_ALGOPCF is not set +# CONFIG_I2C_ALGOPCA is not set + +# +# I2C Hardware Bus support +# +# CONFIG_I2C_PXA is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_STUB is not set +# CONFIG_I2C_PCA_ISA is not set + +# +# Miscellaneous I2C Chip support +# +# CONFIG_SENSORS_DS1337 is not set +# CONFIG_SENSORS_DS1374 is not set +# CONFIG_SENSORS_EEPROM is not set +# CONFIG_SENSORS_PCF8574 is not set +# CONFIG_SENSORS_PCA9539 is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_SENSORS_RTC8564 is not set +# CONFIG_SENSORS_MAX6875 is not set +CONFIG_I2C_DEBUG_CORE=y +CONFIG_I2C_DEBUG_ALGO=y +CONFIG_I2C_DEBUG_BUS=y +# CONFIG_I2C_DEBUG_CHIP is not set + +# +# Hardware Monitoring support +# +CONFIG_HWMON=y +# CONFIG_HWMON_VID is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ASB100 is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_FSCHER is not set +# CONFIG_SENSORS_FSCPOS is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Misc devices +# + +# +# Multimedia Capabilities Port drivers +# + +# +# Multimedia devices +# +CONFIG_VIDEO_DEV=m + +# +# Video For Linux +# + +# +# Video Adapters +# +# CONFIG_VIDEO_CPIA is not set +# CONFIG_VIDEO_SAA5246A is not set +# CONFIG_VIDEO_SAA5249 is not set +# CONFIG_TUNER_3036 is not set +# CONFIG_VIDEO_OVCAMCHIP is not set + +# +# Radio Adapters +# +# 
CONFIG_RADIO_MAESTRO is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# Graphics support +# +CONFIG_FB=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SOFT_CURSOR=y +# CONFIG_FB_MACMODES is not set +CONFIG_FB_MODE_HELPERS=y +# CONFIG_FB_TILEBLITTING is not set +CONFIG_FB_PXA=y +# CONFIG_FB_W100 is not set +# CONFIG_FB_PXA_PARAMETERS is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FONTS=y +CONFIG_FONT_8x8=y +# CONFIG_FONT_8x16 is not set +# CONFIG_FONT_6x11 is not set +# CONFIG_FONT_7x14 is not set +# CONFIG_FONT_PEARL_8x8 is not set +# CONFIG_FONT_ACORN_8x8 is not set +# CONFIG_FONT_MINI_4x6 is not set +# CONFIG_FONT_SUN8x16 is not set +# CONFIG_FONT_SUN12x22 is not set +# CONFIG_FONT_10x18 is not set + +# +# Logo configuration +# +# CONFIG_LOGO is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# USB support +# +CONFIG_USB_ARCH_HAS_HCD=y +# CONFIG_USB_ARCH_HAS_OHCI is not set +# CONFIG_USB is not set + +# +# USB Gadget Support +# +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG_FILES is not set +CONFIG_USB_GADGET_SELECTED=y +# CONFIG_USB_GADGET_NET2280 is not set +CONFIG_USB_GADGET_PXA2XX=y +CONFIG_USB_PXA2XX=y +# CONFIG_USB_PXA2XX_SMALL is not set +# CONFIG_USB_GADGET_GOKU is not set +# CONFIG_USB_GADGET_LH7A40X is not set +# CONFIG_USB_GADGET_OMAP is not set +# CONFIG_USB_GADGET_DUMMY_HCD is not set +# CONFIG_USB_GADGET_DUALSPEED is not set +# CONFIG_USB_ZERO is not set +CONFIG_USB_ETH=y +CONFIG_USB_ETH_RNDIS=y +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FILE_STORAGE is not set +# CONFIG_USB_G_SERIAL is not set + +# +# MMC/SD Card support +# +CONFIG_MMC=y +CONFIG_MMC_DEBUG=y +CONFIG_MMC_BLOCK=y +CONFIG_MMC_PXA=y +# CONFIG_MMC_WBSD is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +# CONFIG_EXT2_FS_XIP is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_FS_POSIX_ACL=y +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_INOTIFY=y +# CONFIG_QUOTA is not set +CONFIG_DNOTIFY=y +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y +# CONFIG_RELAYFS_FS is not set + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS_FS is not set +CONFIG_JFFS2_FS=y +CONFIG_JFFS2_FS_DEBUG=0 +CONFIG_JFFS2_FS_WRITEBUFFER=y +CONFIG_JFFS2_COMPRESSION_OPTIONS=y +CONFIG_JFFS2_ZLIB=y +CONFIG_JFFS2_RTIME=y +CONFIG_JFFS2_RUBIN=y +# CONFIG_JFFS2_CMODE_NONE is not set +CONFIG_JFFS2_CMODE_PRIORITY=y +# CONFIG_JFFS2_CMODE_SIZE is not set +CONFIG_CRAMFS=m +# 
CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +# CONFIG_NFS_FS is not set +# CONFIG_NFSD is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +# CONFIG_9P_FS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_EFI_PARTITION is not set + +# +# Native Language Support +# +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="cp437" +CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=y + +# +# Profiling support +# +# CONFIG_PROFILING is not set + +# +# Kernel hacking +# +# CONFIG_PRINTK_TIME is not set +CONFIG_DEBUG_KERNEL=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_DETECT_SOFTLOCKUP=y +# CONFIG_SCHEDSTATS is not set +# CONFIG_DEBUG_SLAB is not set +CONFIG_DEBUG_PREEMPT=y +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_KOBJECT is not set +# CONFIG_DEBUG_BUGVERBOSE is not set +# CONFIG_DEBUG_INFO is not set +# CONFIG_DEBUG_FS is not set +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_USER is not set +# CONFIG_DEBUG_WAITQ is not set +CONFIG_DEBUG_ERRORS=y +# CONFIG_DEBUG_LL is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Hardware crypto devices +# + +# +# Library routines +# +CONFIG_CRC_CCITT=y +# CONFIG_CRC16 is not set +CONFIG_CRC32=y +# CONFIG_LIBCRC32C is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig new file mode 100644 index 
000000000000..900e04f8e38c --- /dev/null +++ b/arch/arm/configs/spitz_defconfig @@ -0,0 +1,1401 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.14-rc3 +# Sun Oct 9 17:11:19 2005 +# +CONFIG_ARM=y +CONFIG_MMU=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_GENERIC_CALIBRATE_DELAY=y + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_LOCK_KERNEL=y +CONFIG_INIT_ENV_ARG_LIMIT=32 + +# +# General setup +# +CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_POSIX_MQUEUE is not set +CONFIG_BSD_PROCESS_ACCT=y +# CONFIG_BSD_PROCESS_ACCT_V3 is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_HOTPLUG=y +CONFIG_KOBJECT_UEVENT=y +# CONFIG_IKCONFIG is not set +CONFIG_INITRAMFS_SOURCE="" +CONFIG_EMBEDDED=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SHMEM=y +CONFIG_CC_ALIGN_FUNCTIONS=0 +CONFIG_CC_ALIGN_LABELS=0 +CONFIG_CC_ALIGN_LOOPS=0 +CONFIG_CC_ALIGN_JUMPS=0 +# CONFIG_TINY_SHMEM is not set +CONFIG_BASE_SMALL=0 + +# +# Loadable module support +# +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_OBSOLETE_MODPARM=y +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_KMOD=y + +# +# System Type +# +# CONFIG_ARCH_CLPS7500 is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CO285 is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_CAMELOT is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_IOP3XX is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_IXP2000 is not set +# CONFIG_ARCH_L7200 is not set +CONFIG_ARCH_PXA=y +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_IMX is not set +# CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_AAEC2000 is not set + +# +# Intel PXA2xx Implementations +# +# CONFIG_ARCH_LUBBOCK is not set +# CONFIG_MACH_MAINSTONE is not set +# CONFIG_ARCH_PXA_IDP is not set +CONFIG_PXA_SHARPSL=y +# CONFIG_PXA_SHARPSL_25x is not set +CONFIG_PXA_SHARPSL_27x=y +CONFIG_MACH_SPITZ=y +CONFIG_MACH_BORZOI=y +CONFIG_PXA27x=y +CONFIG_PXA_SHARP_Cxx00=y + +# +# Processor Type +# +CONFIG_CPU_32=y +CONFIG_CPU_XSCALE=y +CONFIG_CPU_32v5=y +CONFIG_CPU_ABRT_EV5T=y +CONFIG_CPU_CACHE_VIVT=y +CONFIG_CPU_TLB_V4WBI=y + +# +# Processor Features +# +CONFIG_ARM_THUMB=y +CONFIG_XSCALE_PMU=y +CONFIG_SHARP_PARAM=y +CONFIG_SHARP_SCOOP=y + +# +# Bus support +# +CONFIG_ISA_DMA_API=y + +# +# PCCARD (PCMCIA/CardBus) support +# +CONFIG_PCCARD=y +# CONFIG_PCMCIA_DEBUG is not set +CONFIG_PCMCIA=y +CONFIG_PCMCIA_LOAD_CIS=y +CONFIG_PCMCIA_IOCTL=y + +# +# PC-card bridges +# +CONFIG_PCMCIA_PXA2XX=y + +# +# Kernel Features +# +CONFIG_PREEMPT=y +# CONFIG_NO_IDLE_HZ is not set +# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +# CONFIG_DISCONTIGMEM_MANUAL is not set +# CONFIG_SPARSEMEM_MANUAL is not set +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +# CONFIG_SPARSEMEM_STATIC is not set +CONFIG_ALIGNMENT_TRAP=y + +# +# Boot options +# +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 
+CONFIG_CMDLINE="console=ttyS0,115200n8 console=tty1 noinitrd root=/dev/mtdblock2 rootfstype=jffs2 debug" +# CONFIG_XIP_KERNEL is not set + +# +# Floating point emulation +# + +# +# At least one emulation must be selected +# +CONFIG_FPE_NWFPE=y +# CONFIG_FPE_NWFPE_XP is not set +# CONFIG_FPE_FASTFPE is not set + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_BINFMT_AOUT=m +CONFIG_BINFMT_MISC=m +# CONFIG_ARTHUR is not set + +# +# Power management options +# +CONFIG_PM=y +CONFIG_APM=y + +# +# Networking +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_MMAP=y +CONFIG_UNIX=y +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +# CONFIG_IP_PNP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +CONFIG_SYN_COOKIES=y +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_TUNNEL is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_BIC=y + +# +# IP: Virtual Server Configuration +# +# CONFIG_IP_VS is not set +CONFIG_IPV6=m +# CONFIG_IPV6_PRIVACY is not set +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_INET6_TUNNEL=m +CONFIG_IPV6_TUNNEL=m +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +# CONFIG_NETFILTER_NETLINK is not set + +# +# IP: Netfilter Configuration +# +CONFIG_IP_NF_CONNTRACK=m +# CONFIG_IP_NF_CT_ACCT is not set +# CONFIG_IP_NF_CONNTRACK_MARK is not set +# CONFIG_IP_NF_CONNTRACK_EVENTS is not set +CONFIG_IP_NF_CT_PROTO_SCTP=m +CONFIG_IP_NF_FTP=m +CONFIG_IP_NF_IRC=m +# CONFIG_IP_NF_NETBIOS_NS is not set +CONFIG_IP_NF_TFTP=m +CONFIG_IP_NF_AMANDA=m +# CONFIG_IP_NF_PPTP is not set +CONFIG_IP_NF_QUEUE=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_LIMIT=m +CONFIG_IP_NF_MATCH_IPRANGE=m +CONFIG_IP_NF_MATCH_MAC=m +CONFIG_IP_NF_MATCH_PKTTYPE=m +CONFIG_IP_NF_MATCH_MARK=m +CONFIG_IP_NF_MATCH_MULTIPORT=m +CONFIG_IP_NF_MATCH_TOS=m +CONFIG_IP_NF_MATCH_RECENT=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_DSCP=m +CONFIG_IP_NF_MATCH_AH_ESP=m +CONFIG_IP_NF_MATCH_LENGTH=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_MATCH_TCPMSS=m +CONFIG_IP_NF_MATCH_HELPER=m +CONFIG_IP_NF_MATCH_STATE=m +CONFIG_IP_NF_MATCH_CONNTRACK=m +CONFIG_IP_NF_MATCH_OWNER=m +CONFIG_IP_NF_MATCH_ADDRTYPE=m +CONFIG_IP_NF_MATCH_REALM=m +CONFIG_IP_NF_MATCH_SCTP=m +# CONFIG_IP_NF_MATCH_DCCP is not set +CONFIG_IP_NF_MATCH_COMMENT=m +CONFIG_IP_NF_MATCH_HASHLIMIT=m +# CONFIG_IP_NF_MATCH_STRING is not set +CONFIG_IP_NF_FILTER=m +# CONFIG_IP_NF_TARGET_REJECT is not set +CONFIG_IP_NF_TARGET_LOG=m +CONFIG_IP_NF_TARGET_ULOG=m +CONFIG_IP_NF_TARGET_TCPMSS=m +# CONFIG_IP_NF_TARGET_NFQUEUE is not set +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_NAT_NEEDED=y +# CONFIG_IP_NF_TARGET_MASQUERADE is not set +# CONFIG_IP_NF_TARGET_REDIRECT is not set +# CONFIG_IP_NF_TARGET_NETMAP is not set +# CONFIG_IP_NF_TARGET_SAME is not set +# CONFIG_IP_NF_NAT_SNMP_BASIC is not set +CONFIG_IP_NF_NAT_IRC=m +CONFIG_IP_NF_NAT_FTP=m +CONFIG_IP_NF_NAT_TFTP=m +CONFIG_IP_NF_NAT_AMANDA=m +CONFIG_IP_NF_MANGLE=m +# CONFIG_IP_NF_TARGET_TOS is not set +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_DSCP is not set +# CONFIG_IP_NF_TARGET_MARK is not set +# CONFIG_IP_NF_TARGET_CLASSIFY is not set +# CONFIG_IP_NF_TARGET_TTL is not set +CONFIG_IP_NF_RAW=m +# CONFIG_IP_NF_TARGET_NOTRACK is not set +CONFIG_IP_NF_ARPTABLES=m 
+CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m + +# +# IPv6: Netfilter Configuration (EXPERIMENTAL) +# +CONFIG_IP6_NF_QUEUE=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_LIMIT=m +CONFIG_IP6_NF_MATCH_MAC=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_MULTIPORT=m +CONFIG_IP6_NF_MATCH_OWNER=m +CONFIG_IP6_NF_MATCH_MARK=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_AHESP=m +CONFIG_IP6_NF_MATCH_LENGTH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_FILTER=m +# CONFIG_IP6_NF_TARGET_LOG is not set +# CONFIG_IP6_NF_TARGET_REJECT is not set +# CONFIG_IP6_NF_TARGET_NFQUEUE is not set +CONFIG_IP6_NF_MANGLE=m +# CONFIG_IP6_NF_TARGET_MARK is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_RAW=m + +# +# DCCP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_DCCP is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_SCHED is not set +CONFIG_NET_CLS_ROUTE=y + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +CONFIG_IRDA=m + +# +# IrDA protocols +# +CONFIG_IRLAN=m +CONFIG_IRNET=m +CONFIG_IRCOMM=m +# CONFIG_IRDA_ULTRA is not set + +# +# IrDA options +# +# CONFIG_IRDA_CACHE_LAST_LSAP is not set +# CONFIG_IRDA_FAST_RR is not set +# CONFIG_IRDA_DEBUG is not set + +# +# Infrared-port device drivers +# + +# +# SIR device drivers +# +# CONFIG_IRTTY_SIR is not set + +# +# Dongle support +# + +# +# Old SIR device drivers +# +# CONFIG_IRPORT_SIR is not set + +# +# Old Serial dongle support +# + +# +# FIR device drivers +# +# CONFIG_USB_IRDA is not set +# CONFIG_SIGMATEL_FIR is not set +# CONFIG_NSC_FIR is not set +# CONFIG_WINBOND_FIR is not set +# CONFIG_SMC_IRCC_FIR is not set +# CONFIG_ALI_FIR is not set +# CONFIG_VIA_FIR is not set +CONFIG_BT=m +CONFIG_BT_L2CAP=m +CONFIG_BT_SCO=m +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_HIDP=m + +# +# Bluetooth device drivers +# +CONFIG_BT_HCIUSB=m +# CONFIG_BT_HCIUSB_SCO is not set +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_BCSP_TXCRC=y +CONFIG_BT_HCIBCM203X=m +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIDTL1=m +CONFIG_BT_HCIBT3C=m +CONFIG_BT_HCIBLUECARD=m +CONFIG_BT_HCIBTUART=m +CONFIG_BT_HCIVHCI=m +CONFIG_IEEE80211=m +# CONFIG_IEEE80211_DEBUG is not set +CONFIG_IEEE80211_CRYPT_WEP=m +# CONFIG_IEEE80211_CRYPT_CCMP is not set +# CONFIG_IEEE80211_CRYPT_TKIP is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y +CONFIG_FW_LOADER=y +# CONFIG_DEBUG_DRIVER is not set + +# +# Memory Technology Devices (MTD) +# +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_REDBOOT_PARTS is not set +CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AFS_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# 
CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_RAM is not set +CONFIG_MTD_ROM=y +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_SHARP_SL=y +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLKMTD is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set + +# +# NAND Flash Device Drivers +# +CONFIG_MTD_NAND=y +CONFIG_MTD_NAND_VERIFY_WRITE=y +# CONFIG_MTD_NAND_H1900 is not set +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND_DISKONCHIP is not set +CONFIG_MTD_NAND_SHARPSL=y +# CONFIG_MTD_NAND_NANDSIM is not set + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Plug and Play support +# + +# +# Block devices +# +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_UB is not set +# CONFIG_BLK_DEV_RAM is not set +CONFIG_BLK_DEV_RAM_COUNT=16 +# CONFIG_CDROM_PKTCDVD is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_ATA_OVER_ETH is not set + +# +# ATA/ATAPI/MFM/RLL support +# +CONFIG_IDE=y +CONFIG_BLK_DEV_IDE=y + +# +# Please see Documentation/ide.txt for help/info on IDE drives +# +# CONFIG_BLK_DEV_IDE_SATA is not set +CONFIG_BLK_DEV_IDEDISK=y +# CONFIG_IDEDISK_MULTI_MODE is not set +CONFIG_BLK_DEV_IDECS=y +# CONFIG_BLK_DEV_IDECD is not set +# CONFIG_BLK_DEV_IDETAPE is not set +# CONFIG_BLK_DEV_IDEFLOPPY is not set +# CONFIG_BLK_DEV_IDESCSI is not set +# CONFIG_IDE_TASK_IOCTL is not set + +# +# IDE chipset support/bugfixes +# +CONFIG_IDE_GENERIC=y +# CONFIG_IDE_ARM is not set +# CONFIG_BLK_DEV_IDEDMA is not set +# CONFIG_IDEDMA_AUTO is not set +# CONFIG_BLK_DEV_HD is not set + +# +# SCSI device support +# +# CONFIG_RAID_ATTRS is not set +CONFIG_SCSI=m +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +CONFIG_CHR_DEV_OSST=m +CONFIG_BLK_DEV_SR=m +# CONFIG_BLK_DEV_SR_VENDOR is not set +CONFIG_CHR_DEV_SG=m +# CONFIG_CHR_DEV_SCH is not set + +# +# Some SCSI devices (e.g. 
CD jukebox) support multiple LUNs +# +CONFIG_SCSI_MULTI_LUN=y +# CONFIG_SCSI_CONSTANTS is not set +# CONFIG_SCSI_LOGGING is not set + +# +# SCSI Transport Attributes +# +# CONFIG_SCSI_SPI_ATTRS is not set +# CONFIG_SCSI_FC_ATTRS is not set +# CONFIG_SCSI_ISCSI_ATTRS is not set +# CONFIG_SCSI_SAS_ATTRS is not set + +# +# SCSI low-level drivers +# +# CONFIG_SCSI_SATA is not set +# CONFIG_SCSI_DEBUG is not set + +# +# PCMCIA SCSI adapter support +# +# CONFIG_PCMCIA_AHA152X is not set +# CONFIG_PCMCIA_FDOMAIN is not set +# CONFIG_PCMCIA_NINJA_SCSI is not set +# CONFIG_PCMCIA_QLOGIC is not set +# CONFIG_PCMCIA_SYM53C500 is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Fusion MPT device support +# +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# + +# +# I2O device support +# + +# +# Network device support +# +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# PHY device support +# +# CONFIG_PHYLIB is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +CONFIG_MII=m +# CONFIG_SMC91X is not set +# CONFIG_DM9000 is not set + +# +# Ethernet (1000 Mbit) +# + +# +# Ethernet (10000 Mbit) +# + +# +# Token Ring devices +# + +# +# Wireless LAN (non-hamradio) +# +CONFIG_NET_RADIO=y + +# +# Obsolete Wireless cards support (pre-802.11) +# +# CONFIG_STRIP is not set +# CONFIG_PCMCIA_WAVELAN is not set +# CONFIG_PCMCIA_NETWAVE is not set + +# +# Wireless 802.11 Frequency Hopping cards support +# +# CONFIG_PCMCIA_RAYCS is not set + +# +# Wireless 802.11b ISA/PCI cards support +# +CONFIG_HERMES=m +# CONFIG_ATMEL is not set + +# +# Wireless 802.11b Pcmcia/Cardbus cards support +# +CONFIG_PCMCIA_HERMES=m +CONFIG_PCMCIA_SPECTRUM=m +# CONFIG_AIRO_CS is not set +# CONFIG_PCMCIA_WL3501 is not set +CONFIG_HOSTAP=m +CONFIG_HOSTAP_FIRMWARE=y +CONFIG_HOSTAP_CS=m +CONFIG_NET_WIRELESS=y + +# +# PCMCIA network device support +# +CONFIG_NET_PCMCIA=y +# CONFIG_PCMCIA_3C589 is not set +# CONFIG_PCMCIA_3C574 is not set +# CONFIG_PCMCIA_FMVJ18X is not set +CONFIG_PCMCIA_PCNET=m +# CONFIG_PCMCIA_NMCLAN is not set +# CONFIG_PCMCIA_SMC91C92 is not set +# CONFIG_PCMCIA_XIRC2PS is not set +# CONFIG_PCMCIA_AXNET is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +CONFIG_PPP=m +# CONFIG_PPP_MULTILINK is not set +# CONFIG_PPP_FILTER is not set +CONFIG_PPP_ASYNC=m +# CONFIG_PPP_SYNC_TTY is not set +# CONFIG_PPP_DEFLATE is not set +CONFIG_PPP_BSDCOMP=m +# CONFIG_PPPOE is not set +# CONFIG_SLIP is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_CORGI is not set +CONFIG_KEYBOARD_SPITZ=y +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_CORGI=y +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# 
CONFIG_TOUCHSCREEN_MK712 is not set +CONFIG_INPUT_MISC=y +CONFIG_INPUT_UINPUT=m + +# +# Hardware I/O ports +# +# CONFIG_SERIO is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +CONFIG_SERIAL_8250=m +CONFIG_SERIAL_8250_CS=m +CONFIG_SERIAL_8250_NR_UARTS=4 +# CONFIG_SERIAL_8250_EXTENDED is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_PXA=y +CONFIG_SERIAL_PXA_CONSOLE=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_NVRAM is not set +# CONFIG_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set + +# +# Ftape, the floppy tape device driver +# + +# +# PCMCIA character devices +# +# CONFIG_SYNCLINK_CS is not set +# CONFIG_RAW_DRIVER is not set + +# +# TPM devices +# + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# Hardware Monitoring support +# +CONFIG_HWMON=y +# CONFIG_HWMON_VID is not set +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Misc devices +# + +# +# Multimedia Capabilities Port drivers +# + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# Graphics support +# +CONFIG_FB=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SOFT_CURSOR=y +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set +CONFIG_FB_PXA=y +# CONFIG_FB_W100 is not set +# CONFIG_FB_PXA_PARAMETERS is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_VIRTUAL is not set + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FONTS=y +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +# CONFIG_FONT_6x11 is not set +# CONFIG_FONT_7x14 is not set +# CONFIG_FONT_PEARL_8x8 is not set +# CONFIG_FONT_ACORN_8x8 is not set +# CONFIG_FONT_MINI_4x6 is not set +# CONFIG_FONT_SUN8x16 is not set +# CONFIG_FONT_SUN12x22 is not set +# CONFIG_FONT_10x18 is not set + +# +# Logo configuration +# +# CONFIG_LOGO is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_BACKLIGHT_DEVICE=y +CONFIG_LCD_CLASS_DEVICE=y +CONFIG_LCD_DEVICE=y +CONFIG_BACKLIGHT_CORGI=y + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# USB support +# +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB_ARCH_HAS_OHCI=y +CONFIG_USB=m +# CONFIG_USB_DEBUG is not set + +# +# Miscellaneous USB options +# +CONFIG_USB_DEVICEFS=y +# CONFIG_USB_BANDWIDTH is not set +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_SUSPEND is not set +# CONFIG_USB_OTG is not set + +# +# USB Host Controller Drivers +# +# CONFIG_USB_ISP116X_HCD is not set +CONFIG_USB_OHCI_HCD=m +# CONFIG_USB_OHCI_BIG_ENDIAN is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SL811_HCD=m +CONFIG_USB_SL811_CS=m + +# +# USB Device Class drivers +# + +# +# USB Bluetooth TTY can only be used with disabled Bluetooth subsystem +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m + +# +# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +# CONFIG_USB_STORAGE_DATAFAB is not set +# CONFIG_USB_STORAGE_FREECOM is not set +# CONFIG_USB_STORAGE_ISD200 is not set +# CONFIG_USB_STORAGE_DPCM is 
not set +# CONFIG_USB_STORAGE_USBAT is not set +# CONFIG_USB_STORAGE_SDDR09 is not set +# CONFIG_USB_STORAGE_SDDR55 is not set +# CONFIG_USB_STORAGE_JUMPSHOT is not set +# CONFIG_USB_STORAGE_ONETOUCH is not set + +# +# USB Input Devices +# +CONFIG_USB_HID=m +CONFIG_USB_HIDINPUT=y +# CONFIG_HID_FF is not set +# CONFIG_USB_HIDDEV is not set + +# +# USB HID Boot Protocol drivers +# +CONFIG_USB_KBD=m +CONFIG_USB_MOUSE=m +CONFIG_USB_AIPTEK=m +CONFIG_USB_WACOM=m +# CONFIG_USB_ACECAD is not set +CONFIG_USB_KBTAB=m +CONFIG_USB_POWERMATE=m +CONFIG_USB_MTOUCH=m +# CONFIG_USB_ITMTOUCH is not set +CONFIG_USB_EGALAX=m +# CONFIG_USB_YEALINK is not set +CONFIG_USB_XPAD=m +CONFIG_USB_ATI_REMOTE=m +# CONFIG_USB_KEYSPAN_REMOTE is not set +# CONFIG_USB_APPLETOUCH is not set + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m + +# +# USB Multimedia devices +# +CONFIG_USB_DABUSB=m + +# +# Video4Linux support is needed for USB Multimedia device support +# + +# +# USB Network Adapters +# +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_CDCETHER=m +# CONFIG_USB_NET_GL620A is not set +CONFIG_USB_NET_NET1080=m +# CONFIG_USB_NET_PLUSB is not set +# CONFIG_USB_NET_RNDIS_HOST is not set +# CONFIG_USB_NET_CDC_SUBSET is not set +CONFIG_USB_NET_ZAURUS=m +# CONFIG_USB_ZD1201 is not set +CONFIG_USB_MON=y + +# +# USB port drivers +# + +# +# USB Serial Converter support +# +CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_GENERIC=y +# CONFIG_USB_SERIAL_AIRPRIME is not set +CONFIG_USB_SERIAL_BELKIN=m +# CONFIG_USB_SERIAL_WHITEHEAT is not set +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +# CONFIG_USB_SERIAL_CP2101 is not set +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +# CONFIG_USB_SERIAL_KEYSPAN_MPR is not set +# CONFIG_USB_SERIAL_KEYSPAN_USA28 is not set +# CONFIG_USB_SERIAL_KEYSPAN_USA28X is not set +# CONFIG_USB_SERIAL_KEYSPAN_USA28XA is not set +# CONFIG_USB_SERIAL_KEYSPAN_USA28XB is not set +# CONFIG_USB_SERIAL_KEYSPAN_USA19 is not set +# CONFIG_USB_SERIAL_KEYSPAN_USA18X is not set +# CONFIG_USB_SERIAL_KEYSPAN_USA19W is not set +# CONFIG_USB_SERIAL_KEYSPAN_USA19QW is not set +# CONFIG_USB_SERIAL_KEYSPAN_USA19QI is not set +# CONFIG_USB_SERIAL_KEYSPAN_USA49W is not set +# CONFIG_USB_SERIAL_KEYSPAN_USA49WLC is not set +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +CONFIG_USB_SERIAL_PL2303=m +# CONFIG_USB_SERIAL_HP4X is not set +CONFIG_USB_SERIAL_SAFE=m +# CONFIG_USB_SERIAL_SAFE_PADDED is not set +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_XIRCOM=m +# CONFIG_USB_SERIAL_OPTION is not set +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_EZUSB=y + +# +# USB Miscellaneous drivers +# +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_AUERSWALD=m +CONFIG_USB_RIO500=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +CONFIG_USB_LED=m +CONFIG_USB_CYTHERM=m +CONFIG_USB_PHIDGETKIT=m +CONFIG_USB_PHIDGETSERVO=m +CONFIG_USB_IDMOUSE=m +# CONFIG_USB_LD is not set +# CONFIG_USB_TEST is not set + +# +# USB DSL modem support +# + +# +# USB Gadget Support +# +CONFIG_USB_GADGET=m +# CONFIG_USB_GADGET_DEBUG_FILES is not set +CONFIG_USB_GADGET_SELECTED=y +# CONFIG_USB_GADGET_NET2280 is not set 
+# CONFIG_USB_GADGET_PXA2XX is not set +# CONFIG_USB_GADGET_GOKU is not set +# CONFIG_USB_GADGET_LH7A40X is not set +# CONFIG_USB_GADGET_OMAP is not set +CONFIG_USB_GADGET_DUMMY_HCD=y +CONFIG_USB_DUMMY_HCD=m +CONFIG_USB_GADGET_DUALSPEED=y +CONFIG_USB_ZERO=m +CONFIG_USB_ETH=m +CONFIG_USB_ETH_RNDIS=y +CONFIG_USB_GADGETFS=m +CONFIG_USB_FILE_STORAGE=m +# CONFIG_USB_FILE_STORAGE_TEST is not set +CONFIG_USB_G_SERIAL=m + +# +# MMC/SD Card support +# +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_BLOCK=y +CONFIG_MMC_PXA=y +# CONFIG_MMC_WBSD is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +# CONFIG_EXT2_FS_XIP is not set +CONFIG_EXT3_FS=y +# CONFIG_EXT3_FS_XATTR is not set +CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_FS_POSIX_ACL=y +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_INOTIFY=y +# CONFIG_QUOTA is not set +CONFIG_DNOTIFY=y +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y +# CONFIG_RELAYFS_FS is not set + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS_FS is not set +CONFIG_JFFS2_FS=y +CONFIG_JFFS2_FS_DEBUG=0 +CONFIG_JFFS2_FS_WRITEBUFFER=y +CONFIG_JFFS2_COMPRESSION_OPTIONS=y +CONFIG_JFFS2_ZLIB=y +CONFIG_JFFS2_RTIME=y +CONFIG_JFFS2_RUBIN=y +# CONFIG_JFFS2_CMODE_NONE is not set +CONFIG_JFFS2_CMODE_PRIORITY=y +# CONFIG_JFFS2_CMODE_SIZE is not set +CONFIG_CRAMFS=m +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=m +CONFIG_NFS_V3=y +# CONFIG_NFS_V3_ACL is not set +CONFIG_NFS_V4=y +# CONFIG_NFS_DIRECTIO is not set +# CONFIG_NFSD is not set +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_RPCSEC_GSS_KRB5=m +# CONFIG_RPCSEC_GSS_SPKM3 is not set +CONFIG_SMB_FS=m +CONFIG_SMB_NLS_DEFAULT=y +CONFIG_SMB_NLS_REMOTE="cp437" +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +# CONFIG_9P_FS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_EFI_PARTITION is not set + +# +# Native Language Support +# +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="cp437" 
+CONFIG_NLS_CODEPAGE_437=y +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=y + +# +# Profiling support +# +CONFIG_PROFILING=y +CONFIG_OPROFILE=m + +# +# Kernel hacking +# +# CONFIG_PRINTK_TIME is not set +CONFIG_DEBUG_KERNEL=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_DETECT_SOFTLOCKUP=y +# CONFIG_SCHEDSTATS is not set +# CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_PREEMPT is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_INFO is not set +# CONFIG_DEBUG_FS is not set +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_USER is not set +# CONFIG_DEBUG_WAITQ is not set +CONFIG_DEBUG_ERRORS=y +CONFIG_DEBUG_LL=y +# CONFIG_DEBUG_ICEDCC is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +CONFIG_CRYPTO=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=m +CONFIG_CRYPTO_SHA1=m +CONFIG_CRYPTO_SHA256=m +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_WP512=m +# CONFIG_CRYPTO_TGR192 is not set +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_AES=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_DEFLATE=m +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_CRC32C=m +CONFIG_CRYPTO_TEST=m + +# +# Hardware crypto devices +# + +# +# Library routines +# +CONFIG_CRC_CCITT=y +# CONFIG_CRC16 is not set +CONFIG_CRC32=y +CONFIG_LIBCRC32C=m +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index 835d450797a1..7b17a87a3311 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -45,8 +45,8 @@ extern void fp_enter(void); #define EXPORT_SYMBOL_ALIAS(sym,orig) \ EXPORT_CRC_ALIAS(sym) \ - const struct kernel_symbol __ksymtab_##sym \ - __attribute__((section("__ksymtab"))) = \ + static const struct kernel_symbol __ksymtab_##sym \ + __attribute_used__ __attribute__((section("__ksymtab"))) = \ { (unsigned long)&orig, #sym }; /* diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 
7152bfbee581..93b5e8e5292e 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -537,7 +537,7 @@ ENTRY(__switch_to) #ifdef CONFIG_CPU_MPCORE clrex #else - strex r3, r4, [ip] @ Clear exclusive monitor + strex r5, r4, [ip] @ Clear exclusive monitor #endif #endif #if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT) diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 81d450ac3fab..066597f4345a 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -106,15 +106,10 @@ ENTRY(ret_from_fork) .endm .Larm700bug: - ldr r0, [sp, #S_PSR] @ Get calling cpsr - sub lr, lr, #4 - str lr, [r8] - msr spsr_cxsf, r0 ldmia sp, {r0 - lr}^ @ Get calling r0 - lr mov r0, r0 - ldr lr, [sp, #S_PC] @ Get PC add sp, sp, #S_FRAME_SIZE - movs pc, lr + subs pc, lr, #4 #else .macro arm710_bug_check, instr, temp .endm diff --git a/arch/arm/kernel/io.c b/arch/arm/kernel/io.c index 6c20c1188b60..1f6822dfae74 100644 --- a/arch/arm/kernel/io.c +++ b/arch/arm/kernel/io.c @@ -7,7 +7,7 @@ * Copy data from IO memory space to "real" memory space. * This needs to be optimized. */ -void _memcpy_fromio(void *to, void __iomem *from, size_t count) +void _memcpy_fromio(void *to, const volatile void __iomem *from, size_t count) { unsigned char *t = to; while (count) { @@ -22,7 +22,7 @@ void _memcpy_fromio(void *to, void __iomem *from, size_t count) * Copy data from "real" memory space to IO memory space. * This needs to be optimized. */ -void _memcpy_toio(void __iomem *to, const void *from, size_t count) +void _memcpy_toio(volatile void __iomem *to, const void *from, size_t count) { const unsigned char *f = from; while (count) { @@ -37,7 +37,7 @@ void _memcpy_toio(void __iomem *to, const void *from, size_t count) * "memset" on IO memory space. * This needs to be optimized. */ -void _memset_io(void __iomem *dst, int c, size_t count) +void _memset_io(volatile void __iomem *dst, int c, size_t count) { while (count) { count--; diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c index 42629ff84f5a..ea569ba482b1 100644 --- a/arch/arm/kernel/sys_arm.c +++ b/arch/arm/kernel/sys_arm.c @@ -305,7 +305,7 @@ long execve(const char *filename, char **argv, char **envp) "Ir" (THREAD_START_SP - sizeof(regs)), "r" (®s), "Ir" (sizeof(regs)) - : "r0", "r1", "r2", "r3", "ip", "memory"); + : "r0", "r1", "r2", "r3", "ip", "lr", "memory"); out: return ret; diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index e7d22dbcb691..f6de76e0a45d 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -504,7 +504,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) bad_access: spin_unlock(&mm->page_table_lock); - /* simulate a read access fault */ + /* simulate a write access fault */ do_DataAbort(addr, 15 + (1 << 11), regs); return -1; } diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 08e58ecd44be..0d5db5279c5c 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -89,13 +89,6 @@ SECTIONS *(.got) /* Global offset table */ } - . = ALIGN(16); - __ex_table : { /* Exception table */ - __start___ex_table = .; - *(__ex_table) - __stop___ex_table = .; - } - RODATA _etext = .; /* End of text and rodata section */ @@ -137,6 +130,14 @@ SECTIONS . = ALIGN(32); *(.data.cacheline_aligned) + /* + * The exception fixup table (might need resorting at runtime) + */ + . 
= ALIGN(32); + __start___ex_table = .; + *(__ex_table) + __stop___ex_table = .; + /* * and the usual data section */ diff --git a/arch/arm/mach-clps711x/fortunet.c b/arch/arm/mach-clps711x/fortunet.c index f83a59761e02..3d88da0c287b 100644 --- a/arch/arm/mach-clps711x/fortunet.c +++ b/arch/arm/mach-clps711x/fortunet.c @@ -31,6 +31,8 @@ #include +#include + #include "common.h" struct meminfo memmap = { diff --git a/arch/arm/mach-imx/generic.c b/arch/arm/mach-imx/generic.c index 41e5849ae8da..f8a742bb2d5b 100644 --- a/arch/arm/mach-imx/generic.c +++ b/arch/arm/mach-imx/generic.c @@ -28,14 +28,15 @@ #include #include #include +#include #include void imx_gpio_mode(int gpio_mode) { unsigned int pin = gpio_mode & GPIO_PIN_MASK; - unsigned int port = (gpio_mode & GPIO_PORT_MASK) >> 5; - unsigned int ocr = (gpio_mode & GPIO_OCR_MASK) >> 10; + unsigned int port = (gpio_mode & GPIO_PORT_MASK) >> GPIO_PORT_SHIFT; + unsigned int ocr = (gpio_mode & GPIO_OCR_MASK) >> GPIO_OCR_SHIFT; unsigned int tmp; /* Pullup enable */ @@ -57,7 +58,7 @@ void imx_gpio_mode(int gpio_mode) GPR(port) &= ~(1<> GPIO_AOUT_SHIFT) & 3) << (pin * 2); + ICONFB1(port) &= ~( 3<<(pin*2)); + ICONFB1(port) |= ((gpio_mode >> GPIO_BOUT_SHIFT) & 3) << (pin * 2); } else { tmp = OCR2(port); tmp &= ~( 3<<((pin-16)*2)); tmp |= (ocr << ((pin-16)*2)); OCR2(port) = tmp; - if( gpio_mode & GPIO_AOUT ) - ICONFA2(port) &= ~( 3<<((pin-16)*2)); - if( gpio_mode & GPIO_BOUT ) - ICONFB2(port) &= ~( 3<<((pin-16)*2)); + ICONFA2(port) &= ~( 3<<((pin-16)*2)); + ICONFA2(port) |= ((gpio_mode >> GPIO_AOUT_SHIFT) & 3) << ((pin-16) * 2); + ICONFB2(port) &= ~( 3<<((pin-16)*2)); + ICONFB2(port) |= ((gpio_mode >> GPIO_BOUT_SHIFT) & 3) << ((pin-16) * 2); } } diff --git a/arch/arm/mach-imx/leds-mx1ads.c b/arch/arm/mach-imx/leds-mx1ads.c index e6399b06e4a4..79236404aec2 100644 --- a/arch/arm/mach-imx/leds-mx1ads.c +++ b/arch/arm/mach-imx/leds-mx1ads.c @@ -17,7 +17,6 @@ #include #include #include -#include #include "leds.h" /* diff --git a/arch/arm/mach-imx/mx1ads.c b/arch/arm/mach-imx/mx1ads.c index 5d25434d332c..a7511ddfe364 100644 --- a/arch/arm/mach-imx/mx1ads.c +++ b/arch/arm/mach-imx/mx1ads.c @@ -55,7 +55,7 @@ static void __init mx1ads_init(void) { #ifdef CONFIG_LEDS - imx_gpio_mode(GPIO_PORTA | GPIO_OUT | GPIO_GPIO | 2); + imx_gpio_mode(GPIO_PORTA | GPIO_OUT | 2); #endif platform_add_devices(devices, ARRAY_SIZE(devices)); } diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c index c3c2f17d030e..a1b153d1626c 100644 --- a/arch/arm/mach-integrator/impd1.c +++ b/arch/arm/mach-integrator/impd1.c @@ -67,7 +67,7 @@ static void impd1_setvco(struct clk *clk, struct icst525_vco vco) } writel(0, impd1->base + IMPD1_LOCK); -#if DEBUG +#ifdef DEBUG vco.v = val & 0x1ff; vco.r = (val >> 9) & 0x7f; vco.s = (val >> 16) & 7; @@ -427,17 +427,18 @@ static int impd1_probe(struct lm_device *dev) return ret; } +static int impd1_remove_one(struct device *dev, void *data) +{ + device_unregister(dev); + return 0; +} + static void impd1_remove(struct lm_device *dev) { struct impd1_module *impd1 = lm_get_drvdata(dev); - struct list_head *l, *n; int i; - list_for_each_safe(l, n, &dev->dev.children) { - struct device *d = list_to_dev(l); - - device_unregister(d); - } + device_for_each_child(&dev->dev, NULL, impd1_remove_one); for (i = 0; i < ARRAY_SIZE(impd1->vcos); i++) clk_unregister(&impd1->vcos[i]); diff --git a/arch/arm/mach-iop3xx/common.c b/arch/arm/mach-iop3xx/common.c index bda7394ec06c..fdeeef489a73 100644 --- a/arch/arm/mach-iop3xx/common.c +++ 
b/arch/arm/mach-iop3xx/common.c @@ -27,7 +27,6 @@ unsigned long iop3xx_pcibios_min_mem = 0; /* * Default power-off for EP80219 */ -#include static inline void ep80219_send_to_pic(__u8 c) { } diff --git a/arch/arm/mach-iop3xx/iop321-time.c b/arch/arm/mach-iop3xx/iop321-time.c index 0039793b694a..d67ac0e5d438 100644 --- a/arch/arm/mach-iop3xx/iop321-time.c +++ b/arch/arm/mach-iop3xx/iop321-time.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include diff --git a/arch/arm/mach-iop3xx/iop331-time.c b/arch/arm/mach-iop3xx/iop331-time.c index 8eddfac7e2b0..3c1f0ebbd636 100644 --- a/arch/arm/mach-iop3xx/iop331-time.c +++ b/arch/arm/mach-iop3xx/iop331-time.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include diff --git a/arch/arm/mach-iop3xx/iq31244-mm.c b/arch/arm/mach-iop3xx/iq31244-mm.c index b01042f7de71..55992ab586ba 100644 --- a/arch/arm/mach-iop3xx/iq31244-mm.c +++ b/arch/arm/mach-iop3xx/iq31244-mm.c @@ -21,7 +21,6 @@ #include #include -#include /* diff --git a/arch/arm/mach-iop3xx/iq80321-mm.c b/arch/arm/mach-iop3xx/iq80321-mm.c index 1580c7ed2b9d..bb3e9e5a9aff 100644 --- a/arch/arm/mach-iop3xx/iq80321-mm.c +++ b/arch/arm/mach-iop3xx/iq80321-mm.c @@ -21,7 +21,6 @@ #include #include -#include /* diff --git a/arch/arm/mach-iop3xx/iq80331-mm.c b/arch/arm/mach-iop3xx/iq80331-mm.c index ee8c333e115f..129eb49b0670 100644 --- a/arch/arm/mach-iop3xx/iq80331-mm.c +++ b/arch/arm/mach-iop3xx/iq80331-mm.c @@ -21,7 +21,6 @@ #include #include -#include /* diff --git a/arch/arm/mach-iop3xx/iq80332-mm.c b/arch/arm/mach-iop3xx/iq80332-mm.c index 084afcdfb1eb..2feaf7591f53 100644 --- a/arch/arm/mach-iop3xx/iq80332-mm.c +++ b/arch/arm/mach-iop3xx/iq80332-mm.c @@ -21,7 +21,6 @@ #include #include -#include /* diff --git a/arch/arm/mach-ixp2000/core.c b/arch/arm/mach-ixp2000/core.c index 74bd2fd602d4..f4d7f1f6ef85 100644 --- a/arch/arm/mach-ixp2000/core.c +++ b/arch/arm/mach-ixp2000/core.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include @@ -168,7 +167,7 @@ static struct plat_serial8250_port ixp2000_serial_port[] = { static struct resource ixp2000_uart_resource = { .start = IXP2000_UART_PHYS_BASE, - .end = IXP2000_UART_PHYS_BASE + 0xffff, + .end = IXP2000_UART_PHYS_BASE + 0x1f, .flags = IORESOURCE_MEM, }; diff --git a/arch/arm/mach-ixp2000/pci.c b/arch/arm/mach-ixp2000/pci.c index 0788fb2b5c10..522205acb316 100644 --- a/arch/arm/mach-ixp2000/pci.c +++ b/arch/arm/mach-ixp2000/pci.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index 52ad11328e96..36b6045213ee 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c @@ -125,7 +125,8 @@ static int ixp4xx_set_irq_type(unsigned int irq, unsigned int type) } else if (type & IRQT_LOW) { int_style = IXP4XX_GPIO_STYLE_ACTIVE_LOW; irq_type = IXP4XX_IRQ_LEVEL; - } + } else + return -EINVAL; ixp4xx_config_irq(irq, irq_type); @@ -142,6 +143,8 @@ static int ixp4xx_set_irq_type(unsigned int irq, unsigned int type) /* Set the new style */ *int_reg |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE)); + + return 0; } static void ixp4xx_irq_mask(unsigned int irq) diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c index 39b06ed80646..0a41080d2266 100644 --- a/arch/arm/mach-ixp4xx/ixdp425-setup.c +++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c @@ -123,7 +123,7 @@ static void __init ixdp425_init(void) platform_add_devices(ixdp425_devices, 
ARRAY_SIZE(ixdp425_devices)); } -#ifdef CONFIG_ARCH_IXDP465 +#ifdef CONFIG_ARCH_IXDP425 MACHINE_START(IXDP425, "Intel IXDP425 Development Platform") /* Maintainer: MontaVista Software, Inc. */ .phys_ram = PHYS_OFFSET, diff --git a/arch/arm/mach-l7200/core.c b/arch/arm/mach-l7200/core.c index 2a7fee2a7635..03ed742ae2be 100644 --- a/arch/arm/mach-l7200/core.c +++ b/arch/arm/mach-l7200/core.c @@ -7,12 +7,17 @@ */ #include #include +#include +#include +#include +#include #include #include +#include #include -#include +#include /* * IRQ base register @@ -48,6 +53,12 @@ static void l7200_unmask_irq(unsigned int irq) { IRQ_ENABLE = 1 << irq; } + +static struct irqchip l7200_irq_chip = { + .ack = l7200_mask_irq, + .mask = l7200_mask_irq, + .unmask = l7200_unmask_irq +}; static void __init l7200_init_irq(void) { @@ -57,11 +68,9 @@ static void __init l7200_init_irq(void) FIQ_ENABLECLEAR = 0xffffffff; /* clear all fast interrupt enables */ for (irq = 0; irq < NR_IRQS; irq++) { - irq_desc[irq].valid = 1; - irq_desc[irq].probe_ok = 1; - irq_desc[irq].mask_ack = l7200_mask_irq; - irq_desc[irq].mask = l7200_mask_irq; - irq_desc[irq].unmask = l7200_unmask_irq; + set_irq_chip(irq, &l7200_irq_chip); + set_irq_flags(irq, IRQF_VALID); + set_irq_handler(irq, do_level_IRQ); } init_FIQ(); diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c index be37586cb1b0..60c8b9d8bb9c 100644 --- a/arch/arm/mach-pxa/corgi.c +++ b/arch/arm/mach-pxa/corgi.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include diff --git a/arch/arm/mach-pxa/corgi_lcd.c b/arch/arm/mach-pxa/corgi_lcd.c index c5efcd04fcbc..370df113dc06 100644 --- a/arch/arm/mach-pxa/corgi_lcd.c +++ b/arch/arm/mach-pxa/corgi_lcd.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -468,6 +467,7 @@ void corgi_put_hsync(void) { if (get_hsync_time) symbol_put(w100fb_get_hsynclen); + get_hsync_time = NULL; } void corgi_wait_hsync(void) @@ -477,20 +477,39 @@ void corgi_wait_hsync(void) #endif #ifdef CONFIG_PXA_SHARP_Cxx00 +static struct device *spitz_pxafb_dev; + +static int is_pxafb_device(struct device * dev, void * data) +{ + struct platform_device *pdev = container_of(dev, struct platform_device, dev); + + return (strncmp(pdev->name, "pxa2xx-fb", 9) == 0); +} + unsigned long spitz_get_hsync_len(void) { +#ifdef CONFIG_FB_PXA + if (!spitz_pxafb_dev) { + spitz_pxafb_dev = bus_find_device(&platform_bus_type, NULL, NULL, is_pxafb_device); + if (!spitz_pxafb_dev) + return 0; + } if (!get_hsync_time) get_hsync_time = symbol_get(pxafb_get_hsync_time); if (!get_hsync_time) +#endif return 0; - return pxafb_get_hsync_time(&pxafb_device.dev); + return pxafb_get_hsync_time(spitz_pxafb_dev); } void spitz_put_hsync(void) { + put_device(spitz_pxafb_dev); if (get_hsync_time) symbol_put(pxafb_get_hsync_time); + spitz_pxafb_dev = NULL; + get_hsync_time = NULL; } void spitz_wait_hsync(void) diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c index a45aaa115a76..1d7677669a76 100644 --- a/arch/arm/mach-pxa/generic.c +++ b/arch/arm/mach-pxa/generic.c @@ -34,6 +34,7 @@ #include #include #include +#include #include "generic.h" @@ -207,6 +208,11 @@ static struct platform_device pxafb_device = { .resource = pxafb_resources, }; +void __init set_pxa_fb_parent(struct device *parent_dev) +{ + pxafb_device.dev.parent = parent_dev; +} + static struct platform_device ffuart_device = { .name = "pxa2xx-uart", .id = 0, @@ -244,6 +250,25 @@ void __init pxa_set_i2c_info(struct i2c_pxa_platform_data *info) 
i2c_device.dev.platform_data = info; } +static struct resource i2s_resources[] = { + { + .start = 0x40400000, + .end = 0x40400083, + .flags = IORESOURCE_MEM, + }, { + .start = IRQ_I2S, + .end = IRQ_I2S, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device i2s_device = { + .name = "pxa2xx-i2s", + .id = -1, + .resource = i2s_resources, + .num_resources = ARRAY_SIZE(i2s_resources), +}; + static struct platform_device *devices[] __initdata = { &pxamci_device, &udc_device, @@ -252,6 +277,7 @@ static struct platform_device *devices[] __initdata = { &btuart_device, &stuart_device, &i2c_device, + &i2s_device, }; static int __init pxa_init(void) diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c index 568afe3d6e1a..d0ab428c2d7d 100644 --- a/arch/arm/mach-pxa/spitz.c +++ b/arch/arm/mach-pxa/spitz.c @@ -36,7 +36,6 @@ #include #include #include -#include #include #include #include @@ -304,7 +303,6 @@ static struct platform_device *devices[] __initdata = { &spitzkbd_device, &spitzts_device, &spitzbl_device, - &spitzbattery_device, }; static void __init common_init(void) @@ -328,7 +326,7 @@ static void __init common_init(void) platform_add_devices(devices, ARRAY_SIZE(devices)); pxa_set_mci_info(&spitz_mci_platform_data); - pxafb_device.dev.parent = &spitzssp_device.dev; + set_pxa_fb_parent(&spitzssp_device.dev); set_pxa_fb_info(&spitz_pxafb_info); } diff --git a/arch/arm/mach-s3c2410/Kconfig b/arch/arm/mach-s3c2410/Kconfig index 06807c6ee68a..c796bcdd6158 100644 --- a/arch/arm/mach-s3c2410/Kconfig +++ b/arch/arm/mach-s3c2410/Kconfig @@ -12,6 +12,7 @@ config MACH_ANUBIS config ARCH_BAST bool "Simtec Electronics BAST (EB2410ITX)" select CPU_S3C2410 + select ISA help Say Y here if you are using the Simtec Electronics EB2410ITX development board (also known as BAST) diff --git a/arch/arm/mach-s3c2410/clock.c b/arch/arm/mach-s3c2410/clock.c index f59608268751..8b3d5dc35de5 100644 --- a/arch/arm/mach-s3c2410/clock.c +++ b/arch/arm/mach-s3c2410/clock.c @@ -98,7 +98,10 @@ struct clk *clk_get(struct device *dev, const char *id) struct clk *clk = ERR_PTR(-ENOENT); int idno; - idno = (dev == NULL) ?
-1 : to_platform_device(dev)->id; + if (dev == NULL || dev->bus != &platform_bus_type) + idno = -1; + else + idno = to_platform_device(dev)->id; down(&clocks_sem); diff --git a/arch/arm/mach-s3c2410/mach-anubis.c b/arch/arm/mach-s3c2410/mach-anubis.c index 7c05f27fe1d6..5ae80f4e3e67 100644 --- a/arch/arm/mach-s3c2410/mach-anubis.c +++ b/arch/arm/mach-s3c2410/mach-anubis.c @@ -125,7 +125,7 @@ static int external_map[] = { 2 }; static int chip0_map[] = { 0 }; static int chip1_map[] = { 1 }; -struct mtd_partition anubis_default_nand_part[] = { +static struct mtd_partition anubis_default_nand_part[] = { [0] = { .name = "Boot Agent", .size = SZ_16K, diff --git a/arch/arm/mach-s3c2410/mach-bast.c b/arch/arm/mach-s3c2410/mach-bast.c index ed1f07d7252f..7b51bfd0ba6d 100644 --- a/arch/arm/mach-s3c2410/mach-bast.c +++ b/arch/arm/mach-s3c2410/mach-bast.c @@ -230,7 +230,7 @@ static int chip0_map[] = { 1 }; static int chip1_map[] = { 2 }; static int chip2_map[] = { 3 }; -struct mtd_partition bast_default_nand_part[] = { +static struct mtd_partition bast_default_nand_part[] = { [0] = { .name = "Boot Agent", .size = SZ_16K, @@ -307,9 +307,9 @@ static void bast_nand_select(struct s3c2410_nand_set *set, int slot) } static struct s3c2410_platform_nand bast_nand_info = { - .tacls = 40, - .twrph0 = 80, - .twrph1 = 80, + .tacls = 30, + .twrph0 = 60, + .twrph1 = 60, .nr_sets = ARRAY_SIZE(bast_nand_sets), .sets = bast_nand_sets, .select_chip = bast_nand_select, @@ -340,7 +340,7 @@ static struct resource bast_dm9k_resource[] = { * better IO routines can be written and tested */ -struct dm9000_plat_data bast_dm9k_platdata = { +static struct dm9000_plat_data bast_dm9k_platdata = { .flags = DM9000_PLATF_16BITONLY }; diff --git a/arch/arm/mach-s3c2410/mach-vr1000.c b/arch/arm/mach-s3c2410/mach-vr1000.c index 663a7f98fc0b..46b259673c18 100644 --- a/arch/arm/mach-s3c2410/mach-vr1000.c +++ b/arch/arm/mach-s3c2410/mach-vr1000.c @@ -288,7 +288,7 @@ static struct resource vr1000_dm9k1_resource[] = { * better IO routines can be written and tested */ -struct dm9000_plat_data vr1000_dm9k_platdata = { +static struct dm9000_plat_data vr1000_dm9k_platdata = { .flags = DM9000_PLATF_16BITONLY, }; diff --git a/arch/arm/mach-s3c2410/s3c2410.c b/arch/arm/mach-s3c2410/s3c2410.c index 0b88993dfd27..a8bf5ec82602 100644 --- a/arch/arm/mach-s3c2410/s3c2410.c +++ b/arch/arm/mach-s3c2410/s3c2410.c @@ -125,9 +125,6 @@ static struct platform_device *uart_devices[] __initdata = { &s3c_uart2 }; -/* store our uart devices for the serial driver console */ -struct platform_device *s3c2410_uart_devices[3]; - static int s3c2410_uart_count = 0; /* uart registration process */ diff --git a/arch/arm/mach-s3c2410/s3c2440.c b/arch/arm/mach-s3c2410/s3c2440.c index d4c8281b55f6..833fa36bce05 100644 --- a/arch/arm/mach-s3c2410/s3c2440.c +++ b/arch/arm/mach-s3c2410/s3c2440.c @@ -151,7 +151,7 @@ void __init s3c2440_init_uarts(struct s3c2410_uartcfg *cfg, int no) #ifdef CONFIG_PM -struct sleep_save s3c2440_sleep[] = { +static struct sleep_save s3c2440_sleep[] = { SAVE_ITEM(S3C2440_DSC0), SAVE_ITEM(S3C2440_DSC1), SAVE_ITEM(S3C2440_GPJDAT), @@ -260,7 +260,7 @@ void __init s3c2440_init_clocks(int xtal) * as a driver which may support both 2410 and 2440 may try and use it. 
*/ -int __init s3c2440_core_init(void) +static int __init s3c2440_core_init(void) { return sysdev_class_register(&s3c2440_sysclass); } diff --git a/arch/arm/mach-s3c2410/time.c b/arch/arm/mach-s3c2410/time.c index c0acfb2ad790..8a00e3c3cd08 100644 --- a/arch/arm/mach-s3c2410/time.c +++ b/arch/arm/mach-s3c2410/time.c @@ -38,6 +38,7 @@ #include #include "clock.h" +#include "cpu.h" static unsigned long timer_startval; static unsigned long timer_usec_ticks; diff --git a/arch/arm/mach-s3c2410/usb-simtec.c b/arch/arm/mach-s3c2410/usb-simtec.c index f021fd82be52..5098b50158a3 100644 --- a/arch/arm/mach-s3c2410/usb-simtec.c +++ b/arch/arm/mach-s3c2410/usb-simtec.c @@ -40,7 +40,6 @@ #include #include #include -#include #include "devs.h" #include "usb-simtec.h" diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c index 25d6a4e27533..6ecab7e2c238 100644 --- a/arch/arm/mach-sa1100/collie.c +++ b/arch/arm/mach-sa1100/collie.c @@ -111,11 +111,11 @@ static struct mtd_partition collie_partitions[] = { static void collie_set_vpp(int vpp) { - write_scoop_reg(&colliescoop_device.dev, SCOOP_GPCR, read_scoop_reg(SCOOP_GPCR) | COLLIE_SCP_VPEN); + write_scoop_reg(&colliescoop_device.dev, SCOOP_GPCR, read_scoop_reg(&colliescoop_device.dev, SCOOP_GPCR) | COLLIE_SCP_VPEN); if (vpp) - write_scoop_reg(&colliescoop_device.dev, SCOOP_GPWR, read_scoop_reg(SCOOP_GPWR) | COLLIE_SCP_VPEN); + write_scoop_reg(&colliescoop_device.dev, SCOOP_GPWR, read_scoop_reg(&colliescoop_device.dev, SCOOP_GPWR) | COLLIE_SCP_VPEN); else - write_scoop_reg(&colliescoop_device.dev, SCOOP_GPWR, read_scoop_reg(SCOOP_GPWR) & ~COLLIE_SCP_VPEN); + write_scoop_reg(&colliescoop_device.dev, SCOOP_GPWR, read_scoop_reg(&colliescoop_device.dev, SCOOP_GPWR) & ~COLLIE_SCP_VPEN); } static struct flash_platform_data collie_flash_data = { diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index 3c8862fde51a..a30e0451df72 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include @@ -52,8 +51,9 @@ * * Setup a VA for the Versatile Vectored Interrupt Controller. */ -#define VA_VIC_BASE IO_ADDRESS(VERSATILE_VIC_BASE) -#define VA_SIC_BASE IO_ADDRESS(VERSATILE_SIC_BASE) +#define __io_address(n) __io(IO_ADDRESS(n)) +#define VA_VIC_BASE __io_address(VERSATILE_VIC_BASE) +#define VA_SIC_BASE __io_address(VERSATILE_SIC_BASE) static void vic_mask_irq(unsigned int irq) { @@ -214,7 +214,7 @@ void __init versatile_map_io(void) iotable_init(versatile_io_desc, ARRAY_SIZE(versatile_io_desc)); } -#define VERSATILE_REFCOUNTER (IO_ADDRESS(VERSATILE_SYS_BASE) + VERSATILE_SYS_24MHz_OFFSET) +#define VERSATILE_REFCOUNTER (__io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_24MHz_OFFSET) /* * This is the Versatile sched_clock implementation. 
This has @@ -231,7 +231,7 @@ unsigned long long sched_clock(void) } -#define VERSATILE_FLASHCTRL (IO_ADDRESS(VERSATILE_SYS_BASE) + VERSATILE_SYS_FLASH_OFFSET) +#define VERSATILE_FLASHCTRL (__io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_FLASH_OFFSET) static int versatile_flash_init(void) { @@ -309,7 +309,7 @@ static struct platform_device smc91x_device = { .resource = smc91x_resources, }; -#define VERSATILE_SYSMCI (IO_ADDRESS(VERSATILE_SYS_BASE) + VERSATILE_SYS_MCI_OFFSET) +#define VERSATILE_SYSMCI (__io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_MCI_OFFSET) unsigned int mmc_status(struct device *dev) { @@ -343,11 +343,11 @@ static const struct icst307_params versatile_oscvco_params = { static void versatile_oscvco_set(struct clk *clk, struct icst307_vco vco) { - unsigned long sys_lock = IO_ADDRESS(VERSATILE_SYS_BASE) + VERSATILE_SYS_LOCK_OFFSET; + void __iomem *sys_lock = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_LOCK_OFFSET; #if defined(CONFIG_ARCH_VERSATILE_PB) - unsigned long sys_osc = IO_ADDRESS(VERSATILE_SYS_BASE) + VERSATILE_SYS_OSC4_OFFSET; + void __iomem *sys_osc = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_OSC4_OFFSET; #elif defined(CONFIG_MACH_VERSATILE_AB) - unsigned long sys_osc = IO_ADDRESS(VERSATILE_SYS_BASE) + VERSATILE_SYS_OSC1_OFFSET; + void __iomem *sys_osc = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_OSC1_OFFSET; #endif u32 val; @@ -483,7 +483,7 @@ static struct clcd_panel epson_2_2_in = { */ static struct clcd_panel *versatile_clcd_panel(void) { - unsigned long sys_clcd = IO_ADDRESS(VERSATILE_SYS_BASE) + VERSATILE_SYS_CLCD_OFFSET; + void __iomem *sys_clcd = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_CLCD_OFFSET; struct clcd_panel *panel = &vga; u32 val; @@ -510,7 +510,7 @@ static struct clcd_panel *versatile_clcd_panel(void) */ static void versatile_clcd_disable(struct clcd_fb *fb) { - unsigned long sys_clcd = IO_ADDRESS(VERSATILE_SYS_BASE) + VERSATILE_SYS_CLCD_OFFSET; + void __iomem *sys_clcd = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_CLCD_OFFSET; u32 val; val = readl(sys_clcd); @@ -522,7 +522,7 @@ static void versatile_clcd_disable(struct clcd_fb *fb) * If the LCD is Sanyo 2x5 in on the IB2 board, turn the back-light off */ if (fb->panel == &sanyo_2_5_in) { - unsigned long versatile_ib2_ctrl = IO_ADDRESS(VERSATILE_IB2_CTRL); + void __iomem *versatile_ib2_ctrl = __io_address(VERSATILE_IB2_CTRL); unsigned long ctrl; ctrl = readl(versatile_ib2_ctrl); @@ -537,7 +537,7 @@ static void versatile_clcd_disable(struct clcd_fb *fb) */ static void versatile_clcd_enable(struct clcd_fb *fb) { - unsigned long sys_clcd = IO_ADDRESS(VERSATILE_SYS_BASE) + VERSATILE_SYS_CLCD_OFFSET; + void __iomem *sys_clcd = __io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_CLCD_OFFSET; u32 val; val = readl(sys_clcd); @@ -571,7 +571,7 @@ static void versatile_clcd_enable(struct clcd_fb *fb) * If the LCD is Sanyo 2x5 in on the IB2 board, turn the back-light on */ if (fb->panel == &sanyo_2_5_in) { - unsigned long versatile_ib2_ctrl = IO_ADDRESS(VERSATILE_IB2_CTRL); + void __iomem *versatile_ib2_ctrl = __io_address(VERSATILE_IB2_CTRL); unsigned long ctrl; ctrl = readl(versatile_ib2_ctrl); @@ -720,7 +720,7 @@ static struct amba_device *amba_devs[] __initdata = { }; #ifdef CONFIG_LEDS -#define VA_LEDS_BASE (IO_ADDRESS(VERSATILE_SYS_BASE) + VERSATILE_SYS_LED_OFFSET) +#define VA_LEDS_BASE (__io_address(VERSATILE_SYS_BASE) + VERSATILE_SYS_LED_OFFSET) static void versatile_leds_event(led_event_t ledevt) { @@ -778,11 +778,11 @@ void __init versatile_init(void) /* * Where is the 
timer (VA)? */ -#define TIMER0_VA_BASE IO_ADDRESS(VERSATILE_TIMER0_1_BASE) -#define TIMER1_VA_BASE (IO_ADDRESS(VERSATILE_TIMER0_1_BASE) + 0x20) -#define TIMER2_VA_BASE IO_ADDRESS(VERSATILE_TIMER2_3_BASE) -#define TIMER3_VA_BASE (IO_ADDRESS(VERSATILE_TIMER2_3_BASE) + 0x20) -#define VA_IC_BASE IO_ADDRESS(VERSATILE_VIC_BASE) +#define TIMER0_VA_BASE __io_address(VERSATILE_TIMER0_1_BASE) +#define TIMER1_VA_BASE (__io_address(VERSATILE_TIMER0_1_BASE) + 0x20) +#define TIMER2_VA_BASE __io_address(VERSATILE_TIMER2_3_BASE) +#define TIMER3_VA_BASE (__io_address(VERSATILE_TIMER2_3_BASE) + 0x20) +#define VA_IC_BASE __io_address(VERSATILE_VIC_BASE) /* * How long is the timer interval? @@ -877,12 +877,12 @@ static void __init versatile_timer_init(void) * VERSATILE_REFCLK is 32KHz * VERSATILE_TIMCLK is 1MHz */ - val = readl(IO_ADDRESS(VERSATILE_SCTL_BASE)); + val = readl(__io_address(VERSATILE_SCTL_BASE)); writel((VERSATILE_TIMCLK << VERSATILE_TIMER1_EnSel) | (VERSATILE_TIMCLK << VERSATILE_TIMER2_EnSel) | (VERSATILE_TIMCLK << VERSATILE_TIMER3_EnSel) | (VERSATILE_TIMCLK << VERSATILE_TIMER4_EnSel) | val, - IO_ADDRESS(VERSATILE_SCTL_BASE)); + __io_address(VERSATILE_SCTL_BASE)); /* * Initialise to a known state (all timers off) diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c index d1565e851f0e..b80d57d51699 100644 --- a/arch/arm/mach-versatile/pci.c +++ b/arch/arm/mach-versatile/pci.c @@ -29,7 +29,6 @@ #include #include #include -#include /* * these spaces are mapped using the following base registers: diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index db5e47dfc303..c54e04c995ee 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -370,21 +370,21 @@ config CPU_BIG_ENDIAN config CPU_ICACHE_DISABLE bool "Disable I-Cache" - depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 + depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6 help Say Y here to disable the processor instruction cache. Unless you have a reason not to or are unsure, say N. config CPU_DCACHE_DISABLE bool "Disable D-Cache" - depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 + depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6 help Say Y here to disable the processor data cache. Unless you have a reason not to or are unsure, say N. config CPU_DCACHE_WRITETHROUGH bool "Force write through D-cache" - depends on (CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020) && !CPU_DCACHE_DISABLE + depends on (CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6) && !CPU_DCACHE_DISABLE default y if CPU_ARM925T help Say Y here to use the data cache in writethrough mode. Unless you @@ -399,7 +399,7 @@ config CPU_CACHE_ROUND_ROBIN config CPU_BPREDICT_DISABLE bool "Disable branch prediction" - depends on CPU_ARM1020 + depends on CPU_ARM1020 || CPU_V6 help Say Y here to disable branch prediction. If unsure, say N. 
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S index 8f76f3df7b4c..dbd346033122 100644 --- a/arch/arm/mm/abort-ev6.S +++ b/arch/arm/mm/abort-ev6.S @@ -20,6 +20,11 @@ */ .align 5 ENTRY(v6_early_abort) +#ifdef CONFIG_CPU_MPCORE + clrex +#else + strex r0, r1, [sp] @ Clear the exclusive monitor +#endif mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR /* diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 4b39d867ac14..705c98921c37 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -111,7 +111,7 @@ proc_alignment_read(char *page, char **start, off_t off, int count, int *eof, } static int proc_alignment_write(struct file *file, const char __user *buffer, - unsigned long count, void *data) + unsigned long count, void *data) { char mode; @@ -119,7 +119,7 @@ static int proc_alignment_write(struct file *file, const char __user *buffer, if (get_user(mode, buffer)) return -EFAULT; if (mode >= '0' && mode <= '5') - ai_usermode = mode - '0'; + ai_usermode = mode - '0'; } return count; } @@ -262,7 +262,7 @@ union offset_union { goto fault; \ } while (0) -#define put32_unaligned_check(val,addr) \ +#define put32_unaligned_check(val,addr) \ __put32_unaligned_check("strb", val, addr) #define put32t_unaligned_check(val,addr) \ @@ -306,19 +306,19 @@ do_alignment_ldrhstrh(unsigned long addr, unsigned long instr, struct pt_regs *r return TYPE_LDST; user: - if (LDST_L_BIT(instr)) { - unsigned long val; - get16t_unaligned_check(val, addr); + if (LDST_L_BIT(instr)) { + unsigned long val; + get16t_unaligned_check(val, addr); - /* signed half-word? */ - if (instr & 0x40) - val = (signed long)((signed short) val); + /* signed half-word? */ + if (instr & 0x40) + val = (signed long)((signed short) val); - regs->uregs[rd] = val; - } else - put16t_unaligned_check(regs->uregs[rd], addr); + regs->uregs[rd] = val; + } else + put16t_unaligned_check(regs->uregs[rd], addr); - return TYPE_LDST; + return TYPE_LDST; fault: return TYPE_FAULT; @@ -330,6 +330,9 @@ do_alignment_ldrdstrd(unsigned long addr, unsigned long instr, { unsigned int rd = RD_BITS(instr); + if (((rd & 1) == 1) || (rd == 14)) + goto bad; + ai_dword += 1; if (user_mode(regs)) @@ -339,11 +342,11 @@ do_alignment_ldrdstrd(unsigned long addr, unsigned long instr, unsigned long val; get32_unaligned_check(val, addr); regs->uregs[rd] = val; - get32_unaligned_check(val, addr+4); - regs->uregs[rd+1] = val; + get32_unaligned_check(val, addr + 4); + regs->uregs[rd + 1] = val; } else { put32_unaligned_check(regs->uregs[rd], addr); - put32_unaligned_check(regs->uregs[rd+1], addr+4); + put32_unaligned_check(regs->uregs[rd + 1], addr + 4); } return TYPE_LDST; @@ -353,15 +356,16 @@ do_alignment_ldrdstrd(unsigned long addr, unsigned long instr, unsigned long val; get32t_unaligned_check(val, addr); regs->uregs[rd] = val; - get32t_unaligned_check(val, addr+4); - regs->uregs[rd+1] = val; + get32t_unaligned_check(val, addr + 4); + regs->uregs[rd + 1] = val; } else { put32t_unaligned_check(regs->uregs[rd], addr); - put32t_unaligned_check(regs->uregs[rd+1], addr+4); + put32t_unaligned_check(regs->uregs[rd + 1], addr + 4); } return TYPE_LDST; - + bad: + return TYPE_ERROR; fault: return TYPE_FAULT; } @@ -439,7 +443,7 @@ do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *reg if (LDST_P_EQ_U(instr)) /* U = P */ eaddr += 4; - /* + /* * For alignment faults on the ARM922T/ARM920T the MMU makes * the FSR (and hence addr) equal to the updated base address * of the multiple access rather than 
the restored value. @@ -566,7 +570,7 @@ thumb2arm(u16 tinstr) /* 6.5.1 Format 3: */ case 0x4800 >> 11: /* 7.1.28 LDR(3) */ /* NOTE: This case is not technically possible. We're - * loading 32-bit memory data via PC relative + * loading 32-bit memory data via PC relative * addressing mode. So we can and should eliminate * this case. But I'll leave it here for now. */ @@ -638,7 +642,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) if (fault) { type = TYPE_FAULT; - goto bad_or_fault; + goto bad_or_fault; } if (user_mode(regs)) @@ -663,6 +667,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) else if ((instr & 0x001000f0) == 0x000000d0 || /* LDRD */ (instr & 0x001000f0) == 0x000000f0) /* STRD */ handler = do_alignment_ldrdstrd; + else if ((instr & 0x01f00ff0) == 0x01000090) /* SWP */ + goto swp; else goto bad; break; @@ -733,6 +739,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) do_bad_area(current, current->mm, addr, fsr, regs); return 0; + swp: + printk(KERN_ERR "Alignment trap: not handling swp instruction\n"); + bad: /* * Oops, we didn't handle the instruction. diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index 85c10a71e7c6..72966d90e956 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S @@ -18,6 +18,7 @@ #define HARVARD_CACHE #define CACHE_LINE_SIZE 32 #define D_CACHE_LINE_SIZE 32 +#define BTB_FLUSH_SIZE 8 /* * v6_flush_cache_all() @@ -98,7 +99,13 @@ ENTRY(v6_coherent_user_range) mcr p15, 0, r0, c7, c5, 1 @ invalidate I line #endif mcr p15, 0, r0, c7, c5, 7 @ invalidate BTB entry - add r0, r0, #CACHE_LINE_SIZE + add r0, r0, #BTB_FLUSH_SIZE + mcr p15, 0, r0, c7, c5, 7 @ invalidate BTB entry + add r0, r0, #BTB_FLUSH_SIZE + mcr p15, 0, r0, c7, c5, 7 @ invalidate BTB entry + add r0, r0, #BTB_FLUSH_SIZE + mcr p15, 0, r0, c7, c5, 7 @ invalidate BTB entry + add r0, r0, #BTB_FLUSH_SIZE cmp r0, r1 blo 1b #ifdef HARVARD_CACHE diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c index 26356ce4da54..82f4d5e27c54 100644 --- a/arch/arm/mm/consistent.c +++ b/arch/arm/mm/consistent.c @@ -75,7 +75,7 @@ static struct vm_region consistent_head = { }; static struct vm_region * -vm_region_alloc(struct vm_region *head, size_t size, int gfp) +vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp) { unsigned long addr = head->vm_start, end = head->vm_end - size; unsigned long flags; @@ -133,7 +133,7 @@ static struct vm_region *vm_region_find(struct vm_region *head, unsigned long ad #endif static void * -__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, int gfp, +__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, pgprot_t prot) { struct page *page; @@ -251,7 +251,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, int gfp, * virtual and bus address for that space. */ void * -dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp) +dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) { return __dma_alloc(dev, size, handle, gfp, pgprot_noncached(pgprot_kernel)); @@ -263,7 +263,7 @@ EXPORT_SYMBOL(dma_alloc_coherent); * dma_alloc_coherent above. 
*/ void * -dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int gfp) +dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) { return __dma_alloc(dev, size, handle, gfp, pgprot_writecombine(pgprot_kernel)); diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index b0208c992576..c9a03981b785 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -17,6 +17,24 @@ #ifdef CONFIG_CPU_CACHE_VIPT +#define ALIAS_FLUSH_START 0xffff4000 + +#define TOP_PTE(x) pte_offset_kernel(top_pmd, x) + +static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) +{ + unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); + + set_pte(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL)); + flush_tlb_kernel_page(to); + + asm( "mcrr p15, 0, %1, %0, c14\n" + " mcrr p15, 0, %1, %0, c5\n" + : + : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES) + : "cc"); +} + void flush_cache_mm(struct mm_struct *mm) { if (cache_is_vivt()) { @@ -67,24 +85,6 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig if (cache_is_vipt_aliasing()) flush_pfn_alias(pfn, user_addr); } - -#define ALIAS_FLUSH_START 0xffff4000 - -#define TOP_PTE(x) pte_offset_kernel(top_pmd, x) - -static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) -{ - unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); - - set_pte(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL)); - flush_tlb_kernel_page(to); - - asm( "mcrr p15, 0, %1, %0, c14\n" - " mcrr p15, 0, %1, %0, c5\n" - : - : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES) - : "cc"); -} #else #define flush_pfn_alias(pfn,vaddr) do { } while (0) #endif diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index caf3b19b167f..9bb5fff406fb 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -55,7 +55,14 @@ ENTRY(cpu_v6_proc_init) mov pc, lr ENTRY(cpu_v6_proc_fin) - mov pc, lr + stmfd sp!, {lr} + cpsid if @ disable interrupts + bl v6_flush_kern_cache_all + mrc p15, 0, r0, c1, c0, 0 @ ctrl register + bic r0, r0, #0x1000 @ ...i............ + bic r0, r0, #0x0006 @ .............ca. + mcr p15, 0, r0, c1, c0, 0 @ disable caches + ldmfd sp!, {pc} /* * cpu_v6_reset(loc) diff --git a/arch/arm/nwfpe/fpa11.c b/arch/arm/nwfpe/fpa11.c index 7690f731ee87..7b3d74d73c80 100644 --- a/arch/arm/nwfpe/fpa11.c +++ b/arch/arm/nwfpe/fpa11.c @@ -31,11 +31,6 @@ #include #include -/* forward declarations */ -unsigned int EmulateCPDO(const unsigned int); -unsigned int EmulateCPDT(const unsigned int); -unsigned int EmulateCPRT(const unsigned int); - /* Reset the FPA11 chip. Called to initialize and reset the emulator. 
*/ static void resetFPA11(void) { diff --git a/arch/arm/nwfpe/fpa11.h b/arch/arm/nwfpe/fpa11.h index 93523ae4b7a1..9677ae8448e8 100644 --- a/arch/arm/nwfpe/fpa11.h +++ b/arch/arm/nwfpe/fpa11.h @@ -95,4 +95,24 @@ extern int8 SetRoundingMode(const unsigned int); extern int8 SetRoundingPrecision(const unsigned int); extern void nwfpe_init_fpa(union fp_state *fp); +extern unsigned int EmulateAll(unsigned int opcode); + +extern unsigned int EmulateCPDT(const unsigned int opcode); +extern unsigned int EmulateCPDO(const unsigned int opcode); +extern unsigned int EmulateCPRT(const unsigned int opcode); + +/* fpa11_cpdt.c */ +extern unsigned int PerformLDF(const unsigned int opcode); +extern unsigned int PerformSTF(const unsigned int opcode); +extern unsigned int PerformLFM(const unsigned int opcode); +extern unsigned int PerformSFM(const unsigned int opcode); + +/* single_cpdo.c */ + +extern unsigned int SingleCPDO(struct roundingData *roundData, + const unsigned int opcode, FPREG * rFd); +/* double_cpdo.c */ +extern unsigned int DoubleCPDO(struct roundingData *roundData, + const unsigned int opcode, FPREG * rFd); + #endif diff --git a/arch/arm/nwfpe/fpa11_cprt.c b/arch/arm/nwfpe/fpa11_cprt.c index adf8d3000540..7c67023655e4 100644 --- a/arch/arm/nwfpe/fpa11_cprt.c +++ b/arch/arm/nwfpe/fpa11_cprt.c @@ -26,12 +26,11 @@ #include "fpa11.inl" #include "fpmodule.h" #include "fpmodule.inl" +#include "softfloat.h" #ifdef CONFIG_FPE_NWFPE_XP extern flag floatx80_is_nan(floatx80); #endif -extern flag float64_is_nan(float64); -extern flag float32_is_nan(float32); unsigned int PerformFLT(const unsigned int opcode); unsigned int PerformFIX(const unsigned int opcode); diff --git a/arch/arm/nwfpe/fpopcode.h b/arch/arm/nwfpe/fpopcode.h index 1777e92a88e6..6528e081c83f 100644 --- a/arch/arm/nwfpe/fpopcode.h +++ b/arch/arm/nwfpe/fpopcode.h @@ -476,4 +476,10 @@ static inline unsigned int getDestinationSize(const unsigned int opcode) return (nRc); } +extern unsigned int checkCondition(const unsigned int opcode, + const unsigned int ccodes); + +extern const float64 float64Constant[]; +extern const float32 float32Constant[]; + #endif diff --git a/arch/arm/nwfpe/softfloat.h b/arch/arm/nwfpe/softfloat.h index 1c8799b9ee4d..14151700b6b2 100644 --- a/arch/arm/nwfpe/softfloat.h +++ b/arch/arm/nwfpe/softfloat.h @@ -265,4 +265,7 @@ static inline flag float64_lt_nocheck(float64 a, float64 b) return (a != b) && (aSign ^ (a < b)); } +extern flag float32_is_nan( float32 a ); +extern flag float64_is_nan( float64 a ); + #endif diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c index 6cb20aea7f51..02bcc6c1cd1b 100644 --- a/arch/arm/plat-omap/common.c +++ b/arch/arm/plat-omap/common.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include diff --git a/arch/arm/plat-omap/cpu-omap.c b/arch/arm/plat-omap/cpu-omap.c index 409aac2c4b9d..fd894bb00107 100644 --- a/arch/arm/plat-omap/cpu-omap.c +++ b/arch/arm/plat-omap/cpu-omap.c @@ -21,7 +21,6 @@ #include #include -#include #include #include diff --git a/arch/arm/plat-omap/usb.c b/arch/arm/plat-omap/usb.c index 98f1c76f8660..14a836d7ac25 100644 --- a/arch/arm/plat-omap/usb.c +++ b/arch/arm/plat-omap/usb.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types index 6d3a79e5fef8..ae7c64b8cec3 100644 --- a/arch/arm/tools/mach-types +++ b/arch/arm/tools/mach-types @@ -2,11 +2,17 @@ # # This file is linux/arch/arm/tools/mach-types # +# Up to date versions of this file can 
be obtained from: +# +# http://www.arm.linux.org.uk/developer/machines/?action=download +# # Please do not send patches to this file; it is automatically generated! # To add an entry into this database, please see Documentation/arm/README, -# or contact rmk@arm.linux.org.uk +# or visit: # -# Last update: Thu Jun 23 20:19:33 2005 +# http://www.arm.linux.org.uk/developer/machines/?action=new +# +# Last update: Mon Oct 10 09:46:25 2005 # # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number # @@ -421,7 +427,7 @@ mt02 MACH_MT02 MT02 410 mport3s MACH_MPORT3S MPORT3S 411 ra_alpha MACH_RA_ALPHA RA_ALPHA 412 xcep MACH_XCEP XCEP 413 -arcom_mercury MACH_ARCOM_MERCURY ARCOM_MERCURY 414 +arcom_vulcan MACH_ARCOM_VULCAN ARCOM_VULCAN 414 stargate MACH_STARGATE STARGATE 415 armadilloj MACH_ARMADILLOJ ARMADILLOJ 416 elroy_jack MACH_ELROY_JACK ELROY_JACK 417 @@ -454,7 +460,7 @@ esl_sarva MACH_ESL_SARVA ESL_SARVA 443 xm250 MACH_XM250 XM250 444 t6tc1xb MACH_T6TC1XB T6TC1XB 445 ess710 MACH_ESS710 ESS710 446 -mx3ads MACH_MX3ADS MX3ADS 447 +mx31ads MACH_MX3ADS MX3ADS 447 himalaya MACH_HIMALAYA HIMALAYA 448 bolfenk MACH_BOLFENK BOLFENK 449 at91rm9200kr MACH_AT91RM9200KR AT91RM9200KR 450 @@ -787,3 +793,79 @@ ez_ixp42x MACH_EZ_IXP42X EZ_IXP42X 778 tapwave_zodiac MACH_TAPWAVE_ZODIAC TAPWAVE_ZODIAC 779 universalmeter MACH_UNIVERSALMETER UNIVERSALMETER 780 hicoarm9 MACH_HICOARM9 HICOARM9 781 +pnx4008 MACH_PNX4008 PNX4008 782 +kws6000 MACH_KWS6000 KWS6000 783 +portux920t MACH_PORTUX920T PORTUX920T 784 +ez_x5 MACH_EZ_X5 EZ_X5 785 +omap_rudolph MACH_OMAP_RUDOLPH OMAP_RUDOLPH 786 +cpuat91 MACH_CPUAT91 CPUAT91 787 +rea9200 MACH_REA9200 REA9200 788 +acts_pune_sa1110 MACH_ACTS_PUNE_SA1110 ACTS_PUNE_SA1110 789 +ixp425 MACH_IXP425 IXP425 790 +argonplusodyssey MACH_ODYSSEY ODYSSEY 791 +perch MACH_PERCH PERCH 792 +eis05r1 MACH_EIS05R1 EIS05R1 793 +pepperpad MACH_PEPPERPAD PEPPERPAD 794 +sb3010 MACH_SB3010 SB3010 795 +rm9200 MACH_RM9200 RM9200 796 +dma03 MACH_DMA03 DMA03 797 +road_s101 MACH_ROAD_S101 ROAD_S101 798 +iq_nextgen_a MACH_IQ_NEXTGEN_A IQ_NEXTGEN_A 799 +iq_nextgen_b MACH_IQ_NEXTGEN_B IQ_NEXTGEN_B 800 +iq_nextgen_c MACH_IQ_NEXTGEN_C IQ_NEXTGEN_C 801 +iq_nextgen_d MACH_IQ_NEXTGEN_D IQ_NEXTGEN_D 802 +iq_nextgen_e MACH_IQ_NEXTGEN_E IQ_NEXTGEN_E 803 +mallow_at91 MACH_MALLOW_AT91 MALLOW_AT91 804 +cybertracker MACH_CYBERTRACKER CYBERTRACKER 805 +gesbc931x MACH_GESBC931X GESBC931X 806 +centipad MACH_CENTIPAD CENTIPAD 807 +armsoc MACH_ARMSOC ARMSOC 808 +se4200 MACH_SE4200 SE4200 809 +ems197a MACH_EMS197A EMS197A 810 +micro9 MACH_MICRO9 MICRO9 811 +micro9l MACH_MICRO9L MICRO9L 812 +uc5471dsp MACH_UC5471DSP UC5471DSP 813 +sj5471eng MACH_SJ5471ENG SJ5471ENG 814 +none MACH_CMPXA26X CMPXA26X 815 +nc MACH_NC NC 816 +omap_palmte MACH_OMAP_PALMTE OMAP_PALMTE 817 +ajax52x MACH_AJAX52X AJAX52X 818 +siriustar MACH_SIRIUSTAR SIRIUSTAR 819 +iodata_hdlg MACH_IODATA_HDLG IODATA_HDLG 820 +at91rm9200utl MACH_AT91RM9200UTL AT91RM9200UTL 821 +biosafe MACH_BIOSAFE BIOSAFE 822 +mp1000 MACH_MP1000 MP1000 823 +parsy MACH_PARSY PARSY 824 +ccxp270 MACH_CCXP CCXP 825 +omap_gsample MACH_OMAP_GSAMPLE OMAP_GSAMPLE 826 +realview_eb MACH_REALVIEW_EB REALVIEW_EB 827 +samoa MACH_SAMOA SAMOA 828 +t3xscale MACH_T3XSCALE T3XSCALE 829 +i878 MACH_I878 I878 830 +borzoi MACH_BORZOI BORZOI 831 +gecko MACH_GECKO GECKO 832 +ds101 MACH_DS101 DS101 833 +omap_palmtt2 MACH_OMAP_PALMTT2 OMAP_PALMTT2 834 +xscale_palmld MACH_XSCALE_PALMLD XSCALE_PALMLD 835 +cc9c MACH_CC9C CC9C 836 +sbc1670 MACH_SBC1670 SBC1670 837 +ixdp28x5 MACH_IXDP28X5 IXDP28X5 838 +omap_palmtt MACH_OMAP_PALMTT 
OMAP_PALMTT 839 +ml696k MACH_ML696K ML696K 840 +arcom_zeus MACH_ARCOM_ZEUS ARCOM_ZEUS 841 +osiris MACH_OSIRIS OSIRIS 842 +maestro MACH_MAESTRO MAESTRO 843 +tunge2 MACH_TUNGE2 TUNGE2 844 +ixbbm MACH_IXBBM IXBBM 845 +mx27 MACH_MX27 MX27 846 +ax8004 MACH_AX8004 AX8004 847 +at91sam9261ek MACH_AT91SAM9261EK AT91SAM9261EK 848 +loft MACH_LOFT LOFT 849 +magpie MACH_MAGPIE MAGPIE 850 +mx21 MACH_MX21 MX21 851 +mb87m3400 MACH_MB87M3400 MB87M3400 852 +mguard_delta MACH_MGUARD_DELTA MGUARD_DELTA 853 +davinci_dvdp MACH_DAVINCI_DVDP DAVINCI_DVDP 854 +htcuniversal MACH_HTCUNIVERSAL HTCUNIVERSAL 855 +tpad MACH_TPAD TPAD 856 +roverp3 MACH_ROVERP3 ROVERP3 857 diff --git a/arch/cris/arch-v32/drivers/pci/dma.c b/arch/cris/arch-v32/drivers/pci/dma.c index 10329306d23c..426b09878a05 100644 --- a/arch/cris/arch-v32/drivers/pci/dma.c +++ b/arch/cris/arch-v32/drivers/pci/dma.c @@ -24,7 +24,7 @@ struct dma_coherent_mem { }; void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, unsigned int __nocast gfp) + dma_addr_t *dma_handle, gfp_t gfp) { void *ret; struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c index 2c5cae04a95c..957f551ba5ce 100644 --- a/arch/cris/arch-v32/kernel/smp.c +++ b/arch/cris/arch-v32/kernel/smp.c @@ -15,6 +15,7 @@ #include #include #include +#include #define IPI_SCHEDULE 1 #define IPI_CALL 2 @@ -28,6 +29,7 @@ spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED}; /* CPU masks */ cpumask_t cpu_online_map = CPU_MASK_NONE; cpumask_t phys_cpu_present_map = CPU_MASK_NONE; +EXPORT_SYMBOL(phys_cpu_present_map); /* Variables used during SMP boot */ volatile int cpu_now_booting = 0; diff --git a/arch/frv/mb93090-mb00/pci-dma-nommu.c b/arch/frv/mb93090-mb00/pci-dma-nommu.c index 819895cf0b9e..2082a9647f4f 100644 --- a/arch/frv/mb93090-mb00/pci-dma-nommu.c +++ b/arch/frv/mb93090-mb00/pci-dma-nommu.c @@ -33,7 +33,7 @@ struct dma_alloc_record { static DEFINE_SPINLOCK(dma_alloc_lock); static LIST_HEAD(dma_alloc_list); -void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, int gfp) +void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) { struct dma_alloc_record *new; struct list_head *this = &dma_alloc_list; diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c index 27eb12066507..86fbdadc51b6 100644 --- a/arch/frv/mb93090-mb00/pci-dma.c +++ b/arch/frv/mb93090-mb00/pci-dma.c @@ -17,7 +17,7 @@ #include #include -void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, int gfp) +void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) { void *ret; diff --git a/arch/frv/mm/dma-alloc.c b/arch/frv/mm/dma-alloc.c index 4b38d45435f6..cfc4f97490c6 100644 --- a/arch/frv/mm/dma-alloc.c +++ b/arch/frv/mm/dma-alloc.c @@ -81,7 +81,7 @@ static int map_page(unsigned long va, unsigned long pa, pgprot_t prot) * portions of the kernel with single large page TLB entries, and * still get unique uncached pages for consistent DMA. 
*/ -void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle) +void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle) { struct vm_struct *area; unsigned long page, va, pa; diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c index a63351c085c6..b66c13c0cc0f 100644 --- a/arch/i386/kernel/acpi/boot.c +++ b/arch/i386/kernel/acpi/boot.c @@ -27,15 +27,14 @@ #include #include #include -#include #include #include +#include #include #include #include #include -#include #include #ifdef CONFIG_X86_64 diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c index a22a866de8f9..5546ddebec33 100644 --- a/arch/i386/kernel/apic.c +++ b/arch/i386/kernel/apic.c @@ -18,7 +18,6 @@ #include #include -#include #include #include #include diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c index 73aeaf5a9d4e..53a1681cd964 100644 --- a/arch/i386/kernel/cpu/amd.c +++ b/arch/i386/kernel/cpu/amd.c @@ -28,6 +28,22 @@ static void __init init_amd(struct cpuinfo_x86 *c) int mbytes = num_physpages >> (20-PAGE_SHIFT); int r; +#ifdef CONFIG_SMP + unsigned long long value; + + /* Disable TLB flush filter by setting HWCR.FFDIS on K8 + * bit 6 of msr C001_0015 + * + * Errata 63 for SH-B3 steppings + * Errata 122 for all steppings (F+ have it disabled by default) + */ + if (c->x86 == 15) { + rdmsrl(MSR_K7_HWCR, value); + value |= 1 << 6; + wrmsrl(MSR_K7_HWCR, value); + } +#endif + /* * FIXME: We should handle the K5 here. Set up the write * range and also turn on MSR 83 bits 4 and 31 (write alloc, diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c index ab6e0611303d..58ca98fdc2ca 100644 --- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c @@ -44,7 +44,7 @@ #define PFX "powernow-k8: " #define BFX PFX "BIOS error: " -#define VERSION "version 1.50.3" +#define VERSION "version 1.50.4" #include "powernow-k8.h" /* serialize freq changes */ @@ -111,8 +111,8 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data) u32 i = 0; do { - if (i++ > 0x1000000) { - printk(KERN_ERR PFX "detected change pending stuck\n"); + if (i++ > 10000) { + dprintk("detected change pending stuck\n"); return 1; } rdmsr(MSR_FIDVID_STATUS, lo, hi); @@ -159,6 +159,7 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid) { u32 lo; u32 savevid = data->currvid; + u32 i = 0; if ((fid & INVALID_FID_MASK) || (data->currvid & INVALID_VID_MASK)) { printk(KERN_ERR PFX "internal error - overflow on fid write\n"); @@ -170,10 +171,13 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid) dprintk("writing fid 0x%x, lo 0x%x, hi 0x%x\n", fid, lo, data->plllock * PLL_LOCK_CONVERSION); - wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION); - - if (query_current_values_with_pending_wait(data)) - return 1; + do { + wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION); + if (i++ > 100) { + printk(KERN_ERR PFX "internal error - pending bit very stuck - no further pstate changes possible\n"); + return 1; + } + } while (query_current_values_with_pending_wait(data)); count_off_irt(data); @@ -197,6 +201,7 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid) { u32 lo; u32 savefid = data->currfid; + int i = 0; if ((data->currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) { printk(KERN_ERR PFX "internal error - overflow on vid write\n"); @@ -208,10 +213,13 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid) 
dprintk("writing vid 0x%x, lo 0x%x, hi 0x%x\n", vid, lo, STOP_GRANT_5NS); - wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS); - - if (query_current_values_with_pending_wait(data)) - return 1; + do { + wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS); + if (i++ > 100) { + printk(KERN_ERR PFX "internal error - pending bit very stuck - no further pstate changes possible\n"); + return 1; + } + } while (query_current_values_with_pending_wait(data)); if (savefid != data->currfid) { printk(KERN_ERR PFX "fid changed on vid trans, old 0x%x new 0x%x\n", diff --git a/arch/i386/kernel/cpu/mcheck/k7.c b/arch/i386/kernel/cpu/mcheck/k7.c index c4abe7657397..7c6b9c73522f 100644 --- a/arch/i386/kernel/cpu/mcheck/k7.c +++ b/arch/i386/kernel/cpu/mcheck/k7.c @@ -7,7 +7,6 @@ #include #include #include -#include #include #include diff --git a/arch/i386/kernel/cpu/mcheck/non-fatal.c b/arch/i386/kernel/cpu/mcheck/non-fatal.c index 7864ddfccf07..82dffe0d4954 100644 --- a/arch/i386/kernel/cpu/mcheck/non-fatal.c +++ b/arch/i386/kernel/cpu/mcheck/non-fatal.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/i386/kernel/cpu/mcheck/p4.c b/arch/i386/kernel/cpu/mcheck/p4.c index 0abccb6fdf9e..1d1e885f500a 100644 --- a/arch/i386/kernel/cpu/mcheck/p4.c +++ b/arch/i386/kernel/cpu/mcheck/p4.c @@ -6,7 +6,6 @@ #include #include #include -#include #include #include diff --git a/arch/i386/kernel/cpu/mcheck/p5.c b/arch/i386/kernel/cpu/mcheck/p5.c index ec0614cd2925..3a2e24baddc7 100644 --- a/arch/i386/kernel/cpu/mcheck/p5.c +++ b/arch/i386/kernel/cpu/mcheck/p5.c @@ -6,7 +6,6 @@ #include #include #include -#include #include #include diff --git a/arch/i386/kernel/cpu/mcheck/p6.c b/arch/i386/kernel/cpu/mcheck/p6.c index f01b73f947e1..3c035b8fa3d9 100644 --- a/arch/i386/kernel/cpu/mcheck/p6.c +++ b/arch/i386/kernel/cpu/mcheck/p6.c @@ -6,7 +6,6 @@ #include #include #include -#include #include #include diff --git a/arch/i386/kernel/cpu/mcheck/winchip.c b/arch/i386/kernel/cpu/mcheck/winchip.c index 7bae68fa168f..5b9d2dd411d3 100644 --- a/arch/i386/kernel/cpu/mcheck/winchip.c +++ b/arch/i386/kernel/cpu/mcheck/winchip.c @@ -6,7 +6,6 @@ #include #include #include -#include #include #include diff --git a/arch/i386/kernel/crash.c b/arch/i386/kernel/crash.c index 913be77bb844..0248e084017c 100644 --- a/arch/i386/kernel/crash.c +++ b/arch/i386/kernel/crash.c @@ -11,10 +11,8 @@ #include #include #include -#include #include #include -#include #include #include #include diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c index 178f4e9bac9d..323ef8ab3244 100644 --- a/arch/i386/kernel/i8259.c +++ b/arch/i386/kernel/i8259.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -25,8 +24,6 @@ #include #include -#include - #include /* diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c index 378313b0cce9..fb3991e8229e 100644 --- a/arch/i386/kernel/io_apic.c +++ b/arch/i386/kernel/io_apic.c @@ -21,7 +21,6 @@ */ #include -#include #include #include #include diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c index 15949fd08109..27aabfceb67e 100644 --- a/arch/i386/kernel/mpparse.c +++ b/arch/i386/kernel/mpparse.c @@ -14,7 +14,6 @@ */ #include -#include #include #include #include diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c index 0178457db721..72515b8a1b12 100644 --- a/arch/i386/kernel/nmi.c +++ b/arch/i386/kernel/nmi.c @@ -15,7 +15,6 @@ #include #include -#include #include #include #include diff --git 
a/arch/i386/kernel/pci-dma.c b/arch/i386/kernel/pci-dma.c index 1e51427cc9eb..25fe66853934 100644 --- a/arch/i386/kernel/pci-dma.c +++ b/arch/i386/kernel/pci-dma.c @@ -23,7 +23,7 @@ struct dma_coherent_mem { }; void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, unsigned int __nocast gfp) + dma_addr_t *dma_handle, gfp_t gfp) { void *ret; struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c index b45cbf93d439..7a14fdfd3af9 100644 --- a/arch/i386/kernel/process.c +++ b/arch/i386/kernel/process.c @@ -47,13 +47,11 @@ #include #include #include -#include #include #ifdef CONFIG_MATH_EMULATION #include #endif -#include #include #include diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c index 61eb0c8a6e47..adcd069db91e 100644 --- a/arch/i386/kernel/signal.c +++ b/arch/i386/kernel/signal.c @@ -338,7 +338,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) esp = (unsigned long) ka->sa.sa_restorer; } - return (void __user *)((esp - frame_size) & -8ul); + esp -= frame_size; + /* Align the stack pointer according to the i386 ABI, + * i.e. so that on function entry ((sp + 4) & 15) == 0. */ + esp = ((esp + 4) & -16ul) - 4; + return (void __user *) esp; } /* These symbols are defined with the addresses in the vsyscall page. diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c index 48b55db3680f..218d725a5a1e 100644 --- a/arch/i386/kernel/smp.c +++ b/arch/i386/kernel/smp.c @@ -11,7 +11,6 @@ #include #include -#include #include #include #include diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c index 5f0a95d76a4f..1fb26d0e30b6 100644 --- a/arch/i386/kernel/smpboot.c +++ b/arch/i386/kernel/smpboot.c @@ -42,7 +42,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/i386/kernel/timers/timer_pit.c b/arch/i386/kernel/timers/timer_pit.c index eddb64038234..e42e46d35159 100644 --- a/arch/i386/kernel/timers/timer_pit.c +++ b/arch/i386/kernel/timers/timer_pit.c @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c index 431a551e46ea..19e90bdd84ea 100644 --- a/arch/i386/kernel/traps.c +++ b/arch/i386/kernel/traps.c @@ -52,7 +52,6 @@ #include #include -#include #include #include "mach_traps.h" diff --git a/arch/i386/mach-default/setup.c b/arch/i386/mach-default/setup.c index e5a1a83d09ef..b4a7455c6993 100644 --- a/arch/i386/mach-default/setup.c +++ b/arch/i386/mach-default/setup.c @@ -5,7 +5,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/i386/mach-visws/setup.c b/arch/i386/mach-visws/setup.c index 26ada6fc0d77..07fac7e749c7 100644 --- a/arch/i386/mach-visws/setup.c +++ b/arch/i386/mach-visws/setup.c @@ -5,7 +5,6 @@ #include #include -#include #include #include diff --git a/arch/i386/mach-visws/visws_apic.c b/arch/i386/mach-visws/visws_apic.c index 04e6585849a2..3e64fb721291 100644 --- a/arch/i386/mach-visws/visws_apic.c +++ b/arch/i386/mach-visws/visws_apic.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include diff --git a/arch/i386/mach-voyager/setup.c b/arch/i386/mach-voyager/setup.c index df123fc487bb..7d8a3acb9441 100644 --- a/arch/i386/mach-voyager/setup.c +++ b/arch/i386/mach-voyager/setup.c @@ -4,7 +4,6 @@ #include #include -#include #include #include #include diff --git a/arch/i386/mach-voyager/voyager_basic.c 
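
The get_sigframe() hunk above (arch/i386/kernel/signal.c) replaces the old 8-byte rounding with the alignment the i386 ABI actually asks for: on entry to the signal handler the stack must look as it does right after a call instruction, i.e. ((%esp + 4) & 15) == 0. A tiny user-space check of that arithmetic, with made-up esp and frame_size values (an illustration, not kernel code):

	#include <assert.h>

	int main(void)
	{
		unsigned long esp = 0xbfffe7b3;		/* arbitrary incoming stack pointer */
		unsigned long frame_size = 0x2ac;	/* arbitrary signal frame size */

		esp -= frame_size;
		/* Same expression as the patch: round esp+4 down to a multiple of
		 * 16, then back off the 4 bytes of the return-address slot. */
		esp = ((esp + 4) & -16ul) - 4;

		assert(((esp + 4) & 15) == 0);		/* the ABI condition now holds */
		return 0;
	}
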
b/arch/i386/mach-voyager/voyager_basic.c index cc69875d979b..aa49a33a572c 100644 --- a/arch/i386/mach-voyager/voyager_basic.c +++ b/arch/i386/mach-voyager/voyager_basic.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c index 46b0cf4a31e0..72a1b9cae2e4 100644 --- a/arch/i386/mach-voyager/voyager_smp.c +++ b/arch/i386/mach-voyager/voyager_smp.c @@ -30,8 +30,6 @@ #include #include -#include - /* TLB state -- visible externally, indexed physically */ DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 }; diff --git a/arch/i386/mach-voyager/voyager_thread.c b/arch/i386/mach-voyager/voyager_thread.c index a9341b0eebff..2b03884fdb2a 100644 --- a/arch/i386/mach-voyager/voyager_thread.c +++ b/arch/i386/mach-voyager/voyager_thread.c @@ -31,8 +31,6 @@ #include #include -#include - #define THREAD_NAME "kvoyagerd" /* external variables */ diff --git a/arch/i386/oprofile/nmi_timer_int.c b/arch/i386/oprofile/nmi_timer_int.c index ad93cdd55d63..930a1127bb30 100644 --- a/arch/i386/oprofile/nmi_timer_int.c +++ b/arch/i386/oprofile/nmi_timer_int.c @@ -9,7 +9,7 @@ #include #include -#include +#include #include #include diff --git a/arch/i386/pci/acpi.c b/arch/i386/pci/acpi.c index 2941674f35eb..4c4522b43be5 100644 --- a/arch/i386/pci/acpi.c +++ b/arch/i386/pci/acpi.c @@ -2,7 +2,6 @@ #include #include #include -#include #include #include "pci.h" diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c index 326a2edc3834..cddafe33ff7c 100644 --- a/arch/i386/pci/irq.c +++ b/arch/i386/pci/irq.c @@ -11,12 +11,11 @@ #include #include #include -#include #include #include #include #include -#include +#include #include #include "pci.h" diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c index 7b0b9ad848e5..b27c5acc79d0 100644 --- a/arch/i386/power/cpu.c +++ b/arch/i386/power/cpu.c @@ -8,25 +8,8 @@ */ #include -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include - -#include -#include -#include -#include static struct saved_context saved_context; diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c index 80f8ef013939..1ba02baf2f94 100644 --- a/arch/ia64/hp/common/hwsw_iommu.c +++ b/arch/ia64/hp/common/hwsw_iommu.c @@ -71,7 +71,7 @@ hwsw_init (void) } void * -hwsw_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, int flags) +hwsw_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags) { if (use_swiotlb(dev)) return swiotlb_alloc_coherent(dev, size, dma_handle, flags); diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index 11957598a8b9..21bffba78b6d 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -1076,7 +1076,7 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir) * See Documentation/DMA-mapping.txt */ void * -sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, int flags) +sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags) { struct ioc *ioc; void *addr; diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c index 56405dbfd739..a18983a3c934 100644 --- a/arch/ia64/hp/sim/simscsi.c +++ b/arch/ia64/hp/sim/simscsi.c @@ -233,6 +233,23 @@ simscsi_readwrite10 (struct scsi_cmnd *sc, int mode) simscsi_readwrite(sc, mode, offset, 
((sc->cmnd[7] << 8) | sc->cmnd[8])*512); } +static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len) +{ + + int scatterlen = sc->use_sg; + struct scatterlist *slp; + + if (scatterlen == 0) + memcpy(sc->request_buffer, buf, len); + else for (slp = (struct scatterlist *)sc->buffer; scatterlen-- > 0 && len > 0; slp++) { + unsigned thislen = min(len, slp->length); + + memcpy(page_address(slp->page) + slp->offset, buf, thislen); + slp++; + len -= thislen; + } +} + static int simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) { @@ -240,6 +257,7 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) char fname[MAX_ROOT_LEN+16]; size_t disk_size; char *buf; + char localbuf[36]; #if DEBUG_SIMSCSI register long sp asm ("sp"); @@ -263,7 +281,7 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) /* disk doesn't exist... */ break; } - buf = sc->request_buffer; + buf = localbuf; buf[0] = 0; /* magnetic disk */ buf[1] = 0; /* not a removable medium */ buf[2] = 2; /* SCSI-2 compliant device */ @@ -273,6 +291,7 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) buf[6] = 0; /* reserved */ buf[7] = 0; /* various flags */ memcpy(buf + 8, "HP SIMULATED DISK 0.00", 28); + simscsi_fillresult(sc, buf, 36); sc->result = GOOD; break; @@ -304,16 +323,13 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) simscsi_readwrite10(sc, SSC_WRITE); break; - case READ_CAPACITY: if (desc[target_id] < 0 || sc->request_bufflen < 8) { break; } - buf = sc->request_buffer; - + buf = localbuf; disk_size = simscsi_get_disk_size(desc[target_id]); - /* pretend to be a 1GB disk (partition table contains real stuff): */ buf[0] = (disk_size >> 24) & 0xff; buf[1] = (disk_size >> 16) & 0xff; buf[2] = (disk_size >> 8) & 0xff; @@ -323,13 +339,14 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) buf[5] = 0; buf[6] = 2; buf[7] = 0; + simscsi_fillresult(sc, buf, 8); sc->result = GOOD; break; case MODE_SENSE: case MODE_SENSE_10: /* sd.c uses this to determine whether disk does write-caching. */ - memset(sc->request_buffer, 0, 128); + simscsi_fillresult(sc, (char *)empty_zero_page, sc->request_bufflen); sc->result = GOOD; break; diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 6dc726ad7137..d0a5106fba24 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -1016,6 +1016,11 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs) cmc_polling_enabled = 1; spin_unlock(&cmc_history_lock); + /* If we're being hit with CMC interrupts, we won't + * ever execute the schedule_work() below. Need to + * disable CMC interrupts on this processor now. 
+ */ + ia64_mca_cmc_vector_disable(NULL); schedule_work(&cmc_disable_work); /* diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S index 499a065f4e60..db32fc1d3935 100644 --- a/arch/ia64/kernel/mca_asm.S +++ b/arch/ia64/kernel/mca_asm.S @@ -489,24 +489,27 @@ ia64_state_save: ;; st8 [temp1]=r17,16 // pal_min_state st8 [temp2]=r6,16 // prev_IA64_KR_CURRENT + mov r6=IA64_KR(CURRENT_STACK) + ;; + st8 [temp1]=r6,16 // prev_IA64_KR_CURRENT_STACK + st8 [temp2]=r0,16 // prev_task, starts off as NULL mov r6=cr.ifa ;; - st8 [temp1]=r0,16 // prev_task, starts off as NULL - st8 [temp2]=r12,16 // cr.isr + st8 [temp1]=r12,16 // cr.isr + st8 [temp2]=r6,16 // cr.ifa mov r12=cr.itir ;; - st8 [temp1]=r6,16 // cr.ifa - st8 [temp2]=r12,16 // cr.itir + st8 [temp1]=r12,16 // cr.itir + st8 [temp2]=r11,16 // cr.iipa mov r12=cr.iim ;; - st8 [temp1]=r11,16 // cr.iipa - st8 [temp2]=r12,16 // cr.iim - mov r6=cr.iha + st8 [temp1]=r12,16 // cr.iim (p1) mov r12=IA64_MCA_COLD_BOOT (p2) mov r12=IA64_INIT_WARM_BOOT + mov r6=cr.iha ;; - st8 [temp1]=r6,16 // cr.iha - st8 [temp2]=r12 // os_status, default is cold boot + st8 [temp2]=r6,16 // cr.iha + st8 [temp1]=r12 // os_status, default is cold boot mov r6=IA64_MCA_SAME_CONTEXT ;; st8 [temp1]=r6 // context, default is same context @@ -823,9 +826,12 @@ ia64_state_restore: ld8 r12=[temp1],16 // sal_ra ld8 r9=[temp2],16 // sal_gp ;; - ld8 r22=[temp1],24 // pal_min_state, virtual. skip prev_task + ld8 r22=[temp1],16 // pal_min_state, virtual ld8 r21=[temp2],16 // prev_IA64_KR_CURRENT ;; + ld8 r16=[temp1],16 // prev_IA64_KR_CURRENT_STACK + ld8 r20=[temp2],16 // prev_task + ;; ld8 temp3=[temp1],16 // cr.isr ld8 temp4=[temp2],16 // cr.ifa ;; @@ -846,6 +852,45 @@ ia64_state_restore: ld8 r8=[temp1] // os_status ld8 r10=[temp2] // context + /* Wire IA64_TR_CURRENT_STACK to the stack that we are resuming to. To + * avoid any dependencies on the algorithm in ia64_switch_to(), just + * purge any existing CURRENT_STACK mapping and insert the new one. + * + * r16 contains prev_IA64_KR_CURRENT_STACK, r21 contains + * prev_IA64_KR_CURRENT, these values may have been changed by the C + * code. Do not use r8, r9, r10, r22, they contain values ready for + * the return to SAL. + */ + + mov r15=IA64_KR(CURRENT_STACK) // physical granule mapped by IA64_TR_CURRENT_STACK + ;; + shl r15=r15,IA64_GRANULE_SHIFT + ;; + dep r15=-1,r15,61,3 // virtual granule + mov r18=IA64_GRANULE_SHIFT<<2 // for cr.itir.ps + ;; + ptr.d r15,r18 + ;; + srlz.d + + extr.u r19=r21,61,3 // r21 = prev_IA64_KR_CURRENT + shl r20=r16,IA64_GRANULE_SHIFT // r16 = prev_IA64_KR_CURRENT_STACK + movl r21=PAGE_KERNEL // page properties + ;; + mov IA64_KR(CURRENT_STACK)=r16 + cmp.ne p6,p0=RGN_KERNEL,r19 // new stack is in the kernel region? 
+ or r21=r20,r21 // construct PA | page properties +(p6) br.spnt 1f // the dreaded cpu 0 idle task in region 5:( + ;; + mov cr.itir=r18 + mov cr.ifa=r21 + mov r20=IA64_TR_CURRENT_STACK + ;; + itr.d dtr[r20]=r21 + ;; + srlz.d +1: + br.sptk b0 //EndStub////////////////////////////////////////////////////////////////////// @@ -982,6 +1027,7 @@ ia64_set_kernel_registers: add temp4=temp4, temp1 // &struct ia64_sal_os_state.os_gp add r12=temp1, temp3 // kernel stack pointer on MCA/INIT stack add r13=temp1, r3 // set current to start of MCA/INIT stack + add r20=temp1, r3 // physical start of MCA/INIT stack ;; ld8 r1=[temp4] // OS GP from SAL OS state ;; @@ -991,7 +1037,35 @@ ia64_set_kernel_registers: ;; mov IA64_KR(CURRENT)=r13 - // FIXME: do I need to wire IA64_KR_CURRENT_STACK and IA64_TR_CURRENT_STACK? + /* Wire IA64_TR_CURRENT_STACK to the MCA/INIT handler stack. To avoid + * any dependencies on the algorithm in ia64_switch_to(), just purge + * any existing CURRENT_STACK mapping and insert the new one. + */ + + mov r16=IA64_KR(CURRENT_STACK) // physical granule mapped by IA64_TR_CURRENT_STACK + ;; + shl r16=r16,IA64_GRANULE_SHIFT + ;; + dep r16=-1,r16,61,3 // virtual granule + mov r18=IA64_GRANULE_SHIFT<<2 // for cr.itir.ps + ;; + ptr.d r16,r18 + ;; + srlz.d + + shr.u r16=r20,IA64_GRANULE_SHIFT // r20 = physical start of MCA/INIT stack + movl r21=PAGE_KERNEL // page properties + ;; + mov IA64_KR(CURRENT_STACK)=r16 + or r21=r20,r21 // construct PA | page properties + ;; + mov cr.itir=r18 + mov cr.ifa=r13 + mov r20=IA64_TR_CURRENT_STACK + ;; + itr.d dtr[r20]=r21 + ;; + srlz.d br.sptk b0 diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index 80f83d6cdbfc..f081c60ab206 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c @@ -56,8 +56,9 @@ static struct page *page_isolate[MAX_PAGE_ISOLATE]; static int num_page_isolate = 0; typedef enum { - ISOLATE_NG = 0, - ISOLATE_OK = 1 + ISOLATE_NG, + ISOLATE_OK, + ISOLATE_NONE } isolate_status_t; /* @@ -74,7 +75,7 @@ static struct { * @paddr: poisoned memory location * * Return value: - * ISOLATE_OK / ISOLATE_NG + * one of isolate_status_t, ISOLATE_OK/NG/NONE. 
*/ static isolate_status_t @@ -85,7 +86,10 @@ mca_page_isolate(unsigned long paddr) /* whether physical address is valid or not */ if (!ia64_phys_addr_valid(paddr)) - return ISOLATE_NG; + return ISOLATE_NONE; + + if (!pfn_valid(paddr)) + return ISOLATE_NONE; /* convert physical address to physical page number */ p = pfn_to_page(paddr>>PAGE_SHIFT); @@ -122,10 +126,15 @@ mca_handler_bh(unsigned long paddr) current->pid, current->comm); spin_lock(&mca_bh_lock); - if (mca_page_isolate(paddr) == ISOLATE_OK) { + switch (mca_page_isolate(paddr)) { + case ISOLATE_OK: printk(KERN_DEBUG "Page isolation: ( %lx ) success.\n", paddr); - } else { + break; + case ISOLATE_NG: printk(KERN_DEBUG "Page isolation: ( %lx ) failure.\n", paddr); + break; + default: + break; } spin_unlock(&mca_bh_lock); diff --git a/arch/ia64/lib/swiotlb.c b/arch/ia64/lib/swiotlb.c index dbc0b3e449c5..3ebbb3c8ba36 100644 --- a/arch/ia64/lib/swiotlb.c +++ b/arch/ia64/lib/swiotlb.c @@ -123,8 +123,8 @@ swiotlb_init_with_default_size (size_t default_size) /* * Get IO TLB memory from the low pages */ - io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * - (1 << IO_TLB_SHIFT)); + io_tlb_start = alloc_bootmem_low_pages_limit(io_tlb_nslabs * + (1 << IO_TLB_SHIFT), 0x100000000); if (!io_tlb_start) panic("Cannot allocate SWIOTLB buffer"); io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT); @@ -314,7 +314,7 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir) void * swiotlb_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, int flags) + dma_addr_t *dma_handle, gfp_t flags) { unsigned long dev_addr; void *ret; diff --git a/arch/ia64/sn/kernel/xpc.h b/arch/ia64/sn/kernel/xpc.h index d0ee635daf2e..e5f5a4e51f70 100644 --- a/arch/ia64/sn/kernel/xpc.h +++ b/arch/ia64/sn/kernel/xpc.h @@ -939,7 +939,7 @@ xpc_map_bte_errors(bte_result_t error) static inline void * -xpc_kmalloc_cacheline_aligned(size_t size, int flags, void **base) +xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) { /* see if kmalloc will give us cachline aligned memory by default */ *base = kmalloc(size, flags); diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index 0e4b9ad9ef02..75e6e874bebf 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c @@ -75,7 +75,7 @@ EXPORT_SYMBOL(sn_dma_set_mask); * more information. 
*/ void *sn_dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t * dma_handle, int flags) + dma_addr_t * dma_handle, gfp_t flags) { void *cpuaddr; unsigned long phys_addr; diff --git a/arch/m32r/Makefile b/arch/m32r/Makefile index dd4418d846e9..983d438b14b6 100644 --- a/arch/m32r/Makefile +++ b/arch/m32r/Makefile @@ -24,7 +24,7 @@ aflags-$(CONFIG_ISA_M32R) += -DNO_FPU -Wa,-no-bitinst CFLAGS += $(cflags-y) AFLAGS += $(aflags-y) -CHECKFLAGS := $(CHECK) -D__m32r__ +CHECKFLAGS += -D__m32r__ -D__BIG_ENDIAN__=1 head-y := arch/m32r/kernel/head.o arch/m32r/kernel/init_task.o diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S index dddbf6b5ed2c..85920fb8d08c 100644 --- a/arch/m32r/kernel/entry.S +++ b/arch/m32r/kernel/entry.S @@ -681,6 +681,15 @@ ENTRY(debug_trap) bl do_debug_trap bra error_code +ENTRY(ill_trap) + /* void ill_trap(void) */ + SWITCH_TO_KERNEL_STACK + SAVE_ALL + ldi r1, #0 ; error_code ; FIXME + mv r0, sp ; pt_regs + bl do_ill_trap + bra error_code + /* Cache flushing handler */ ENTRY(cache_flushing_handler) diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c index a4576ac7e870..8b1f6eb76870 100644 --- a/arch/m32r/kernel/smp.c +++ b/arch/m32r/kernel/smp.c @@ -275,12 +275,14 @@ static void flush_tlb_all_ipi(void *info) *==========================================================================*/ void smp_flush_tlb_mm(struct mm_struct *mm) { - int cpu_id = smp_processor_id(); + int cpu_id; cpumask_t cpu_mask; - unsigned long *mmc = &mm->context[cpu_id]; + unsigned long *mmc; unsigned long flags; preempt_disable(); + cpu_id = smp_processor_id(); + mmc = &mm->context[cpu_id]; cpu_mask = mm->cpu_vm_mask; cpu_clear(cpu_id, cpu_mask); @@ -343,12 +345,14 @@ void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va) { struct mm_struct *mm = vma->vm_mm; - int cpu_id = smp_processor_id(); + int cpu_id; cpumask_t cpu_mask; - unsigned long *mmc = &mm->context[cpu_id]; + unsigned long *mmc; unsigned long flags; preempt_disable(); + cpu_id = smp_processor_id(); + mmc = &mm->context[cpu_id]; cpu_mask = mm->cpu_vm_mask; cpu_clear(cpu_id, cpu_mask); diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c index 01922271d17e..5fe8ed6d62dc 100644 --- a/arch/m32r/kernel/traps.c +++ b/arch/m32r/kernel/traps.c @@ -5,8 +5,6 @@ * Hitoshi Yamamoto */ -/* $Id$ */ - /* * 'traps.c' handles hardware traps and faults after we have saved some * state in 'entry.S'. 
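
The arch/m32r/kernel/smp.c hunks above are a preemption fix: smp_processor_id() and the mm->context[cpu_id] pointer used to be computed in the variable declarations, before preempt_disable(), so the task could migrate to another CPU in between and the wrong per-CPU slot would be flushed. A minimal sketch of the safe ordering, with a hypothetical per-CPU counter standing in for the mm context array:

	#include <linux/percpu.h>
	#include <linux/preempt.h>
	#include <linux/smp.h>

	static DEFINE_PER_CPU(unsigned long, my_counter);	/* illustrative per-CPU data */

	static void touch_this_cpu(void)
	{
		int cpu;

		preempt_disable();		/* pin ourselves to the current CPU first... */
		cpu = smp_processor_id();	/* ...then it is safe to ask which CPU that is */
		per_cpu(my_counter, cpu)++;
		preempt_enable();
	}

Calling smp_processor_id() from preemptible context is exactly the pattern CONFIG_DEBUG_PREEMPT complains about.
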
@@ -35,6 +33,7 @@ asmlinkage void ei_handler(void); asmlinkage void rie_handler(void); asmlinkage void debug_trap(void); asmlinkage void cache_flushing_handler(void); +asmlinkage void ill_trap(void); #ifdef CONFIG_SMP extern void smp_reschedule_interrupt(void); @@ -77,22 +76,22 @@ void set_eit_vector_entries(void) eit_vector[5] = BRA_INSN(default_eit_handler, 5); eit_vector[8] = BRA_INSN(rie_handler, 8); eit_vector[12] = BRA_INSN(alignment_check, 12); - eit_vector[16] = 0xff000000UL; + eit_vector[16] = BRA_INSN(ill_trap, 16); eit_vector[17] = BRA_INSN(debug_trap, 17); eit_vector[18] = BRA_INSN(system_call, 18); - eit_vector[19] = 0xff000000UL; - eit_vector[20] = 0xff000000UL; - eit_vector[21] = 0xff000000UL; - eit_vector[22] = 0xff000000UL; - eit_vector[23] = 0xff000000UL; - eit_vector[24] = 0xff000000UL; - eit_vector[25] = 0xff000000UL; - eit_vector[26] = 0xff000000UL; - eit_vector[27] = 0xff000000UL; + eit_vector[19] = BRA_INSN(ill_trap, 19); + eit_vector[20] = BRA_INSN(ill_trap, 20); + eit_vector[21] = BRA_INSN(ill_trap, 21); + eit_vector[22] = BRA_INSN(ill_trap, 22); + eit_vector[23] = BRA_INSN(ill_trap, 23); + eit_vector[24] = BRA_INSN(ill_trap, 24); + eit_vector[25] = BRA_INSN(ill_trap, 25); + eit_vector[26] = BRA_INSN(ill_trap, 26); + eit_vector[27] = BRA_INSN(ill_trap, 27); eit_vector[28] = BRA_INSN(cache_flushing_handler, 28); - eit_vector[29] = 0xff000000UL; - eit_vector[30] = 0xff000000UL; - eit_vector[31] = 0xff000000UL; + eit_vector[29] = BRA_INSN(ill_trap, 29); + eit_vector[30] = BRA_INSN(ill_trap, 30); + eit_vector[31] = BRA_INSN(ill_trap, 31); eit_vector[32] = BRA_INSN(ei_handler, 32); eit_vector[64] = BRA_INSN(pie_handler, 64); #ifdef CONFIG_MMU @@ -286,7 +285,8 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ DO_ERROR( 1, SIGTRAP, "debug trap", debug_trap) DO_ERROR_INFO(0x20, SIGILL, "reserved instruction ", rie_handler, ILL_ILLOPC, regs->bpc) -DO_ERROR_INFO(0x100, SIGILL, "privilege instruction", pie_handler, ILL_PRVOPC, regs->bpc) +DO_ERROR_INFO(0x100, SIGILL, "privileged instruction", pie_handler, ILL_PRVOPC, regs->bpc) +DO_ERROR_INFO(-1, SIGILL, "illegal trap", ill_trap, ILL_ILLTRP, regs->bpc) extern int handle_unaligned_access(unsigned long, struct pt_regs *); @@ -329,4 +329,3 @@ asmlinkage void do_alignment_check(struct pt_regs *regs, long error_code) set_fs(oldfs); } } - diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c index 6c6855f1aa05..ce16bbe26a52 100644 --- a/arch/m32r/lib/usercopy.c +++ b/arch/m32r/lib/usercopy.c @@ -13,7 +13,7 @@ #include unsigned long -__generic_copy_to_user(void *to, const void *from, unsigned long n) +__generic_copy_to_user(void __user *to, const void *from, unsigned long n) { prefetch(from); if (access_ok(VERIFY_WRITE, to, n)) @@ -22,7 +22,7 @@ __generic_copy_to_user(void *to, const void *from, unsigned long n) } unsigned long -__generic_copy_from_user(void *to, const void *from, unsigned long n) +__generic_copy_from_user(void *to, const void __user *from, unsigned long n) { prefetchw(to); if (access_ok(VERIFY_READ, from, n)) @@ -111,7 +111,7 @@ do { \ #endif /* CONFIG_ISA_DUAL_ISSUE */ long -__strncpy_from_user(char *dst, const char *src, long count) +__strncpy_from_user(char *dst, const char __user *src, long count) { long res; __do_strncpy_from_user(dst, src, count, res); @@ -119,7 +119,7 @@ __strncpy_from_user(char *dst, const char *src, long count) } long -strncpy_from_user(char *dst, const char *src, long count) +strncpy_from_user(char *dst, const char __user *src, long count) { long res = 
-EFAULT; if (access_ok(VERIFY_READ, src, 1)) @@ -222,7 +222,7 @@ do { \ #endif /* not CONFIG_ISA_DUAL_ISSUE */ unsigned long -clear_user(void *to, unsigned long n) +clear_user(void __user *to, unsigned long n) { if (access_ok(VERIFY_WRITE, to, n)) __do_clear_user(to, n); @@ -230,7 +230,7 @@ clear_user(void *to, unsigned long n) } unsigned long -__clear_user(void *to, unsigned long n) +__clear_user(void __user *to, unsigned long n) { __do_clear_user(to, n); return n; @@ -244,7 +244,7 @@ __clear_user(void *to, unsigned long n) #ifdef CONFIG_ISA_DUAL_ISSUE -long strnlen_user(const char *s, long n) +long strnlen_user(const char __user *s, long n) { unsigned long mask = -__addr_ok(s); unsigned long res; @@ -313,7 +313,7 @@ long strnlen_user(const char *s, long n) #else /* not CONFIG_ISA_DUAL_ISSUE */ -long strnlen_user(const char *s, long n) +long strnlen_user(const char __user *s, long n) { unsigned long mask = -__addr_ok(s); unsigned long res; diff --git a/arch/mips/mm/dma-coherent.c b/arch/mips/mm/dma-coherent.c index 97a50d38c98f..a617f8c327e8 100644 --- a/arch/mips/mm/dma-coherent.c +++ b/arch/mips/mm/dma-coherent.c @@ -18,7 +18,7 @@ #include void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t * dma_handle, int gfp) + dma_addr_t * dma_handle, gfp_t gfp) { void *ret; /* ignore region specifiers */ @@ -39,7 +39,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size, EXPORT_SYMBOL(dma_alloc_noncoherent); void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t * dma_handle, int gfp) + dma_addr_t * dma_handle, gfp_t gfp) __attribute__((alias("dma_alloc_noncoherent"))); EXPORT_SYMBOL(dma_alloc_coherent); diff --git a/arch/mips/mm/dma-ip27.c b/arch/mips/mm/dma-ip27.c index aa7c94b5d781..8da19fd22ac6 100644 --- a/arch/mips/mm/dma-ip27.c +++ b/arch/mips/mm/dma-ip27.c @@ -22,7 +22,7 @@ pdev_to_baddr(to_pci_dev(dev), (addr)) void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t * dma_handle, int gfp) + dma_addr_t * dma_handle, gfp_t gfp) { void *ret; @@ -44,7 +44,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size, EXPORT_SYMBOL(dma_alloc_noncoherent); void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t * dma_handle, int gfp) + dma_addr_t * dma_handle, gfp_t gfp) __attribute__((alias("dma_alloc_noncoherent"))); EXPORT_SYMBOL(dma_alloc_coherent); diff --git a/arch/mips/mm/dma-ip32.c b/arch/mips/mm/dma-ip32.c index 2cbe196c35fb..a7e3072ff78d 100644 --- a/arch/mips/mm/dma-ip32.c +++ b/arch/mips/mm/dma-ip32.c @@ -37,7 +37,7 @@ #define RAM_OFFSET_MASK 0x3fffffff void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t * dma_handle, int gfp) + dma_addr_t * dma_handle, gfp_t gfp) { void *ret; /* ignore region specifiers */ @@ -61,7 +61,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size, EXPORT_SYMBOL(dma_alloc_noncoherent); void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t * dma_handle, int gfp) + dma_addr_t * dma_handle, gfp_t gfp) { void *ret; diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c index 59e54f12212e..4ce02028a292 100644 --- a/arch/mips/mm/dma-noncoherent.c +++ b/arch/mips/mm/dma-noncoherent.c @@ -24,7 +24,7 @@ */ void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t * dma_handle, int gfp) + dma_addr_t * dma_handle, gfp_t gfp) { void *ret; /* ignore region specifiers */ @@ -45,7 +45,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size, EXPORT_SYMBOL(dma_alloc_noncoherent); void 
*dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t * dma_handle, int gfp) + dma_addr_t * dma_handle, gfp_t gfp) { void *ret; diff --git a/arch/mips/pci/fixup-tb0226.c b/arch/mips/pci/fixup-tb0226.c index 61513d5d97da..b5d42b12de10 100644 --- a/arch/mips/pci/fixup-tb0226.c +++ b/arch/mips/pci/fixup-tb0226.c @@ -1,7 +1,7 @@ /* * fixup-tb0226.c, The TANBAC TB0226 specific PCI fixups. * - * Copyright (C) 2002-2004 Yoichi Yuasa + * Copyright (C) 2002-2005 Yoichi Yuasa * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -20,6 +20,7 @@ #include #include +#include #include int __init pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin) @@ -29,42 +30,42 @@ int __init pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin) switch (slot) { case 12: vr41xx_set_irq_trigger(GD82559_1_PIN, - TRIGGER_LEVEL, - SIGNAL_THROUGH); - vr41xx_set_irq_level(GD82559_1_PIN, LEVEL_LOW); + IRQ_TRIGGER_LEVEL, + IRQ_SIGNAL_THROUGH); + vr41xx_set_irq_level(GD82559_1_PIN, IRQ_LEVEL_LOW); irq = GD82559_1_IRQ; break; case 13: vr41xx_set_irq_trigger(GD82559_2_PIN, - TRIGGER_LEVEL, - SIGNAL_THROUGH); - vr41xx_set_irq_level(GD82559_2_PIN, LEVEL_LOW); + IRQ_TRIGGER_LEVEL, + IRQ_SIGNAL_THROUGH); + vr41xx_set_irq_level(GD82559_2_PIN, IRQ_LEVEL_LOW); irq = GD82559_2_IRQ; break; case 14: switch (pin) { case 1: vr41xx_set_irq_trigger(UPD720100_INTA_PIN, - TRIGGER_LEVEL, - SIGNAL_THROUGH); + IRQ_TRIGGER_LEVEL, + IRQ_SIGNAL_THROUGH); vr41xx_set_irq_level(UPD720100_INTA_PIN, - LEVEL_LOW); + IRQ_LEVEL_LOW); irq = UPD720100_INTA_IRQ; break; case 2: vr41xx_set_irq_trigger(UPD720100_INTB_PIN, - TRIGGER_LEVEL, - SIGNAL_THROUGH); + IRQ_TRIGGER_LEVEL, + IRQ_SIGNAL_THROUGH); vr41xx_set_irq_level(UPD720100_INTB_PIN, - LEVEL_LOW); + IRQ_LEVEL_LOW); irq = UPD720100_INTB_IRQ; break; case 3: vr41xx_set_irq_trigger(UPD720100_INTC_PIN, - TRIGGER_LEVEL, - SIGNAL_THROUGH); + IRQ_TRIGGER_LEVEL, + IRQ_SIGNAL_THROUGH); vr41xx_set_irq_level(UPD720100_INTC_PIN, - LEVEL_LOW); + IRQ_LEVEL_LOW); irq = UPD720100_INTC_IRQ; break; default: diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index 368cc095c99f..844c2877a2e3 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -349,7 +349,7 @@ pcxl_dma_init(void) __initcall(pcxl_dma_init); -static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, int flag) +static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { unsigned long vaddr; unsigned long paddr; @@ -502,13 +502,13 @@ struct hppa_dma_ops pcxl_dma_ops = { }; static void *fail_alloc_consistent(struct device *dev, size_t size, - dma_addr_t *dma_handle, int flag) + dma_addr_t *dma_handle, gfp_t flag) { return NULL; } static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, int flag) + dma_addr_t *dma_handle, gfp_t flag) { void *addr = NULL; diff --git a/arch/ppc/8xx_io/cs4218.h b/arch/ppc/8xx_io/cs4218.h index a3c38c5a5db2..f1c7392255f8 100644 --- a/arch/ppc/8xx_io/cs4218.h +++ b/arch/ppc/8xx_io/cs4218.h @@ -78,7 +78,7 @@ typedef struct { const char *name2; void (*open)(void); void (*release)(void); - void *(*dma_alloc)(unsigned int, int); + void *(*dma_alloc)(unsigned int, gfp_t); void (*dma_free)(void *, unsigned int); int (*irqinit)(void); #ifdef MODULE diff --git a/arch/ppc/8xx_io/cs4218_tdm.c b/arch/ppc/8xx_io/cs4218_tdm.c index 2ca9ec7ec3a7..532caa388dc2 100644 --- 
a/arch/ppc/8xx_io/cs4218_tdm.c +++ b/arch/ppc/8xx_io/cs4218_tdm.c @@ -318,7 +318,7 @@ struct cs_sound_settings { static struct cs_sound_settings sound; -static void *CS_Alloc(unsigned int size, int flags); +static void *CS_Alloc(unsigned int size, gfp_t flags); static void CS_Free(void *ptr, unsigned int size); static int CS_IrqInit(void); #ifdef MODULE @@ -959,7 +959,7 @@ static TRANS transCSNormalRead = { /*** Low level stuff *********************************************************/ -static void *CS_Alloc(unsigned int size, int flags) +static void *CS_Alloc(unsigned int size, gfp_t flags) { int order; diff --git a/arch/ppc/boot/ld.script b/arch/ppc/boot/ld.script index 9362193742ac..d4dd8f15395e 100644 --- a/arch/ppc/boot/ld.script +++ b/arch/ppc/boot/ld.script @@ -1,4 +1,4 @@ -OUTPUT_ARCH(powerpc) +OUTPUT_ARCH(powerpc:common) SECTIONS { /* Read-only sections, merged into text segment: */ diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile index 1fb92f16acd6..b1457a8a9c0f 100644 --- a/arch/ppc/kernel/Makefile +++ b/arch/ppc/kernel/Makefile @@ -15,9 +15,8 @@ extra-y += vmlinux.lds obj-y := entry.o traps.o irq.o idle.o time.o misc.o \ process.o signal.o ptrace.o align.o \ semaphore.o syscalls.o setup.o \ - cputable.o ppc_htab.o + cputable.o ppc_htab.o perfmon.o obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o -obj-$(CONFIG_E500) += perfmon.o obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o obj-$(CONFIG_POWER4) += cpu_setup_power4.o obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o diff --git a/arch/ppc/kernel/cputable.c b/arch/ppc/kernel/cputable.c index 546e1ea4cafa..6b76cf58d9e0 100644 --- a/arch/ppc/kernel/cputable.c +++ b/arch/ppc/kernel/cputable.c @@ -91,7 +91,7 @@ struct cpu_spec cpu_specs[] = { .cpu_features = CPU_FTR_COMMON | CPU_FTR_601 | CPU_FTR_HPTE_TABLE, .cpu_user_features = COMMON_PPC | PPC_FEATURE_601_INSTR | - PPC_FEATURE_UNIFIED_CACHE, + PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB, .icache_bsize = 32, .dcache_bsize = 32, .cpu_setup = __setup_cpu_601 @@ -745,7 +745,8 @@ struct cpu_spec cpu_specs[] = { .cpu_name = "403GCX", .cpu_features = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, + .cpu_user_features = PPC_FEATURE_32 | + PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB, .icache_bsize = 16, .dcache_bsize = 16, }, diff --git a/arch/ppc/kernel/dma-mapping.c b/arch/ppc/kernel/dma-mapping.c index b566d982806c..0f710d2baec6 100644 --- a/arch/ppc/kernel/dma-mapping.c +++ b/arch/ppc/kernel/dma-mapping.c @@ -115,7 +115,7 @@ static struct vm_region consistent_head = { }; static struct vm_region * -vm_region_alloc(struct vm_region *head, size_t size, int gfp) +vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp) { unsigned long addr = head->vm_start, end = head->vm_end - size; unsigned long flags; @@ -173,7 +173,7 @@ static struct vm_region *vm_region_find(struct vm_region *head, unsigned long ad * virtual and bus address for that space. 
*/ void * -__dma_alloc_coherent(size_t size, dma_addr_t *handle, int gfp) +__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp) { struct page *page; struct vm_region *c; @@ -401,10 +401,10 @@ EXPORT_SYMBOL(__dma_sync); static inline void __dma_sync_page_highmem(struct page *page, unsigned long offset, size_t size, int direction) { - size_t seg_size = min((size_t)PAGE_SIZE, size) - offset; + size_t seg_size = min((size_t)(PAGE_SIZE - offset), size); size_t cur_size = seg_size; unsigned long flags, start, seg_offset = offset; - int nr_segs = PAGE_ALIGN(size + (PAGE_SIZE - offset))/PAGE_SIZE; + int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE; int seg_nr = 0; local_irq_save(flags); diff --git a/arch/ppc/kernel/perfmon.c b/arch/ppc/kernel/perfmon.c index fa1dad96b830..22df9a596a0f 100644 --- a/arch/ppc/kernel/perfmon.c +++ b/arch/ppc/kernel/perfmon.c @@ -45,9 +45,8 @@ static void dummy_perf(struct pt_regs *regs) mtpmr(PMRN_PMGC0, pmgc0); } -#else +#elif defined(CONFIG_6xx) /* Ensure exceptions are disabled */ - static void dummy_perf(struct pt_regs *regs) { unsigned int mmcr0 = mfspr(SPRN_MMCR0); @@ -55,6 +54,10 @@ static void dummy_perf(struct pt_regs *regs) mmcr0 &= ~MMCR0_PMXE; mtspr(SPRN_MMCR0, mmcr0); } +#else +static void dummy_perf(struct pt_regs *regs) +{ +} #endif void (*perf_irq)(struct pt_regs *) = dummy_perf; diff --git a/arch/ppc/mm/pgtable.c b/arch/ppc/mm/pgtable.c index 81a3d7446d37..43505b1fc5d8 100644 --- a/arch/ppc/mm/pgtable.c +++ b/arch/ppc/mm/pgtable.c @@ -114,9 +114,9 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) struct page *ptepage; #ifdef CONFIG_HIGHPTE - int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT; + gfp_t flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT; #else - int flags = GFP_KERNEL | __GFP_REPEAT; + gfp_t flags = GFP_KERNEL | __GFP_REPEAT; #endif ptepage = alloc_pages(flags, 0); diff --git a/arch/ppc/platforms/4xx/bamboo.c b/arch/ppc/platforms/4xx/bamboo.c index ac391d463d78..78a403b48dba 100644 --- a/arch/ppc/platforms/4xx/bamboo.c +++ b/arch/ppc/platforms/4xx/bamboo.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ppc/platforms/4xx/ebony.c b/arch/ppc/platforms/4xx/ebony.c index d6b2b1965dcb..27b778ab903b 100644 --- a/arch/ppc/platforms/4xx/ebony.c +++ b/arch/ppc/platforms/4xx/ebony.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ppc/platforms/4xx/luan.c b/arch/ppc/platforms/4xx/luan.c index a38e6f9ef858..16d953bda22c 100644 --- a/arch/ppc/platforms/4xx/luan.c +++ b/arch/ppc/platforms/4xx/luan.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ppc/platforms/4xx/ocotea.c b/arch/ppc/platforms/4xx/ocotea.c index 80028df1b445..506949c5dd29 100644 --- a/arch/ppc/platforms/4xx/ocotea.c +++ b/arch/ppc/platforms/4xx/ocotea.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ppc/platforms/83xx/mpc834x_sys.c b/arch/ppc/platforms/83xx/mpc834x_sys.c index b38a851a64ec..79b3f533d0a3 100644 --- a/arch/ppc/platforms/83xx/mpc834x_sys.c +++ b/arch/ppc/platforms/83xx/mpc834x_sys.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ppc/platforms/85xx/mpc8540_ads.c b/arch/ppc/platforms/85xx/mpc8540_ads.c index f761fdf160db..7dc8a68acfd0 100644 --- a/arch/ppc/platforms/85xx/mpc8540_ads.c +++ b/arch/ppc/platforms/85xx/mpc8540_ads.c @@ -24,7 +24,6 @@ #include #include 
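
The __dma_sync_page_highmem() hunk above (arch/ppc/kernel/dma-mapping.c) fixes how the first segment length and the segment count are derived when the buffer does not start on a page boundary. Plugging in the made-up values PAGE_SIZE = 4096, offset = 1024 and size = 2048 (a buffer that sits entirely inside one page) shows the difference:

	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
	#define MIN(a, b)	((a) < (b) ? (a) : (b))

	int main(void)
	{
		unsigned long offset = 1024, size = 2048;	/* made-up example values */

		/* Old expressions: first segment of 1024 bytes, 2 segments total,
		 * although the whole 2048-byte buffer lies within a single page. */
		unsigned long old_seg = MIN(PAGE_SIZE, size) - offset;				/* 1024 */
		unsigned long old_nr  = PAGE_ALIGN(size + (PAGE_SIZE - offset)) / PAGE_SIZE;	/* 2 */

		/* New expressions: one segment of 2048 bytes, as expected. */
		unsigned long new_seg = MIN(PAGE_SIZE - offset, size);				/* 2048 */
		unsigned long new_nr  = 1 + ((size - new_seg) + PAGE_SIZE - 1) / PAGE_SIZE;	/* 1 */

		printf("old: seg=%lu nr=%lu  new: seg=%lu nr=%lu\n",
		       old_seg, old_nr, new_seg, new_nr);
		return 0;
	}

With the old arithmetic only 1024 bytes of the page would be synced and the loop would then step into a second page the mapping never covered; the new arithmetic handles the full 2048 bytes in a single pass.
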
#include -#include #include #include #include diff --git a/arch/ppc/platforms/85xx/mpc8560_ads.c b/arch/ppc/platforms/85xx/mpc8560_ads.c index f2748c88665a..8841fd7da6ee 100644 --- a/arch/ppc/platforms/85xx/mpc8560_ads.c +++ b/arch/ppc/platforms/85xx/mpc8560_ads.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ppc/platforms/85xx/mpc85xx_ads_common.c b/arch/ppc/platforms/85xx/mpc85xx_ads_common.c index 18e952d1767c..bd3ac0136756 100644 --- a/arch/ppc/platforms/85xx/mpc85xx_ads_common.c +++ b/arch/ppc/platforms/85xx/mpc85xx_ads_common.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ppc/platforms/85xx/mpc85xx_cds_common.c b/arch/ppc/platforms/85xx/mpc85xx_cds_common.c index 6267b294f704..9f9039498ae5 100644 --- a/arch/ppc/platforms/85xx/mpc85xx_cds_common.c +++ b/arch/ppc/platforms/85xx/mpc85xx_cds_common.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ppc/platforms/85xx/sbc8560.c b/arch/ppc/platforms/85xx/sbc8560.c index 165df94d4aa6..c76760a781c1 100644 --- a/arch/ppc/platforms/85xx/sbc8560.c +++ b/arch/ppc/platforms/85xx/sbc8560.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ppc/platforms/85xx/sbc85xx.c b/arch/ppc/platforms/85xx/sbc85xx.c index 4f6d1ddd6fb8..c02f110219f5 100644 --- a/arch/ppc/platforms/85xx/sbc85xx.c +++ b/arch/ppc/platforms/85xx/sbc85xx.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ppc/platforms/85xx/stx_gp3.c b/arch/ppc/platforms/85xx/stx_gp3.c index c99b365d6110..20940f4044f4 100644 --- a/arch/ppc/platforms/85xx/stx_gp3.c +++ b/arch/ppc/platforms/85xx/stx_gp3.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ppc/platforms/chestnut.c b/arch/ppc/platforms/chestnut.c index 7786818bd9d0..df6ff98c023a 100644 --- a/arch/ppc/platforms/chestnut.c +++ b/arch/ppc/platforms/chestnut.c @@ -35,7 +35,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ppc/platforms/chrp_setup.c b/arch/ppc/platforms/chrp_setup.c index 57f29ab29bda..66346f0de7ec 100644 --- a/arch/ppc/platforms/chrp_setup.c +++ b/arch/ppc/platforms/chrp_setup.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ppc/platforms/gemini_setup.c b/arch/ppc/platforms/gemini_setup.c index e391e52383c7..3a5ff9fb71d6 100644 --- a/arch/ppc/platforms/gemini_setup.c +++ b/arch/ppc/platforms/gemini_setup.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ppc/platforms/mvme5100.c b/arch/ppc/platforms/mvme5100.c index b292b44b760c..ce2ce88c8033 100644 --- a/arch/ppc/platforms/mvme5100.c +++ b/arch/ppc/platforms/mvme5100.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ppc/platforms/pmac_cpufreq.c b/arch/ppc/platforms/pmac_cpufreq.c index c0605244edda..d4bc5f67ec53 100644 --- a/arch/ppc/platforms/pmac_cpufreq.c +++ b/arch/ppc/platforms/pmac_cpufreq.c @@ -695,6 +695,13 @@ static int __init pmac_cpufreq_setup(void) set_speed_proc = pmu_set_cpu_speed; is_pmu_based = 1; } + /* Else check for TiPb 550 */ + else if (machine_is_compatible("PowerBook3,3") && cur_freq == 550000) { + hi_freq = cur_freq; + low_freq = 500000; + set_speed_proc = pmu_set_cpu_speed; + is_pmu_based = 1; + } /* Else check for TiPb 400 & 500 */ else if 
(machine_is_compatible("PowerBook3,2")) { /* We only know about the 400 MHz and the 500Mhz model diff --git a/arch/ppc/platforms/pmac_feature.c b/arch/ppc/platforms/pmac_feature.c index 867336ad5d36..dd6d45ae0501 100644 --- a/arch/ppc/platforms/pmac_feature.c +++ b/arch/ppc/platforms/pmac_feature.c @@ -2337,6 +2337,10 @@ static struct pmac_mb_def pmac_mb_defs[] __pmacdata = { PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, }, + { "PowerBook6,7", "iBook G4", + PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, + PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, + }, { "PowerBook6,8", "PowerBook G4 12\"", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, diff --git a/arch/ppc/platforms/pmac_setup.c b/arch/ppc/platforms/pmac_setup.c index b392b9a15987..d6356f480d90 100644 --- a/arch/ppc/platforms/pmac_setup.c +++ b/arch/ppc/platforms/pmac_setup.c @@ -48,7 +48,6 @@ #include #include #include -#include #include #include #include @@ -719,7 +718,8 @@ pmac_declare_of_platform_devices(void) if (np) { for (np = np->child; np != NULL; np = np->sibling) if (strncmp(np->name, "i2c", 3) == 0) { - of_platform_device_create(np, "uni-n-i2c"); + of_platform_device_create(np, "uni-n-i2c", + NULL); break; } } @@ -727,17 +727,18 @@ pmac_declare_of_platform_devices(void) if (np) { for (np = np->child; np != NULL; np = np->sibling) if (strncmp(np->name, "i2c", 3) == 0) { - of_platform_device_create(np, "u3-i2c"); + of_platform_device_create(np, "u3-i2c", + NULL); break; } } np = find_devices("valkyrie"); if (np) - of_platform_device_create(np, "valkyrie"); + of_platform_device_create(np, "valkyrie", NULL); np = find_devices("platinum"); if (np) - of_platform_device_create(np, "platinum"); + of_platform_device_create(np, "platinum", NULL); return 0; } diff --git a/arch/ppc/platforms/pmac_time.c b/arch/ppc/platforms/pmac_time.c index 778ce4fec368..efb819f9490d 100644 --- a/arch/ppc/platforms/pmac_time.c +++ b/arch/ppc/platforms/pmac_time.c @@ -195,7 +195,7 @@ via_calibrate_decr(void) ; dend = get_dec(); - tb_ticks_per_jiffy = (dstart - dend) / (6 * (HZ/100)); + tb_ticks_per_jiffy = (dstart - dend) / ((6 * HZ)/100); tb_to_us = mulhwu_scale_factor(dstart - dend, 60000); printk(KERN_INFO "via_calibrate_decr: ticks per jiffy = %u (%u ticks)\n", diff --git a/arch/ppc/platforms/powerpmc250.c b/arch/ppc/platforms/powerpmc250.c index 0abe15159e6c..e6b520e6e13f 100644 --- a/arch/ppc/platforms/powerpmc250.c +++ b/arch/ppc/platforms/powerpmc250.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ppc/platforms/pplus.c b/arch/ppc/platforms/pplus.c index 65705c911795..e70aae20d6f9 100644 --- a/arch/ppc/platforms/pplus.c +++ b/arch/ppc/platforms/pplus.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ppc/platforms/prpmc750.c b/arch/ppc/platforms/prpmc750.c index 24ae1caafc61..0bb14a5e824c 100644 --- a/arch/ppc/platforms/prpmc750.c +++ b/arch/ppc/platforms/prpmc750.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ppc/platforms/prpmc800.c b/arch/ppc/platforms/prpmc800.c index 8b09fa69b35b..de7baefacd3a 100644 --- a/arch/ppc/platforms/prpmc800.c +++ b/arch/ppc/platforms/prpmc800.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ppc/platforms/radstone_ppc7d.c b/arch/ppc/platforms/radstone_ppc7d.c index 
c30607a972d8..0376c8cff5d1 100644 --- a/arch/ppc/platforms/radstone_ppc7d.c +++ b/arch/ppc/platforms/radstone_ppc7d.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include #include @@ -59,7 +58,6 @@ #include #include #include -#include #include "radstone_ppc7d.h" diff --git a/arch/ppc/platforms/sandpoint.c b/arch/ppc/platforms/sandpoint.c index 21e31346b12b..5232283c1974 100644 --- a/arch/ppc/platforms/sandpoint.c +++ b/arch/ppc/platforms/sandpoint.c @@ -74,7 +74,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ppc/syslib/mv64x60.c b/arch/ppc/syslib/mv64x60.c index 839f8872826f..4849850a59ed 100644 --- a/arch/ppc/syslib/mv64x60.c +++ b/arch/ppc/syslib/mv64x60.c @@ -34,7 +34,7 @@ u8 mv64x60_pci_exclude_bridge = 1; DEFINE_SPINLOCK(mv64x60_lock); static phys_addr_t mv64x60_bridge_pbase; -static void *mv64x60_bridge_vbase; +static void __iomem *mv64x60_bridge_vbase; static u32 mv64x60_bridge_type = MV64x60_TYPE_INVALID; static u32 mv64x60_bridge_rev; #if defined(CONFIG_SYSFS) && !defined(CONFIG_GT64260) @@ -938,7 +938,7 @@ mv64x60_setup_for_chip(struct mv64x60_handle *bh) * * Return the virtual address of the bridge's registers. */ -void * +void __iomem * mv64x60_get_bridge_vbase(void) { return mv64x60_bridge_vbase; diff --git a/arch/ppc/syslib/of_device.c b/arch/ppc/syslib/of_device.c index da8a0f2128dc..93c7231ea709 100644 --- a/arch/ppc/syslib/of_device.c +++ b/arch/ppc/syslib/of_device.c @@ -234,7 +234,9 @@ void of_device_unregister(struct of_device *ofdev) device_unregister(&ofdev->dev); } -struct of_device* of_platform_device_create(struct device_node *np, const char *bus_id) +struct of_device* of_platform_device_create(struct device_node *np, + const char *bus_id, + struct device *parent) { struct of_device *dev; u32 *reg; @@ -247,7 +249,7 @@ struct of_device* of_platform_device_create(struct device_node *np, const char * dev->node = of_node_get(np); dev->dma_mask = 0xffffffffUL; dev->dev.dma_mask = &dev->dma_mask; - dev->dev.parent = NULL; + dev->dev.parent = parent; dev->dev.bus = &of_platform_bus_type; dev->dev.release = of_release_dev; diff --git a/arch/ppc/syslib/open_pic.c b/arch/ppc/syslib/open_pic.c index 53da58523e39..1cf5de21a3fd 100644 --- a/arch/ppc/syslib/open_pic.c +++ b/arch/ppc/syslib/open_pic.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ppc/syslib/open_pic2.c b/arch/ppc/syslib/open_pic2.c index 9a7e8748e2b2..16cff91d9f41 100644 --- a/arch/ppc/syslib/open_pic2.c +++ b/arch/ppc/syslib/open_pic2.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ppc/syslib/ppc4xx_setup.c b/arch/ppc/syslib/ppc4xx_setup.c index b843c4fef25e..bf83240689dc 100644 --- a/arch/ppc/syslib/ppc4xx_setup.c +++ b/arch/ppc/syslib/ppc4xx_setup.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/ppc/syslib/ppc85xx_setup.c b/arch/ppc/syslib/ppc85xx_setup.c index b7242f1bd931..832b8bf99ae7 100644 --- a/arch/ppc/syslib/ppc85xx_setup.c +++ b/arch/ppc/syslib/ppc85xx_setup.c @@ -184,8 +184,8 @@ mpc85xx_setup_pci1(struct pci_controller *hose) pci->powar1 = 0x80044000 | (__ilog2(MPC85XX_PCI1_UPPER_MEM - MPC85XX_PCI1_LOWER_MEM + 1) - 1); - /* Setup outboud IO windows @ MPC85XX_PCI1_IO_BASE */ - pci->potar2 = 0x00000000; + /* Setup outbound IO windows @ MPC85XX_PCI1_IO_BASE */ + pci->potar2 = (MPC85XX_PCI1_LOWER_IO >> 12) & 0x000fffff; pci->potear2 = 0x00000000; pci->powbar2 = (MPC85XX_PCI1_IO_BASE >> 
12) & 0x000fffff; /* Enable, IO R/W */ @@ -235,8 +235,8 @@ mpc85xx_setup_pci2(struct pci_controller *hose) pci->powar1 = 0x80044000 | (__ilog2(MPC85XX_PCI2_UPPER_MEM - MPC85XX_PCI2_LOWER_MEM + 1) - 1); - /* Setup outboud IO windows @ MPC85XX_PCI2_IO_BASE */ - pci->potar2 = 0x00000000; + /* Setup outbound IO windows @ MPC85XX_PCI2_IO_BASE */ + pci->potar2 = (MPC85XX_PCI2_LOWER_IO >> 12) & 0x000fffff;; pci->potear2 = 0x00000000; pci->powbar2 = (MPC85XX_PCI2_IO_BASE >> 12) & 0x000fffff; /* Enable, IO R/W */ diff --git a/arch/ppc64/configs/bpa_defconfig b/arch/ppc64/configs/bpa_defconfig index 46c5da41c3ae..67ffecbc05cb 100644 --- a/arch/ppc64/configs/bpa_defconfig +++ b/arch/ppc64/configs/bpa_defconfig @@ -1,17 +1,17 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.13-rc6 -# Mon Aug 8 14:12:19 2005 +# Linux kernel version: 2.6.14-rc4 +# Thu Oct 20 08:29:10 2005 # CONFIG_64BIT=y CONFIG_MMU=y CONFIG_RWSEM_XCHGADD_ALGORITHM=y CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_GENERIC_ISA_DMA=y -CONFIG_HAVE_DEC_LOCK=y CONFIG_EARLY_PRINTK=y CONFIG_COMPAT=y CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y CONFIG_FORCE_MAX_ZONEORDER=13 # @@ -26,6 +26,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32 # General setup # CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y CONFIG_SWAP=y CONFIG_SYSVIPC=y # CONFIG_POSIX_MQUEUE is not set @@ -36,6 +37,7 @@ CONFIG_HOTPLUG=y CONFIG_KOBJECT_UEVENT=y # CONFIG_IKCONFIG is not set # CONFIG_CPUSETS is not set +CONFIG_INITRAMFS_SOURCE="" # CONFIG_EMBEDDED is not set CONFIG_KALLSYMS=y # CONFIG_KALLSYMS_ALL is not set @@ -95,6 +97,7 @@ CONFIG_FLATMEM_MANUAL=y # CONFIG_SPARSEMEM_MANUAL is not set CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y +# CONFIG_SPARSEMEM_STATIC is not set # CONFIG_NUMA is not set CONFIG_SCHED_SMT=y CONFIG_PREEMPT_NONE=y @@ -110,17 +113,18 @@ CONFIG_PPC_RTAS=y CONFIG_RTAS_PROC=y CONFIG_RTAS_FLASH=y CONFIG_SECCOMP=y +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set +CONFIG_PROC_DEVICETREE=y +# CONFIG_CMDLINE_BOOL is not set CONFIG_ISA_DMA_API=y # -# General setup +# Bus Options # CONFIG_PCI=y CONFIG_PCI_DOMAINS=y -CONFIG_BINFMT_ELF=y -# CONFIG_BINFMT_MISC is not set CONFIG_PCI_LEGACY_PROC=y -CONFIG_PCI_NAMES=y # CONFIG_PCI_DEBUG is not set # @@ -132,8 +136,6 @@ CONFIG_PCI_NAMES=y # PCI Hotplug Support # # CONFIG_HOTPLUG_PCI is not set -CONFIG_PROC_DEVICETREE=y -# CONFIG_CMDLINE_BOOL is not set # # Networking @@ -163,8 +165,8 @@ CONFIG_SYN_COOKIES=y # CONFIG_INET_ESP is not set # CONFIG_INET_IPCOMP is not set CONFIG_INET_TUNNEL=y -CONFIG_IP_TCPDIAG=y -CONFIG_IP_TCPDIAG_IPV6=y +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y # CONFIG_TCP_CONG_ADVANCED is not set CONFIG_TCP_CONG_BIC=y @@ -181,6 +183,7 @@ CONFIG_INET6_TUNNEL=m CONFIG_IPV6_TUNNEL=m CONFIG_NETFILTER=y # CONFIG_NETFILTER_DEBUG is not set +# CONFIG_NETFILTER_NETLINK is not set # # IP: Netfilter Configuration @@ -188,11 +191,14 @@ CONFIG_NETFILTER=y CONFIG_IP_NF_CONNTRACK=y # CONFIG_IP_NF_CT_ACCT is not set # CONFIG_IP_NF_CONNTRACK_MARK is not set +# CONFIG_IP_NF_CONNTRACK_EVENTS is not set CONFIG_IP_NF_CT_PROTO_SCTP=y CONFIG_IP_NF_FTP=m CONFIG_IP_NF_IRC=m +# CONFIG_IP_NF_NETBIOS_NS is not set CONFIG_IP_NF_TFTP=m CONFIG_IP_NF_AMANDA=m +# CONFIG_IP_NF_PPTP is not set CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_LIMIT=m @@ -216,13 +222,16 @@ CONFIG_IP_NF_MATCH_OWNER=m CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_REALM=m CONFIG_IP_NF_MATCH_SCTP=m +# CONFIG_IP_NF_MATCH_DCCP is not set CONFIG_IP_NF_MATCH_COMMENT=m CONFIG_IP_NF_MATCH_HASHLIMIT=m 
+CONFIG_IP_NF_MATCH_STRING=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_IP_NF_TARGET_TCPMSS=m +CONFIG_IP_NF_TARGET_NFQUEUE=m CONFIG_IP_NF_NAT=m CONFIG_IP_NF_NAT_NEEDED=y CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -240,6 +249,7 @@ CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_TARGET_DSCP=m CONFIG_IP_NF_TARGET_MARK=m CONFIG_IP_NF_TARGET_CLASSIFY=m +CONFIG_IP_NF_TARGET_TTL=m CONFIG_IP_NF_RAW=m CONFIG_IP_NF_TARGET_NOTRACK=m CONFIG_IP_NF_ARPTABLES=m @@ -251,6 +261,12 @@ CONFIG_IP_NF_ARP_MANGLE=m # # CONFIG_IP6_NF_QUEUE is not set # CONFIG_IP6_NF_IPTABLES is not set +# CONFIG_IP6_NF_TARGET_NFQUEUE is not set + +# +# DCCP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_DCCP is not set # # SCTP Configuration (EXPERIMENTAL) @@ -278,6 +294,7 @@ CONFIG_NET_CLS_ROUTE=y # CONFIG_HAMRADIO is not set # CONFIG_IRDA is not set # CONFIG_BT is not set +# CONFIG_IEEE80211 is not set # # Device Drivers @@ -291,6 +308,11 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y CONFIG_FW_LOADER=y # CONFIG_DEBUG_DRIVER is not set +# +# Connector - unified userspace <-> kernelspace linker +# +# CONFIG_CONNECTOR is not set + # # Memory Technology Devices (MTD) # @@ -322,7 +344,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=131072 CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" # CONFIG_CDROM_PKTCDVD is not set # @@ -395,6 +416,7 @@ CONFIG_IDEDMA_AUTO=y # # SCSI device support # +# CONFIG_RAID_ATTRS is not set # CONFIG_SCSI is not set # @@ -435,6 +457,11 @@ CONFIG_NETDEVICES=y # # CONFIG_ARCNET is not set +# +# PHY device support +# +# CONFIG_PHYLIB is not set + # # Ethernet (10 or 100Mbit) # @@ -442,6 +469,7 @@ CONFIG_NET_ETHERNET=y CONFIG_MII=y # CONFIG_HAPPYMEAL is not set # CONFIG_SUNGEM is not set +# CONFIG_CASSINI is not set # CONFIG_NET_VENDOR_3COM is not set # @@ -462,15 +490,18 @@ CONFIG_E1000=m # CONFIG_HAMACHI is not set # CONFIG_YELLOWFIN is not set # CONFIG_R8169 is not set +# CONFIG_SIS190 is not set CONFIG_SKGE=m # CONFIG_SK98LIN is not set # CONFIG_TIGON3 is not set # CONFIG_BNX2 is not set +# CONFIG_SPIDER_NET is not set # CONFIG_MV643XX_ETH is not set # # Ethernet (10000 Mbit) # +# CONFIG_CHELSIO_T1 is not set # CONFIG_IXGB is not set # CONFIG_S2IO is not set @@ -552,6 +583,7 @@ CONFIG_HW_CONSOLE=y CONFIG_SERIAL_NONSTANDARD=y # CONFIG_ROCKETPORT is not set # CONFIG_CYCLADES is not set +# CONFIG_DIGIEPCA is not set # CONFIG_MOXA_SMARTIO is not set # CONFIG_ISI is not set # CONFIG_SYNCLINK is not set @@ -642,7 +674,6 @@ CONFIG_I2C_ALGOBIT=y # CONFIG_I2C_I801 is not set # CONFIG_I2C_I810 is not set # CONFIG_I2C_PIIX4 is not set -# CONFIG_I2C_ISA is not set # CONFIG_I2C_NFORCE2 is not set # CONFIG_I2C_PARPORT_LIGHT is not set # CONFIG_I2C_PROSAVAGE is not set @@ -656,7 +687,6 @@ CONFIG_I2C_ALGOBIT=y # CONFIG_I2C_VIAPRO is not set # CONFIG_I2C_VOODOO3 is not set # CONFIG_I2C_PCA_ISA is not set -# CONFIG_I2C_SENSOR is not set # # Miscellaneous I2C Chip support @@ -683,11 +713,16 @@ CONFIG_I2C_ALGOBIT=y # Hardware Monitoring support # # CONFIG_HWMON is not set +# CONFIG_HWMON_VID is not set # # Misc devices # +# +# Multimedia Capabilities Port drivers +# + # # Multimedia devices # @@ -756,10 +791,6 @@ CONFIG_FS_MBCACHE=y # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set CONFIG_FS_POSIX_ACL=y - -# -# XFS support -# # CONFIG_XFS_FS is not set # CONFIG_MINIX_FS is not set # CONFIG_ROMFS_FS is not set @@ -768,6 +799,7 @@ CONFIG_INOTIFY=y CONFIG_DNOTIFY=y # CONFIG_AUTOFS_FS is not set # CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is 
not set # # CD-ROM/DVD Filesystems @@ -794,13 +826,11 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" CONFIG_PROC_FS=y CONFIG_PROC_KCORE=y CONFIG_SYSFS=y -# CONFIG_DEVPTS_FS_XATTR is not set CONFIG_TMPFS=y -CONFIG_TMPFS_XATTR=y -# CONFIG_TMPFS_SECURITY is not set CONFIG_HUGETLBFS=y CONFIG_HUGETLB_PAGE=y CONFIG_RAMFS=y +# CONFIG_RELAYFS_FS is not set # # Miscellaneous filesystems @@ -846,6 +876,7 @@ CONFIG_SUNRPC=m # CONFIG_NCP_FS is not set # CONFIG_CODA_FS is not set # CONFIG_AFS_FS is not set +# CONFIG_9P_FS is not set # # Partition Types @@ -923,6 +954,7 @@ CONFIG_NLS_ISO8859_15=m CONFIG_DEBUG_KERNEL=y CONFIG_MAGIC_SYSRQ=y CONFIG_LOG_BUF_SHIFT=15 +CONFIG_DETECT_SOFTLOCKUP=y # CONFIG_SCHEDSTATS is not set # CONFIG_DEBUG_SLAB is not set # CONFIG_DEBUG_SPINLOCK is not set @@ -981,7 +1013,12 @@ CONFIG_CRYPTO_DEFLATE=m # Library routines # # CONFIG_CRC_CCITT is not set +# CONFIG_CRC16 is not set CONFIG_CRC32=y # CONFIG_LIBCRC32C is not set CONFIG_ZLIB_INFLATE=m CONFIG_ZLIB_DEFLATE=m +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m diff --git a/arch/ppc64/configs/g5_defconfig b/arch/ppc64/configs/g5_defconfig index fc83d9330282..6323065fbf2c 100644 --- a/arch/ppc64/configs/g5_defconfig +++ b/arch/ppc64/configs/g5_defconfig @@ -1,17 +1,17 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.13-rc6 -# Mon Aug 8 14:16:59 2005 +# Linux kernel version: 2.6.14-rc4 +# Thu Oct 20 08:30:23 2005 # CONFIG_64BIT=y CONFIG_MMU=y CONFIG_RWSEM_XCHGADD_ALGORITHM=y CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_GENERIC_ISA_DMA=y -CONFIG_HAVE_DEC_LOCK=y CONFIG_EARLY_PRINTK=y CONFIG_COMPAT=y CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y CONFIG_FORCE_MAX_ZONEORDER=13 # @@ -26,6 +26,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32 # General setup # CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -37,6 +38,7 @@ CONFIG_KOBJECT_UEVENT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y # CONFIG_CPUSETS is not set +CONFIG_INITRAMFS_SOURCE="" # CONFIG_EMBEDDED is not set CONFIG_KALLSYMS=y # CONFIG_KALLSYMS_ALL is not set @@ -97,6 +99,7 @@ CONFIG_FLATMEM_MANUAL=y # CONFIG_SPARSEMEM_MANUAL is not set CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y +# CONFIG_SPARSEMEM_STATIC is not set # CONFIG_NUMA is not set # CONFIG_SCHED_SMT is not set CONFIG_PREEMPT_NONE=y @@ -109,19 +112,20 @@ CONFIG_HZ_250=y CONFIG_HZ=250 CONFIG_GENERIC_HARDIRQS=y CONFIG_SECCOMP=y +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set +# CONFIG_HOTPLUG_CPU is not set +CONFIG_PROC_DEVICETREE=y +# CONFIG_CMDLINE_BOOL is not set CONFIG_ISA_DMA_API=y # -# General setup +# Bus Options # CONFIG_PCI=y CONFIG_PCI_DOMAINS=y -CONFIG_BINFMT_ELF=y -# CONFIG_BINFMT_MISC is not set CONFIG_PCI_LEGACY_PROC=y -CONFIG_PCI_NAMES=y # CONFIG_PCI_DEBUG is not set -# CONFIG_HOTPLUG_CPU is not set # # PCCARD (PCMCIA/CardBus) support @@ -132,8 +136,6 @@ CONFIG_PCI_NAMES=y # PCI Hotplug Support # # CONFIG_HOTPLUG_PCI is not set -CONFIG_PROC_DEVICETREE=y -# CONFIG_CMDLINE_BOOL is not set # # Networking @@ -163,8 +165,8 @@ CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m CONFIG_INET_TUNNEL=y -CONFIG_IP_TCPDIAG=m -# CONFIG_IP_TCPDIAG_IPV6 is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y # CONFIG_TCP_CONG_ADVANCED is not set CONFIG_TCP_CONG_BIC=y @@ -175,6 +177,7 @@ CONFIG_TCP_CONG_BIC=y # CONFIG_IPV6 is not set CONFIG_NETFILTER=y # CONFIG_NETFILTER_DEBUG is not set +# CONFIG_NETFILTER_NETLINK is not set # # IP: Netfilter Configuration @@ -182,11 
+185,14 @@ CONFIG_NETFILTER=y CONFIG_IP_NF_CONNTRACK=m CONFIG_IP_NF_CT_ACCT=y CONFIG_IP_NF_CONNTRACK_MARK=y +CONFIG_IP_NF_CONNTRACK_EVENTS=y CONFIG_IP_NF_CT_PROTO_SCTP=m CONFIG_IP_NF_FTP=m CONFIG_IP_NF_IRC=m +# CONFIG_IP_NF_NETBIOS_NS is not set CONFIG_IP_NF_TFTP=m CONFIG_IP_NF_AMANDA=m +# CONFIG_IP_NF_PPTP is not set CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_LIMIT=m @@ -210,14 +216,18 @@ CONFIG_IP_NF_MATCH_OWNER=m CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_REALM=m CONFIG_IP_NF_MATCH_SCTP=m +# CONFIG_IP_NF_MATCH_DCCP is not set CONFIG_IP_NF_MATCH_COMMENT=m CONFIG_IP_NF_MATCH_CONNMARK=m +CONFIG_IP_NF_MATCH_CONNBYTES=m CONFIG_IP_NF_MATCH_HASHLIMIT=m +CONFIG_IP_NF_MATCH_STRING=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_IP_NF_TARGET_TCPMSS=m +CONFIG_IP_NF_TARGET_NFQUEUE=m CONFIG_IP_NF_NAT=m CONFIG_IP_NF_NAT_NEEDED=y CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -235,6 +245,7 @@ CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_TARGET_DSCP=m CONFIG_IP_NF_TARGET_MARK=m CONFIG_IP_NF_TARGET_CLASSIFY=m +CONFIG_IP_NF_TARGET_TTL=m CONFIG_IP_NF_TARGET_CONNMARK=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_RAW=m @@ -243,6 +254,11 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m +# +# DCCP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_DCCP is not set + # # SCTP Configuration (EXPERIMENTAL) # @@ -270,6 +286,7 @@ CONFIG_NET_CLS_ROUTE=y # CONFIG_HAMRADIO is not set # CONFIG_IRDA is not set # CONFIG_BT is not set +# CONFIG_IEEE80211 is not set # # Device Drivers @@ -283,6 +300,11 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y CONFIG_FW_LOADER=y # CONFIG_DEBUG_DRIVER is not set +# +# Connector - unified userspace <-> kernelspace linker +# +# CONFIG_CONNECTOR is not set + # # Memory Technology Devices (MTD) # @@ -315,7 +337,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=65536 CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" CONFIG_CDROM_PKTCDVD=m CONFIG_CDROM_PKTCDVD_BUFFERS=8 # CONFIG_CDROM_PKTCDVD_WCACHE is not set @@ -395,6 +416,7 @@ CONFIG_IDEDMA_AUTO=y # # SCSI device support # +# CONFIG_RAID_ATTRS is not set CONFIG_SCSI=y CONFIG_SCSI_PROC_FS=y @@ -422,6 +444,7 @@ CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SPI_ATTRS=y # CONFIG_SCSI_FC_ATTRS is not set # CONFIG_SCSI_ISCSI_ATTRS is not set +# CONFIG_SCSI_SAS_ATTRS is not set # # SCSI low-level drivers @@ -435,10 +458,12 @@ CONFIG_SCSI_SPI_ATTRS=y # CONFIG_SCSI_AIC79XX is not set # CONFIG_MEGARAID_NEWGEN is not set # CONFIG_MEGARAID_LEGACY is not set +# CONFIG_MEGARAID_SAS is not set CONFIG_SCSI_SATA=y # CONFIG_SCSI_SATA_AHCI is not set CONFIG_SCSI_SATA_SVW=y # CONFIG_SCSI_ATA_PIIX is not set +# CONFIG_SCSI_SATA_MV is not set # CONFIG_SCSI_SATA_NV is not set # CONFIG_SCSI_SATA_PROMISE is not set # CONFIG_SCSI_SATA_QSTOR is not set @@ -498,6 +523,7 @@ CONFIG_DM_ZERO=m # CONFIG_FUSION is not set # CONFIG_FUSION_SPI is not set # CONFIG_FUSION_FC is not set +# CONFIG_FUSION_SAS is not set # # IEEE 1394 (FireWire) support @@ -540,7 +566,6 @@ CONFIG_IEEE1394_RAWIO=y # CONFIG_ADB_PMU=y CONFIG_PMAC_SMU=y -# CONFIG_PMAC_BACKLIGHT is not set CONFIG_THERM_PM72=y # @@ -557,6 +582,11 @@ CONFIG_TUN=m # # CONFIG_ARCNET is not set +# +# PHY device support +# +# CONFIG_PHYLIB is not set + # # Ethernet (10 or 100Mbit) # @@ -564,6 +594,7 @@ CONFIG_NET_ETHERNET=y CONFIG_MII=y # CONFIG_HAPPYMEAL is not set CONFIG_SUNGEM=y +# CONFIG_CASSINI is not set # CONFIG_NET_VENDOR_3COM is not set # @@ -585,6 +616,7 @@ CONFIG_E1000=y # CONFIG_HAMACHI is not set # 
CONFIG_YELLOWFIN is not set # CONFIG_R8169 is not set +# CONFIG_SIS190 is not set # CONFIG_SKGE is not set # CONFIG_SK98LIN is not set CONFIG_TIGON3=m @@ -594,6 +626,7 @@ CONFIG_TIGON3=m # # Ethernet (10000 Mbit) # +# CONFIG_CHELSIO_T1 is not set # CONFIG_IXGB is not set # CONFIG_S2IO is not set @@ -760,8 +793,8 @@ CONFIG_I2C_ALGOBIT=y # CONFIG_I2C_I801 is not set # CONFIG_I2C_I810 is not set # CONFIG_I2C_PIIX4 is not set -# CONFIG_I2C_ISA is not set CONFIG_I2C_KEYWEST=y +CONFIG_I2C_PMAC_SMU=y # CONFIG_I2C_NFORCE2 is not set # CONFIG_I2C_PARPORT_LIGHT is not set # CONFIG_I2C_PROSAVAGE is not set @@ -775,7 +808,6 @@ CONFIG_I2C_KEYWEST=y # CONFIG_I2C_VIAPRO is not set # CONFIG_I2C_VOODOO3 is not set # CONFIG_I2C_PCA_ISA is not set -# CONFIG_I2C_SENSOR is not set # # Miscellaneous I2C Chip support @@ -802,11 +834,16 @@ CONFIG_I2C_KEYWEST=y # Hardware Monitoring support # # CONFIG_HWMON is not set +# CONFIG_HWMON_VID is not set # # Misc devices # +# +# Multimedia Capabilities Port drivers +# + # # Multimedia devices # @@ -856,6 +893,7 @@ CONFIG_FB_RADEON_I2C=y # CONFIG_FB_KYRO is not set # CONFIG_FB_3DFX is not set # CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_CYBLA is not set # CONFIG_FB_TRIDENT is not set # CONFIG_FB_S1D13XXX is not set # CONFIG_FB_VIRTUAL is not set @@ -937,6 +975,7 @@ CONFIG_USB_STORAGE_DPCM=y CONFIG_USB_STORAGE_SDDR09=y CONFIG_USB_STORAGE_SDDR55=y CONFIG_USB_STORAGE_JUMPSHOT=y +# CONFIG_USB_STORAGE_ONETOUCH is not set # # USB Input Devices @@ -956,9 +995,11 @@ CONFIG_USB_HIDDEV=y # CONFIG_USB_MTOUCH is not set # CONFIG_USB_ITMTOUCH is not set # CONFIG_USB_EGALAX is not set +# CONFIG_USB_YEALINK is not set # CONFIG_USB_XPAD is not set # CONFIG_USB_ATI_REMOTE is not set # CONFIG_USB_KEYSPAN_REMOTE is not set +# CONFIG_USB_APPLETOUCH is not set # # USB Imaging devices @@ -983,30 +1024,14 @@ CONFIG_USB_KAWETH=m CONFIG_USB_PEGASUS=m CONFIG_USB_RTL8150=m CONFIG_USB_USBNET=m - -# -# USB Host-to-Host Cables -# -CONFIG_USB_ALI_M5632=y -CONFIG_USB_AN2720=y -CONFIG_USB_BELKIN=y -CONFIG_USB_GENESYS=y -CONFIG_USB_NET1080=y -CONFIG_USB_PL2301=y -CONFIG_USB_KC2190=y - -# -# Intelligent USB Devices/Gadgets -# -CONFIG_USB_ARMLINUX=y -CONFIG_USB_EPSON2888=y -CONFIG_USB_ZAURUS=y -CONFIG_USB_CDCETHER=y - -# -# USB Network Adapters -# -CONFIG_USB_AX8817X=y +# CONFIG_USB_NET_AX8817X is not set +CONFIG_USB_NET_CDCETHER=m +# CONFIG_USB_NET_GL620A is not set +# CONFIG_USB_NET_NET1080 is not set +# CONFIG_USB_NET_PLUSB is not set +# CONFIG_USB_NET_RNDIS_HOST is not set +# CONFIG_USB_NET_CDC_SUBSET is not set +# CONFIG_USB_NET_ZAURUS is not set CONFIG_USB_MON=y # @@ -1124,16 +1149,12 @@ CONFIG_REISERFS_FS_POSIX_ACL=y CONFIG_REISERFS_FS_SECURITY=y # CONFIG_JFS_FS is not set CONFIG_FS_POSIX_ACL=y - -# -# XFS support -# CONFIG_XFS_FS=m CONFIG_XFS_EXPORT=y -# CONFIG_XFS_RT is not set # CONFIG_XFS_QUOTA is not set CONFIG_XFS_SECURITY=y CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set # CONFIG_MINIX_FS is not set # CONFIG_ROMFS_FS is not set CONFIG_INOTIFY=y @@ -1141,6 +1162,7 @@ CONFIG_INOTIFY=y CONFIG_DNOTIFY=y CONFIG_AUTOFS_FS=m # CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set # # CD-ROM/DVD Filesystems @@ -1168,14 +1190,11 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" CONFIG_PROC_FS=y CONFIG_PROC_KCORE=y CONFIG_SYSFS=y -CONFIG_DEVPTS_FS_XATTR=y -# CONFIG_DEVPTS_FS_SECURITY is not set CONFIG_TMPFS=y -CONFIG_TMPFS_XATTR=y -CONFIG_TMPFS_SECURITY=y CONFIG_HUGETLBFS=y CONFIG_HUGETLB_PAGE=y CONFIG_RAMFS=y +# CONFIG_RELAYFS_FS is not set # # Miscellaneous filesystems @@ -1225,6 +1244,7 @@ 
CONFIG_CIFS=m # CONFIG_NCP_FS is not set # CONFIG_CODA_FS is not set # CONFIG_AFS_FS is not set +# CONFIG_9P_FS is not set # # Partition Types @@ -1303,6 +1323,7 @@ CONFIG_OPROFILE=y CONFIG_DEBUG_KERNEL=y CONFIG_MAGIC_SYSRQ=y CONFIG_LOG_BUF_SHIFT=17 +CONFIG_DETECT_SOFTLOCKUP=y # CONFIG_SCHEDSTATS is not set # CONFIG_DEBUG_SLAB is not set # CONFIG_DEBUG_SPINLOCK is not set @@ -1360,7 +1381,12 @@ CONFIG_CRYPTO_TEST=m # Library routines # CONFIG_CRC_CCITT=m +# CONFIG_CRC16 is not set CONFIG_CRC32=y CONFIG_LIBCRC32C=m CONFIG_ZLIB_INFLATE=y CONFIG_ZLIB_DEFLATE=m +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m diff --git a/arch/ppc64/configs/iSeries_defconfig b/arch/ppc64/configs/iSeries_defconfig index 013d4e0e4003..62e92c7e9e27 100644 --- a/arch/ppc64/configs/iSeries_defconfig +++ b/arch/ppc64/configs/iSeries_defconfig @@ -1,17 +1,17 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.13-rc6 -# Mon Aug 8 14:17:02 2005 +# Linux kernel version: 2.6.14-rc4 +# Thu Oct 20 08:30:56 2005 # CONFIG_64BIT=y CONFIG_MMU=y CONFIG_RWSEM_XCHGADD_ALGORITHM=y CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_GENERIC_ISA_DMA=y -CONFIG_HAVE_DEC_LOCK=y CONFIG_EARLY_PRINTK=y CONFIG_COMPAT=y CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y CONFIG_FORCE_MAX_ZONEORDER=13 # @@ -26,6 +26,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32 # General setup # CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -38,6 +39,7 @@ CONFIG_KOBJECT_UEVENT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y # CONFIG_CPUSETS is not set +CONFIG_INITRAMFS_SOURCE="" # CONFIG_EMBEDDED is not set CONFIG_KALLSYMS=y # CONFIG_KALLSYMS_ALL is not set @@ -88,6 +90,7 @@ CONFIG_FLATMEM_MANUAL=y # CONFIG_SPARSEMEM_MANUAL is not set CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y +# CONFIG_SPARSEMEM_STATIC is not set # CONFIG_NUMA is not set # CONFIG_SCHED_SMT is not set CONFIG_PREEMPT_NONE=y @@ -101,17 +104,16 @@ CONFIG_HZ=250 CONFIG_GENERIC_HARDIRQS=y CONFIG_LPARCFG=y CONFIG_SECCOMP=y +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set CONFIG_ISA_DMA_API=y # -# General setup +# Bus Options # CONFIG_PCI=y CONFIG_PCI_DOMAINS=y -CONFIG_BINFMT_ELF=y -# CONFIG_BINFMT_MISC is not set CONFIG_PCI_LEGACY_PROC=y -CONFIG_PCI_NAMES=y # CONFIG_PCI_DEBUG is not set # @@ -152,8 +154,8 @@ CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m CONFIG_INET_TUNNEL=y -CONFIG_IP_TCPDIAG=m -# CONFIG_IP_TCPDIAG_IPV6 is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y # CONFIG_TCP_CONG_ADVANCED is not set CONFIG_TCP_CONG_BIC=y @@ -164,6 +166,7 @@ CONFIG_TCP_CONG_BIC=y # CONFIG_IPV6 is not set CONFIG_NETFILTER=y # CONFIG_NETFILTER_DEBUG is not set +# CONFIG_NETFILTER_NETLINK is not set # # IP: Netfilter Configuration @@ -171,11 +174,14 @@ CONFIG_NETFILTER=y CONFIG_IP_NF_CONNTRACK=m CONFIG_IP_NF_CT_ACCT=y CONFIG_IP_NF_CONNTRACK_MARK=y +CONFIG_IP_NF_CONNTRACK_EVENTS=y CONFIG_IP_NF_CT_PROTO_SCTP=m CONFIG_IP_NF_FTP=m CONFIG_IP_NF_IRC=m +# CONFIG_IP_NF_NETBIOS_NS is not set CONFIG_IP_NF_TFTP=m CONFIG_IP_NF_AMANDA=m +# CONFIG_IP_NF_PPTP is not set CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_LIMIT=m @@ -199,14 +205,18 @@ CONFIG_IP_NF_MATCH_OWNER=m CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_REALM=m CONFIG_IP_NF_MATCH_SCTP=m +# CONFIG_IP_NF_MATCH_DCCP is not set CONFIG_IP_NF_MATCH_COMMENT=m CONFIG_IP_NF_MATCH_CONNMARK=m +CONFIG_IP_NF_MATCH_CONNBYTES=m CONFIG_IP_NF_MATCH_HASHLIMIT=m +CONFIG_IP_NF_MATCH_STRING=m CONFIG_IP_NF_FILTER=m 
CONFIG_IP_NF_TARGET_REJECT=m CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_IP_NF_TARGET_TCPMSS=m +CONFIG_IP_NF_TARGET_NFQUEUE=m CONFIG_IP_NF_NAT=m CONFIG_IP_NF_NAT_NEEDED=y CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -224,6 +234,7 @@ CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_TARGET_DSCP=m CONFIG_IP_NF_TARGET_MARK=m CONFIG_IP_NF_TARGET_CLASSIFY=m +CONFIG_IP_NF_TARGET_TTL=m CONFIG_IP_NF_TARGET_CONNMARK=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_RAW=m @@ -232,6 +243,11 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m +# +# DCCP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_DCCP is not set + # # SCTP Configuration (EXPERIMENTAL) # @@ -259,6 +275,7 @@ CONFIG_NET_CLS_ROUTE=y # CONFIG_HAMRADIO is not set # CONFIG_IRDA is not set # CONFIG_BT is not set +# CONFIG_IEEE80211 is not set # # Device Drivers @@ -272,6 +289,11 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y CONFIG_FW_LOADER=m # CONFIG_DEBUG_DRIVER is not set +# +# Connector - unified userspace <-> kernelspace linker +# +# CONFIG_CONNECTOR is not set + # # Memory Technology Devices (MTD) # @@ -303,7 +325,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=65536 CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" # CONFIG_CDROM_PKTCDVD is not set # @@ -323,6 +344,7 @@ CONFIG_IOSCHED_CFQ=y # # SCSI device support # +# CONFIG_RAID_ATTRS is not set CONFIG_SCSI=y CONFIG_SCSI_PROC_FS=y @@ -350,6 +372,7 @@ CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SPI_ATTRS=y CONFIG_SCSI_FC_ATTRS=y # CONFIG_SCSI_ISCSI_ATTRS is not set +# CONFIG_SCSI_SAS_ATTRS is not set # # SCSI low-level drivers @@ -363,6 +386,7 @@ CONFIG_SCSI_FC_ATTRS=y # CONFIG_SCSI_AIC79XX is not set # CONFIG_MEGARAID_NEWGEN is not set # CONFIG_MEGARAID_LEGACY is not set +# CONFIG_MEGARAID_SAS is not set # CONFIG_SCSI_SATA is not set # CONFIG_SCSI_BUSLOGIC is not set # CONFIG_SCSI_DMX3191D is not set @@ -415,6 +439,7 @@ CONFIG_DM_ZERO=m # CONFIG_FUSION is not set # CONFIG_FUSION_SPI is not set # CONFIG_FUSION_FC is not set +# CONFIG_FUSION_SAS is not set # # IEEE 1394 (FireWire) support @@ -444,6 +469,11 @@ CONFIG_TUN=m # # CONFIG_ARCNET is not set +# +# PHY device support +# +# CONFIG_PHYLIB is not set + # # Ethernet (10 or 100Mbit) # @@ -451,6 +481,7 @@ CONFIG_NET_ETHERNET=y CONFIG_MII=y # CONFIG_HAPPYMEAL is not set # CONFIG_SUNGEM is not set +# CONFIG_CASSINI is not set # CONFIG_NET_VENDOR_3COM is not set # @@ -489,6 +520,7 @@ CONFIG_E1000=m # CONFIG_HAMACHI is not set # CONFIG_YELLOWFIN is not set # CONFIG_R8169 is not set +# CONFIG_SIS190 is not set # CONFIG_SKGE is not set # CONFIG_SK98LIN is not set # CONFIG_VIA_VELOCITY is not set @@ -498,6 +530,7 @@ CONFIG_E1000=m # # Ethernet (10000 Mbit) # +# CONFIG_CHELSIO_T1 is not set # CONFIG_IXGB is not set # CONFIG_S2IO is not set @@ -632,7 +665,6 @@ CONFIG_MAX_RAW_DEVS=256 # I2C support # # CONFIG_I2C is not set -# CONFIG_I2C_SENSOR is not set # # Dallas's 1-wire bus @@ -643,11 +675,16 @@ CONFIG_MAX_RAW_DEVS=256 # Hardware Monitoring support # # CONFIG_HWMON is not set +# CONFIG_HWMON_VID is not set # # Misc devices # +# +# Multimedia Capabilities Port drivers +# + # # Multimedia devices # @@ -722,16 +759,12 @@ CONFIG_JFS_SECURITY=y # CONFIG_JFS_DEBUG is not set # CONFIG_JFS_STATISTICS is not set CONFIG_FS_POSIX_ACL=y - -# -# XFS support -# CONFIG_XFS_FS=m CONFIG_XFS_EXPORT=y -# CONFIG_XFS_RT is not set # CONFIG_XFS_QUOTA is not set CONFIG_XFS_SECURITY=y CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set # CONFIG_MINIX_FS is not set # CONFIG_ROMFS_FS is not set CONFIG_INOTIFY=y @@ -739,6 +772,7 @@ 
CONFIG_INOTIFY=y CONFIG_DNOTIFY=y CONFIG_AUTOFS_FS=m # CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set # # CD-ROM/DVD Filesystems @@ -766,14 +800,11 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" CONFIG_PROC_FS=y CONFIG_PROC_KCORE=y CONFIG_SYSFS=y -CONFIG_DEVPTS_FS_XATTR=y -CONFIG_DEVPTS_FS_SECURITY=y CONFIG_TMPFS=y -CONFIG_TMPFS_XATTR=y -CONFIG_TMPFS_SECURITY=y # CONFIG_HUGETLBFS is not set # CONFIG_HUGETLB_PAGE is not set CONFIG_RAMFS=y +# CONFIG_RELAYFS_FS is not set # # Miscellaneous filesystems @@ -824,6 +855,7 @@ CONFIG_CIFS_POSIX=y # CONFIG_NCP_FS is not set # CONFIG_CODA_FS is not set # CONFIG_AFS_FS is not set +# CONFIG_9P_FS is not set # # Partition Types @@ -897,6 +929,7 @@ CONFIG_OPROFILE=y CONFIG_DEBUG_KERNEL=y CONFIG_MAGIC_SYSRQ=y CONFIG_LOG_BUF_SHIFT=17 +CONFIG_DETECT_SOFTLOCKUP=y # CONFIG_SCHEDSTATS is not set # CONFIG_DEBUG_SLAB is not set # CONFIG_DEBUG_SPINLOCK is not set @@ -954,7 +987,12 @@ CONFIG_CRYPTO_TEST=m # Library routines # CONFIG_CRC_CCITT=m +# CONFIG_CRC16 is not set CONFIG_CRC32=y CONFIG_LIBCRC32C=m CONFIG_ZLIB_INFLATE=y CONFIG_ZLIB_DEFLATE=m +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m diff --git a/arch/ppc64/configs/maple_defconfig b/arch/ppc64/configs/maple_defconfig index dd42892cd873..7b480f3d1406 100644 --- a/arch/ppc64/configs/maple_defconfig +++ b/arch/ppc64/configs/maple_defconfig @@ -1,17 +1,17 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.13-rc6 -# Mon Aug 8 14:17:04 2005 +# Linux kernel version: 2.6.14-rc4 +# Thu Oct 20 08:31:24 2005 # CONFIG_64BIT=y CONFIG_MMU=y CONFIG_RWSEM_XCHGADD_ALGORITHM=y CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_GENERIC_ISA_DMA=y -CONFIG_HAVE_DEC_LOCK=y CONFIG_EARLY_PRINTK=y CONFIG_COMPAT=y CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y CONFIG_FORCE_MAX_ZONEORDER=13 # @@ -26,6 +26,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32 # General setup # CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -37,6 +38,7 @@ CONFIG_KOBJECT_UEVENT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y # CONFIG_CPUSETS is not set +CONFIG_INITRAMFS_SOURCE="" # CONFIG_EMBEDDED is not set CONFIG_KALLSYMS=y CONFIG_KALLSYMS_ALL=y @@ -97,6 +99,7 @@ CONFIG_FLATMEM_MANUAL=y # CONFIG_SPARSEMEM_MANUAL is not set CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y +# CONFIG_SPARSEMEM_STATIC is not set # CONFIG_NUMA is not set # CONFIG_SCHED_SMT is not set CONFIG_PREEMPT_NONE=y @@ -109,17 +112,18 @@ CONFIG_HZ_250=y CONFIG_HZ=250 CONFIG_GENERIC_HARDIRQS=y CONFIG_SECCOMP=y +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set +CONFIG_PROC_DEVICETREE=y +# CONFIG_CMDLINE_BOOL is not set CONFIG_ISA_DMA_API=y # -# General setup +# Bus Options # CONFIG_PCI=y CONFIG_PCI_DOMAINS=y -CONFIG_BINFMT_ELF=y -# CONFIG_BINFMT_MISC is not set CONFIG_PCI_LEGACY_PROC=y -CONFIG_PCI_NAMES=y # CONFIG_PCI_DEBUG is not set # @@ -131,8 +135,6 @@ CONFIG_PCI_NAMES=y # PCI Hotplug Support # # CONFIG_HOTPLUG_PCI is not set -CONFIG_PROC_DEVICETREE=y -# CONFIG_CMDLINE_BOOL is not set # # Networking @@ -163,13 +165,18 @@ CONFIG_IP_PNP_DHCP=y # CONFIG_INET_ESP is not set # CONFIG_INET_IPCOMP is not set # CONFIG_INET_TUNNEL is not set -CONFIG_IP_TCPDIAG=y -# CONFIG_IP_TCPDIAG_IPV6 is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y # CONFIG_TCP_CONG_ADVANCED is not set CONFIG_TCP_CONG_BIC=y # CONFIG_IPV6 is not set # CONFIG_NETFILTER is not set +# +# DCCP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_DCCP is not set + # # SCTP Configuration 
(EXPERIMENTAL) # @@ -196,6 +203,7 @@ CONFIG_TCP_CONG_BIC=y # CONFIG_HAMRADIO is not set # CONFIG_IRDA is not set # CONFIG_BT is not set +# CONFIG_IEEE80211 is not set # # Device Drivers @@ -209,6 +217,11 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y # CONFIG_FW_LOADER is not set # CONFIG_DEBUG_DRIVER is not set +# +# Connector - unified userspace <-> kernelspace linker +# +# CONFIG_CONNECTOR is not set + # # Memory Technology Devices (MTD) # @@ -240,7 +253,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=8192 # CONFIG_BLK_DEV_INITRD is not set -CONFIG_INITRAMFS_SOURCE="" # CONFIG_CDROM_PKTCDVD is not set # @@ -313,6 +325,7 @@ CONFIG_IDEDMA_AUTO=y # # SCSI device support # +# CONFIG_RAID_ATTRS is not set # CONFIG_SCSI is not set # @@ -353,6 +366,11 @@ CONFIG_NETDEVICES=y # # CONFIG_ARCNET is not set +# +# PHY device support +# +# CONFIG_PHYLIB is not set + # # Ethernet (10 or 100Mbit) # @@ -360,6 +378,7 @@ CONFIG_NET_ETHERNET=y CONFIG_MII=y # CONFIG_HAPPYMEAL is not set # CONFIG_SUNGEM is not set +# CONFIG_CASSINI is not set # CONFIG_NET_VENDOR_3COM is not set # @@ -398,6 +417,7 @@ CONFIG_E1000=y # CONFIG_HAMACHI is not set # CONFIG_YELLOWFIN is not set # CONFIG_R8169 is not set +# CONFIG_SIS190 is not set # CONFIG_SKGE is not set # CONFIG_SK98LIN is not set # CONFIG_VIA_VELOCITY is not set @@ -408,6 +428,7 @@ CONFIG_E1000=y # # Ethernet (10000 Mbit) # +# CONFIG_CHELSIO_T1 is not set # CONFIG_IXGB is not set # CONFIG_S2IO is not set @@ -553,7 +574,6 @@ CONFIG_I2C_AMD8111=y # CONFIG_I2C_I801 is not set # CONFIG_I2C_I810 is not set # CONFIG_I2C_PIIX4 is not set -# CONFIG_I2C_ISA is not set # CONFIG_I2C_NFORCE2 is not set # CONFIG_I2C_PARPORT_LIGHT is not set # CONFIG_I2C_PROSAVAGE is not set @@ -567,7 +587,6 @@ CONFIG_I2C_AMD8111=y # CONFIG_I2C_VIAPRO is not set # CONFIG_I2C_VOODOO3 is not set # CONFIG_I2C_PCA_ISA is not set -# CONFIG_I2C_SENSOR is not set # # Miscellaneous I2C Chip support @@ -594,11 +613,16 @@ CONFIG_I2C_AMD8111=y # Hardware Monitoring support # # CONFIG_HWMON is not set +# CONFIG_HWMON_VID is not set # # Misc devices # +# +# Multimedia Capabilities Port drivers +# + # # Multimedia devices # @@ -681,9 +705,11 @@ CONFIG_USB_HIDINPUT=y # CONFIG_USB_MTOUCH is not set # CONFIG_USB_ITMTOUCH is not set # CONFIG_USB_EGALAX is not set +# CONFIG_USB_YEALINK is not set # CONFIG_USB_XPAD is not set # CONFIG_USB_ATI_REMOTE is not set # CONFIG_USB_KEYSPAN_REMOTE is not set +# CONFIG_USB_APPLETOUCH is not set # # USB Imaging devices @@ -814,10 +840,6 @@ CONFIG_JBD=y # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set CONFIG_FS_POSIX_ACL=y - -# -# XFS support -# # CONFIG_XFS_FS is not set # CONFIG_MINIX_FS is not set # CONFIG_ROMFS_FS is not set @@ -826,6 +848,7 @@ CONFIG_INOTIFY=y CONFIG_DNOTIFY=y # CONFIG_AUTOFS_FS is not set # CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set # # CD-ROM/DVD Filesystems @@ -849,14 +872,11 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" CONFIG_PROC_FS=y CONFIG_PROC_KCORE=y CONFIG_SYSFS=y -CONFIG_DEVPTS_FS_XATTR=y -# CONFIG_DEVPTS_FS_SECURITY is not set CONFIG_TMPFS=y -CONFIG_TMPFS_XATTR=y -CONFIG_TMPFS_SECURITY=y CONFIG_HUGETLBFS=y CONFIG_HUGETLB_PAGE=y CONFIG_RAMFS=y +# CONFIG_RELAYFS_FS is not set # # Miscellaneous filesystems @@ -898,6 +918,7 @@ CONFIG_RPCSEC_GSS_KRB5=y # CONFIG_NCP_FS is not set # CONFIG_CODA_FS is not set # CONFIG_AFS_FS is not set +# CONFIG_9P_FS is not set # # Partition Types @@ -975,6 +996,7 @@ CONFIG_NLS_UTF8=y CONFIG_DEBUG_KERNEL=y CONFIG_MAGIC_SYSRQ=y CONFIG_LOG_BUF_SHIFT=17 
+CONFIG_DETECT_SOFTLOCKUP=y # CONFIG_SCHEDSTATS is not set CONFIG_DEBUG_SLAB=y # CONFIG_DEBUG_SPINLOCK is not set @@ -1034,6 +1056,7 @@ CONFIG_CRYPTO_DES=y # Library routines # CONFIG_CRC_CCITT=y +# CONFIG_CRC16 is not set CONFIG_CRC32=y # CONFIG_LIBCRC32C is not set CONFIG_ZLIB_INFLATE=y diff --git a/arch/ppc64/configs/pSeries_defconfig b/arch/ppc64/configs/pSeries_defconfig index 29f7b80b0efc..9f09dff9e11a 100644 --- a/arch/ppc64/configs/pSeries_defconfig +++ b/arch/ppc64/configs/pSeries_defconfig @@ -1,17 +1,17 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.13-rc6 -# Mon Aug 8 14:17:07 2005 +# Linux kernel version: 2.6.14-rc4 +# Thu Oct 20 08:32:17 2005 # CONFIG_64BIT=y CONFIG_MMU=y CONFIG_RWSEM_XCHGADD_ALGORITHM=y CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_GENERIC_ISA_DMA=y -CONFIG_HAVE_DEC_LOCK=y CONFIG_EARLY_PRINTK=y CONFIG_COMPAT=y CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y CONFIG_FORCE_MAX_ZONEORDER=13 # @@ -26,6 +26,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32 # General setup # CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -38,6 +39,7 @@ CONFIG_KOBJECT_UEVENT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_CPUSETS=y +CONFIG_INITRAMFS_SOURCE="" # CONFIG_EMBEDDED is not set CONFIG_KALLSYMS=y CONFIG_KALLSYMS_ALL=y @@ -104,6 +106,7 @@ CONFIG_DISCONTIGMEM_MANUAL=y CONFIG_DISCONTIGMEM=y CONFIG_FLAT_NODE_MEM_MAP=y CONFIG_NEED_MULTIPLE_NODES=y +# CONFIG_SPARSEMEM_STATIC is not set CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y CONFIG_NODES_SPAN_OTHER_NODES=y CONFIG_NUMA=y @@ -124,19 +127,20 @@ CONFIG_RTAS_FLASH=m CONFIG_SCANLOG=m CONFIG_LPARCFG=y CONFIG_SECCOMP=y +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set +CONFIG_HOTPLUG_CPU=y +CONFIG_PROC_DEVICETREE=y +# CONFIG_CMDLINE_BOOL is not set CONFIG_ISA_DMA_API=y # -# General setup +# Bus Options # CONFIG_PCI=y CONFIG_PCI_DOMAINS=y -CONFIG_BINFMT_ELF=y -# CONFIG_BINFMT_MISC is not set CONFIG_PCI_LEGACY_PROC=y -CONFIG_PCI_NAMES=y # CONFIG_PCI_DEBUG is not set -CONFIG_HOTPLUG_CPU=y # # PCCARD (PCMCIA/CardBus) support @@ -152,8 +156,6 @@ CONFIG_HOTPLUG_PCI=m # CONFIG_HOTPLUG_PCI_SHPC is not set CONFIG_HOTPLUG_PCI_RPA=m CONFIG_HOTPLUG_PCI_RPA_DLPAR=m -CONFIG_PROC_DEVICETREE=y -# CONFIG_CMDLINE_BOOL is not set # # Networking @@ -183,8 +185,8 @@ CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m CONFIG_INET_TUNNEL=y -CONFIG_IP_TCPDIAG=m -# CONFIG_IP_TCPDIAG_IPV6 is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y # CONFIG_TCP_CONG_ADVANCED is not set CONFIG_TCP_CONG_BIC=y @@ -195,6 +197,9 @@ CONFIG_TCP_CONG_BIC=y # CONFIG_IPV6 is not set CONFIG_NETFILTER=y # CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m # # IP: Netfilter Configuration @@ -202,11 +207,15 @@ CONFIG_NETFILTER=y CONFIG_IP_NF_CONNTRACK=m CONFIG_IP_NF_CT_ACCT=y CONFIG_IP_NF_CONNTRACK_MARK=y +CONFIG_IP_NF_CONNTRACK_EVENTS=y +CONFIG_IP_NF_CONNTRACK_NETLINK=m CONFIG_IP_NF_CT_PROTO_SCTP=m CONFIG_IP_NF_FTP=m CONFIG_IP_NF_IRC=m +# CONFIG_IP_NF_NETBIOS_NS is not set CONFIG_IP_NF_TFTP=m CONFIG_IP_NF_AMANDA=m +# CONFIG_IP_NF_PPTP is not set CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_LIMIT=m @@ -230,14 +239,18 @@ CONFIG_IP_NF_MATCH_OWNER=m CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_REALM=m CONFIG_IP_NF_MATCH_SCTP=m +# CONFIG_IP_NF_MATCH_DCCP is not set CONFIG_IP_NF_MATCH_COMMENT=m CONFIG_IP_NF_MATCH_CONNMARK=m +CONFIG_IP_NF_MATCH_CONNBYTES=m CONFIG_IP_NF_MATCH_HASHLIMIT=m 
+CONFIG_IP_NF_MATCH_STRING=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_IP_NF_TARGET_TCPMSS=m +CONFIG_IP_NF_TARGET_NFQUEUE=m CONFIG_IP_NF_NAT=m CONFIG_IP_NF_NAT_NEEDED=y CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -255,6 +268,7 @@ CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_TARGET_DSCP=m CONFIG_IP_NF_TARGET_MARK=m CONFIG_IP_NF_TARGET_CLASSIFY=m +CONFIG_IP_NF_TARGET_TTL=m CONFIG_IP_NF_TARGET_CONNMARK=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_RAW=m @@ -263,6 +277,11 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m +# +# DCCP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_DCCP is not set + # # SCTP Configuration (EXPERIMENTAL) # @@ -290,6 +309,7 @@ CONFIG_NET_CLS_ROUTE=y # CONFIG_HAMRADIO is not set # CONFIG_IRDA is not set # CONFIG_BT is not set +# CONFIG_IEEE80211 is not set # # Device Drivers @@ -303,6 +323,11 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y CONFIG_FW_LOADER=y # CONFIG_DEBUG_DRIVER is not set +# +# Connector - unified userspace <-> kernelspace linker +# +# CONFIG_CONNECTOR is not set + # # Memory Technology Devices (MTD) # @@ -342,7 +367,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=65536 CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" # CONFIG_CDROM_PKTCDVD is not set # @@ -416,6 +440,7 @@ CONFIG_IDEDMA_AUTO=y # # SCSI device support # +# CONFIG_RAID_ATTRS is not set CONFIG_SCSI=y CONFIG_SCSI_PROC_FS=y @@ -443,6 +468,7 @@ CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SPI_ATTRS=y CONFIG_SCSI_FC_ATTRS=y CONFIG_SCSI_ISCSI_ATTRS=m +# CONFIG_SCSI_SAS_ATTRS is not set # # SCSI low-level drivers @@ -456,6 +482,7 @@ CONFIG_SCSI_ISCSI_ATTRS=m # CONFIG_SCSI_AIC79XX is not set # CONFIG_MEGARAID_NEWGEN is not set # CONFIG_MEGARAID_LEGACY is not set +# CONFIG_MEGARAID_SAS is not set # CONFIG_SCSI_SATA is not set # CONFIG_SCSI_BUSLOGIC is not set # CONFIG_SCSI_DMX3191D is not set @@ -517,6 +544,7 @@ CONFIG_DM_MULTIPATH_EMC=m # CONFIG_FUSION is not set # CONFIG_FUSION_SPI is not set # CONFIG_FUSION_FC is not set +# CONFIG_FUSION_SAS is not set # # IEEE 1394 (FireWire) support @@ -546,6 +574,11 @@ CONFIG_TUN=m # # CONFIG_ARCNET is not set +# +# PHY device support +# +# CONFIG_PHYLIB is not set + # # Ethernet (10 or 100Mbit) # @@ -553,6 +586,7 @@ CONFIG_NET_ETHERNET=y CONFIG_MII=y # CONFIG_HAPPYMEAL is not set # CONFIG_SUNGEM is not set +# CONFIG_CASSINI is not set CONFIG_NET_VENDOR_3COM=y CONFIG_VORTEX=y # CONFIG_TYPHOON is not set @@ -581,6 +615,7 @@ CONFIG_E100=y # CONFIG_EPIC100 is not set # CONFIG_SUNDANCE is not set # CONFIG_VIA_RHINE is not set +# CONFIG_NET_POCKET is not set # # Ethernet (1000 Mbit) @@ -594,6 +629,7 @@ CONFIG_E1000=y # CONFIG_HAMACHI is not set # CONFIG_YELLOWFIN is not set # CONFIG_R8169 is not set +# CONFIG_SIS190 is not set # CONFIG_SKGE is not set # CONFIG_SK98LIN is not set # CONFIG_VIA_VELOCITY is not set @@ -604,6 +640,7 @@ CONFIG_TIGON3=y # # Ethernet (10000 Mbit) # +# CONFIG_CHELSIO_T1 is not set CONFIG_IXGB=m # CONFIG_IXGB_NAPI is not set CONFIG_S2IO=m @@ -789,7 +826,6 @@ CONFIG_I2C_ALGOBIT=y # CONFIG_I2C_I801 is not set # CONFIG_I2C_I810 is not set # CONFIG_I2C_PIIX4 is not set -# CONFIG_I2C_ISA is not set # CONFIG_I2C_NFORCE2 is not set # CONFIG_I2C_PARPORT is not set # CONFIG_I2C_PARPORT_LIGHT is not set @@ -804,7 +840,6 @@ CONFIG_I2C_ALGOBIT=y # CONFIG_I2C_VIAPRO is not set # CONFIG_I2C_VOODOO3 is not set # CONFIG_I2C_PCA_ISA is not set -# CONFIG_I2C_SENSOR is not set # # Miscellaneous I2C Chip support @@ -831,11 +866,16 @@ CONFIG_I2C_ALGOBIT=y # 
Hardware Monitoring support # # CONFIG_HWMON is not set +# CONFIG_HWMON_VID is not set # # Misc devices # +# +# Multimedia Capabilities Port drivers +# + # # Multimedia devices # @@ -885,6 +925,7 @@ CONFIG_FB_RADEON_I2C=y # CONFIG_FB_KYRO is not set # CONFIG_FB_3DFX is not set # CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_CYBLA is not set # CONFIG_FB_TRIDENT is not set # CONFIG_FB_S1D13XXX is not set # CONFIG_FB_VIRTUAL is not set @@ -982,9 +1023,11 @@ CONFIG_USB_HIDDEV=y # CONFIG_USB_MTOUCH is not set # CONFIG_USB_ITMTOUCH is not set # CONFIG_USB_EGALAX is not set +# CONFIG_USB_YEALINK is not set # CONFIG_USB_XPAD is not set # CONFIG_USB_ATI_REMOTE is not set # CONFIG_USB_KEYSPAN_REMOTE is not set +# CONFIG_USB_APPLETOUCH is not set # # USB Imaging devices @@ -1057,7 +1100,8 @@ CONFIG_USB_MON=y # InfiniBand support # CONFIG_INFINIBAND=m -CONFIG_INFINIBAND_USER_VERBS=m +# CONFIG_INFINIBAND_USER_MAD is not set +# CONFIG_INFINIBAND_USER_ACCESS is not set CONFIG_INFINIBAND_MTHCA=m # CONFIG_INFINIBAND_MTHCA_DEBUG is not set CONFIG_INFINIBAND_IPOIB=m @@ -1095,16 +1139,12 @@ CONFIG_JFS_SECURITY=y # CONFIG_JFS_DEBUG is not set # CONFIG_JFS_STATISTICS is not set CONFIG_FS_POSIX_ACL=y - -# -# XFS support -# CONFIG_XFS_FS=m CONFIG_XFS_EXPORT=y -# CONFIG_XFS_RT is not set # CONFIG_XFS_QUOTA is not set CONFIG_XFS_SECURITY=y CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set # CONFIG_MINIX_FS is not set # CONFIG_ROMFS_FS is not set CONFIG_INOTIFY=y @@ -1112,6 +1152,7 @@ CONFIG_INOTIFY=y CONFIG_DNOTIFY=y CONFIG_AUTOFS_FS=m # CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set # # CD-ROM/DVD Filesystems @@ -1139,14 +1180,11 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" CONFIG_PROC_FS=y CONFIG_PROC_KCORE=y CONFIG_SYSFS=y -CONFIG_DEVPTS_FS_XATTR=y -CONFIG_DEVPTS_FS_SECURITY=y CONFIG_TMPFS=y -CONFIG_TMPFS_XATTR=y -CONFIG_TMPFS_SECURITY=y CONFIG_HUGETLBFS=y CONFIG_HUGETLB_PAGE=y CONFIG_RAMFS=y +# CONFIG_RELAYFS_FS is not set # # Miscellaneous filesystems @@ -1197,6 +1235,7 @@ CONFIG_CIFS_POSIX=y # CONFIG_NCP_FS is not set # CONFIG_CODA_FS is not set # CONFIG_AFS_FS is not set +# CONFIG_9P_FS is not set # # Partition Types @@ -1261,6 +1300,7 @@ CONFIG_OPROFILE=y CONFIG_DEBUG_KERNEL=y CONFIG_MAGIC_SYSRQ=y CONFIG_LOG_BUF_SHIFT=17 +CONFIG_DETECT_SOFTLOCKUP=y # CONFIG_SCHEDSTATS is not set # CONFIG_DEBUG_SLAB is not set # CONFIG_DEBUG_SPINLOCK is not set @@ -1320,7 +1360,12 @@ CONFIG_CRYPTO_TEST=m # Library routines # CONFIG_CRC_CCITT=m +# CONFIG_CRC16 is not set CONFIG_CRC32=y CONFIG_LIBCRC32C=m CONFIG_ZLIB_INFLATE=y CONFIG_ZLIB_DEFLATE=m +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m diff --git a/arch/ppc64/defconfig b/arch/ppc64/defconfig index 7cb4750bb7a9..37c157c93cef 100644 --- a/arch/ppc64/defconfig +++ b/arch/ppc64/defconfig @@ -1,17 +1,17 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.13-rc6 -# Mon Aug 8 14:16:54 2005 +# Linux kernel version: 2.6.14-rc4 +# Thu Oct 20 08:28:33 2005 # CONFIG_64BIT=y CONFIG_MMU=y CONFIG_RWSEM_XCHGADD_ALGORITHM=y CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_GENERIC_ISA_DMA=y -CONFIG_HAVE_DEC_LOCK=y CONFIG_EARLY_PRINTK=y CONFIG_COMPAT=y CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y CONFIG_FORCE_MAX_ZONEORDER=13 # @@ -26,6 +26,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32 # General setup # CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y @@ -37,6 +38,7 @@ CONFIG_KOBJECT_UEVENT=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y 
CONFIG_CPUSETS=y +CONFIG_INITRAMFS_SOURCE="" # CONFIG_EMBEDDED is not set CONFIG_KALLSYMS=y # CONFIG_KALLSYMS_ALL is not set @@ -106,6 +108,7 @@ CONFIG_DISCONTIGMEM_MANUAL=y CONFIG_DISCONTIGMEM=y CONFIG_FLAT_NODE_MEM_MAP=y CONFIG_NEED_MULTIPLE_NODES=y +# CONFIG_SPARSEMEM_STATIC is not set CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y CONFIG_NODES_SPAN_OTHER_NODES=y # CONFIG_NUMA is not set @@ -126,19 +129,20 @@ CONFIG_RTAS_FLASH=m CONFIG_SCANLOG=m CONFIG_LPARCFG=y CONFIG_SECCOMP=y +CONFIG_BINFMT_ELF=y +CONFIG_BINFMT_MISC=m +CONFIG_HOTPLUG_CPU=y +CONFIG_PROC_DEVICETREE=y +# CONFIG_CMDLINE_BOOL is not set CONFIG_ISA_DMA_API=y # -# General setup +# Bus Options # CONFIG_PCI=y CONFIG_PCI_DOMAINS=y -CONFIG_BINFMT_ELF=y -CONFIG_BINFMT_MISC=m # CONFIG_PCI_LEGACY_PROC is not set -# CONFIG_PCI_NAMES is not set # CONFIG_PCI_DEBUG is not set -CONFIG_HOTPLUG_CPU=y # # PCCARD (PCMCIA/CardBus) support @@ -154,8 +158,6 @@ CONFIG_HOTPLUG_PCI=m # CONFIG_HOTPLUG_PCI_SHPC is not set CONFIG_HOTPLUG_PCI_RPA=m CONFIG_HOTPLUG_PCI_RPA_DLPAR=m -CONFIG_PROC_DEVICETREE=y -# CONFIG_CMDLINE_BOOL is not set # # Networking @@ -185,8 +187,8 @@ CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m CONFIG_INET_TUNNEL=y -# CONFIG_IP_TCPDIAG is not set -# CONFIG_IP_TCPDIAG_IPV6 is not set +CONFIG_INET_DIAG=y +CONFIG_INET_TCP_DIAG=y # CONFIG_TCP_CONG_ADVANCED is not set CONFIG_TCP_CONG_BIC=y @@ -197,6 +199,9 @@ CONFIG_TCP_CONG_BIC=y # CONFIG_IPV6 is not set CONFIG_NETFILTER=y # CONFIG_NETFILTER_DEBUG is not set +CONFIG_NETFILTER_NETLINK=y +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m # # IP: Netfilter Configuration @@ -204,11 +209,15 @@ CONFIG_NETFILTER=y CONFIG_IP_NF_CONNTRACK=m CONFIG_IP_NF_CT_ACCT=y CONFIG_IP_NF_CONNTRACK_MARK=y +CONFIG_IP_NF_CONNTRACK_EVENTS=y +CONFIG_IP_NF_CONNTRACK_NETLINK=m CONFIG_IP_NF_CT_PROTO_SCTP=m CONFIG_IP_NF_FTP=m CONFIG_IP_NF_IRC=m +# CONFIG_IP_NF_NETBIOS_NS is not set CONFIG_IP_NF_TFTP=m CONFIG_IP_NF_AMANDA=m +# CONFIG_IP_NF_PPTP is not set CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_LIMIT=m @@ -232,14 +241,18 @@ CONFIG_IP_NF_MATCH_OWNER=m CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_REALM=m CONFIG_IP_NF_MATCH_SCTP=m +CONFIG_IP_NF_MATCH_DCCP=m CONFIG_IP_NF_MATCH_COMMENT=m CONFIG_IP_NF_MATCH_CONNMARK=m +CONFIG_IP_NF_MATCH_CONNBYTES=m CONFIG_IP_NF_MATCH_HASHLIMIT=m +CONFIG_IP_NF_MATCH_STRING=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m CONFIG_IP_NF_TARGET_TCPMSS=m +CONFIG_IP_NF_TARGET_NFQUEUE=m CONFIG_IP_NF_NAT=m CONFIG_IP_NF_NAT_NEEDED=y CONFIG_IP_NF_TARGET_MASQUERADE=m @@ -257,6 +270,7 @@ CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_TARGET_DSCP=m CONFIG_IP_NF_TARGET_MARK=m CONFIG_IP_NF_TARGET_CLASSIFY=m +CONFIG_IP_NF_TARGET_TTL=m CONFIG_IP_NF_TARGET_CONNMARK=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_RAW=m @@ -265,6 +279,11 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m +# +# DCCP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_DCCP is not set + # # SCTP Configuration (EXPERIMENTAL) # @@ -292,6 +311,7 @@ CONFIG_NET_CLS_ROUTE=y # CONFIG_HAMRADIO is not set # CONFIG_IRDA is not set # CONFIG_BT is not set +# CONFIG_IEEE80211 is not set # # Device Drivers @@ -305,6 +325,11 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y CONFIG_FW_LOADER=y # CONFIG_DEBUG_DRIVER is not set +# +# Connector - unified userspace <-> kernelspace linker +# +# CONFIG_CONNECTOR is not set + # # Memory Technology Devices (MTD) # @@ -344,7 +369,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=16 
CONFIG_BLK_DEV_RAM_SIZE=65536 CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" # CONFIG_CDROM_PKTCDVD is not set # @@ -422,6 +446,7 @@ CONFIG_IDEDMA_AUTO=y # # SCSI device support # +# CONFIG_RAID_ATTRS is not set CONFIG_SCSI=y CONFIG_SCSI_PROC_FS=y @@ -449,6 +474,7 @@ CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_SPI_ATTRS=y CONFIG_SCSI_FC_ATTRS=y CONFIG_SCSI_ISCSI_ATTRS=m +# CONFIG_SCSI_SAS_ATTRS is not set # # SCSI low-level drivers @@ -462,10 +488,12 @@ CONFIG_SCSI_ISCSI_ATTRS=m # CONFIG_SCSI_AIC79XX is not set # CONFIG_MEGARAID_NEWGEN is not set # CONFIG_MEGARAID_LEGACY is not set +# CONFIG_MEGARAID_SAS is not set CONFIG_SCSI_SATA=y # CONFIG_SCSI_SATA_AHCI is not set CONFIG_SCSI_SATA_SVW=y # CONFIG_SCSI_ATA_PIIX is not set +# CONFIG_SCSI_SATA_MV is not set # CONFIG_SCSI_SATA_NV is not set # CONFIG_SCSI_SATA_PROMISE is not set # CONFIG_SCSI_SATA_QSTOR is not set @@ -535,6 +563,7 @@ CONFIG_DM_MULTIPATH_EMC=m # CONFIG_FUSION is not set # CONFIG_FUSION_SPI is not set # CONFIG_FUSION_FC is not set +# CONFIG_FUSION_SAS is not set # # IEEE 1394 (FireWire) support @@ -578,7 +607,6 @@ CONFIG_IEEE1394_AMDTP=m # CONFIG_ADB_PMU=y CONFIG_PMAC_SMU=y -# CONFIG_PMAC_BACKLIGHT is not set CONFIG_THERM_PM72=y # @@ -595,6 +623,11 @@ CONFIG_TUN=m # # CONFIG_ARCNET is not set +# +# PHY device support +# +# CONFIG_PHYLIB is not set + # # Ethernet (10 or 100Mbit) # @@ -602,6 +635,7 @@ CONFIG_NET_ETHERNET=y CONFIG_MII=y # CONFIG_HAPPYMEAL is not set CONFIG_SUNGEM=y +# CONFIG_CASSINI is not set CONFIG_NET_VENDOR_3COM=y CONFIG_VORTEX=y # CONFIG_TYPHOON is not set @@ -630,6 +664,7 @@ CONFIG_E100=y # CONFIG_EPIC100 is not set # CONFIG_SUNDANCE is not set # CONFIG_VIA_RHINE is not set +# CONFIG_NET_POCKET is not set # # Ethernet (1000 Mbit) @@ -643,16 +678,19 @@ CONFIG_E1000=y # CONFIG_HAMACHI is not set # CONFIG_YELLOWFIN is not set # CONFIG_R8169 is not set +# CONFIG_SIS190 is not set # CONFIG_SKGE is not set # CONFIG_SK98LIN is not set # CONFIG_VIA_VELOCITY is not set CONFIG_TIGON3=y # CONFIG_BNX2 is not set +# CONFIG_SPIDER_NET is not set # CONFIG_MV643XX_ETH is not set # # Ethernet (10000 Mbit) # +# CONFIG_CHELSIO_T1 is not set CONFIG_IXGB=m # CONFIG_IXGB_NAPI is not set # CONFIG_S2IO is not set @@ -838,8 +876,8 @@ CONFIG_I2C_AMD8111=y # CONFIG_I2C_I801 is not set # CONFIG_I2C_I810 is not set # CONFIG_I2C_PIIX4 is not set -# CONFIG_I2C_ISA is not set CONFIG_I2C_KEYWEST=y +CONFIG_I2C_PMAC_SMU=y # CONFIG_I2C_NFORCE2 is not set # CONFIG_I2C_PARPORT is not set # CONFIG_I2C_PARPORT_LIGHT is not set @@ -854,7 +892,6 @@ CONFIG_I2C_KEYWEST=y # CONFIG_I2C_VIAPRO is not set # CONFIG_I2C_VOODOO3 is not set # CONFIG_I2C_PCA_ISA is not set -# CONFIG_I2C_SENSOR is not set # # Miscellaneous I2C Chip support @@ -881,11 +918,16 @@ CONFIG_I2C_KEYWEST=y # Hardware Monitoring support # # CONFIG_HWMON is not set +# CONFIG_HWMON_VID is not set # # Misc devices # +# +# Multimedia Capabilities Port drivers +# + # # Multimedia devices # @@ -939,6 +981,7 @@ CONFIG_FB_RADEON_I2C=y # CONFIG_FB_KYRO is not set # CONFIG_FB_3DFX is not set # CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_CYBLA is not set # CONFIG_FB_TRIDENT is not set # CONFIG_FB_S1D13XXX is not set # CONFIG_FB_VIRTUAL is not set @@ -1020,6 +1063,7 @@ CONFIG_USB_STORAGE=m # CONFIG_USB_STORAGE_SDDR09 is not set # CONFIG_USB_STORAGE_SDDR55 is not set # CONFIG_USB_STORAGE_JUMPSHOT is not set +# CONFIG_USB_STORAGE_ONETOUCH is not set # # USB Input Devices @@ -1036,9 +1080,11 @@ CONFIG_USB_HIDDEV=y # CONFIG_USB_MTOUCH is not set # CONFIG_USB_ITMTOUCH is not set # CONFIG_USB_EGALAX is not 
set +# CONFIG_USB_YEALINK is not set # CONFIG_USB_XPAD is not set # CONFIG_USB_ATI_REMOTE is not set # CONFIG_USB_KEYSPAN_REMOTE is not set +# CONFIG_USB_APPLETOUCH is not set # # USB Imaging devices @@ -1111,7 +1157,8 @@ CONFIG_USB_PEGASUS=y # InfiniBand support # CONFIG_INFINIBAND=m -CONFIG_INFINIBAND_USER_VERBS=m +# CONFIG_INFINIBAND_USER_MAD is not set +# CONFIG_INFINIBAND_USER_ACCESS is not set CONFIG_INFINIBAND_MTHCA=m # CONFIG_INFINIBAND_MTHCA_DEBUG is not set CONFIG_INFINIBAND_IPOIB=m @@ -1149,16 +1196,12 @@ CONFIG_JFS_SECURITY=y # CONFIG_JFS_DEBUG is not set # CONFIG_JFS_STATISTICS is not set CONFIG_FS_POSIX_ACL=y - -# -# XFS support -# CONFIG_XFS_FS=m CONFIG_XFS_EXPORT=y -# CONFIG_XFS_RT is not set # CONFIG_XFS_QUOTA is not set CONFIG_XFS_SECURITY=y CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set # CONFIG_MINIX_FS is not set # CONFIG_ROMFS_FS is not set CONFIG_INOTIFY=y @@ -1166,6 +1209,7 @@ CONFIG_INOTIFY=y CONFIG_DNOTIFY=y CONFIG_AUTOFS_FS=y # CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set # # CD-ROM/DVD Filesystems @@ -1192,14 +1236,11 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" CONFIG_PROC_FS=y CONFIG_PROC_KCORE=y CONFIG_SYSFS=y -CONFIG_DEVPTS_FS_XATTR=y -CONFIG_DEVPTS_FS_SECURITY=y CONFIG_TMPFS=y -CONFIG_TMPFS_XATTR=y -CONFIG_TMPFS_SECURITY=y CONFIG_HUGETLBFS=y CONFIG_HUGETLB_PAGE=y CONFIG_RAMFS=y +# CONFIG_RELAYFS_FS is not set # # Miscellaneous filesystems @@ -1250,6 +1291,7 @@ CONFIG_CIFS_POSIX=y # CONFIG_NCP_FS is not set # CONFIG_CODA_FS is not set # CONFIG_AFS_FS is not set +# CONFIG_9P_FS is not set # # Partition Types @@ -1328,6 +1370,7 @@ CONFIG_OPROFILE=y CONFIG_DEBUG_KERNEL=y CONFIG_MAGIC_SYSRQ=y CONFIG_LOG_BUF_SHIFT=17 +CONFIG_DETECT_SOFTLOCKUP=y # CONFIG_SCHEDSTATS is not set # CONFIG_DEBUG_SLAB is not set # CONFIG_DEBUG_SPINLOCK is not set @@ -1387,7 +1430,12 @@ CONFIG_CRYPTO_TEST=m # Library routines # CONFIG_CRC_CCITT=m +# CONFIG_CRC16 is not set CONFIG_CRC32=y CONFIG_LIBCRC32C=m CONFIG_ZLIB_INFLATE=y CONFIG_ZLIB_DEFLATE=m +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m diff --git a/arch/ppc64/kernel/bpa_iommu.c b/arch/ppc64/kernel/bpa_iommu.c index f33a7bccb0d7..5f2460090e03 100644 --- a/arch/ppc64/kernel/bpa_iommu.c +++ b/arch/ppc64/kernel/bpa_iommu.c @@ -99,7 +99,11 @@ get_iost_entry(unsigned long iopt_base, unsigned long io_address, unsigned page_ break; default: /* not a known compile time constant */ - BUILD_BUG_ON(1); + { + /* BUILD_BUG_ON() is not usable here */ + extern void __get_iost_entry_bad_page_size(void); + __get_iost_entry_bad_page_size(); + } break; } @@ -306,7 +310,7 @@ static void bpa_map_iommu(void) static void *bpa_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, unsigned int __nocast flag) + dma_addr_t *dma_handle, gfp_t flag) { void *ret; diff --git a/arch/ppc64/kernel/dma.c b/arch/ppc64/kernel/dma.c index 4da8e31b2b61..7c3419656ccc 100644 --- a/arch/ppc64/kernel/dma.c +++ b/arch/ppc64/kernel/dma.c @@ -53,7 +53,7 @@ int dma_set_mask(struct device *dev, u64 dma_mask) EXPORT_SYMBOL(dma_set_mask); void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, unsigned int __nocast flag) + dma_addr_t *dma_handle, gfp_t flag) { struct dma_mapping_ops *dma_ops = get_dma_ops(dev); diff --git a/arch/ppc64/kernel/iSeries_htab.c b/arch/ppc64/kernel/iSeries_htab.c index 2192055a90a0..073b76661747 100644 --- a/arch/ppc64/kernel/iSeries_htab.c +++ b/arch/ppc64/kernel/iSeries_htab.c @@ -66,7 +66,7 @@ static long iSeries_hpte_insert(unsigned 
long hpte_group, unsigned long va, } if (slot < 0) { /* MSB set means secondary group */ - vflags |= HPTE_V_VALID; + vflags |= HPTE_V_SECONDARY; secondary = 1; slot &= 0x7fffffffffffffff; } diff --git a/arch/ppc64/kernel/iommu.c b/arch/ppc64/kernel/iommu.c index 9032b6bfe036..4d9b4388918b 100644 --- a/arch/ppc64/kernel/iommu.c +++ b/arch/ppc64/kernel/iommu.c @@ -519,7 +519,7 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle, * to the dma address (mapping) of the first page. */ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, - dma_addr_t *dma_handle, unsigned int __nocast flag) + dma_addr_t *dma_handle, gfp_t flag) { void *ret = NULL; dma_addr_t mapping; diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c index 7e80d49c589a..9c6facc24f70 100644 --- a/arch/ppc64/kernel/kprobes.c +++ b/arch/ppc64/kernel/kprobes.c @@ -59,9 +59,9 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) /* insn must be on a special executable page on ppc64 */ if (!ret) { - up(&kprobe_mutex); - p->ainsn.insn = get_insn_slot(); down(&kprobe_mutex); + p->ainsn.insn = get_insn_slot(); + up(&kprobe_mutex); if (!p->ainsn.insn) ret = -ENOMEM; } @@ -90,9 +90,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { - up(&kprobe_mutex); - free_insn_slot(p->ainsn.insn); down(&kprobe_mutex); + free_insn_slot(p->ainsn.insn); + up(&kprobe_mutex); } static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) diff --git a/arch/ppc64/kernel/machine_kexec.c b/arch/ppc64/kernel/machine_kexec.c index 4775f12a013c..bf7cc4f8210f 100644 --- a/arch/ppc64/kernel/machine_kexec.c +++ b/arch/ppc64/kernel/machine_kexec.c @@ -205,6 +205,7 @@ static void kexec_prepare_cpus(void) continue; while (paca[i].hw_cpu_id != -1) { + barrier(); if (!cpu_possible(i)) { printk("kexec: cpu %d hw_cpu_id %d is not" " possible, ignoring\n", diff --git a/arch/ppc64/kernel/module.c b/arch/ppc64/kernel/module.c index c683bf88e690..928b8581fcb0 100644 --- a/arch/ppc64/kernel/module.c +++ b/arch/ppc64/kernel/module.c @@ -341,6 +341,19 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, *(unsigned long *)location = my_r2(sechdrs, me); break; + case R_PPC64_TOC16: + /* Subtact TOC pointer */ + value -= my_r2(sechdrs, me); + if (value + 0x8000 > 0xffff) { + printk("%s: bad TOC16 relocation (%lu)\n", + me->name, value); + return -ENOEXEC; + } + *((uint16_t *) location) + = (*((uint16_t *) location) & ~0xffff) + | (value & 0xffff); + break; + case R_PPC64_TOC16_DS: /* Subtact TOC pointer */ value -= my_r2(sechdrs, me); diff --git a/arch/ppc64/kernel/mpic.c b/arch/ppc64/kernel/mpic.c index cc262a05ddb4..5f5bc73754d9 100644 --- a/arch/ppc64/kernel/mpic.c +++ b/arch/ppc64/kernel/mpic.c @@ -506,8 +506,8 @@ struct mpic * __init mpic_alloc(unsigned long phys_addr, mpic->senses_count = senses_count; /* Map the global registers */ - mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000); - mpic->tmregs = mpic->gregs + (MPIC_TIMER_BASE >> 2); + mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x2000); + mpic->tmregs = mpic->gregs + ((MPIC_TIMER_BASE - MPIC_GREG_BASE) >> 2); BUG_ON(mpic->gregs == NULL); /* Reset */ diff --git a/arch/ppc64/kernel/of_device.c b/arch/ppc64/kernel/of_device.c index da580812ddfe..9f200f0f2ad5 100644 --- a/arch/ppc64/kernel/of_device.c +++ b/arch/ppc64/kernel/of_device.c @@ -233,7 +233,9 @@ void of_device_unregister(struct of_device *ofdev) device_unregister(&ofdev->dev); } -struct of_device* 
of_platform_device_create(struct device_node *np, const char *bus_id) +struct of_device* of_platform_device_create(struct device_node *np, + const char *bus_id, + struct device *parent) { struct of_device *dev; @@ -245,7 +247,7 @@ struct of_device* of_platform_device_create(struct device_node *np, const char * dev->node = np; dev->dma_mask = 0xffffffffUL; dev->dev.dma_mask = &dev->dma_mask; - dev->dev.parent = NULL; + dev->dev.parent = parent; dev->dev.bus = &of_platform_bus_type; dev->dev.release = of_release_dev; @@ -259,6 +261,7 @@ struct of_device* of_platform_device_create(struct device_node *np, const char * return dev; } + EXPORT_SYMBOL(of_match_device); EXPORT_SYMBOL(of_platform_bus_type); EXPORT_SYMBOL(of_register_driver); diff --git a/arch/ppc64/kernel/pSeries_iommu.c b/arch/ppc64/kernel/pSeries_iommu.c index 8c6313e7e145..d17f0108a032 100644 --- a/arch/ppc64/kernel/pSeries_iommu.c +++ b/arch/ppc64/kernel/pSeries_iommu.c @@ -364,7 +364,8 @@ static void iommu_bus_setup_pSeries(struct pci_bus *bus) while (pci->phb->dma_window_size * children > 0x80000000ul) pci->phb->dma_window_size >>= 1; - DBG("No ISA/IDE, window size is %x\n", pci->phb->dma_window_size); + DBG("No ISA/IDE, window size is 0x%lx\n", + pci->phb->dma_window_size); pci->phb->dma_window_base_cur = 0; return; @@ -388,7 +389,7 @@ static void iommu_bus_setup_pSeries(struct pci_bus *bus) while (pci->phb->dma_window_size * children > 0x70000000ul) pci->phb->dma_window_size >>= 1; - DBG("ISA/IDE, window size is %x\n", pci->phb->dma_window_size); + DBG("ISA/IDE, window size is 0x%lx\n", pci->phb->dma_window_size); } @@ -442,7 +443,7 @@ static void iommu_dev_setup_pSeries(struct pci_dev *dev) struct device_node *dn, *mydn; struct iommu_table *tbl; - DBG("iommu_dev_setup_pSeries, dev %p (%s)\n", dev, dev->pretty_name); + DBG("iommu_dev_setup_pSeries, dev %p (%s)\n", dev, pci_name(dev)); mydn = dn = pci_device_to_OF_node(dev); @@ -469,7 +470,7 @@ static void iommu_dev_setup_pSeries(struct pci_dev *dev) if (dn && dn->data) { PCI_DN(mydn)->iommu_table = PCI_DN(dn)->iommu_table; } else { - DBG("iommu_dev_setup_pSeries, dev %p (%s) has no iommu table\n", dev, dev->pretty_name); + DBG("iommu_dev_setup_pSeries, dev %p (%s) has no iommu table\n", dev, pci_name(dev)); } } @@ -503,7 +504,7 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev) int *dma_window = NULL; struct pci_dn *pci; - DBG("iommu_dev_setup_pSeriesLP, dev %p (%s)\n", dev, dev->pretty_name); + DBG("iommu_dev_setup_pSeriesLP, dev %p (%s)\n", dev, pci_name(dev)); /* dev setup for LPAR is a little tricky, since the device tree might * contain the dma-window properties per-device and not neccesarily @@ -525,9 +526,8 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev) * slots on POWER4 machines. 
*/ if (dma_window == NULL || pdn->parent == NULL) { - /* Fall back to regular (non-LPAR) dev setup */ - DBG("No dma window for device, falling back to regular setup\n"); - iommu_dev_setup_pSeries(dev); + DBG("No dma window for device, linking to parent\n"); + PCI_DN(dn)->iommu_table = PCI_DN(pdn)->iommu_table; return; } else { DBG("Found DMA window, allocating table\n"); diff --git a/arch/ppc64/kernel/pSeries_pci.c b/arch/ppc64/kernel/pSeries_pci.c index 1f5f141fb7a1..928f8febdb3b 100644 --- a/arch/ppc64/kernel/pSeries_pci.c +++ b/arch/ppc64/kernel/pSeries_pci.c @@ -32,7 +32,7 @@ #include "pci.h" -static int __initdata s7a_workaround = -1; +static int __devinitdata s7a_workaround = -1; #if 0 void pcibios_name_device(struct pci_dev *dev) @@ -60,7 +60,7 @@ void pcibios_name_device(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_name_device); #endif -static void __init check_s7a(void) +static void __devinit check_s7a(void) { struct device_node *root; char *model; diff --git a/arch/ppc64/kernel/pci_direct_iommu.c b/arch/ppc64/kernel/pci_direct_iommu.c index b8f7f58824f4..54055c81017a 100644 --- a/arch/ppc64/kernel/pci_direct_iommu.c +++ b/arch/ppc64/kernel/pci_direct_iommu.c @@ -31,7 +31,7 @@ #include "pci.h" static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, unsigned int __nocast flag) + dma_addr_t *dma_handle, gfp_t flag) { void *ret; diff --git a/arch/ppc64/kernel/pci_iommu.c b/arch/ppc64/kernel/pci_iommu.c index 14647e09c9cd..d9e33b7d4203 100644 --- a/arch/ppc64/kernel/pci_iommu.c +++ b/arch/ppc64/kernel/pci_iommu.c @@ -76,7 +76,7 @@ static inline struct iommu_table *devnode_table(struct device *dev) * to the dma address (mapping) of the first page. */ static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, unsigned int __nocast flag) + dma_addr_t *dma_handle, gfp_t flag) { return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle, flag); diff --git a/arch/ppc64/kernel/pmac_setup.c b/arch/ppc64/kernel/pmac_setup.c index 325426c7bed0..fa8121d53b89 100644 --- a/arch/ppc64/kernel/pmac_setup.c +++ b/arch/ppc64/kernel/pmac_setup.c @@ -115,7 +115,7 @@ static void __pmac pmac_show_cpuinfo(struct seq_file *m) /* find motherboard type */ seq_printf(m, "machine\t\t: "); - np = find_devices("device-tree"); + np = of_find_node_by_path("/"); if (np != NULL) { pp = (char *) get_property(np, "model", NULL); if (pp != NULL) @@ -133,6 +133,7 @@ static void __pmac pmac_show_cpuinfo(struct seq_file *m) } seq_printf(m, "\n"); } + of_node_put(np); } else seq_printf(m, "PowerMac\n"); @@ -434,15 +435,23 @@ static int pmac_check_legacy_ioport(unsigned int baseport) static int __init pmac_declare_of_platform_devices(void) { - struct device_node *np; + struct device_node *np, *npp; - np = find_devices("u3"); - if (np) { - for (np = np->child; np != NULL; np = np->sibling) + npp = of_find_node_by_name(NULL, "u3"); + if (npp) { + for (np = NULL; (np = of_get_next_child(npp, np)) != NULL;) { if (strncmp(np->name, "i2c", 3) == 0) { - of_platform_device_create(np, "u3-i2c"); + of_platform_device_create(np, "u3-i2c", NULL); + of_node_put(np); break; } + } + of_node_put(npp); + } + npp = of_find_node_by_type(NULL, "smu"); + if (npp) { + of_platform_device_create(npp, "smu", NULL); + of_node_put(npp); } return 0; diff --git a/arch/ppc64/kernel/pmac_time.c b/arch/ppc64/kernel/pmac_time.c index 3059edb09cc8..41bbb8c59697 100644 --- a/arch/ppc64/kernel/pmac_time.c +++ 
b/arch/ppc64/kernel/pmac_time.c @@ -84,7 +84,7 @@ void __pmac pmac_get_rtc_time(struct rtc_time *tm) #ifdef CONFIG_PMAC_SMU case SYS_CTRLER_SMU: - smu_get_rtc_time(tm); + smu_get_rtc_time(tm, 1); break; #endif /* CONFIG_PMAC_SMU */ default: @@ -128,7 +128,7 @@ int __pmac pmac_set_rtc_time(struct rtc_time *tm) #ifdef CONFIG_PMAC_SMU case SYS_CTRLER_SMU: - return smu_set_rtc_time(tm); + return smu_set_rtc_time(tm, 1); #endif /* CONFIG_PMAC_SMU */ default: return -ENODEV; diff --git a/arch/ppc64/kernel/ptrace.c b/arch/ppc64/kernel/ptrace.c index 85ed3188a91d..b1c044ca5756 100644 --- a/arch/ppc64/kernel/ptrace.c +++ b/arch/ppc64/kernel/ptrace.c @@ -219,6 +219,7 @@ int sys_ptrace(long request, long pid, long addr, long data) case PTRACE_SET_DEBUGREG: ret = ptrace_set_debugreg(child, addr, data); + break; case PTRACE_DETACH: ret = ptrace_detach(child, data); diff --git a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c index 9939c206afa4..b56c6a324e17 100644 --- a/arch/ppc64/kernel/time.c +++ b/arch/ppc64/kernel/time.c @@ -870,7 +870,7 @@ void div128_by_32( unsigned long dividend_high, unsigned long dividend_low, rb = ((ra + b) - (x * divisor)) << 32; y = (rb + c)/divisor; - rc = ((rb + b) - (y * divisor)) << 32; + rc = ((rb + c) - (y * divisor)) << 32; z = (rc + d)/divisor; diff --git a/arch/ppc64/kernel/vdso32/gettimeofday.S b/arch/ppc64/kernel/vdso32/gettimeofday.S index 07f1c1c650c8..e243c1d24af7 100644 --- a/arch/ppc64/kernel/vdso32/gettimeofday.S +++ b/arch/ppc64/kernel/vdso32/gettimeofday.S @@ -109,7 +109,7 @@ __do_get_xsec: lwz r6,(CFG_TB_TO_XS+4)(r9) mulhwu r4,r7,r5 mulhwu r6,r7,r6 - mullw r6,r7,r5 + mullw r0,r7,r5 addc r6,r6,r0 /* At this point, we have the scaled xsec value in r4 + XER:CA diff --git a/arch/ppc64/kernel/vio.c b/arch/ppc64/kernel/vio.c index c90e1dd875ce..0e555b7a6587 100644 --- a/arch/ppc64/kernel/vio.c +++ b/arch/ppc64/kernel/vio.c @@ -218,7 +218,7 @@ static void vio_unmap_sg(struct device *dev, struct scatterlist *sglist, } static void *vio_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, unsigned int __nocast flag) + dma_addr_t *dma_handle, gfp_t flag) { return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size, dma_handle, flag); diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c index 7626bb59954d..bfd385b7713c 100644 --- a/arch/ppc64/mm/hash_native.c +++ b/arch/ppc64/mm/hash_native.c @@ -343,9 +343,7 @@ static void native_flush_hash_range(unsigned long context, hpte_t *hptep; unsigned long hpte_v; struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); - - /* XXX fix for large ptes */ - unsigned long large = 0; + unsigned long large = batch->large; local_irq_save(flags); @@ -407,7 +405,7 @@ static void native_flush_hash_range(unsigned long context, asm volatile("ptesync":::"memory"); for (i = 0; i < j; i++) - __tlbie(batch->vaddr[i], 0); + __tlbie(batch->vaddr[i], large); asm volatile("eieio; tlbsync; ptesync":::"memory"); diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c index 338771ec70d7..0ea0994ed974 100644 --- a/arch/ppc64/mm/hugetlbpage.c +++ b/arch/ppc64/mm/hugetlbpage.c @@ -710,10 +710,13 @@ repeat: hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; slot = ppc_md.hpte_insert(hpte_group, va, prpn, - HPTE_V_LARGE, rflags); + HPTE_V_LARGE | + HPTE_V_SECONDARY, + rflags); if (slot == -1) { if (mftb() & 0x1) - hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL; + hpte_group = ((hash & htab_hash_mask) * + HPTES_PER_GROUP)&~0x7UL; 
ppc_md.hpte_remove(hpte_group); goto repeat; diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c index c2157c9c3acb..be64b157afce 100644 --- a/arch/ppc64/mm/init.c +++ b/arch/ppc64/mm/init.c @@ -799,8 +799,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea, if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp)) local = 1; - __hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep, - 0x300, local); + __hash_page(ea, 0, vsid, ptep, 0x300, local); local_irq_restore(flags); } diff --git a/arch/ppc64/mm/tlb.c b/arch/ppc64/mm/tlb.c index d8a6593a13f0..21fbffb23a43 100644 --- a/arch/ppc64/mm/tlb.c +++ b/arch/ppc64/mm/tlb.c @@ -143,7 +143,8 @@ void hpte_update(struct mm_struct *mm, unsigned long addr, * up scanning and resetting referenced bits then our batch context * will change mid stream. */ - if (unlikely(i != 0 && context != batch->context)) { + if (i != 0 && (context != batch->context || + batch->large != pte_huge(pte))) { flush_tlb_pending(); i = 0; } @@ -151,6 +152,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr, if (i == 0) { batch->context = context; batch->mm = mm; + batch->large = pte_huge(pte); } batch->pte[i] = __pte(pte); batch->addr[i] = addr; diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 7358cdb8441f..4ff6808456ea 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -143,7 +143,7 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) break; case __SI_FAULT >> 16: err |= __get_user(tmp, &from->si_addr); - to->si_addr = (void *)(u64) (tmp & PSW32_ADDR_INSN); + to->si_addr = (void __user *)(u64) (tmp & PSW32_ADDR_INSN); break; case __SI_POLL >> 16: err |= __get_user(to->si_band, &from->si_band); @@ -338,7 +338,7 @@ sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss, err |= __get_user(kss.ss_flags, &uss->ss_flags); if (err) return -EFAULT; - kss.ss_sp = (void *) ss_sp; + kss.ss_sp = (void __user *) ss_sp; } set_fs (KERNEL_DS); @@ -461,7 +461,7 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs) goto badframe; err = __get_user(ss_sp, &frame->uc.uc_stack.ss_sp); - st.ss_sp = (void *) A((unsigned long)ss_sp); + st.ss_sp = compat_ptr(ss_sp); err |= __get_user(st.ss_size, &frame->uc.uc_stack.ss_size); err |= __get_user(st.ss_flags, &frame->uc.uc_stack.ss_flags); if (err) diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 6a3f5b7473a9..6e0110d71191 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -376,8 +376,8 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, /* Create the ucontext. 
*/ err |= __put_user(0, &frame->uc.uc_flags); - err |= __put_user(0, &frame->uc.uc_link); - err |= __put_user((void *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); + err |= __put_user(NULL, &frame->uc.uc_link); + err |= __put_user((void __user *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); err |= __put_user(sas_ss_flags(regs->gprs[15]), &frame->uc.uc_stack.ss_flags); err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); diff --git a/arch/sh/boards/renesas/rts7751r2d/mach.c b/arch/sh/boards/renesas/rts7751r2d/mach.c index 1efc18e786d5..610740512d56 100644 --- a/arch/sh/boards/renesas/rts7751r2d/mach.c +++ b/arch/sh/boards/renesas/rts7751r2d/mach.c @@ -23,7 +23,7 @@ extern void init_rts7751r2d_IRQ(void); extern void *rts7751r2d_ioremap(unsigned long, unsigned long); extern int rts7751r2d_irq_demux(int irq); -extern void *voyagergx_consistent_alloc(struct device *, size_t, dma_addr_t *, int); +extern void *voyagergx_consistent_alloc(struct device *, size_t, dma_addr_t *, gfp_t); extern int voyagergx_consistent_free(struct device *, size_t, void *, dma_addr_t); /* diff --git a/arch/sh/cchips/voyagergx/consistent.c b/arch/sh/cchips/voyagergx/consistent.c index 5b92585a38d2..3d9a02c093a3 100644 --- a/arch/sh/cchips/voyagergx/consistent.c +++ b/arch/sh/cchips/voyagergx/consistent.c @@ -31,7 +31,7 @@ static LIST_HEAD(voya_alloc_list); #define OHCI_SRAM_SIZE 0x10000 void *voyagergx_consistent_alloc(struct device *dev, size_t size, - dma_addr_t *handle, int flag) + dma_addr_t *handle, gfp_t flag) { struct list_head *list = &voya_alloc_list; struct voya_alloc_entry *entry; diff --git a/arch/sh/drivers/pci/dma-dreamcast.c b/arch/sh/drivers/pci/dma-dreamcast.c index 83de7ef4e7df..e12418bb1fa5 100644 --- a/arch/sh/drivers/pci/dma-dreamcast.c +++ b/arch/sh/drivers/pci/dma-dreamcast.c @@ -33,7 +33,7 @@ static int gapspci_dma_used = 0; void *dreamcast_consistent_alloc(struct device *dev, size_t size, - dma_addr_t *dma_handle, int flag) + dma_addr_t *dma_handle, gfp_t flag) { unsigned long buf; diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index 56a39d69e080..5ecefc02896a 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -39,6 +40,8 @@ struct sh_cpuinfo cpu_data[NR_CPUS]; extern void per_cpu_trap_init(void); cpumask_t cpu_possible_map; +EXPORT_SYMBOL(cpu_possible_map); + cpumask_t cpu_online_map; static atomic_t cpus_booted = ATOMIC_INIT(0); diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index 1f7af0c73cf4..df3a9e452cc5 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c @@ -11,7 +11,7 @@ #include #include -void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle) +void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle) { struct page *page, *end, *free; void *ret; diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index aba05394d30a..6537445dac0e 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -25,62 +25,6 @@ source "init/Kconfig" menu "General machine setup" -config VT - bool - select INPUT - default y - ---help--- - If you say Y here, you will get support for terminal devices with - display and keyboard devices. These are called "virtual" because you - can run several virtual terminals (also called virtual consoles) on - one physical terminal. 
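The int/__nocast-to-gfp_t conversions above (vio.c, voyagergx, dreamcast, consistent.c) all make the same point: allocation flags deserve their own type so that type-checking tools can warn when a caller hands an arbitrary integer to an allocator. A hypothetical wrapper, only to illustrate how the typed parameter propagates unchanged:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative only: the gfp_t flag flows straight through to the
 * underlying allocator (GFP_KERNEL, GFP_ATOMIC, ...), and a plain int
 * passed here would now stand out to sparse-style checkers.
 */
static void *alloc_coherent_buffer(struct device *dev, size_t size,
                                   dma_addr_t *handle, gfp_t gfp)
{
        return dma_alloc_coherent(dev, size, handle, gfp);
}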
This is rather useful, for example one - virtual terminal can collect system messages and warnings, another - one can be used for a text-mode user session, and a third could run - an X session, all in parallel. Switching between virtual terminals - is done with certain key combinations, usually Alt-. - - The setterm command ("man setterm") can be used to change the - properties (such as colors or beeping) of a virtual terminal. The - man page console_codes(4) ("man console_codes") contains the special - character sequences that can be used to change those properties - directly. The fonts used on virtual terminals can be changed with - the setfont ("man setfont") command and the key bindings are defined - with the loadkeys ("man loadkeys") command. - - You need at least one virtual terminal device in order to make use - of your keyboard and monitor. Therefore, only people configuring an - embedded system would want to say N here in order to save some - memory; the only way to log into such a system is then via a serial - or network connection. - - If unsure, say Y, or else you won't be able to do much with your new - shiny Linux system :-) - -config VT_CONSOLE - bool - default y - ---help--- - The system console is the device which receives all kernel messages - and warnings and which allows logins in single user mode. If you - answer Y here, a virtual terminal (the device used to interact with - a physical terminal) can be used as system console. This is the most - common mode of operations, so you should say Y here unless you want - the kernel messages be output only to a serial port (in which case - you should say Y to "Console on serial port", below). - - If you do say Y here, by default the currently visible virtual - terminal (/dev/tty0) will be used as system console. You can change - that with a kernel command line option such as "console=tty3" which - would use the third virtual terminal as system console. (Try "man - bootparam" or see the documentation of your boot loader (lilo or - loadlin) about how to pass options to the kernel at boot time.) - - If unsure, say Y. - -config HW_CONSOLE - bool - default y - config SMP bool "Symmetric multi-processing support (does not work on sun4/sun4c)" depends on BROKEN diff --git a/arch/sparc/kernel/setup.c b/arch/sparc/kernel/setup.c index 53c192a4982f..3509e4305532 100644 --- a/arch/sparc/kernel/setup.c +++ b/arch/sparc/kernel/setup.c @@ -249,8 +249,6 @@ struct tt_entry *sparc_ttable; struct pt_regs fake_swapper_regs; -extern void paging_init(void); - void __init setup_arch(char **cmdline_p) { int i; diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c index bc015e980341..279a62627c10 100644 --- a/arch/sparc/kernel/time.c +++ b/arch/sparc/kernel/time.c @@ -457,7 +457,7 @@ void __init time_init(void) sbus_time_init(); } -extern __inline__ unsigned long do_gettimeoffset(void) +static inline unsigned long do_gettimeoffset(void) { return (*master_l10_counter >> 10) & 0x1fffff; } diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index c89a803cbc20..c664b962987c 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -260,7 +260,7 @@ static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot) { return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); } /* to find an entry in a top-level page table... 
*/ -extern inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address) +static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address) { return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); } /* Find an entry in the second-level page table.. */ diff --git a/arch/sparc64/Kconfig.debug b/arch/sparc64/Kconfig.debug index af0e9411b83e..fa06ea04837b 100644 --- a/arch/sparc64/Kconfig.debug +++ b/arch/sparc64/Kconfig.debug @@ -33,6 +33,14 @@ config DEBUG_BOOTMEM depends on DEBUG_KERNEL bool "Debug BOOTMEM initialization" +config DEBUG_PAGEALLOC + bool "Page alloc debugging" + depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND + help + Unmap pages from the kernel linear mapping after free_pages(). + This results in a large slowdown, but helps to find certain types + of memory corruptions. + config MCOUNT bool depends on STACK_DEBUG diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c index 48756958116b..77ef5df4e5a7 100644 --- a/arch/sparc64/kernel/cpu.c +++ b/arch/sparc64/kernel/cpu.c @@ -39,6 +39,8 @@ struct cpu_fp_info linux_sparc_fpu[] = { { 0x3e, 0x15, 0, "UltraSparc III+ integrated FPU"}, { 0x3e, 0x16, 0, "UltraSparc IIIi integrated FPU"}, { 0x3e, 0x18, 0, "UltraSparc IV integrated FPU"}, + { 0x3e, 0x19, 0, "UltraSparc IV+ integrated FPU"}, + { 0x3e, 0x22, 0, "UltraSparc IIIi+ integrated FPU"}, }; #define NSPARCFPU (sizeof(linux_sparc_fpu)/sizeof(struct cpu_fp_info)) @@ -53,6 +55,8 @@ struct cpu_iu_info linux_sparc_chips[] = { { 0x3e, 0x15, "TI UltraSparc III+ (Cheetah+)"}, { 0x3e, 0x16, "TI UltraSparc IIIi (Jalapeno)"}, { 0x3e, 0x18, "TI UltraSparc IV (Jaguar)"}, + { 0x3e, 0x19, "TI UltraSparc IV+ (Panther)"}, + { 0x3e, 0x22, "TI UltraSparc IIIi+ (Serrano)"}, }; #define NSPARCCHIPS (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info)) diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c index d710274e516b..df9a1ca8fd77 100644 --- a/arch/sparc64/kernel/devices.c +++ b/arch/sparc64/kernel/devices.c @@ -135,6 +135,28 @@ void __init device_scan(void) cpu_data(0).clock_tick = prom_getintdefault(cpu_node, "clock-frequency", 0); + cpu_data(0).dcache_size = prom_getintdefault(cpu_node, + "dcache-size", + 16 * 1024); + cpu_data(0).dcache_line_size = + prom_getintdefault(cpu_node, "dcache-line-size", 32); + cpu_data(0).icache_size = prom_getintdefault(cpu_node, + "icache-size", + 16 * 1024); + cpu_data(0).icache_line_size = + prom_getintdefault(cpu_node, "icache-line-size", 32); + cpu_data(0).ecache_size = prom_getintdefault(cpu_node, + "ecache-size", + 4 * 1024 * 1024); + cpu_data(0).ecache_line_size = + prom_getintdefault(cpu_node, "ecache-line-size", 64); + printk("CPU[0]: Caches " + "D[sz(%d):line_sz(%d)] " + "I[sz(%d):line_sz(%d)] " + "E[sz(%d):line_sz(%d)]\n", + cpu_data(0).dcache_size, cpu_data(0).dcache_line_size, + cpu_data(0).icache_size, cpu_data(0).icache_line_size, + cpu_data(0).ecache_size, cpu_data(0).ecache_line_size); } #endif diff --git a/arch/sparc64/kernel/dtlb_backend.S b/arch/sparc64/kernel/dtlb_backend.S index 538522848ad4..acc889a7f9c1 100644 --- a/arch/sparc64/kernel/dtlb_backend.S +++ b/arch/sparc64/kernel/dtlb_backend.S @@ -9,17 +9,7 @@ #include #include -#if PAGE_SHIFT == 13 -#define SZ_BITS _PAGE_SZ8K -#elif PAGE_SHIFT == 16 -#define SZ_BITS _PAGE_SZ64K -#elif PAGE_SHIFT == 19 -#define SZ_BITS _PAGE_SZ512K -#elif PAGE_SHIFT == 22 -#define SZ_BITS _PAGE_SZ4MB -#endif - -#define VALID_SZ_BITS (_PAGE_VALID | SZ_BITS) +#define VALID_SZ_BITS (_PAGE_VALID | _PAGE_SZBITS) #define VPTE_BITS (_PAGE_CP | _PAGE_CV | 
_PAGE_P ) #define VPTE_SHIFT (PAGE_SHIFT - 3) @@ -163,7 +153,6 @@ sparc64_vpte_continue: stxa %g4, [%g1 + %g1] ASI_DMMU ! Restore previous TAG_ACCESS retry ! Load PTE once again -#undef SZ_BITS #undef VALID_SZ_BITS #undef VPTE_SHIFT #undef VPTE_BITS diff --git a/arch/sparc64/kernel/dtlb_base.S b/arch/sparc64/kernel/dtlb_base.S index ded2fed23fcc..6528786840c0 100644 --- a/arch/sparc64/kernel/dtlb_base.S +++ b/arch/sparc64/kernel/dtlb_base.S @@ -53,39 +53,36 @@ * be guaranteed to be 0 ... mmu_context.h does guarantee this * by only using 10 bits in the hwcontext value. */ -#define CREATE_VPTE_OFFSET1(r1, r2) +#define CREATE_VPTE_OFFSET1(r1, r2) nop #define CREATE_VPTE_OFFSET2(r1, r2) \ srax r1, 10, r2 -#define CREATE_VPTE_NOP nop #else #define CREATE_VPTE_OFFSET1(r1, r2) \ srax r1, PAGE_SHIFT, r2 #define CREATE_VPTE_OFFSET2(r1, r2) \ sllx r2, 3, r2 -#define CREATE_VPTE_NOP #endif /* DTLB ** ICACHE line 1: Quick user TLB misses */ + mov TLB_SFSR, %g1 ldxa [%g1 + %g1] ASI_DMMU, %g4 ! Get TAG_ACCESS andcc %g4, TAG_CONTEXT_BITS, %g0 ! From Nucleus? from_tl1_trap: rdpr %tl, %g5 ! For TL==3 test CREATE_VPTE_OFFSET1(%g4, %g6) ! Create VPTE offset - be,pn %xcc, 3f ! Yep, special processing + be,pn %xcc, kvmap ! Yep, special processing CREATE_VPTE_OFFSET2(%g4, %g6) ! Create VPTE offset cmp %g5, 4 ! Last trap level? - be,pn %xcc, longpath ! Yep, cannot risk VPTE miss - nop ! delay slot /* DTLB ** ICACHE line 2: User finish + quick kernel TLB misses */ + be,pn %xcc, longpath ! Yep, cannot risk VPTE miss + nop ! delay slot ldxa [%g3 + %g6] ASI_S, %g5 ! Load VPTE 1: brgez,pn %g5, longpath ! Invalid, branch out nop ! Delay-slot 9: stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB retry ! Trap return -3: brlz,pt %g4, 9b ! Kernel virtual map? - xor %g2, %g4, %g5 ! Finish bit twiddles - ba,a,pt %xcc, kvmap ! Yep, go check for obp/vmalloc + nop /* DTLB ** ICACHE line 3: winfixups+real_faults */ longpath: @@ -106,8 +103,7 @@ longpath: nop nop nop - CREATE_VPTE_NOP + nop #undef CREATE_VPTE_OFFSET1 #undef CREATE_VPTE_OFFSET2 -#undef CREATE_VPTE_NOP diff --git a/arch/sparc64/kernel/dtlb_prot.S b/arch/sparc64/kernel/dtlb_prot.S index d848bb7374bb..e0a920162604 100644 --- a/arch/sparc64/kernel/dtlb_prot.S +++ b/arch/sparc64/kernel/dtlb_prot.S @@ -14,14 +14,14 @@ */ /* PROT ** ICACHE line 1: User DTLB protection trap */ - stxa %g0, [%g1] ASI_DMMU ! Clear SFSR FaultValid bit - membar #Sync ! Synchronize ASI stores - rdpr %pstate, %g5 ! Move into alternate globals + mov TLB_SFSR, %g1 + stxa %g0, [%g1] ASI_DMMU ! Clear FaultValid bit + membar #Sync ! Synchronize stores + rdpr %pstate, %g5 ! Move into alt-globals wrpr %g5, PSTATE_AG|PSTATE_MG, %pstate - rdpr %tl, %g1 ! Need to do a winfixup? + rdpr %tl, %g1 ! Need a winfixup? cmp %g1, 1 ! Trap level >1? - mov TLB_TAG_ACCESS, %g4 ! Prepare reload of vaddr - nop + mov TLB_TAG_ACCESS, %g4 ! For reload of vaddr /* PROT ** ICACHE line 2: More real fault processing */ bgu,pn %xcc, winfix_trampoline ! Yes, perform winfixup diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S index b48349527853..11a848402fb1 100644 --- a/arch/sparc64/kernel/entry.S +++ b/arch/sparc64/kernel/entry.S @@ -30,163 +30,10 @@ .text .align 32 - .globl sparc64_vpte_patchme1 - .globl sparc64_vpte_patchme2 -/* - * On a second level vpte miss, check whether the original fault is to the OBP - * range (note that this is only possible for instruction miss, data misses to - * obp range do not use vpte). If so, go back directly to the faulting address. 
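The DEBUG_PAGEALLOC option added in the Kconfig.debug hunk above works by unmapping freed pages from the kernel linear mapping, so a stale pointer dereference faults immediately instead of silently corrupting reused memory. A rough sketch of where the generic hook sits, assuming the usual kernel_map_pages() interface; the wrapper names are made up for illustration:

#include <linux/mm.h>

/* kernel_map_pages() only exists with CONFIG_DEBUG_PAGEALLOC enabled;
 * these wrappers are a sketch of the allocator call sites, not the
 * real page_alloc.c code.
 */
static void debug_free_pages_sketch(struct page *page, unsigned int order)
{
        kernel_map_pages(page, 1 << order, 0);  /* unmap on free  */
        /* ... normal freeing continues here ... */
}

static void debug_alloc_pages_sketch(struct page *page, unsigned int order)
{
        kernel_map_pages(page, 1 << order, 1);  /* remap on alloc */
}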
- * This is because we want to read the tpc, otherwise we have no way of knowing - * the 8k aligned faulting address if we are using >8k kernel pagesize. This - * also ensures no vpte range addresses are dropped into tlb while obp is - * executing (see inherit_locked_prom_mappings() rant). - */ -sparc64_vpte_nucleus: - /* Note that kvmap below has verified that the address is - * in the range MODULES_VADDR --> VMALLOC_END already. So - * here we need only check if it is an OBP address or not. - */ - sethi %hi(LOW_OBP_ADDRESS), %g5 - cmp %g4, %g5 - blu,pn %xcc, sparc64_vpte_patchme1 - mov 0x1, %g5 - sllx %g5, 32, %g5 - cmp %g4, %g5 - blu,pn %xcc, obp_iaddr_patch - nop - - /* These two instructions are patched by paginig_init(). */ -sparc64_vpte_patchme1: - sethi %hi(0), %g5 -sparc64_vpte_patchme2: - or %g5, %lo(0), %g5 - - /* With kernel PGD in %g5, branch back into dtlb_backend. */ - ba,pt %xcc, sparc64_kpte_continue - andn %g1, 0x3, %g1 /* Finish PMD offset adjustment. */ - -vpte_noent: - /* Restore previous TAG_ACCESS, %g5 is zero, and we will - * skip over the trap instruction so that the top level - * TLB miss handler will thing this %g5 value is just an - * invalid PTE, thus branching to full fault processing. - */ - mov TLB_SFSR, %g1 - stxa %g4, [%g1 + %g1] ASI_DMMU - done - - .globl obp_iaddr_patch -obp_iaddr_patch: - /* These two instructions patched by inherit_prom_mappings(). */ - sethi %hi(0), %g5 - or %g5, %lo(0), %g5 - - /* Behave as if we are at TL0. */ - wrpr %g0, 1, %tl - rdpr %tpc, %g4 /* Find original faulting iaddr */ - srlx %g4, 13, %g4 /* Throw out context bits */ - sllx %g4, 13, %g4 /* g4 has vpn + ctx0 now */ - - /* Restore previous TAG_ACCESS. */ - mov TLB_SFSR, %g1 - stxa %g4, [%g1 + %g1] ASI_IMMU - - /* Get PMD offset. */ - srlx %g4, 23, %g6 - and %g6, 0x7ff, %g6 - sllx %g6, 2, %g6 - - /* Load PMD, is it valid? */ - lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 - brz,pn %g5, longpath - sllx %g5, 11, %g5 - - /* Get PTE offset. */ - srlx %g4, 13, %g6 - and %g6, 0x3ff, %g6 - sllx %g6, 3, %g6 - - /* Load PTE. */ - ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 - brgez,pn %g5, longpath - nop - - /* TLB load and return from trap. */ - stxa %g5, [%g0] ASI_ITLB_DATA_IN - retry - - .globl obp_daddr_patch -obp_daddr_patch: - /* These two instructions patched by inherit_prom_mappings(). */ - sethi %hi(0), %g5 - or %g5, %lo(0), %g5 - - /* Get PMD offset. */ - srlx %g4, 23, %g6 - and %g6, 0x7ff, %g6 - sllx %g6, 2, %g6 - - /* Load PMD, is it valid? */ - lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 - brz,pn %g5, longpath - sllx %g5, 11, %g5 - - /* Get PTE offset. */ - srlx %g4, 13, %g6 - and %g6, 0x3ff, %g6 - sllx %g6, 3, %g6 - - /* Load PTE. */ - ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 - brgez,pn %g5, longpath - nop - - /* TLB load and return from trap. */ - stxa %g5, [%g0] ASI_DTLB_DATA_IN - retry - -/* - * On a first level data miss, check whether this is to the OBP range (note - * that such accesses can be made by prom, as well as by kernel using - * prom_getproperty on "address"), and if so, do not use vpte access ... - * rather, use information saved during inherit_prom_mappings() using 8k - * pagesize. 
- */ - .align 32 -kvmap: - sethi %hi(MODULES_VADDR), %g5 - cmp %g4, %g5 - blu,pn %xcc, longpath - mov (VMALLOC_END >> 24), %g5 - sllx %g5, 24, %g5 - cmp %g4, %g5 - bgeu,pn %xcc, longpath - nop - -kvmap_check_obp: - sethi %hi(LOW_OBP_ADDRESS), %g5 - cmp %g4, %g5 - blu,pn %xcc, kvmap_vmalloc_addr - mov 0x1, %g5 - sllx %g5, 32, %g5 - cmp %g4, %g5 - blu,pn %xcc, obp_daddr_patch - nop - -kvmap_vmalloc_addr: - /* If we get here, a vmalloc addr was accessed, load kernel VPTE. */ - ldxa [%g3 + %g6] ASI_N, %g5 - brgez,pn %g5, longpath - nop - - /* PTE is valid, load into TLB and return from trap. */ - stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB - retry - /* This is trivial with the new code... */ .globl do_fpdis do_fpdis: - sethi %hi(TSTATE_PEF), %g4 ! IEU0 + sethi %hi(TSTATE_PEF), %g4 rdpr %tstate, %g5 andcc %g5, %g4, %g0 be,pt %xcc, 1f @@ -203,18 +50,18 @@ do_fpdis: add %g0, %g0, %g0 ba,a,pt %xcc, rtrap_clr_l6 -1: ldub [%g6 + TI_FPSAVED], %g5 ! Load Group - wr %g0, FPRS_FEF, %fprs ! LSU Group+4bubbles - andcc %g5, FPRS_FEF, %g0 ! IEU1 Group - be,a,pt %icc, 1f ! CTI - clr %g7 ! IEU0 - ldx [%g6 + TI_GSR], %g7 ! Load Group -1: andcc %g5, FPRS_DL, %g0 ! IEU1 - bne,pn %icc, 2f ! CTI - fzero %f0 ! FPA - andcc %g5, FPRS_DU, %g0 ! IEU1 Group - bne,pn %icc, 1f ! CTI - fzero %f2 ! FPA +1: ldub [%g6 + TI_FPSAVED], %g5 + wr %g0, FPRS_FEF, %fprs + andcc %g5, FPRS_FEF, %g0 + be,a,pt %icc, 1f + clr %g7 + ldx [%g6 + TI_GSR], %g7 +1: andcc %g5, FPRS_DL, %g0 + bne,pn %icc, 2f + fzero %f0 + andcc %g5, FPRS_DU, %g0 + bne,pn %icc, 1f + fzero %f2 faddd %f0, %f2, %f4 fmuld %f0, %f2, %f6 faddd %f0, %f2, %f8 @@ -250,15 +97,17 @@ do_fpdis: faddd %f0, %f2, %f4 fmuld %f0, %f2, %f6 ldxa [%g3] ASI_DMMU, %g5 -cplus_fptrap_insn_1: - sethi %hi(0), %g2 + sethi %hi(sparc64_kern_sec_context), %g2 + ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 stxa %g2, [%g3] ASI_DMMU membar #Sync add %g6, TI_FPREGS + 0xc0, %g2 faddd %f0, %f2, %f8 fmuld %f0, %f2, %f10 - ldda [%g1] ASI_BLK_S, %f32 ! grrr, where is ASI_BLK_NUCLEUS 8-( + membar #Sync + ldda [%g1] ASI_BLK_S, %f32 ldda [%g2] ASI_BLK_S, %f48 + membar #Sync faddd %f0, %f2, %f12 fmuld %f0, %f2, %f14 faddd %f0, %f2, %f16 @@ -269,7 +118,6 @@ cplus_fptrap_insn_1: fmuld %f0, %f2, %f26 faddd %f0, %f2, %f28 fmuld %f0, %f2, %f30 - membar #Sync b,pt %xcc, fpdis_exit nop 2: andcc %g5, FPRS_DU, %g0 @@ -279,15 +127,17 @@ cplus_fptrap_insn_1: fzero %f34 ldxa [%g3] ASI_DMMU, %g5 add %g6, TI_FPREGS, %g1 -cplus_fptrap_insn_2: - sethi %hi(0), %g2 + sethi %hi(sparc64_kern_sec_context), %g2 + ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 stxa %g2, [%g3] ASI_DMMU membar #Sync add %g6, TI_FPREGS + 0x40, %g2 faddd %f32, %f34, %f36 fmuld %f32, %f34, %f38 - ldda [%g1] ASI_BLK_S, %f0 ! grrr, where is ASI_BLK_NUCLEUS 8-( + membar #Sync + ldda [%g1] ASI_BLK_S, %f0 ldda [%g2] ASI_BLK_S, %f16 + membar #Sync faddd %f32, %f34, %f40 fmuld %f32, %f34, %f42 faddd %f32, %f34, %f44 @@ -300,18 +150,18 @@ cplus_fptrap_insn_2: fmuld %f32, %f34, %f58 faddd %f32, %f34, %f60 fmuld %f32, %f34, %f62 - membar #Sync ba,pt %xcc, fpdis_exit nop 3: mov SECONDARY_CONTEXT, %g3 add %g6, TI_FPREGS, %g1 ldxa [%g3] ASI_DMMU, %g5 -cplus_fptrap_insn_3: - sethi %hi(0), %g2 + sethi %hi(sparc64_kern_sec_context), %g2 + ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 stxa %g2, [%g3] ASI_DMMU membar #Sync mov 0x40, %g2 - ldda [%g1] ASI_BLK_S, %f0 ! 
grrr, where is ASI_BLK_NUCLEUS 8-( + membar #Sync + ldda [%g1] ASI_BLK_S, %f0 ldda [%g1 + %g2] ASI_BLK_S, %f16 add %g1, 0x80, %g1 ldda [%g1] ASI_BLK_S, %f32 @@ -472,8 +322,8 @@ do_fptrap_after_fsr: stx %g3, [%g6 + TI_GSR] mov SECONDARY_CONTEXT, %g3 ldxa [%g3] ASI_DMMU, %g5 -cplus_fptrap_insn_4: - sethi %hi(0), %g2 + sethi %hi(sparc64_kern_sec_context), %g2 + ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2 stxa %g2, [%g3] ASI_DMMU membar #Sync add %g6, TI_FPREGS, %g2 @@ -494,45 +344,17 @@ cplus_fptrap_insn_4: ba,pt %xcc, etrap wr %g0, 0, %fprs -cplus_fptrap_1: - sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2 - - .globl cheetah_plus_patch_fpdis -cheetah_plus_patch_fpdis: - /* We configure the dTLB512_0 for 4MB pages and the - * dTLB512_1 for 8K pages when in context zero. - */ - sethi %hi(cplus_fptrap_1), %o0 - lduw [%o0 + %lo(cplus_fptrap_1)], %o1 - - set cplus_fptrap_insn_1, %o2 - stw %o1, [%o2] - flush %o2 - set cplus_fptrap_insn_2, %o2 - stw %o1, [%o2] - flush %o2 - set cplus_fptrap_insn_3, %o2 - stw %o1, [%o2] - flush %o2 - set cplus_fptrap_insn_4, %o2 - stw %o1, [%o2] - flush %o2 - - retl - nop - /* The registers for cross calls will be: * * DATA 0: [low 32-bits] Address of function to call, jmp to this * [high 32-bits] MMU Context Argument 0, place in %g5 - * DATA 1: Address Argument 1, place in %g6 + * DATA 1: Address Argument 1, place in %g1 * DATA 2: Address Argument 2, place in %g7 * * With this method we can do most of the cross-call tlb/cache * flushing very quickly. * - * Current CPU's IRQ worklist table is locked into %g1, - * don't touch. + * Current CPU's IRQ worklist table is locked into %g6, don't touch. */ .text .align 32 @@ -1006,13 +828,14 @@ cheetah_plus_dcpe_trap_vector: nop do_cheetah_plus_data_parity: - ba,pt %xcc, etrap + rdpr %pil, %g2 + wrpr %g0, 15, %pil + ba,pt %xcc, etrap_irq rd %pc, %g7 mov 0x0, %o0 call cheetah_plus_parity_error add %sp, PTREGS_OFF, %o1 - ba,pt %xcc, rtrap - clr %l6 + ba,a,pt %xcc, rtrap_irq cheetah_plus_dcpe_trap_vector_tl1: membar #Sync @@ -1036,13 +859,14 @@ cheetah_plus_icpe_trap_vector: nop do_cheetah_plus_insn_parity: - ba,pt %xcc, etrap + rdpr %pil, %g2 + wrpr %g0, 15, %pil + ba,pt %xcc, etrap_irq rd %pc, %g7 mov 0x1, %o0 call cheetah_plus_parity_error add %sp, PTREGS_OFF, %o1 - ba,pt %xcc, rtrap - clr %l6 + ba,a,pt %xcc, rtrap_irq cheetah_plus_icpe_trap_vector_tl1: membar #Sync @@ -1075,6 +899,10 @@ do_dcpe_tl1: nop wrpr %g1, %tl ! Restore original trap level do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */ + sethi %hi(dcache_parity_tl1_occurred), %g2 + lduw [%g2 + %lo(dcache_parity_tl1_occurred)], %g1 + add %g1, 1, %g1 + stw %g1, [%g2 + %lo(dcache_parity_tl1_occurred)] /* Reset D-cache parity */ sethi %hi(1 << 16), %g1 ! D-cache size mov (1 << 5), %g2 ! D-cache line size @@ -1121,6 +949,10 @@ do_icpe_tl1: nop wrpr %g1, %tl ! Restore original trap level do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */ + sethi %hi(icache_parity_tl1_occurred), %g2 + lduw [%g2 + %lo(icache_parity_tl1_occurred)], %g1 + add %g1, 1, %g1 + stw %g1, [%g2 + %lo(icache_parity_tl1_occurred)] /* Flush I-cache */ sethi %hi(1 << 15), %g1 ! I-cache size mov (1 << 5), %g2 ! 
I-cache line size diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S index 50d2af1d98ae..0d8eba21111b 100644 --- a/arch/sparc64/kernel/etrap.S +++ b/arch/sparc64/kernel/etrap.S @@ -68,12 +68,8 @@ etrap_irq: wrpr %g3, 0, %otherwin wrpr %g2, 0, %wstate -cplus_etrap_insn_1: - sethi %hi(0), %g3 - sllx %g3, 32, %g3 -cplus_etrap_insn_2: - sethi %hi(0), %g2 - or %g3, %g2, %g3 + sethi %hi(sparc64_kern_pri_context), %g2 + ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3 stxa %g3, [%l4] ASI_DMMU flush %l6 wr %g0, ASI_AIUS, %asi @@ -215,12 +211,8 @@ scetrap: rdpr %pil, %g2 mov PRIMARY_CONTEXT, %l4 wrpr %g3, 0, %otherwin wrpr %g2, 0, %wstate -cplus_etrap_insn_3: - sethi %hi(0), %g3 - sllx %g3, 32, %g3 -cplus_etrap_insn_4: - sethi %hi(0), %g2 - or %g3, %g2, %g3 + sethi %hi(sparc64_kern_pri_context), %g2 + ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3 stxa %g3, [%l4] ASI_DMMU flush %l6 @@ -264,38 +256,3 @@ cplus_etrap_insn_4: #undef TASK_REGOFF #undef ETRAP_PSTATE1 - -cplus_einsn_1: - sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3 -cplus_einsn_2: - sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2 - - .globl cheetah_plus_patch_etrap -cheetah_plus_patch_etrap: - /* We configure the dTLB512_0 for 4MB pages and the - * dTLB512_1 for 8K pages when in context zero. - */ - sethi %hi(cplus_einsn_1), %o0 - sethi %hi(cplus_etrap_insn_1), %o2 - lduw [%o0 + %lo(cplus_einsn_1)], %o1 - or %o2, %lo(cplus_etrap_insn_1), %o2 - stw %o1, [%o2] - flush %o2 - sethi %hi(cplus_etrap_insn_3), %o2 - or %o2, %lo(cplus_etrap_insn_3), %o2 - stw %o1, [%o2] - flush %o2 - - sethi %hi(cplus_einsn_2), %o0 - sethi %hi(cplus_etrap_insn_2), %o2 - lduw [%o0 + %lo(cplus_einsn_2)], %o1 - or %o2, %lo(cplus_etrap_insn_2), %o2 - stw %o1, [%o2] - flush %o2 - sethi %hi(cplus_etrap_insn_4), %o2 - or %o2, %lo(cplus_etrap_insn_4), %o2 - stw %o1, [%o2] - flush %o2 - - retl - nop diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S index 1fa06c4e3bdb..b49dcd4504b0 100644 --- a/arch/sparc64/kernel/head.S +++ b/arch/sparc64/kernel/head.S @@ -28,19 +28,14 @@ #include /* This section from from _start to sparc64_boot_end should fit into - * 0x0000.0000.0040.4000 to 0x0000.0000.0040.8000 and will be sharing space - * with bootup_user_stack, which is from 0x0000.0000.0040.4000 to - * 0x0000.0000.0040.6000 and empty_bad_page, which is from - * 0x0000.0000.0040.6000 to 0x0000.0000.0040.8000. + * 0x0000000000404000 to 0x0000000000408000. */ - .text .globl start, _start, stext, _stext _start: start: _stext: stext: -bootup_user_stack: ! 0x0000000000404000 b sparc64_boot flushw /* Flush register file. */ @@ -80,15 +75,169 @@ sparc_ramdisk_image64: .xword 0 .word _end - /* We must be careful, 32-bit OpenBOOT will get confused if it - * tries to save away a register window to a 64-bit kernel - * stack address. Flush all windows, disable interrupts, - * remap if necessary, jump onto kernel trap table, then kernel - * stack, or else we die. - * - * PROM entry point is on %o4 - */ + /* PROM cif handler code address is in %o4. */ sparc64_boot: +1: rd %pc, %g7 + set 1b, %g1 + cmp %g1, %g7 + be,pn %xcc, sparc64_boot_after_remap + mov %o4, %l7 + + /* We need to remap the kernel. Use position independant + * code to remap us to KERNBASE. + * + * SILO can invoke us with 32-bit address masking enabled, + * so make sure that's clear. 
+ */ + rdpr %pstate, %g1 + andn %g1, PSTATE_AM, %g1 + wrpr %g1, 0x0, %pstate + ba,a,pt %xcc, 1f + + .globl prom_finddev_name, prom_chosen_path + .globl prom_getprop_name, prom_mmu_name + .globl prom_callmethod_name, prom_translate_name + .globl prom_map_name, prom_unmap_name, prom_mmu_ihandle_cache + .globl prom_boot_mapped_pc, prom_boot_mapping_mode + .globl prom_boot_mapping_phys_high, prom_boot_mapping_phys_low +prom_finddev_name: + .asciz "finddevice" +prom_chosen_path: + .asciz "/chosen" +prom_getprop_name: + .asciz "getprop" +prom_mmu_name: + .asciz "mmu" +prom_callmethod_name: + .asciz "call-method" +prom_translate_name: + .asciz "translate" +prom_map_name: + .asciz "map" +prom_unmap_name: + .asciz "unmap" + .align 4 +prom_mmu_ihandle_cache: + .word 0 +prom_boot_mapped_pc: + .word 0 +prom_boot_mapping_mode: + .word 0 + .align 8 +prom_boot_mapping_phys_high: + .xword 0 +prom_boot_mapping_phys_low: + .xword 0 +1: + rd %pc, %l0 + mov (1b - prom_finddev_name), %l1 + mov (1b - prom_chosen_path), %l2 + mov (1b - prom_boot_mapped_pc), %l3 + sub %l0, %l1, %l1 + sub %l0, %l2, %l2 + sub %l0, %l3, %l3 + stw %l0, [%l3] + sub %sp, (192 + 128), %sp + + /* chosen_node = prom_finddevice("/chosen") */ + stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "finddevice" + mov 1, %l3 + stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1 + stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 + stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, "/chosen" + stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1 + call %l7 + add %sp, (2047 + 128), %o0 ! argument array + + ldx [%sp + 2047 + 128 + 0x20], %l4 ! chosen device node + + mov (1b - prom_getprop_name), %l1 + mov (1b - prom_mmu_name), %l2 + mov (1b - prom_mmu_ihandle_cache), %l5 + sub %l0, %l1, %l1 + sub %l0, %l2, %l2 + sub %l0, %l5, %l5 + + /* prom_mmu_ihandle_cache = prom_getint(chosen_node, "mmu") */ + stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop" + mov 4, %l3 + stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4 + mov 1, %l3 + stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 + stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, chosen_node + stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "mmu" + stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_mmu_ihandle_cache + mov 4, %l3 + stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, sizeof(arg3) + stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1 + call %l7 + add %sp, (2047 + 128), %o0 ! argument array + + mov (1b - prom_callmethod_name), %l1 + mov (1b - prom_translate_name), %l2 + sub %l0, %l1, %l1 + sub %l0, %l2, %l2 + lduw [%l5], %l5 ! prom_mmu_ihandle_cache + + stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "call-method" + mov 3, %l3 + stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 3 + mov 5, %l3 + stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 5 + stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1: "translate" + stx %l5, [%sp + 2047 + 128 + 0x20] ! arg2: prom_mmu_ihandle_cache + /* PAGE align */ + srlx %l0, 13, %l3 + sllx %l3, 13, %l3 + stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: vaddr, our PC + stx %g0, [%sp + 2047 + 128 + 0x30] ! res1 + stx %g0, [%sp + 2047 + 128 + 0x38] ! res2 + stx %g0, [%sp + 2047 + 128 + 0x40] ! res3 + stx %g0, [%sp + 2047 + 128 + 0x48] ! res4 + stx %g0, [%sp + 2047 + 128 + 0x50] ! res5 + call %l7 + add %sp, (2047 + 128), %o0 ! argument array + + ldx [%sp + 2047 + 128 + 0x40], %l1 ! translation mode + mov (1b - prom_boot_mapping_mode), %l4 + sub %l0, %l4, %l4 + stw %l1, [%l4] + mov (1b - prom_boot_mapping_phys_high), %l4 + sub %l0, %l4, %l4 + ldx [%sp + 2047 + 128 + 0x48], %l2 ! 
physaddr high + stx %l2, [%l4 + 0x0] + ldx [%sp + 2047 + 128 + 0x50], %l3 ! physaddr low + /* 4MB align */ + srlx %l3, 22, %l3 + sllx %l3, 22, %l3 + stx %l3, [%l4 + 0x8] + + /* Leave service as-is, "call-method" */ + mov 7, %l3 + stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 7 + mov 1, %l3 + stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 + mov (1b - prom_map_name), %l3 + sub %l0, %l3, %l3 + stx %l3, [%sp + 2047 + 128 + 0x18] ! arg1: "map" + /* Leave arg2 as-is, prom_mmu_ihandle_cache */ + mov -1, %l3 + stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: mode (-1 default) + sethi %hi(8 * 1024 * 1024), %l3 + stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4: size (8MB) + sethi %hi(KERNBASE), %l3 + stx %l3, [%sp + 2047 + 128 + 0x38] ! arg5: vaddr (KERNBASE) + stx %g0, [%sp + 2047 + 128 + 0x40] ! arg6: empty + mov (1b - prom_boot_mapping_phys_low), %l3 + sub %l0, %l3, %l3 + ldx [%l3], %l3 + stx %l3, [%sp + 2047 + 128 + 0x48] ! arg7: phys addr + call %l7 + add %sp, (2047 + 128), %o0 ! argument array + + add %sp, (192 + 128), %sp + +sparc64_boot_after_remap: BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot) BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot) ba,pt %xcc, spitfire_boot @@ -125,185 +274,7 @@ cheetah_generic_boot: stxa %g0, [%g3] ASI_IMMU membar #Sync - wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate - wr %g0, 0, %fprs - - /* Just like for Spitfire, we probe itlb-2 for a mapping which - * matches our current %pc. We take the physical address in - * that mapping and use it to make our own. - */ - - /* %g5 holds the tlb data */ - sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5 - sllx %g5, 32, %g5 - or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5 - - /* Put PADDR tlb data mask into %g3. */ - sethi %uhi(_PAGE_PADDR), %g3 - or %g3, %ulo(_PAGE_PADDR), %g3 - sllx %g3, 32, %g3 - sethi %hi(_PAGE_PADDR), %g7 - or %g7, %lo(_PAGE_PADDR), %g7 - or %g3, %g7, %g3 - - set 2 << 16, %l0 /* TLB entry walker. */ - set 0x1fff, %l2 /* Page mask. */ - rd %pc, %l3 - andn %l3, %l2, %g2 /* vaddr comparator */ - -1: ldxa [%l0] ASI_ITLB_TAG_READ, %g1 - membar #Sync - andn %g1, %l2, %g1 - cmp %g1, %g2 - be,pn %xcc, cheetah_got_tlbentry - nop - and %l0, (127 << 3), %g1 - cmp %g1, (127 << 3) - blu,pt %xcc, 1b - add %l0, (1 << 3), %l0 - - /* Search the small TLB. OBP never maps us like that but - * newer SILO can. - */ - clr %l0 - -1: ldxa [%l0] ASI_ITLB_TAG_READ, %g1 - membar #Sync - andn %g1, %l2, %g1 - cmp %g1, %g2 - be,pn %xcc, cheetah_got_tlbentry - nop - cmp %l0, (15 << 3) - blu,pt %xcc, 1b - add %l0, (1 << 3), %l0 - - /* BUG() if we get here... */ - ta 0x5 - -cheetah_got_tlbentry: - ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g0 - ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1 - membar #Sync - and %g1, %g3, %g1 - set 0x5fff, %l0 - andn %g1, %l0, %g1 - or %g5, %g1, %g5 - - /* Clear out any KERNBASE area entries. 
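The position-independent remapping code above drives OpenBoot purely through hand-built call frames on the stack. The convention it follows is the standard client-interface one: a single PROM entry point receives an array of 64-bit cells holding the service name, the argument and return counts, then the arguments followed by return slots. A hypothetical C rendering of the same "finddevice" call (struct and helper are illustrative, not the kernel's prom library):

struct cif_call {
        unsigned long service;    /* pointer to e.g. "finddevice"        */
        unsigned long nargs;      /* number of argument cells            */
        unsigned long nrets;      /* number of return cells              */
        unsigned long cells[8];   /* arguments first, then return slots  */
};

typedef void (*cif_handler_t)(struct cif_call *);

static unsigned long cif_finddevice(cif_handler_t cif, const char *path)
{
        struct cif_call c = {
                .service = (unsigned long) "finddevice",
                .nargs   = 1,
                .nrets   = 1,
        };

        c.cells[0] = (unsigned long) path;   /* arg1: device path, e.g. "/chosen" */
        c.cells[1] = 0;                      /* ret1: phandle, filled in by OBP   */
        cif(&c);
        return c.cells[1];
}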
*/ - set 2 << 16, %l0 - sethi %hi(KERNBASE), %g3 - sethi %hi(KERNBASE<<1), %g7 - mov TLB_TAG_ACCESS, %l7 - - /* First, check ITLB */ -1: ldxa [%l0] ASI_ITLB_TAG_READ, %g1 - membar #Sync - andn %g1, %l2, %g1 - cmp %g1, %g3 - blu,pn %xcc, 2f - cmp %g1, %g7 - bgeu,pn %xcc, 2f - nop - stxa %g0, [%l7] ASI_IMMU - membar #Sync - stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS - membar #Sync - -2: and %l0, (127 << 3), %g1 - cmp %g1, (127 << 3) - blu,pt %xcc, 1b - add %l0, (1 << 3), %l0 - - /* Next, check DTLB */ - set 2 << 16, %l0 -1: ldxa [%l0] ASI_DTLB_TAG_READ, %g1 - membar #Sync - andn %g1, %l2, %g1 - cmp %g1, %g3 - blu,pn %xcc, 2f - cmp %g1, %g7 - bgeu,pn %xcc, 2f - nop - stxa %g0, [%l7] ASI_DMMU - membar #Sync - stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS - membar #Sync - -2: and %l0, (511 << 3), %g1 - cmp %g1, (511 << 3) - blu,pt %xcc, 1b - add %l0, (1 << 3), %l0 - - /* On Cheetah+, have to check second DTLB. */ - BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,l0,2f) - ba,pt %xcc, 9f - nop - -2: set 3 << 16, %l0 -1: ldxa [%l0] ASI_DTLB_TAG_READ, %g1 - membar #Sync - andn %g1, %l2, %g1 - cmp %g1, %g3 - blu,pn %xcc, 2f - cmp %g1, %g7 - bgeu,pn %xcc, 2f - nop - stxa %g0, [%l7] ASI_DMMU - membar #Sync - stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS - membar #Sync - -2: and %l0, (511 << 3), %g1 - cmp %g1, (511 << 3) - blu,pt %xcc, 1b - add %l0, (1 << 3), %l0 - -9: - - /* Now lock the TTE we created into ITLB-0 and DTLB-0, - * entry 15 (and maybe 14 too). - */ - sethi %hi(KERNBASE), %g3 - set (0 << 16) | (15 << 3), %g7 - stxa %g3, [%l7] ASI_DMMU - membar #Sync - stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS - membar #Sync - stxa %g3, [%l7] ASI_IMMU - membar #Sync - stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS - membar #Sync - flush %g3 - membar #Sync - sethi %hi(_end), %g3 /* Check for bigkernel case */ - or %g3, %lo(_end), %g3 - srl %g3, 23, %g3 /* Check if _end > 8M */ - brz,pt %g3, 1f - sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */ - sethi %hi(0x400000), %g3 - or %g3, %lo(0x400000), %g3 - add %g5, %g3, %g5 /* New tte data */ - andn %g5, (_PAGE_G), %g5 - sethi %hi(KERNBASE+0x400000), %g3 - or %g3, %lo(KERNBASE+0x400000), %g3 - set (0 << 16) | (14 << 3), %g7 - stxa %g3, [%l7] ASI_DMMU - membar #Sync - stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS - membar #Sync - stxa %g3, [%l7] ASI_IMMU - membar #Sync - stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS - membar #Sync - flush %g3 - membar #Sync - sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */ - ba,pt %xcc, 1f - nop - -1: set sun4u_init, %g2 - jmpl %g2 + %g0, %g0 - nop + ba,a,pt %xcc, jump_to_sun4u_init spitfire_boot: /* Typically PROM has already enabled both MMU's and both on-chip @@ -313,6 +284,7 @@ spitfire_boot: stxa %g1, [%g0] ASI_LSU_CONTROL membar #Sync +jump_to_sun4u_init: /* * Make sure we are in privileged mode, have address masking, * using the ordinary globals and have enabled floating @@ -324,151 +296,6 @@ spitfire_boot: wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate wr %g0, 0, %fprs -spitfire_create_mappings: - /* %g5 holds the tlb data */ - sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5 - sllx %g5, 32, %g5 - or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5 - - /* Base of physical memory cannot reliably be assumed to be - * at 0x0! Figure out where it happens to be. -DaveM - */ - - /* Put PADDR tlb data mask into %g3. 
*/ - sethi %uhi(_PAGE_PADDR_SF), %g3 - or %g3, %ulo(_PAGE_PADDR_SF), %g3 - sllx %g3, 32, %g3 - sethi %hi(_PAGE_PADDR_SF), %g7 - or %g7, %lo(_PAGE_PADDR_SF), %g7 - or %g3, %g7, %g3 - - /* Walk through entire ITLB, looking for entry which maps - * our %pc currently, stick PADDR from there into %g5 tlb data. - */ - clr %l0 /* TLB entry walker. */ - set 0x1fff, %l2 /* Page mask. */ - rd %pc, %l3 - andn %l3, %l2, %g2 /* vaddr comparator */ -1: - /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */ - ldxa [%l0] ASI_ITLB_TAG_READ, %g1 - nop - nop - nop - andn %g1, %l2, %g1 /* Get vaddr */ - cmp %g1, %g2 - be,a,pn %xcc, spitfire_got_tlbentry - ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1 - cmp %l0, (63 << 3) - blu,pt %xcc, 1b - add %l0, (1 << 3), %l0 - - /* BUG() if we get here... */ - ta 0x5 - -spitfire_got_tlbentry: - /* Nops here again, perhaps Cheetah/Blackbird are better behaved... */ - nop - nop - nop - and %g1, %g3, %g1 /* Mask to just get paddr bits. */ - set 0x5fff, %l3 /* Mask offset to get phys base. */ - andn %g1, %l3, %g1 - - /* NOTE: We hold on to %g1 paddr base as we need it below to lock - * NOTE: the PROM cif code into the TLB. - */ - - or %g5, %g1, %g5 /* Or it into TAG being built. */ - - clr %l0 /* TLB entry walker. */ - sethi %hi(KERNBASE), %g3 /* 4M lower limit */ - sethi %hi(KERNBASE<<1), %g7 /* 8M upper limit */ - mov TLB_TAG_ACCESS, %l7 -1: - /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */ - ldxa [%l0] ASI_ITLB_TAG_READ, %g1 - nop - nop - nop - andn %g1, %l2, %g1 /* Get vaddr */ - cmp %g1, %g3 - blu,pn %xcc, 2f - cmp %g1, %g7 - bgeu,pn %xcc, 2f - nop - stxa %g0, [%l7] ASI_IMMU - stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS - membar #Sync -2: - cmp %l0, (63 << 3) - blu,pt %xcc, 1b - add %l0, (1 << 3), %l0 - - nop; nop; nop - - clr %l0 /* TLB entry walker. */ -1: - /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */ - ldxa [%l0] ASI_DTLB_TAG_READ, %g1 - nop - nop - nop - andn %g1, %l2, %g1 /* Get vaddr */ - cmp %g1, %g3 - blu,pn %xcc, 2f - cmp %g1, %g7 - bgeu,pn %xcc, 2f - nop - stxa %g0, [%l7] ASI_DMMU - stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS - membar #Sync -2: - cmp %l0, (63 << 3) - blu,pt %xcc, 1b - add %l0, (1 << 3), %l0 - - nop; nop; nop - - - /* PROM never puts any TLB entries into the MMU with the lock bit - * set. So we gladly use tlb entry 63 for KERNBASE. And maybe 62 too. 
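The boot code being removed above locked the kernel image into the TLB with 4MB TTEs at entry 63 (and entry 62 for a "bigkernel" image spilling past the first 4MB window). The sizing rule it implemented reduces to a ceiling division; a sketch with hypothetical names:

#define SZ_4MB (4UL * 1024 * 1024)

/* How many locked 4MB TTEs are needed to cover the image from
 * KERNBASE up to _end.  One entry normally, two in the bigkernel
 * case the removed code tested for.  Illustrative only.
 */
static int num_locked_kernel_ttes(unsigned long kernbase, unsigned long kernel_end)
{
        unsigned long span = kernel_end - kernbase;

        return (int)((span + SZ_4MB - 1) / SZ_4MB);  /* one entry per 4MB window */
}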
- */ - - sethi %hi(KERNBASE), %g3 - mov (63 << 3), %g7 - stxa %g3, [%l7] ASI_DMMU /* KERNBASE into TLB TAG */ - stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS /* TTE into TLB DATA */ - membar #Sync - stxa %g3, [%l7] ASI_IMMU /* KERNBASE into TLB TAG */ - stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS /* TTE into TLB DATA */ - membar #Sync - flush %g3 - membar #Sync - sethi %hi(_end), %g3 /* Check for bigkernel case */ - or %g3, %lo(_end), %g3 - srl %g3, 23, %g3 /* Check if _end > 8M */ - brz,pt %g3, 2f - sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */ - sethi %hi(0x400000), %g3 - or %g3, %lo(0x400000), %g3 - add %g5, %g3, %g5 /* New tte data */ - andn %g5, (_PAGE_G), %g5 - sethi %hi(KERNBASE+0x400000), %g3 - or %g3, %lo(KERNBASE+0x400000), %g3 - mov (62 << 3), %g7 - stxa %g3, [%l7] ASI_DMMU - stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS - membar #Sync - stxa %g3, [%l7] ASI_IMMU - stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS - membar #Sync - flush %g3 - membar #Sync - sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */ -2: ba,pt %xcc, 1f - nop -1: set sun4u_init, %g2 jmpl %g2 + %g0, %g0 nop @@ -483,38 +310,12 @@ sun4u_init: stxa %g0, [%g7] ASI_DMMU membar #Sync - /* We are now safely (we hope) in Nucleus context (0), rewrite - * the KERNBASE TTE's so they no longer have the global bit set. - * Don't forget to setup TAG_ACCESS first 8-) - */ - mov TLB_TAG_ACCESS, %g2 - stxa %g3, [%g2] ASI_IMMU - stxa %g3, [%g2] ASI_DMMU - membar #Sync - BRANCH_IF_ANY_CHEETAH(g1,g7,cheetah_tlb_fixup) ba,pt %xcc, spitfire_tlb_fixup nop cheetah_tlb_fixup: - set (0 << 16) | (15 << 3), %g7 - ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g0 - ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1 - andn %g1, (_PAGE_G), %g1 - stxa %g1, [%g7] ASI_ITLB_DATA_ACCESS - membar #Sync - - ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g0 - ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g1 - andn %g1, (_PAGE_G), %g1 - stxa %g1, [%g7] ASI_DTLB_DATA_ACCESS - membar #Sync - - /* Kill instruction prefetch queues. */ - flush %g3 - membar #Sync - mov 2, %g2 /* Set TLB type to cheetah+. */ BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f) @@ -523,23 +324,7 @@ cheetah_tlb_fixup: 1: sethi %hi(tlb_type), %g1 stw %g2, [%g1 + %lo(tlb_type)] - BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f) - ba,pt %xcc, 2f - nop - -1: /* Patch context register writes to support nucleus page - * size correctly. - */ - call cheetah_plus_patch_etrap - nop - call cheetah_plus_patch_rtrap - nop - call cheetah_plus_patch_fpdis - nop - call cheetah_plus_patch_winfixup - nop - -2: /* Patch copy/page operations to cheetah optimized versions. */ + /* Patch copy/page operations to cheetah optimized versions. */ call cheetah_patch_copyops nop call cheetah_patch_copy_page @@ -551,21 +336,6 @@ cheetah_tlb_fixup: nop spitfire_tlb_fixup: - mov (63 << 3), %g7 - ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1 - andn %g1, (_PAGE_G), %g1 - stxa %g1, [%g7] ASI_ITLB_DATA_ACCESS - membar #Sync - - ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g1 - andn %g1, (_PAGE_G), %g1 - stxa %g1, [%g7] ASI_DTLB_DATA_ACCESS - membar #Sync - - /* Kill instruction prefetch queues. */ - flush %g3 - membar #Sync - /* Set TLB type to spitfire. */ mov 0, %g2 sethi %hi(tlb_type), %g1 @@ -578,24 +348,6 @@ tlb_fixup_done: mov %sp, %l6 mov %o4, %l7 -#if 0 /* We don't do it like this anymore, but for historical hack value - * I leave this snippet here to show how crazy we can be sometimes. 8-) - */ - - /* Setup "Linux Current Register", thanks Sun 8-) */ - wr %g0, 0x1, %pcr - - /* Blackbird errata workaround. See commentary in - * smp.c:smp_percpu_timer_interrupt() for more - * information. 
- */ - ba,pt %xcc, 99f - nop - .align 64 -99: wr %g6, %g0, %pic - rd %pic, %g0 -#endif - wr %g0, ASI_P, %asi mov 1, %g1 sllx %g1, THREAD_SHIFT, %g1 @@ -629,32 +381,78 @@ tlb_fixup_done: nop /* Not reached... */ -/* IMPORTANT NOTE: Whenever making changes here, check - * trampoline.S as well. -jj */ - .globl setup_tba -setup_tba: /* i0 = is_starfire */ - save %sp, -160, %sp + /* This is meant to allow the sharing of this code between + * boot processor invocation (via setup_tba() below) and + * secondary processor startup (via trampoline.S). The + * former does use this code, the latter does not yet due + * to some complexities. That should be fixed up at some + * point. + * + * There used to be enormous complexity wrt. transferring + * over from the firwmare's trap table to the Linux kernel's. + * For example, there was a chicken & egg problem wrt. building + * the OBP page tables, yet needing to be on the Linux kernel + * trap table (to translate PAGE_OFFSET addresses) in order to + * do that. + * + * We now handle OBP tlb misses differently, via linear lookups + * into the prom_trans[] array. So that specific problem no + * longer exists. Yet, unfortunately there are still some issues + * preventing trampoline.S from using this code... ho hum. + */ + .globl setup_trap_table +setup_trap_table: + save %sp, -192, %sp - rdpr %tba, %g7 - sethi %hi(prom_tba), %o1 - or %o1, %lo(prom_tba), %o1 - stx %g7, [%o1] + /* Force interrupts to be disabled. */ + rdpr %pstate, %o1 + andn %o1, PSTATE_IE, %o1 + wrpr %o1, 0x0, %pstate + wrpr %g0, 15, %pil + + /* Make the firmware call to jump over to the Linux trap table. */ + call prom_set_trap_table + sethi %hi(sparc64_ttable_tl0), %o0 + + /* Start using proper page size encodings in ctx register. */ + sethi %hi(sparc64_kern_pri_context), %g3 + ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 + mov PRIMARY_CONTEXT, %g1 + stxa %g2, [%g1] ASI_DMMU + membar #Sync + + /* The Linux trap handlers expect various trap global registers + * to be setup with some fixed values. So here we set these + * up very carefully. These globals are: + * + * Alternate Globals (PSTATE_AG): + * + * %g6 --> current_thread_info() + * + * MMU Globals (PSTATE_MG): + * + * %g1 --> TLB_SFSR + * %g2 --> ((_PAGE_VALID | _PAGE_SZ4MB | + * _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W) + * ^ 0xfffff80000000000) + * (this %g2 value is used for computing the PAGE_OFFSET kernel + * TLB entries quickly, the virtual address of the fault XOR'd + * with this %g2 value is the PTE to load into the TLB) + * %g3 --> VPTE_BASE_CHEETAH or VPTE_BASE_SPITFIRE + * + * Interrupt Globals (PSTATE_IG, setup by init_irqwork_curcpu()): + * + * %g6 --> __irq_work[smp_processor_id()] + */ - /* Setup "Linux" globals 8-) */ rdpr %pstate, %o1 mov %g6, %o2 - wrpr %o1, (PSTATE_AG|PSTATE_IE), %pstate - sethi %hi(sparc64_ttable_tl0), %g1 - wrpr %g1, %tba + wrpr %o1, PSTATE_AG, %pstate mov %o2, %g6 - /* Set up MMU globals */ - wrpr %o1, (PSTATE_MG|PSTATE_IE), %pstate - - /* Set fixed globals used by dTLB miss handler. 
*/ #define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000) #define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W) - + wrpr %o1, PSTATE_MG, %pstate mov TSB_REG, %g1 stxa %g0, [%g1] ASI_DMMU membar #Sync @@ -666,17 +464,17 @@ setup_tba: /* i0 = is_starfire */ sllx %g2, 32, %g2 or %g2, KERN_LOWBITS, %g2 - BRANCH_IF_ANY_CHEETAH(g3,g7,cheetah_vpte_base) - ba,pt %xcc, spitfire_vpte_base + BRANCH_IF_ANY_CHEETAH(g3,g7,8f) + ba,pt %xcc, 9f nop -cheetah_vpte_base: +8: sethi %uhi(VPTE_BASE_CHEETAH), %g3 or %g3, %ulo(VPTE_BASE_CHEETAH), %g3 ba,pt %xcc, 2f sllx %g3, 32, %g3 -spitfire_vpte_base: +9: sethi %uhi(VPTE_BASE_SPITFIRE), %g3 or %g3, %ulo(VPTE_BASE_SPITFIRE), %g3 sllx %g3, 32, %g3 @@ -702,48 +500,55 @@ spitfire_vpte_base: sllx %o2, 32, %o2 wr %o2, %asr25 - /* Ok, we're done setting up all the state our trap mechanims needs, - * now get back into normal globals and let the PROM know what is up. - */ 2: wrpr %g0, %g0, %wstate - wrpr %o1, PSTATE_IE, %pstate + wrpr %o1, 0x0, %pstate call init_irqwork_curcpu nop - call prom_set_trap_table - sethi %hi(sparc64_ttable_tl0), %o0 - - BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f) - ba,pt %xcc, 2f - nop - -1: /* Start using proper page size encodings in ctx register. */ - sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3 - mov PRIMARY_CONTEXT, %g1 - sllx %g3, 32, %g3 - sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2 - or %g3, %g2, %g3 - stxa %g3, [%g1] ASI_DMMU - membar #Sync - -2: + /* Now we can turn interrupts back on. */ rdpr %pstate, %o1 or %o1, PSTATE_IE, %o1 wrpr %o1, 0, %pstate + wrpr %g0, 0x0, %pil ret restore + .globl setup_tba +setup_tba: /* i0 = is_starfire */ + save %sp, -192, %sp + + /* The boot processor is the only cpu which invokes this + * routine, the other cpus set things up via trampoline.S. + * So save the OBP trap table address here. + */ + rdpr %tba, %g7 + sethi %hi(prom_tba), %o1 + or %o1, %lo(prom_tba), %o1 + stx %g7, [%o1] + + call setup_trap_table + nop + + ret + restore +sparc64_boot_end: + +#include "systbls.S" +#include "ktlb.S" +#include "etrap.S" +#include "rtrap.S" +#include "winfixup.S" +#include "entry.S" + /* - * The following skips make sure the trap table in ttable.S is aligned + * The following skip makes sure the trap table in ttable.S is aligned * on a 32K boundary as required by the v9 specs for TBA register. */ -sparc64_boot_end: - .skip 0x2000 + _start - sparc64_boot_end -bootup_user_stack_end: - .skip 0x2000 +1: + .skip 0x4000 + _start - 1b #ifdef CONFIG_SBUS /* This is just a hack to fool make depend config.h discovering @@ -755,20 +560,6 @@ bootup_user_stack_end: ! 0x0000000000408000 #include "ttable.S" -#include "systbls.S" - - .align 1024 - .globl swapper_pg_dir -swapper_pg_dir: - .word 0 - -#include "etrap.S" -#include "rtrap.S" -#include "winfixup.S" -#include "entry.S" - - /* This is just anal retentiveness on my part... 
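The setup_trap_table comment and the KERN_HIGHBITS/KERN_LOWBITS definitions above describe the %g2 fast path for linear-mapping misses: XORing the faulting address with a precomputed constant yields the TLB entry directly. A worked userspace check of that identity, using the PAGE_OFFSET value from the hunk and a stand-in for the _PAGE_* attribute bits (the exact attribute values are not taken from the patch):

#include <assert.h>
#include <stdint.h>

#define PAGE_OFFSET_X 0xfffff80000000000UL   /* matches the constant in the hunk */
#define ATTR_BITS_X   0xe000000000000406UL   /* illustrative _PAGE_* attribute bits */

int main(void)
{
        uint64_t g2 = ATTR_BITS_X ^ PAGE_OFFSET_X;  /* computed once at boot      */
        uint64_t paddr = 0x0000000007c00000UL;      /* some page-aligned physaddr */
        uint64_t vaddr = PAGE_OFFSET_X + paddr;     /* linear-mapping address     */

        /* One XOR in the TLB-miss fast path ...                                  */
        uint64_t pte = vaddr ^ g2;

        /* ... equals the PTE built the long way.  This works because a
         * linear-mapping address is just PAGE_OFFSET | paddr, and the
         * physical address never overlaps either PAGE_OFFSET or the
         * attribute bits, so the XORs cancel and merge cleanly.
         */
        assert(pte == (paddr | ATTR_BITS_X));
        return 0;
}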
*/ - .align 16384 .data .align 8 @@ -776,8 +567,11 @@ swapper_pg_dir: prom_tba: .xword 0 tlb_type: .word 0 /* Must NOT end up in BSS */ .section ".fixup",#alloc,#execinstr - .globl __ret_efault + + .globl __ret_efault, __retl_efault __ret_efault: ret restore %g0, -EFAULT, %o0 - +__retl_efault: + retl + mov -EFAULT, %o0 diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c index c9b69167632a..233526ba3abe 100644 --- a/arch/sparc64/kernel/irq.c +++ b/arch/sparc64/kernel/irq.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/sparc64/kernel/itlb_base.S b/arch/sparc64/kernel/itlb_base.S index b5e32dfa4fbc..4951ff8f6877 100644 --- a/arch/sparc64/kernel/itlb_base.S +++ b/arch/sparc64/kernel/itlb_base.S @@ -15,14 +15,12 @@ */ #define CREATE_VPTE_OFFSET1(r1, r2) \ srax r1, 10, r2 -#define CREATE_VPTE_OFFSET2(r1, r2) -#define CREATE_VPTE_NOP nop +#define CREATE_VPTE_OFFSET2(r1, r2) nop #else /* PAGE_SHIFT */ #define CREATE_VPTE_OFFSET1(r1, r2) \ srax r1, PAGE_SHIFT, r2 #define CREATE_VPTE_OFFSET2(r1, r2) \ sllx r2, 3, r2 -#define CREATE_VPTE_NOP #endif /* PAGE_SHIFT */ @@ -36,6 +34,7 @@ */ /* ITLB ** ICACHE line 1: Quick user TLB misses */ + mov TLB_SFSR, %g1 ldxa [%g1 + %g1] ASI_IMMU, %g4 ! Get TAG_ACCESS CREATE_VPTE_OFFSET1(%g4, %g6) ! Create VPTE offset CREATE_VPTE_OFFSET2(%g4, %g6) ! Create VPTE offset @@ -43,41 +42,38 @@ 1: brgez,pn %g5, 3f ! Not valid, branch out sethi %hi(_PAGE_EXEC), %g4 ! Delay-slot andcc %g5, %g4, %g0 ! Executable? + +/* ITLB ** ICACHE line 2: Real faults */ be,pn %xcc, 3f ! Nope, branch. nop ! Delay-slot 2: stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load PTE into TLB retry ! Trap return -3: rdpr %pstate, %g4 ! Move into alternate globals - -/* ITLB ** ICACHE line 2: Real faults */ +3: rdpr %pstate, %g4 ! Move into alt-globals wrpr %g4, PSTATE_AG|PSTATE_MG, %pstate rdpr %tpc, %g5 ! And load faulting VA mov FAULT_CODE_ITLB, %g4 ! It was read from ITLB -sparc64_realfault_common: ! Called by TL0 dtlb_miss too + +/* ITLB ** ICACHE line 3: Finish faults */ +sparc64_realfault_common: ! Called by dtlb_miss stb %g4, [%g6 + TI_FAULT_CODE] stx %g5, [%g6 + TI_FAULT_ADDR] ba,pt %xcc, etrap ! Save state 1: rd %pc, %g7 ! ... - nop - -/* ITLB ** ICACHE line 3: Finish faults + window fixups */ call do_sparc64_fault ! Call fault handler add %sp, PTREGS_OFF, %o0! Compute pt_regs arg ba,pt %xcc, rtrap_clr_l6 ! Restore cpu state nop + +/* ITLB ** ICACHE line 4: Window fixups */ winfix_trampoline: rdpr %tpc, %g3 ! Prepare winfixup TNPC - or %g3, 0x7c, %g3 ! Compute offset to branch + or %g3, 0x7c, %g3 ! Compute branch offset wrpr %g3, %tnpc ! Write it into TNPC done ! Do it to it - -/* ITLB ** ICACHE line 4: Unused... */ nop nop nop nop - CREATE_VPTE_NOP #undef CREATE_VPTE_OFFSET1 #undef CREATE_VPTE_OFFSET2 -#undef CREATE_VPTE_NOP diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S new file mode 100644 index 000000000000..d9244d3c9f73 --- /dev/null +++ b/arch/sparc64/kernel/ktlb.S @@ -0,0 +1,194 @@ +/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling. + * + * Copyright (C) 1995, 1997, 2005 David S. Miller + * Copyright (C) 1996 Eddie C. 
Dost (ecd@brainaid.de) + * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) + * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz) +*/ + +#include +#include +#include +#include +#include + + .text + .align 32 + +/* + * On a second level vpte miss, check whether the original fault is to the OBP + * range (note that this is only possible for instruction miss, data misses to + * obp range do not use vpte). If so, go back directly to the faulting address. + * This is because we want to read the tpc, otherwise we have no way of knowing + * the 8k aligned faulting address if we are using >8k kernel pagesize. This + * also ensures no vpte range addresses are dropped into tlb while obp is + * executing (see inherit_locked_prom_mappings() rant). + */ +sparc64_vpte_nucleus: + /* Note that kvmap below has verified that the address is + * in the range MODULES_VADDR --> VMALLOC_END already. So + * here we need only check if it is an OBP address or not. + */ + sethi %hi(LOW_OBP_ADDRESS), %g5 + cmp %g4, %g5 + blu,pn %xcc, kern_vpte + mov 0x1, %g5 + sllx %g5, 32, %g5 + cmp %g4, %g5 + blu,pn %xcc, vpte_insn_obp + nop + + /* These two instructions are patched by paginig_init(). */ +kern_vpte: + sethi %hi(swapper_pgd_zero), %g5 + lduw [%g5 + %lo(swapper_pgd_zero)], %g5 + + /* With kernel PGD in %g5, branch back into dtlb_backend. */ + ba,pt %xcc, sparc64_kpte_continue + andn %g1, 0x3, %g1 /* Finish PMD offset adjustment. */ + +vpte_noent: + /* Restore previous TAG_ACCESS, %g5 is zero, and we will + * skip over the trap instruction so that the top level + * TLB miss handler will thing this %g5 value is just an + * invalid PTE, thus branching to full fault processing. + */ + mov TLB_SFSR, %g1 + stxa %g4, [%g1 + %g1] ASI_DMMU + done + +vpte_insn_obp: + /* Behave as if we are at TL0. */ + wrpr %g0, 1, %tl + rdpr %tpc, %g4 /* Find original faulting iaddr */ + srlx %g4, 13, %g4 /* Throw out context bits */ + sllx %g4, 13, %g4 /* g4 has vpn + ctx0 now */ + + /* Restore previous TAG_ACCESS. */ + mov TLB_SFSR, %g1 + stxa %g4, [%g1 + %g1] ASI_IMMU + + sethi %hi(prom_trans), %g5 + or %g5, %lo(prom_trans), %g5 + +1: ldx [%g5 + 0x00], %g6 ! base + brz,a,pn %g6, longpath ! no more entries, fail + mov TLB_SFSR, %g1 ! and restore %g1 + ldx [%g5 + 0x08], %g1 ! len + add %g6, %g1, %g1 ! end + cmp %g6, %g4 + bgu,pt %xcc, 2f + cmp %g4, %g1 + bgeu,pt %xcc, 2f + ldx [%g5 + 0x10], %g1 ! PTE + + /* TLB load, restore %g1, and return from trap. */ + sub %g4, %g6, %g6 + add %g1, %g6, %g5 + mov TLB_SFSR, %g1 + stxa %g5, [%g0] ASI_ITLB_DATA_IN + retry + +2: ba,pt %xcc, 1b + add %g5, (3 * 8), %g5 ! next entry + +kvmap_do_obp: + sethi %hi(prom_trans), %g5 + or %g5, %lo(prom_trans), %g5 + srlx %g4, 13, %g4 + sllx %g4, 13, %g4 + +1: ldx [%g5 + 0x00], %g6 ! base + brz,a,pn %g6, longpath ! no more entries, fail + mov TLB_SFSR, %g1 ! and restore %g1 + ldx [%g5 + 0x08], %g1 ! len + add %g6, %g1, %g1 ! end + cmp %g6, %g4 + bgu,pt %xcc, 2f + cmp %g4, %g1 + bgeu,pt %xcc, 2f + ldx [%g5 + 0x10], %g1 ! PTE + + /* TLB load, restore %g1, and return from trap. */ + sub %g4, %g6, %g6 + add %g1, %g6, %g5 + mov TLB_SFSR, %g1 + stxa %g5, [%g0] ASI_DTLB_DATA_IN + retry + +2: ba,pt %xcc, 1b + add %g5, (3 * 8), %g5 ! next entry + +/* + * On a first level data miss, check whether this is to the OBP range (note + * that such accesses can be made by prom, as well as by kernel using + * prom_getproperty on "address"), and if so, do not use vpte access ... 
+ * rather, use information saved during inherit_prom_mappings() using 8k + * pagesize. + */ + .align 32 +kvmap: + brgez,pn %g4, kvmap_nonlinear + nop + +#ifdef CONFIG_DEBUG_PAGEALLOC + .globl kvmap_linear_patch +kvmap_linear_patch: +#endif + ba,pt %xcc, kvmap_load + xor %g2, %g4, %g5 + +#ifdef CONFIG_DEBUG_PAGEALLOC + sethi %hi(swapper_pg_dir), %g5 + or %g5, %lo(swapper_pg_dir), %g5 + sllx %g4, 64 - (PGDIR_SHIFT + PGDIR_BITS), %g6 + srlx %g6, 64 - PAGE_SHIFT, %g6 + andn %g6, 0x3, %g6 + lduw [%g5 + %g6], %g5 + brz,pn %g5, longpath + sllx %g4, 64 - (PMD_SHIFT + PMD_BITS), %g6 + srlx %g6, 64 - PAGE_SHIFT, %g6 + sllx %g5, 11, %g5 + andn %g6, 0x3, %g6 + lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 + brz,pn %g5, longpath + sllx %g4, 64 - PMD_SHIFT, %g6 + srlx %g6, 64 - PAGE_SHIFT, %g6 + sllx %g5, 11, %g5 + andn %g6, 0x7, %g6 + ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5 + brz,pn %g5, longpath + nop + ba,a,pt %xcc, kvmap_load +#endif + +kvmap_nonlinear: + sethi %hi(MODULES_VADDR), %g5 + cmp %g4, %g5 + blu,pn %xcc, longpath + mov (VMALLOC_END >> 24), %g5 + sllx %g5, 24, %g5 + cmp %g4, %g5 + bgeu,pn %xcc, longpath + nop + +kvmap_check_obp: + sethi %hi(LOW_OBP_ADDRESS), %g5 + cmp %g4, %g5 + blu,pn %xcc, kvmap_vmalloc_addr + mov 0x1, %g5 + sllx %g5, 32, %g5 + cmp %g4, %g5 + blu,pn %xcc, kvmap_do_obp + nop + +kvmap_vmalloc_addr: + /* If we get here, a vmalloc addr was accessed, load kernel VPTE. */ + ldxa [%g3 + %g6] ASI_N, %g5 + brgez,pn %g5, longpath + nop + +kvmap_load: + /* PTE is valid, load into TLB and return from trap. */ + stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB + retry diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c index 425c60cfea19..a11910be1013 100644 --- a/arch/sparc64/kernel/pci_iommu.c +++ b/arch/sparc64/kernel/pci_iommu.c @@ -49,12 +49,6 @@ static void __iommu_flushall(struct pci_iommu *iommu) /* Ensure completion of previous PIO writes. */ (void) pci_iommu_read(iommu->write_complete_reg); - - /* Now update everyone's flush point. */ - for (entry = 0; entry < PBM_NCLUSTERS; entry++) { - iommu->alloc_info[entry].flush = - iommu->alloc_info[entry].next; - } } #define IOPTE_CONSISTENT(CTX) \ @@ -80,120 +74,117 @@ static void inline iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte) iopte_val(*iopte) = val; } -void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize) +/* Based largely upon the ppc64 iommu allocator. */ +static long pci_arena_alloc(struct pci_iommu *iommu, unsigned long npages) { - int i; + struct pci_iommu_arena *arena = &iommu->arena; + unsigned long n, i, start, end, limit; + int pass; - tsbsize /= sizeof(iopte_t); + limit = arena->limit; + start = arena->hint; + pass = 0; - for (i = 0; i < tsbsize; i++) +again: + n = find_next_zero_bit(arena->map, limit, start); + end = n + npages; + if (unlikely(end >= limit)) { + if (likely(pass < 1)) { + limit = start; + start = 0; + __iommu_flushall(iommu); + pass++; + goto again; + } else { + /* Scanned the whole thing, give up. 
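The new ktlb.S above funnels every kernel DTLB miss through kvmap, which classifies the faulting address before deciding how to service it (and when CONFIG_DEBUG_PAGEALLOC is set, the linear-mapping fast path is patched out in favour of a software page-table walk). A pseudo-C outline of that decision tree; the boundary constants below are illustrative stand-ins, not the kernel's definitions:

#define MODULES_VADDR_X   0x0000000001000000UL
#define VMALLOC_END_X     0x0000000200000000UL
#define LOW_OBP_ADDRESS_X 0x00000000f0000000UL
#define HI_OBP_ADDRESS_X  0x0000000100000000UL

enum kfault { LINEAR, VMALLOC_VPTE, OBP_TRANS, LONGPATH };

static enum kfault classify_kernel_miss(unsigned long vaddr)
{
        if ((long) vaddr < 0)            /* bit 63 set: PAGE_OFFSET linear map */
                return LINEAR;           /* PTE = vaddr ^ precomputed %g2      */
        if (vaddr < MODULES_VADDR_X || vaddr >= VMALLOC_END_X)
                return LONGPATH;         /* not a kernel mapping: real fault   */
        if (vaddr >= LOW_OBP_ADDRESS_X && vaddr < HI_OBP_ADDRESS_X)
                return OBP_TRANS;        /* linear search of prom_trans[]      */
        return VMALLOC_VPTE;             /* modules/vmalloc: load kernel VPTE  */
}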
*/ + return -1; + } + } + + for (i = n; i < end; i++) { + if (test_bit(i, arena->map)) { + start = i + 1; + goto again; + } + } + + for (i = n; i < end; i++) + __set_bit(i, arena->map); + + arena->hint = end; + + return n; +} + +static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages) +{ + unsigned long i; + + for (i = base; i < (base + npages); i++) + __clear_bit(i, arena->map); +} + +void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask) +{ + unsigned long i, tsbbase, order, sz, num_tsb_entries; + + num_tsb_entries = tsbsize / sizeof(iopte_t); + + /* Setup initial software IOMMU state. */ + spin_lock_init(&iommu->lock); + iommu->ctx_lowest_free = 1; + iommu->page_table_map_base = dma_offset; + iommu->dma_addr_mask = dma_addr_mask; + + /* Allocate and initialize the free area map. */ + sz = num_tsb_entries / 8; + sz = (sz + 7UL) & ~7UL; + iommu->arena.map = kmalloc(sz, GFP_KERNEL); + if (!iommu->arena.map) { + prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n"); + prom_halt(); + } + memset(iommu->arena.map, 0, sz); + iommu->arena.limit = num_tsb_entries; + + /* Allocate and initialize the dummy page which we + * set inactive IO PTEs to point to. + */ + iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0); + if (!iommu->dummy_page) { + prom_printf("PCI_IOMMU: Error, gfp(dummy_page) failed.\n"); + prom_halt(); + } + memset((void *)iommu->dummy_page, 0, PAGE_SIZE); + iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page); + + /* Now allocate and setup the IOMMU page table itself. */ + order = get_order(tsbsize); + tsbbase = __get_free_pages(GFP_KERNEL, order); + if (!tsbbase) { + prom_printf("PCI_IOMMU: Error, gfp(tsb) failed.\n"); + prom_halt(); + } + iommu->page_table = (iopte_t *)tsbbase; + + for (i = 0; i < num_tsb_entries; i++) iopte_make_dummy(iommu, &iommu->page_table[i]); } -static iopte_t *alloc_streaming_cluster(struct pci_iommu *iommu, unsigned long npages) +static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npages) { - iopte_t *iopte, *limit, *first; - unsigned long cnum, ent, flush_point; + long entry; - cnum = 0; - while ((1UL << cnum) < npages) - cnum++; - iopte = (iommu->page_table + - (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS))); + entry = pci_arena_alloc(iommu, npages); + if (unlikely(entry < 0)) + return NULL; - if (cnum == 0) - limit = (iommu->page_table + - iommu->lowest_consistent_map); - else - limit = (iopte + - (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS))); - - iopte += ((ent = iommu->alloc_info[cnum].next) << cnum); - flush_point = iommu->alloc_info[cnum].flush; - - first = iopte; - for (;;) { - if (IOPTE_IS_DUMMY(iommu, iopte)) { - if ((iopte + (1 << cnum)) >= limit) - ent = 0; - else - ent = ent + 1; - iommu->alloc_info[cnum].next = ent; - if (ent == flush_point) - __iommu_flushall(iommu); - break; - } - iopte += (1 << cnum); - ent++; - if (iopte >= limit) { - iopte = (iommu->page_table + - (cnum << - (iommu->page_table_sz_bits - PBM_LOGCLUSTERS))); - ent = 0; - } - if (ent == flush_point) - __iommu_flushall(iommu); - if (iopte == first) - goto bad; - } - - /* I've got your streaming cluster right here buddy boy... 
*/ - return iopte; - -bad: - printk(KERN_EMERG "pci_iommu: alloc_streaming_cluster of npages(%ld) failed!\n", - npages); - return NULL; + return iommu->page_table + entry; } -static void free_streaming_cluster(struct pci_iommu *iommu, dma_addr_t base, - unsigned long npages, unsigned long ctx) +static inline void free_npages(struct pci_iommu *iommu, dma_addr_t base, unsigned long npages) { - unsigned long cnum, ent; - - cnum = 0; - while ((1UL << cnum) < npages) - cnum++; - - ent = (base << (32 - IO_PAGE_SHIFT + PBM_LOGCLUSTERS - iommu->page_table_sz_bits)) - >> (32 + PBM_LOGCLUSTERS + cnum - iommu->page_table_sz_bits); - - /* If the global flush might not have caught this entry, - * adjust the flush point such that we will flush before - * ever trying to reuse it. - */ -#define between(X,Y,Z) (((Z) - (Y)) >= ((X) - (Y))) - if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush)) - iommu->alloc_info[cnum].flush = ent; -#undef between -} - -/* We allocate consistent mappings from the end of cluster zero. */ -static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long npages) -{ - iopte_t *iopte; - - iopte = iommu->page_table + (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)); - while (iopte > iommu->page_table) { - iopte--; - if (IOPTE_IS_DUMMY(iommu, iopte)) { - unsigned long tmp = npages; - - while (--tmp) { - iopte--; - if (!IOPTE_IS_DUMMY(iommu, iopte)) - break; - } - if (tmp == 0) { - u32 entry = (iopte - iommu->page_table); - - if (entry < iommu->lowest_consistent_map) - iommu->lowest_consistent_map = entry; - return iopte; - } - } - } - return NULL; + pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages); } static int iommu_alloc_ctx(struct pci_iommu *iommu) @@ -233,7 +224,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad struct pcidev_cookie *pcp; struct pci_iommu *iommu; iopte_t *iopte; - unsigned long flags, order, first_page, ctx; + unsigned long flags, order, first_page; void *ret; int npages; @@ -251,9 +242,10 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad iommu = pcp->pbm->iommu; spin_lock_irqsave(&iommu->lock, flags); - iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT); - if (iopte == NULL) { - spin_unlock_irqrestore(&iommu->lock, flags); + iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT); + spin_unlock_irqrestore(&iommu->lock, flags); + + if (unlikely(iopte == NULL)) { free_pages(first_page, order); return NULL; } @@ -262,31 +254,15 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad ((iopte - iommu->page_table) << IO_PAGE_SHIFT)); ret = (void *) first_page; npages = size >> IO_PAGE_SHIFT; - ctx = 0; - if (iommu->iommu_ctxflush) - ctx = iommu_alloc_ctx(iommu); first_page = __pa(first_page); while (npages--) { - iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) | + iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) | IOPTE_WRITE | (first_page & IOPTE_PAGE)); iopte++; first_page += IO_PAGE_SIZE; } - { - int i; - u32 daddr = *dma_addrp; - - npages = size >> IO_PAGE_SHIFT; - for (i = 0; i < npages; i++) { - pci_iommu_write(iommu->iommu_flush, daddr); - daddr += IO_PAGE_SIZE; - } - } - - spin_unlock_irqrestore(&iommu->lock, flags); - return ret; } @@ -296,7 +272,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_ struct pcidev_cookie *pcp; struct pci_iommu *iommu; iopte_t *iopte; - unsigned long flags, order, npages, i, ctx; + unsigned long flags, order, npages; npages = IO_PAGE_ALIGN(size) 
>> IO_PAGE_SHIFT; pcp = pdev->sysdata; @@ -306,46 +282,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_ spin_lock_irqsave(&iommu->lock, flags); - if ((iopte - iommu->page_table) == - iommu->lowest_consistent_map) { - iopte_t *walk = iopte + npages; - iopte_t *limit; - - limit = (iommu->page_table + - (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS))); - while (walk < limit) { - if (!IOPTE_IS_DUMMY(iommu, walk)) - break; - walk++; - } - iommu->lowest_consistent_map = - (walk - iommu->page_table); - } - - /* Data for consistent mappings cannot enter the streaming - * buffers, so we only need to update the TSB. We flush - * the IOMMU here as well to prevent conflicts with the - * streaming mapping deferred tlb flush scheme. - */ - - ctx = 0; - if (iommu->iommu_ctxflush) - ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL; - - for (i = 0; i < npages; i++, iopte++) - iopte_make_dummy(iommu, iopte); - - if (iommu->iommu_ctxflush) { - pci_iommu_write(iommu->iommu_ctxflush, ctx); - } else { - for (i = 0; i < npages; i++) { - u32 daddr = dvma + (i << IO_PAGE_SHIFT); - - pci_iommu_write(iommu->iommu_flush, daddr); - } - } - - iommu_free_ctx(iommu, ctx); + free_npages(iommu, dvma, npages); spin_unlock_irqrestore(&iommu->lock, flags); @@ -372,25 +309,27 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct iommu = pcp->pbm->iommu; strbuf = &pcp->pbm->stc; - if (direction == PCI_DMA_NONE) - BUG(); + if (unlikely(direction == PCI_DMA_NONE)) + goto bad_no_ctx; oaddr = (unsigned long)ptr; npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); npages >>= IO_PAGE_SHIFT; spin_lock_irqsave(&iommu->lock, flags); + base = alloc_npages(iommu, npages); + ctx = 0; + if (iommu->iommu_ctxflush) + ctx = iommu_alloc_ctx(iommu); + spin_unlock_irqrestore(&iommu->lock, flags); - base = alloc_streaming_cluster(iommu, npages); - if (base == NULL) + if (unlikely(!base)) goto bad; + bus_addr = (iommu->page_table_map_base + ((base - iommu->page_table) << IO_PAGE_SHIFT)); ret = bus_addr | (oaddr & ~IO_PAGE_MASK); base_paddr = __pa(oaddr & IO_PAGE_MASK); - ctx = 0; - if (iommu->iommu_ctxflush) - ctx = iommu_alloc_ctx(iommu); if (strbuf->strbuf_enabled) iopte_protection = IOPTE_STREAMING(ctx); else @@ -401,12 +340,13 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE) iopte_val(*base) = iopte_protection | base_paddr; - spin_unlock_irqrestore(&iommu->lock, flags); - return ret; bad: - spin_unlock_irqrestore(&iommu->lock, flags); + iommu_free_ctx(iommu, ctx); +bad_no_ctx: + if (printk_ratelimit()) + WARN_ON(1); return PCI_DMA_ERROR_CODE; } @@ -481,10 +421,13 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int struct pci_iommu *iommu; struct pci_strbuf *strbuf; iopte_t *base; - unsigned long flags, npages, ctx; + unsigned long flags, npages, ctx, i; - if (direction == PCI_DMA_NONE) - BUG(); + if (unlikely(direction == PCI_DMA_NONE)) { + if (printk_ratelimit()) + WARN_ON(1); + return; + } pcp = pdev->sysdata; iommu = pcp->pbm->iommu; @@ -510,13 +453,14 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int /* Step 1: Kick data out of streaming buffers if necessary. */ if (strbuf->strbuf_enabled) - pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); + pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, + npages, direction); - /* Step 2: Clear out first TSB entry. 
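/*
 * Sketch of the unmap side of the new scheme, condensed from the two
 * steps in this patch (the wrapper name is invented for illustration).
 * Every IOPTE covered by the mapping is pointed back at the dummy
 * page, then the IO page range is handed back to the arena bitmap.
 */
static void unmap_range_sketch(struct pci_iommu *iommu, dma_addr_t bus_addr,
			       unsigned long npages)
{
	unsigned long offset = bus_addr - iommu->page_table_map_base;
	iopte_t *base = iommu->page_table + (offset >> IO_PAGE_SHIFT);
	unsigned long i;

	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);	/* poison each entry */

	pci_arena_free(&iommu->arena, offset >> IO_PAGE_SHIFT, npages);
}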
*/ - iopte_make_dummy(iommu, base); + /* Step 2: Clear out TSB entries. */ + for (i = 0; i < npages; i++) + iopte_make_dummy(iommu, base + i); - free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base, - npages, ctx); + free_npages(iommu, bus_addr - iommu->page_table_map_base, npages); iommu_free_ctx(iommu, ctx); @@ -621,6 +565,8 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int pci_map_single(pdev, (page_address(sglist->page) + sglist->offset), sglist->length, direction); + if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE)) + return 0; sglist->dma_length = sglist->length; return 1; } @@ -629,21 +575,29 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int iommu = pcp->pbm->iommu; strbuf = &pcp->pbm->stc; - if (direction == PCI_DMA_NONE) - BUG(); + if (unlikely(direction == PCI_DMA_NONE)) + goto bad_no_ctx; /* Step 1: Prepare scatter list. */ npages = prepare_sg(sglist, nelems); - /* Step 2: Allocate a cluster. */ + /* Step 2: Allocate a cluster and context, if necessary. */ spin_lock_irqsave(&iommu->lock, flags); - base = alloc_streaming_cluster(iommu, npages); + base = alloc_npages(iommu, npages); + ctx = 0; + if (iommu->iommu_ctxflush) + ctx = iommu_alloc_ctx(iommu); + + spin_unlock_irqrestore(&iommu->lock, flags); + if (base == NULL) goto bad; - dma_base = iommu->page_table_map_base + ((base - iommu->page_table) << IO_PAGE_SHIFT); + + dma_base = iommu->page_table_map_base + + ((base - iommu->page_table) << IO_PAGE_SHIFT); /* Step 3: Normalize DMA addresses. */ used = nelems; @@ -656,30 +610,28 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int } used = nelems - used; - /* Step 4: Choose a context if necessary. */ - ctx = 0; - if (iommu->iommu_ctxflush) - ctx = iommu_alloc_ctx(iommu); - - /* Step 5: Create the mappings. */ + /* Step 4: Create the mappings. */ if (strbuf->strbuf_enabled) iopte_protection = IOPTE_STREAMING(ctx); else iopte_protection = IOPTE_CONSISTENT(ctx); if (direction != PCI_DMA_TODEVICE) iopte_protection |= IOPTE_WRITE; - fill_sg (base, sglist, used, nelems, iopte_protection); + + fill_sg(base, sglist, used, nelems, iopte_protection); + #ifdef VERIFY_SG verify_sglist(sglist, nelems, base, npages); #endif - spin_unlock_irqrestore(&iommu->lock, flags); - return used; bad: - spin_unlock_irqrestore(&iommu->lock, flags); - return PCI_DMA_ERROR_CODE; + iommu_free_ctx(iommu, ctx); +bad_no_ctx: + if (printk_ratelimit()) + WARN_ON(1); + return 0; } /* Unmap a set of streaming mode DMA translations. 
*/ @@ -692,8 +644,10 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, unsigned long flags, ctx, i, npages; u32 bus_addr; - if (direction == PCI_DMA_NONE) - BUG(); + if (unlikely(direction == PCI_DMA_NONE)) { + if (printk_ratelimit()) + WARN_ON(1); + } pcp = pdev->sysdata; iommu = pcp->pbm->iommu; @@ -705,7 +659,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, if (sglist[i].dma_length == 0) break; i--; - npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT; + npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - + bus_addr) >> IO_PAGE_SHIFT; base = iommu->page_table + ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT); @@ -726,11 +681,11 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, if (strbuf->strbuf_enabled) pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction); - /* Step 2: Clear out first TSB entry. */ - iopte_make_dummy(iommu, base); + /* Step 2: Clear out the TSB entries. */ + for (i = 0; i < npages; i++) + iopte_make_dummy(iommu, base + i); - free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base, - npages, ctx); + free_npages(iommu, bus_addr - iommu->page_table_map_base, npages); iommu_free_ctx(iommu, ctx); diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c index 6ed1ef25e0ac..c03ed5f49d31 100644 --- a/arch/sparc64/kernel/pci_psycho.c +++ b/arch/sparc64/kernel/pci_psycho.c @@ -1207,13 +1207,9 @@ static void psycho_scan_bus(struct pci_controller_info *p) static void psycho_iommu_init(struct pci_controller_info *p) { struct pci_iommu *iommu = p->pbm_A.iommu; - unsigned long tsbbase, i; + unsigned long i; u64 control; - /* Setup initial software IOMMU state. */ - spin_lock_init(&iommu->lock); - iommu->ctx_lowest_free = 1; - /* Register addresses. */ iommu->iommu_control = p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL; iommu->iommu_tsbbase = p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE; @@ -1240,40 +1236,10 @@ static void psycho_iommu_init(struct pci_controller_info *p) /* Leave diag mode enabled for full-flushing done * in pci_iommu.c */ + pci_iommu_table_init(iommu, IO_TSB_SIZE, 0xc0000000, 0xffffffff); - iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0); - if (!iommu->dummy_page) { - prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n"); - prom_halt(); - } - memset((void *)iommu->dummy_page, 0, PAGE_SIZE); - iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page); - - /* Using assumed page size 8K with 128K entries we need 1MB iommu page - * table (128K ioptes * 8 bytes per iopte). This is - * page order 7 on UltraSparc. - */ - tsbbase = __get_free_pages(GFP_KERNEL, get_order(IO_TSB_SIZE)); - if (!tsbbase) { - prom_printf("PSYCHO_IOMMU: Error, gfp(tsb) failed.\n"); - prom_halt(); - } - iommu->page_table = (iopte_t *)tsbbase; - iommu->page_table_sz_bits = 17; - iommu->page_table_map_base = 0xc0000000; - iommu->dma_addr_mask = 0xffffffff; - pci_iommu_table_init(iommu, IO_TSB_SIZE); - - /* We start with no consistent mappings. 
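/*
 * With pci_iommu_table_init() doing the software setup, every
 * converted controller init (Psycho, Sabre, and Schizo) reduces to the
 * same shape.  A schematic sketch -- the my_ctrl_* names are invented
 * for illustration:
 */
static void my_ctrl_iommu_init_sketch(struct pci_iommu *iommu,
				      unsigned long tsb_bytes,
				      u32 dvma_base, u32 dma_mask)
{
	/* Lock, context allocator, arena bitmap, dummy page and the
	 * IOMMU page table itself are all set up by the common helper.
	 */
	pci_iommu_table_init(iommu, tsb_bytes, dvma_base, dma_mask);

	/* Only the hardware programming stays controller-specific,
	 * e.g. pointing the chip at the freshly allocated TSB:
	 *   my_ctrl_write(TSBBASE_REG, __pa(iommu->page_table));
	 */
}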
*/ - iommu->lowest_consistent_map = - 1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS); - - for (i = 0; i < PBM_NCLUSTERS; i++) { - iommu->alloc_info[i].flush = 0; - iommu->alloc_info[i].next = 0; - } - - psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE, __pa(tsbbase)); + psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE, + __pa(iommu->page_table)); control = psycho_read(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL); control &= ~(PSYCHO_IOMMU_CTRL_TSBSZ | PSYCHO_IOMMU_CTRL_TBWSZ); @@ -1281,7 +1247,7 @@ static void psycho_iommu_init(struct pci_controller_info *p) psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL, control); /* If necessary, hook us up for starfire IRQ translations. */ - if(this_is_starfire) + if (this_is_starfire) p->starfire_cookie = starfire_hookup(p->pbm_A.portid); else p->starfire_cookie = NULL; diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c index 0ee6bd5b9ac6..da8e1364194f 100644 --- a/arch/sparc64/kernel/pci_sabre.c +++ b/arch/sparc64/kernel/pci_sabre.c @@ -1267,13 +1267,9 @@ static void sabre_iommu_init(struct pci_controller_info *p, u32 dma_mask) { struct pci_iommu *iommu = p->pbm_A.iommu; - unsigned long tsbbase, i, order; + unsigned long i; u64 control; - /* Setup initial software IOMMU state. */ - spin_lock_init(&iommu->lock); - iommu->ctx_lowest_free = 1; - /* Register addresses. */ iommu->iommu_control = p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL; iommu->iommu_tsbbase = p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE; @@ -1295,26 +1291,10 @@ static void sabre_iommu_init(struct pci_controller_info *p, /* Leave diag mode enabled for full-flushing done * in pci_iommu.c */ + pci_iommu_table_init(iommu, tsbsize * 1024 * 8, dvma_offset, dma_mask); - iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0); - if (!iommu->dummy_page) { - prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n"); - prom_halt(); - } - memset((void *)iommu->dummy_page, 0, PAGE_SIZE); - iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page); - - tsbbase = __get_free_pages(GFP_KERNEL, order = get_order(tsbsize * 1024 * 8)); - if (!tsbbase) { - prom_printf("SABRE_IOMMU: Error, gfp(tsb) failed.\n"); - prom_halt(); - } - iommu->page_table = (iopte_t *)tsbbase; - iommu->page_table_map_base = dvma_offset; - iommu->dma_addr_mask = dma_mask; - pci_iommu_table_init(iommu, PAGE_SIZE << order); - - sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE, __pa(tsbbase)); + sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE, + __pa(iommu->page_table)); control = sabre_read(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL); control &= ~(SABRE_IOMMUCTRL_TSBSZ | SABRE_IOMMUCTRL_TBWSZ); @@ -1322,11 +1302,9 @@ static void sabre_iommu_init(struct pci_controller_info *p, switch(tsbsize) { case 64: control |= SABRE_IOMMU_TSBSZ_64K; - iommu->page_table_sz_bits = 16; break; case 128: control |= SABRE_IOMMU_TSBSZ_128K; - iommu->page_table_sz_bits = 17; break; default: prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize); @@ -1334,15 +1312,6 @@ static void sabre_iommu_init(struct pci_controller_info *p, break; } sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control); - - /* We start with no consistent mappings. 
*/ - iommu->lowest_consistent_map = - 1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS); - - for (i = 0; i < PBM_NCLUSTERS; i++) { - iommu->alloc_info[i].flush = 0; - iommu->alloc_info[i].next = 0; - } } static void pbm_register_toplevel_resources(struct pci_controller_info *p, diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c index 331382e1a75d..d8c4e0919b4e 100644 --- a/arch/sparc64/kernel/pci_schizo.c +++ b/arch/sparc64/kernel/pci_schizo.c @@ -330,7 +330,7 @@ static int schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino) static void tomatillo_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2) { unsigned long sync_reg = (unsigned long) _arg2; - u64 mask = 1 << (__irq_ino(__irq(bucket)) & IMAP_INO); + u64 mask = 1UL << (__irq_ino(__irq(bucket)) & IMAP_INO); u64 val; int limit; @@ -1765,7 +1765,7 @@ static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm) static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm) { struct pci_iommu *iommu = pbm->iommu; - unsigned long tsbbase, i, tagbase, database, order; + unsigned long i, tagbase, database; u32 vdma[2], dma_mask; u64 control; int err, tsbsize; @@ -1800,10 +1800,6 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm) prom_halt(); }; - /* Setup initial software IOMMU state. */ - spin_lock_init(&iommu->lock); - iommu->ctx_lowest_free = 1; - /* Register addresses, SCHIZO has iommu ctx flushing. */ iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL; iommu->iommu_tsbbase = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE; @@ -1832,56 +1828,9 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm) /* Leave diag mode enabled for full-flushing done * in pci_iommu.c */ + pci_iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask); - iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0); - if (!iommu->dummy_page) { - prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n"); - prom_halt(); - } - memset((void *)iommu->dummy_page, 0, PAGE_SIZE); - iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page); - - /* Using assumed page size 8K with 128K entries we need 1MB iommu page - * table (128K ioptes * 8 bytes per iopte). This is - * page order 7 on UltraSparc. - */ - order = get_order(tsbsize * 8 * 1024); - tsbbase = __get_free_pages(GFP_KERNEL, order); - if (!tsbbase) { - prom_printf("%s: Error, gfp(tsb) failed.\n", pbm->name); - prom_halt(); - } - - iommu->page_table = (iopte_t *)tsbbase; - iommu->page_table_map_base = vdma[0]; - iommu->dma_addr_mask = dma_mask; - pci_iommu_table_init(iommu, PAGE_SIZE << order); - - switch (tsbsize) { - case 64: - iommu->page_table_sz_bits = 16; - break; - - case 128: - iommu->page_table_sz_bits = 17; - break; - - default: - prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize); - prom_halt(); - break; - }; - - /* We start with no consistent mappings. 
*/ - iommu->lowest_consistent_map = - 1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS); - - for (i = 0; i < PBM_NCLUSTERS; i++) { - iommu->alloc_info[i].flush = 0; - iommu->alloc_info[i].next = 0; - } - - schizo_write(iommu->iommu_tsbbase, __pa(tsbbase)); + schizo_write(iommu->iommu_tsbbase, __pa(iommu->page_table)); control = schizo_read(iommu->iommu_control); control &= ~(SCHIZO_IOMMU_CTRL_TSBSZ | SCHIZO_IOMMU_CTRL_TBWSZ); diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c index 946cee0257ea..9e8362ea3104 100644 --- a/arch/sparc64/kernel/power.c +++ b/arch/sparc64/kernel/power.c @@ -17,6 +17,7 @@ #include #include +#include #include #include @@ -100,46 +101,83 @@ again: return 0; } -static int __init has_button_interrupt(struct linux_ebus_device *edev) +static int __init has_button_interrupt(unsigned int irq, int prom_node) { - if (edev->irqs[0] == PCI_IRQ_NONE) + if (irq == PCI_IRQ_NONE) return 0; - if (!prom_node_has_property(edev->prom_node, "button")) + if (!prom_node_has_property(prom_node, "button")) return 0; return 1; } -void __init power_init(void) +static int __init power_probe_ebus(struct resource **resp, unsigned int *irq_p, int *prom_node_p) { struct linux_ebus *ebus; struct linux_ebus_device *edev; + + for_each_ebus(ebus) { + for_each_ebusdev(edev, ebus) { + if (!strcmp(edev->prom_name, "power")) { + *resp = &edev->resource[0]; + *irq_p = edev->irqs[0]; + *prom_node_p = edev->prom_node; + return 0; + } + } + } + return -ENODEV; +} + +static int __init power_probe_isa(struct resource **resp, unsigned int *irq_p, int *prom_node_p) +{ + struct sparc_isa_bridge *isa_bus; + struct sparc_isa_device *isa_dev; + + for_each_isa(isa_bus) { + for_each_isadev(isa_dev, isa_bus) { + if (!strcmp(isa_dev->prom_name, "power")) { + *resp = &isa_dev->resource; + *irq_p = isa_dev->irq; + *prom_node_p = isa_dev->prom_node; + return 0; + } + } + } + return -ENODEV; +} + +void __init power_init(void) +{ + struct resource *res = NULL; + unsigned int irq; + int prom_node; static int invoked; if (invoked) return; invoked = 1; - for_each_ebus(ebus) { - for_each_ebusdev(edev, ebus) { - if (!strcmp(edev->prom_name, "power")) - goto found; - } - } + if (!power_probe_ebus(&res, &irq, &prom_node)) + goto found; + + if (!power_probe_isa(&res, &irq, &prom_node)) + goto found; + return; found: - power_reg = ioremap(edev->resource[0].start, 0x4); + power_reg = ioremap(res->start, 0x4); printk("power: Control reg at %p ... 
", power_reg); poweroff_method = machine_halt; /* able to use the standard halt */ - if (has_button_interrupt(edev)) { + if (has_button_interrupt(irq, prom_node)) { if (kernel_thread(powerd, NULL, CLONE_FS) < 0) { printk("Failed to start power daemon.\n"); return; } printk("powerd running.\n"); - if (request_irq(edev->irqs[0], + if (request_irq(irq, power_handler, SA_SHIRQ, "power", NULL) < 0) printk("power: Error, cannot register IRQ handler.\n"); } else { diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c index 5efbff90d668..774ecbb8a031 100644 --- a/arch/sparc64/kernel/ptrace.c +++ b/arch/sparc64/kernel/ptrace.c @@ -31,6 +31,7 @@ #include #include #include +#include /* Returning from ptrace is a bit tricky because the syscall return * low level code assumes any value returned which is negative and @@ -132,12 +133,16 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) { unsigned long start = __pa(kaddr); unsigned long end = start + len; + unsigned long dcache_line_size; + + dcache_line_size = local_cpu_data().dcache_line_size; if (tlb_type == spitfire) { - for (; start < end; start += 32) + for (; start < end; start += dcache_line_size) spitfire_put_dcache_tag(start & 0x3fe0, 0x0); } else { - for (; start < end; start += 32) + start &= ~(dcache_line_size - 1); + for (; start < end; start += dcache_line_size) __asm__ __volatile__( "stxa %%g0, [%0] %1\n\t" "membar #Sync" @@ -150,8 +155,11 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, if (write && tlb_type == spitfire) { unsigned long start = (unsigned long) kaddr; unsigned long end = start + len; + unsigned long icache_line_size; - for (; start < end; start += 32) + icache_line_size = local_cpu_data().icache_line_size; + + for (; start < end; start += icache_line_size) flushi(start); } } diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S index fafd227735fa..090dcca00d2a 100644 --- a/arch/sparc64/kernel/rtrap.S +++ b/arch/sparc64/kernel/rtrap.S @@ -256,9 +256,8 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 brnz,pn %l3, kern_rtt mov PRIMARY_CONTEXT, %l7 ldxa [%l7 + %l7] ASI_DMMU, %l0 -cplus_rtrap_insn_1: - sethi %hi(0), %l1 - sllx %l1, 32, %l1 + sethi %hi(sparc64_kern_pri_nuc_bits), %l1 + ldx [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1 or %l0, %l1, %l0 stxa %l0, [%l7] ASI_DMMU flush %g6 @@ -313,53 +312,36 @@ kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5 wr %g1, FPRS_FEF, %fprs ldx [%o1 + %o5], %g1 add %g6, TI_XFSR, %o1 - membar #StoreLoad | #LoadLoad sll %o0, 8, %o2 add %g6, TI_FPREGS, %o3 brz,pn %l6, 1f add %g6, TI_FPREGS+0x40, %o4 + membar #Sync ldda [%o3 + %o2] ASI_BLK_P, %f0 ldda [%o4 + %o2] ASI_BLK_P, %f16 + membar #Sync 1: andcc %l2, FPRS_DU, %g0 be,pn %icc, 1f wr %g1, 0, %gsr add %o2, 0x80, %o2 + membar #Sync ldda [%o3 + %o2] ASI_BLK_P, %f32 ldda [%o4 + %o2] ASI_BLK_P, %f48 - 1: membar #Sync ldx [%o1 + %o5], %fsr 2: stb %l5, [%g6 + TI_FPDEPTH] ba,pt %xcc, rt_continue nop 5: wr %g0, FPRS_FEF, %fprs - membar #StoreLoad | #LoadLoad sll %o0, 8, %o2 add %g6, TI_FPREGS+0x80, %o3 add %g6, TI_FPREGS+0xc0, %o4 + membar #Sync ldda [%o3 + %o2] ASI_BLK_P, %f32 ldda [%o4 + %o2] ASI_BLK_P, %f48 membar #Sync wr %g0, FPRS_DU, %fprs ba,pt %xcc, rt_continue stb %l5, [%g6 + TI_FPDEPTH] - -cplus_rinsn_1: - sethi %uhi(CTX_CHEETAH_PLUS_NUC), %l1 - - .globl cheetah_plus_patch_rtrap -cheetah_plus_patch_rtrap: - /* We configure the dTLB512_0 for 4MB pages and the - * dTLB512_1 for 8K pages when in context 
zero. - */ - sethi %hi(cplus_rinsn_1), %o0 - sethi %hi(cplus_rtrap_insn_1), %o2 - lduw [%o0 + %lo(cplus_rinsn_1)], %o1 - or %o2, %lo(cplus_rtrap_insn_1), %o2 - stw %o1, [%o2] - flush %o2 - - retl - nop diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index ddbed3341a23..c1f34237cdf2 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c @@ -187,17 +187,13 @@ int prom_callback(long *args) } if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) { - unsigned long kernel_pctx = 0; - - if (tlb_type == cheetah_plus) - kernel_pctx |= (CTX_CHEETAH_PLUS_NUC | - CTX_CHEETAH_PLUS_CTX0); + extern unsigned long sparc64_kern_pri_context; /* Spitfire Errata #32 workaround */ __asm__ __volatile__("stxa %0, [%1] %2\n\t" "flush %%g6" : /* No outputs */ - : "r" (kernel_pctx), + : "r" (sparc64_kern_pri_context), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); @@ -464,8 +460,6 @@ static void __init boot_flags_init(char *commands) } } -extern int prom_probe_memory(void); -extern unsigned long start, end; extern void panic_setup(char *, int *); extern unsigned short root_flags; @@ -492,13 +486,8 @@ void register_prom_callbacks(void) "' linux-.soft2 to .soft2"); } -extern void paging_init(void); - void __init setup_arch(char **cmdline_p) { - unsigned long highest_paddr; - int i; - /* Initialize PROM console and command line. */ *cmdline_p = prom_getbootargs(); strcpy(saved_command_line, *cmdline_p); @@ -517,40 +506,6 @@ void __init setup_arch(char **cmdline_p) boot_flags_init(*cmdline_p); idprom_init(); - (void) prom_probe_memory(); - - /* In paging_init() we tip off this value to see if we need - * to change init_mm.pgd to point to the real alias mapping. - */ - phys_base = 0xffffffffffffffffUL; - highest_paddr = 0UL; - for (i = 0; sp_banks[i].num_bytes != 0; i++) { - unsigned long top; - - if (sp_banks[i].base_addr < phys_base) - phys_base = sp_banks[i].base_addr; - top = sp_banks[i].base_addr + - sp_banks[i].num_bytes; - if (highest_paddr < top) - highest_paddr = top; - } - pfn_base = phys_base >> PAGE_SHIFT; - - switch (tlb_type) { - default: - case spitfire: - kern_base = spitfire_get_itlb_data(sparc64_highest_locked_tlbent()); - kern_base &= _PAGE_PADDR_SF; - break; - - case cheetah: - case cheetah_plus: - kern_base = cheetah_get_litlb_data(sparc64_highest_locked_tlbent()); - kern_base &= _PAGE_PADDR; - break; - }; - - kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; if (!root_flags) root_mountflags &= ~MS_RDONLY; @@ -625,6 +580,9 @@ extern void smp_info(struct seq_file *); extern void smp_bogo(struct seq_file *); extern void mmu_info(struct seq_file *); +unsigned int dcache_parity_tl1_occurred; +unsigned int icache_parity_tl1_occurred; + static int show_cpuinfo(struct seq_file *m, void *__unused) { seq_printf(m, @@ -635,6 +593,8 @@ static int show_cpuinfo(struct seq_file *m, void *__unused) "type\t\t: sun4u\n" "ncpus probed\t: %ld\n" "ncpus active\t: %ld\n" + "D$ parity tl1\t: %u\n" + "I$ parity tl1\t: %u\n" #ifndef CONFIG_SMP "Cpu0Bogo\t: %lu.%02lu\n" "Cpu0ClkTck\t: %016lx\n" @@ -647,7 +607,9 @@ static int show_cpuinfo(struct seq_file *m, void *__unused) (prom_prev >> 8) & 0xff, prom_prev & 0xff, (long)num_possible_cpus(), - (long)num_online_cpus() + (long)num_online_cpus(), + dcache_parity_tl1_occurred, + icache_parity_tl1_occurred #ifndef CONFIG_SMP , cpu_data(0).udelay_val/(500000/HZ), (cpu_data(0).udelay_val/(5000/HZ)) % 100, diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index b4fc6a5462b2..b137fd63f5e1 100644 --- 
a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -93,6 +93,27 @@ void __init smp_store_cpu_info(int id) cpu_data(id).pte_cache[1] = NULL; cpu_data(id).pgd_cache = NULL; cpu_data(id).idle_volume = 1; + + cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size", + 16 * 1024); + cpu_data(id).dcache_line_size = + prom_getintdefault(cpu_node, "dcache-line-size", 32); + cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size", + 16 * 1024); + cpu_data(id).icache_line_size = + prom_getintdefault(cpu_node, "icache-line-size", 32); + cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size", + 4 * 1024 * 1024); + cpu_data(id).ecache_line_size = + prom_getintdefault(cpu_node, "ecache-line-size", 64); + printk("CPU[%d]: Caches " + "D[sz(%d):line_sz(%d)] " + "I[sz(%d):line_sz(%d)] " + "E[sz(%d):line_sz(%d)]\n", + id, + cpu_data(id).dcache_size, cpu_data(id).dcache_line_size, + cpu_data(id).icache_size, cpu_data(id).icache_line_size, + cpu_data(id).ecache_size, cpu_data(id).ecache_line_size); } static void smp_setup_percpu_timer(void); @@ -980,13 +1001,6 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs) preempt_enable(); } -extern unsigned long xcall_promstop; - -void smp_promstop_others(void) -{ - smp_cross_call(&xcall_promstop, 0, 0, 0); -} - #define prof_multiplier(__cpu) cpu_data(__cpu).multiplier #define prof_counter(__cpu) cpu_data(__cpu).counter diff --git a/arch/sparc64/kernel/sys32.S b/arch/sparc64/kernel/sys32.S index 5f9e4fae612e..9cd272ac3ac1 100644 --- a/arch/sparc64/kernel/sys32.S +++ b/arch/sparc64/kernel/sys32.S @@ -157,173 +157,199 @@ sys32_socketcall: /* %o0=call, %o1=args */ or %g2, %lo(__socketcall_table_begin), %g2 jmpl %g2 + %o0, %g0 nop +do_einval: + retl + mov -EINVAL, %o0 - /* Each entry is exactly 32 bytes. */ .align 32 __socketcall_table_begin: + + /* Each entry is exactly 32 bytes. 
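/*
 * In C terms, each 32-byte stub below fetches its 32-bit arguments
 * from the user-supplied array -- with every load covered by an
 * exception-table entry, so a fault lands in __retl_efault and returns
 * -EFAULT -- and then tail-calls the 64-bit syscall.  A hypothetical
 * C rendering of the sys_bind stub (the helper name is invented; the
 * real code stays in assembly to keep the fixed 32-byte entry size):
 */
static long compat_bind_sketch(u32 __user *args)
{
	int fd, addrlen;
	u32 uaddr;

	if (get_user(fd, &args[0]) ||		/* ldswa [%o1 + 0x0] */
	    get_user(uaddr, &args[1]) ||	/* lduwa [%o1 + 0x4] */
	    get_user(addrlen, &args[2]))	/* ldswa [%o1 + 0x8] */
		return -EFAULT;

	return sys_bind(fd, (struct sockaddr __user *) compat_ptr(uaddr),
			addrlen);
}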
*/ do_sys_socket: /* sys_socket(int, int, int) */ - ldswa [%o1 + 0x0] %asi, %o0 +1: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_socket), %g1 - ldswa [%o1 + 0x8] %asi, %o2 +2: ldswa [%o1 + 0x8] %asi, %o2 jmpl %g1 + %lo(sys_socket), %g0 - ldswa [%o1 + 0x4] %asi, %o1 +3: ldswa [%o1 + 0x4] %asi, %o1 nop nop nop do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */ - ldswa [%o1 + 0x0] %asi, %o0 +4: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_bind), %g1 - ldswa [%o1 + 0x8] %asi, %o2 +5: ldswa [%o1 + 0x8] %asi, %o2 jmpl %g1 + %lo(sys_bind), %g0 - lduwa [%o1 + 0x4] %asi, %o1 +6: lduwa [%o1 + 0x4] %asi, %o1 nop nop nop do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */ - ldswa [%o1 + 0x0] %asi, %o0 +7: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_connect), %g1 - ldswa [%o1 + 0x8] %asi, %o2 +8: ldswa [%o1 + 0x8] %asi, %o2 jmpl %g1 + %lo(sys_connect), %g0 - lduwa [%o1 + 0x4] %asi, %o1 +9: lduwa [%o1 + 0x4] %asi, %o1 nop nop nop do_sys_listen: /* sys_listen(int, int) */ - ldswa [%o1 + 0x0] %asi, %o0 +10: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_listen), %g1 jmpl %g1 + %lo(sys_listen), %g0 - ldswa [%o1 + 0x4] %asi, %o1 +11: ldswa [%o1 + 0x4] %asi, %o1 nop nop nop nop do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */ - ldswa [%o1 + 0x0] %asi, %o0 +12: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_accept), %g1 - lduwa [%o1 + 0x8] %asi, %o2 +13: lduwa [%o1 + 0x8] %asi, %o2 jmpl %g1 + %lo(sys_accept), %g0 - lduwa [%o1 + 0x4] %asi, %o1 +14: lduwa [%o1 + 0x4] %asi, %o1 nop nop nop do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */ - ldswa [%o1 + 0x0] %asi, %o0 +15: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_getsockname), %g1 - lduwa [%o1 + 0x8] %asi, %o2 +16: lduwa [%o1 + 0x8] %asi, %o2 jmpl %g1 + %lo(sys_getsockname), %g0 - lduwa [%o1 + 0x4] %asi, %o1 +17: lduwa [%o1 + 0x4] %asi, %o1 nop nop nop do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */ - ldswa [%o1 + 0x0] %asi, %o0 +18: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_getpeername), %g1 - lduwa [%o1 + 0x8] %asi, %o2 +19: lduwa [%o1 + 0x8] %asi, %o2 jmpl %g1 + %lo(sys_getpeername), %g0 - lduwa [%o1 + 0x4] %asi, %o1 +20: lduwa [%o1 + 0x4] %asi, %o1 nop nop nop do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */ - ldswa [%o1 + 0x0] %asi, %o0 +21: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_socketpair), %g1 - ldswa [%o1 + 0x8] %asi, %o2 - lduwa [%o1 + 0xc] %asi, %o3 +22: ldswa [%o1 + 0x8] %asi, %o2 +23: lduwa [%o1 + 0xc] %asi, %o3 jmpl %g1 + %lo(sys_socketpair), %g0 - ldswa [%o1 + 0x4] %asi, %o1 +24: ldswa [%o1 + 0x4] %asi, %o1 nop nop do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */ - ldswa [%o1 + 0x0] %asi, %o0 +25: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_send), %g1 - lduwa [%o1 + 0x8] %asi, %o2 - lduwa [%o1 + 0xc] %asi, %o3 +26: lduwa [%o1 + 0x8] %asi, %o2 +27: lduwa [%o1 + 0xc] %asi, %o3 jmpl %g1 + %lo(sys_send), %g0 - lduwa [%o1 + 0x4] %asi, %o1 +28: lduwa [%o1 + 0x4] %asi, %o1 nop nop do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */ - ldswa [%o1 + 0x0] %asi, %o0 +29: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_recv), %g1 - lduwa [%o1 + 0x8] %asi, %o2 - lduwa [%o1 + 0xc] %asi, %o3 +30: lduwa [%o1 + 0x8] %asi, %o2 +31: lduwa [%o1 + 0xc] %asi, %o3 jmpl %g1 + %lo(sys_recv), %g0 - lduwa [%o1 + 0x4] %asi, %o1 +32: lduwa [%o1 + 0x4] %asi, %o1 nop nop do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */ - ldswa [%o1 + 0x0] %asi, %o0 +33: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_sendto), %g1 - lduwa [%o1 + 0x8] %asi, %o2 - lduwa [%o1 + 
0xc] %asi, %o3 - lduwa [%o1 + 0x10] %asi, %o4 - ldswa [%o1 + 0x14] %asi, %o5 +34: lduwa [%o1 + 0x8] %asi, %o2 +35: lduwa [%o1 + 0xc] %asi, %o3 +36: lduwa [%o1 + 0x10] %asi, %o4 +37: ldswa [%o1 + 0x14] %asi, %o5 jmpl %g1 + %lo(sys_sendto), %g0 - lduwa [%o1 + 0x4] %asi, %o1 +38: lduwa [%o1 + 0x4] %asi, %o1 do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */ - ldswa [%o1 + 0x0] %asi, %o0 +39: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_recvfrom), %g1 - lduwa [%o1 + 0x8] %asi, %o2 - lduwa [%o1 + 0xc] %asi, %o3 - lduwa [%o1 + 0x10] %asi, %o4 - lduwa [%o1 + 0x14] %asi, %o5 +40: lduwa [%o1 + 0x8] %asi, %o2 +41: lduwa [%o1 + 0xc] %asi, %o3 +42: lduwa [%o1 + 0x10] %asi, %o4 +43: lduwa [%o1 + 0x14] %asi, %o5 jmpl %g1 + %lo(sys_recvfrom), %g0 - lduwa [%o1 + 0x4] %asi, %o1 +44: lduwa [%o1 + 0x4] %asi, %o1 do_sys_shutdown: /* sys_shutdown(int, int) */ - ldswa [%o1 + 0x0] %asi, %o0 +45: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(sys_shutdown), %g1 jmpl %g1 + %lo(sys_shutdown), %g0 - ldswa [%o1 + 0x4] %asi, %o1 +46: ldswa [%o1 + 0x4] %asi, %o1 nop nop nop nop do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */ - ldswa [%o1 + 0x0] %asi, %o0 +47: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(compat_sys_setsockopt), %g1 - ldswa [%o1 + 0x8] %asi, %o2 - lduwa [%o1 + 0xc] %asi, %o3 - ldswa [%o1 + 0x10] %asi, %o4 +48: ldswa [%o1 + 0x8] %asi, %o2 +49: lduwa [%o1 + 0xc] %asi, %o3 +50: ldswa [%o1 + 0x10] %asi, %o4 jmpl %g1 + %lo(compat_sys_setsockopt), %g0 - ldswa [%o1 + 0x4] %asi, %o1 +51: ldswa [%o1 + 0x4] %asi, %o1 nop do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */ - ldswa [%o1 + 0x0] %asi, %o0 +52: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(compat_sys_getsockopt), %g1 - ldswa [%o1 + 0x8] %asi, %o2 - lduwa [%o1 + 0xc] %asi, %o3 - lduwa [%o1 + 0x10] %asi, %o4 +53: ldswa [%o1 + 0x8] %asi, %o2 +54: lduwa [%o1 + 0xc] %asi, %o3 +55: lduwa [%o1 + 0x10] %asi, %o4 jmpl %g1 + %lo(compat_sys_getsockopt), %g0 - ldswa [%o1 + 0x4] %asi, %o1 +56: ldswa [%o1 + 0x4] %asi, %o1 nop do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */ - ldswa [%o1 + 0x0] %asi, %o0 +57: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(compat_sys_sendmsg), %g1 - lduwa [%o1 + 0x8] %asi, %o2 +58: lduwa [%o1 + 0x8] %asi, %o2 jmpl %g1 + %lo(compat_sys_sendmsg), %g0 - lduwa [%o1 + 0x4] %asi, %o1 +59: lduwa [%o1 + 0x4] %asi, %o1 nop nop nop do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */ - ldswa [%o1 + 0x0] %asi, %o0 +60: ldswa [%o1 + 0x0] %asi, %o0 sethi %hi(compat_sys_recvmsg), %g1 - lduwa [%o1 + 0x8] %asi, %o2 +61: lduwa [%o1 + 0x8] %asi, %o2 jmpl %g1 + %lo(compat_sys_recvmsg), %g0 - lduwa [%o1 + 0x4] %asi, %o1 +62: lduwa [%o1 + 0x4] %asi, %o1 nop nop nop -__socketcall_table_end: - -do_einval: - retl - mov -EINVAL, %o0 -do_efault: - retl - mov -EFAULT, %o0 .section __ex_table .align 4 - .word __socketcall_table_begin, 0, __socketcall_table_end, do_efault + .word 1b, __retl_efault, 2b, __retl_efault + .word 3b, __retl_efault, 4b, __retl_efault + .word 5b, __retl_efault, 6b, __retl_efault + .word 7b, __retl_efault, 8b, __retl_efault + .word 9b, __retl_efault, 10b, __retl_efault + .word 11b, __retl_efault, 12b, __retl_efault + .word 13b, __retl_efault, 14b, __retl_efault + .word 15b, __retl_efault, 16b, __retl_efault + .word 17b, __retl_efault, 18b, __retl_efault + .word 19b, __retl_efault, 20b, __retl_efault + .word 21b, __retl_efault, 22b, __retl_efault + .word 23b, __retl_efault, 24b, __retl_efault + .word 25b, __retl_efault, 26b, 
__retl_efault + .word 27b, __retl_efault, 28b, __retl_efault + .word 29b, __retl_efault, 30b, __retl_efault + .word 31b, __retl_efault, 32b, __retl_efault + .word 33b, __retl_efault, 34b, __retl_efault + .word 35b, __retl_efault, 36b, __retl_efault + .word 37b, __retl_efault, 38b, __retl_efault + .word 39b, __retl_efault, 40b, __retl_efault + .word 41b, __retl_efault, 42b, __retl_efault + .word 43b, __retl_efault, 44b, __retl_efault + .word 45b, __retl_efault, 46b, __retl_efault + .word 47b, __retl_efault, 48b, __retl_efault + .word 49b, __retl_efault, 50b, __retl_efault + .word 51b, __retl_efault, 52b, __retl_efault + .word 53b, __retl_efault, 54b, __retl_efault + .word 55b, __retl_efault, 56b, __retl_efault + .word 57b, __retl_efault, 58b, __retl_efault + .word 59b, __retl_efault, 60b, __retl_efault + .word 61b, __retl_efault, 62b, __retl_efault .previous diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S index 3a145fc39cf2..9478551cb020 100644 --- a/arch/sparc64/kernel/trampoline.S +++ b/arch/sparc64/kernel/trampoline.S @@ -119,8 +119,8 @@ startup_continue: sethi %hi(itlb_load), %g2 or %g2, %lo(itlb_load), %g2 stx %g2, [%sp + 2047 + 128 + 0x18] - sethi %hi(mmu_ihandle_cache), %g2 - lduw [%g2 + %lo(mmu_ihandle_cache)], %g2 + sethi %hi(prom_mmu_ihandle_cache), %g2 + lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 stx %g2, [%sp + 2047 + 128 + 0x20] sethi %hi(KERNBASE), %g2 stx %g2, [%sp + 2047 + 128 + 0x28] @@ -156,8 +156,8 @@ startup_continue: sethi %hi(itlb_load), %g2 or %g2, %lo(itlb_load), %g2 stx %g2, [%sp + 2047 + 128 + 0x18] - sethi %hi(mmu_ihandle_cache), %g2 - lduw [%g2 + %lo(mmu_ihandle_cache)], %g2 + sethi %hi(prom_mmu_ihandle_cache), %g2 + lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 stx %g2, [%sp + 2047 + 128 + 0x20] sethi %hi(KERNBASE + 0x400000), %g2 stx %g2, [%sp + 2047 + 128 + 0x28] @@ -190,8 +190,8 @@ do_dtlb: sethi %hi(dtlb_load), %g2 or %g2, %lo(dtlb_load), %g2 stx %g2, [%sp + 2047 + 128 + 0x18] - sethi %hi(mmu_ihandle_cache), %g2 - lduw [%g2 + %lo(mmu_ihandle_cache)], %g2 + sethi %hi(prom_mmu_ihandle_cache), %g2 + lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 stx %g2, [%sp + 2047 + 128 + 0x20] sethi %hi(KERNBASE), %g2 stx %g2, [%sp + 2047 + 128 + 0x28] @@ -228,8 +228,8 @@ do_dtlb: sethi %hi(dtlb_load), %g2 or %g2, %lo(dtlb_load), %g2 stx %g2, [%sp + 2047 + 128 + 0x18] - sethi %hi(mmu_ihandle_cache), %g2 - lduw [%g2 + %lo(mmu_ihandle_cache)], %g2 + sethi %hi(prom_mmu_ihandle_cache), %g2 + lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2 stx %g2, [%sp + 2047 + 128 + 0x20] sethi %hi(KERNBASE + 0x400000), %g2 stx %g2, [%sp + 2047 + 128 + 0x28] @@ -336,20 +336,13 @@ do_unlock: call init_irqwork_curcpu nop - BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f) - ba,pt %xcc, 2f - nop - -1: /* Start using proper page size encodings in ctx register. */ - sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3 + /* Start using proper page size encodings in ctx register. 
*/ + sethi %hi(sparc64_kern_pri_context), %g3 + ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 mov PRIMARY_CONTEXT, %g1 - sllx %g3, 32, %g3 - sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2 - or %g3, %g2, %g3 - stxa %g3, [%g1] ASI_DMMU + stxa %g2, [%g1] ASI_DMMU membar #Sync -2: rdpr %pstate, %o1 or %o1, PSTATE_IE, %o1 wrpr %o1, 0, %pstate diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index b280b2ef674f..5570e7bb22bb 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c @@ -189,19 +189,18 @@ void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, un if (regs->tstate & TSTATE_PRIV) { /* Test if this comes from uaccess places. */ - unsigned long fixup; - unsigned long g2 = regs->u_regs[UREG_G2]; + const struct exception_table_entry *entry; - if ((fixup = search_extables_range(regs->tpc, &g2))) { - /* Ouch, somebody is trying ugly VM hole tricks on us... */ + entry = search_exception_tables(regs->tpc); + if (entry) { + /* Ouch, somebody is trying VM hole tricks on us... */ #ifdef DEBUG_EXCEPTIONS printk("Exception: PC<%016lx> faddr\n", regs->tpc); - printk("EX_TABLE: insn<%016lx> fixup<%016lx> " - "g2<%016lx>\n", regs->tpc, fixup, g2); + printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n", + regs->tpc, entry->fixup); #endif - regs->tpc = fixup; + regs->tpc = entry->fixup; regs->tnpc = regs->tpc + 4; - regs->u_regs[UREG_G2] = g2; return; } /* Shit... */ @@ -758,26 +757,12 @@ void __init cheetah_ecache_flush_init(void) ecache_flush_size = (2 * largest_size); ecache_flush_linesize = smallest_linesize; - /* Discover a physically contiguous chunk of physical - * memory in 'sp_banks' of size ecache_flush_size calculated - * above. Store the physical base of this area at - * ecache_flush_physbase. - */ - for (node = 0; ; node++) { - if (sp_banks[node].num_bytes == 0) - break; - if (sp_banks[node].num_bytes >= ecache_flush_size) { - ecache_flush_physbase = sp_banks[node].base_addr; - break; - } - } + ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size); - /* Note: Zero would be a valid value of ecache_flush_physbase so - * don't use that as the success test. :-) - */ - if (sp_banks[node].num_bytes == 0) { + if (ecache_flush_physbase == ~0UL) { prom_printf("cheetah_ecache_flush_init: Cannot find %d byte " - "contiguous physical memory.\n", ecache_flush_size); + "contiguous physical memory.\n", + ecache_flush_size); prom_halt(); } @@ -869,14 +854,19 @@ static void cheetah_flush_ecache_line(unsigned long physaddr) */ static void __cheetah_flush_icache(void) { - unsigned long i; + unsigned int icache_size, icache_line_size; + unsigned long addr; + + icache_size = local_cpu_data().icache_size; + icache_line_size = local_cpu_data().icache_line_size; /* Clear the valid bits in all the tags. 
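/*
 * All of the cache flush loops in this file now take the same shape:
 * walk the cache using the size and line size probed from the PROM in
 * smp_store_cpu_info() instead of hard-coded 16K/32K sizes and 32-byte
 * lines.  For example, the D-cache tag flush boils down to the
 * following (assuming the usual sparc64 cpudata and ASI definitions):
 */
static void flush_dcache_sketch(void)
{
	unsigned int dcache_size = local_cpu_data().dcache_size;
	unsigned int dcache_line_size = local_cpu_data().dcache_line_size;
	unsigned long addr;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size)
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr), "i" (ASI_DCACHE_TAG));
}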
*/ - for (i = 0; i < (1 << 15); i += (1 << 5)) { + for (addr = 0; addr < icache_size; addr += icache_line_size) { __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" "membar #Sync" : /* no outputs */ - : "r" (i | (2 << 3)), "i" (ASI_IC_TAG)); + : "r" (addr | (2 << 3)), + "i" (ASI_IC_TAG)); } } @@ -904,13 +894,17 @@ static void cheetah_flush_icache(void) static void cheetah_flush_dcache(void) { - unsigned long i; + unsigned int dcache_size, dcache_line_size; + unsigned long addr; - for (i = 0; i < (1 << 16); i += (1 << 5)) { + dcache_size = local_cpu_data().dcache_size; + dcache_line_size = local_cpu_data().dcache_line_size; + + for (addr = 0; addr < dcache_size; addr += dcache_line_size) { __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" "membar #Sync" : /* no outputs */ - : "r" (i), "i" (ASI_DCACHE_TAG)); + : "r" (addr), "i" (ASI_DCACHE_TAG)); } } @@ -921,24 +915,29 @@ static void cheetah_flush_dcache(void) */ static void cheetah_plus_zap_dcache_parity(void) { - unsigned long i; + unsigned int dcache_size, dcache_line_size; + unsigned long addr; - for (i = 0; i < (1 << 16); i += (1 << 5)) { - unsigned long tag = (i >> 14); - unsigned long j; + dcache_size = local_cpu_data().dcache_size; + dcache_line_size = local_cpu_data().dcache_line_size; + + for (addr = 0; addr < dcache_size; addr += dcache_line_size) { + unsigned long tag = (addr >> 14); + unsigned long line; __asm__ __volatile__("membar #Sync\n\t" "stxa %0, [%1] %2\n\t" "membar #Sync" : /* no outputs */ - : "r" (tag), "r" (i), + : "r" (tag), "r" (addr), "i" (ASI_DCACHE_UTAG)); - for (j = i; j < i + (1 << 5); j += (1 << 3)) + for (line = addr; line < addr + dcache_line_size; line += 8) __asm__ __volatile__("membar #Sync\n\t" "stxa %%g0, [%0] %1\n\t" "membar #Sync" : /* no outputs */ - : "r" (j), "i" (ASI_DCACHE_DATA)); + : "r" (line), + "i" (ASI_DCACHE_DATA)); } } @@ -1332,16 +1331,12 @@ static int cheetah_fix_ce(unsigned long physaddr) /* Return non-zero if PADDR is a valid physical memory address. */ static int cheetah_check_main_memory(unsigned long paddr) { - int i; + unsigned long vaddr = PAGE_OFFSET + paddr; - for (i = 0; ; i++) { - if (sp_banks[i].num_bytes == 0) - break; - if (paddr >= sp_banks[i].base_addr && - paddr < (sp_banks[i].base_addr + sp_banks[i].num_bytes)) - return 1; - } - return 0; + if (vaddr > (unsigned long) high_memory) + return 0; + + return kern_addr_valid(vaddr); } void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar) @@ -1596,10 +1591,10 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned /* OK, usermode access. */ recoverable = 1; } else { - unsigned long g2 = regs->u_regs[UREG_G2]; - unsigned long fixup = search_extables_range(regs->tpc, &g2); + const struct exception_table_entry *entry; - if (fixup != 0UL) { + entry = search_exception_tables(regs->tpc); + if (entry) { /* OK, kernel access to userspace. */ recoverable = 1; @@ -1618,9 +1613,8 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned * recoverable condition. 
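/*
 * Every kernel-side fault fixup in this file now uses the same generic
 * idiom: look the faulting PC up in the exception table and, if an
 * entry exists, resume at its fixup address -- no more %g2 juggling or
 * sparc64-specific range entries.  As a standalone sketch (the helper
 * name is invented for illustration):
 */
static int fixup_exception_sketch(struct pt_regs *regs)
{
	const struct exception_table_entry *entry;

	entry = search_exception_tables(regs->tpc);
	if (!entry)
		return 0;		/* no fixup -> genuinely fatal */

	regs->tpc = entry->fixup;	/* resume at the fixup stub */
	regs->tnpc = regs->tpc + 4;
	return 1;
}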
*/ if (recoverable) { - regs->tpc = fixup; + regs->tpc = entry->fixup; regs->tnpc = regs->tpc + 4; - regs->u_regs[UREG_G2] = g2; } } } diff --git a/arch/sparc64/kernel/una_asm.S b/arch/sparc64/kernel/una_asm.S index da48400bcc95..1f5b5b708ce7 100644 --- a/arch/sparc64/kernel/una_asm.S +++ b/arch/sparc64/kernel/una_asm.S @@ -6,13 +6,6 @@ .text -kernel_unaligned_trap_fault: - call kernel_mna_trap_fault - nop - retl - nop - .size kern_unaligned_trap_fault, .-kern_unaligned_trap_fault - .globl __do_int_store __do_int_store: rd %asi, %o4 @@ -51,24 +44,24 @@ __do_int_store: 0: wr %o4, 0x0, %asi retl - nop + mov 0, %o0 .size __do_int_store, .-__do_int_store .section __ex_table - .word 4b, kernel_unaligned_trap_fault - .word 5b, kernel_unaligned_trap_fault - .word 6b, kernel_unaligned_trap_fault - .word 7b, kernel_unaligned_trap_fault - .word 8b, kernel_unaligned_trap_fault - .word 9b, kernel_unaligned_trap_fault - .word 10b, kernel_unaligned_trap_fault - .word 11b, kernel_unaligned_trap_fault - .word 12b, kernel_unaligned_trap_fault - .word 13b, kernel_unaligned_trap_fault - .word 14b, kernel_unaligned_trap_fault - .word 15b, kernel_unaligned_trap_fault - .word 16b, kernel_unaligned_trap_fault - .word 17b, kernel_unaligned_trap_fault + .word 4b, __retl_efault + .word 5b, __retl_efault + .word 6b, __retl_efault + .word 7b, __retl_efault + .word 8b, __retl_efault + .word 9b, __retl_efault + .word 10b, __retl_efault + .word 11b, __retl_efault + .word 12b, __retl_efault + .word 13b, __retl_efault + .word 14b, __retl_efault + .word 15b, __retl_efault + .word 16b, __retl_efault + .word 17b, __retl_efault .previous .globl do_int_load @@ -133,21 +126,21 @@ do_int_load: 0: wr %o5, 0x0, %asi retl - nop + mov 0, %o0 .size __do_int_load, .-__do_int_load .section __ex_table - .word 4b, kernel_unaligned_trap_fault - .word 5b, kernel_unaligned_trap_fault - .word 6b, kernel_unaligned_trap_fault - .word 7b, kernel_unaligned_trap_fault - .word 8b, kernel_unaligned_trap_fault - .word 9b, kernel_unaligned_trap_fault - .word 10b, kernel_unaligned_trap_fault - .word 11b, kernel_unaligned_trap_fault - .word 12b, kernel_unaligned_trap_fault - .word 13b, kernel_unaligned_trap_fault - .word 14b, kernel_unaligned_trap_fault - .word 15b, kernel_unaligned_trap_fault - .word 16b, kernel_unaligned_trap_fault + .word 4b, __retl_efault + .word 5b, __retl_efault + .word 6b, __retl_efault + .word 7b, __retl_efault + .word 8b, __retl_efault + .word 9b, __retl_efault + .word 10b, __retl_efault + .word 11b, __retl_efault + .word 12b, __retl_efault + .word 13b, __retl_efault + .word 14b, __retl_efault + .word 15b, __retl_efault + .word 16b, __retl_efault .previous diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c index 42718f6a7d36..70faf630603b 100644 --- a/arch/sparc64/kernel/unaligned.c +++ b/arch/sparc64/kernel/unaligned.c @@ -180,14 +180,14 @@ static void __attribute_used__ unaligned_panic(char *str, struct pt_regs *regs) die_if_kernel(str, regs); } -extern void do_int_load(unsigned long *dest_reg, int size, - unsigned long *saddr, int is_signed, int asi); +extern int do_int_load(unsigned long *dest_reg, int size, + unsigned long *saddr, int is_signed, int asi); -extern void __do_int_store(unsigned long *dst_addr, int size, - unsigned long src_val, int asi); +extern int __do_int_store(unsigned long *dst_addr, int size, + unsigned long src_val, int asi); -static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr, - struct pt_regs *regs, int asi, int orig_asi) +static inline int 
do_int_store(int reg_num, int size, unsigned long *dst_addr, + struct pt_regs *regs, int asi, int orig_asi) { unsigned long zero = 0; unsigned long *src_val_p = &zero; @@ -219,7 +219,7 @@ static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr, break; }; } - __do_int_store(dst_addr, size, src_val, asi); + return __do_int_store(dst_addr, size, src_val, asi); } static inline void advance(struct pt_regs *regs) @@ -242,14 +242,14 @@ static inline int ok_for_kernel(unsigned int insn) return !floating_point_load_or_store_p(insn); } -void kernel_mna_trap_fault(void) +static void kernel_mna_trap_fault(void) { struct pt_regs *regs = current_thread_info()->kern_una_regs; unsigned int insn = current_thread_info()->kern_una_insn; - unsigned long g2 = regs->u_regs[UREG_G2]; - unsigned long fixup = search_extables_range(regs->tpc, &g2); + const struct exception_table_entry *entry; - if (!fixup) { + entry = search_exception_tables(regs->tpc); + if (!entry) { unsigned long address; address = compute_effective_address(regs, insn, @@ -270,9 +270,8 @@ void kernel_mna_trap_fault(void) die_if_kernel("Oops", regs); /* Not reached */ } - regs->tpc = fixup; + regs->tpc = entry->fixup; regs->tnpc = regs->tpc + 4; - regs->u_regs [UREG_G2] = g2; regs->tstate &= ~TSTATE_ASI; regs->tstate |= (ASI_AIUS << 24UL); @@ -294,8 +293,8 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u kernel_mna_trap_fault(); } else { - unsigned long addr; - int orig_asi, asi; + unsigned long addr, *reg_addr; + int orig_asi, asi, err; addr = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f)); @@ -319,11 +318,12 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u }; switch (dir) { case load: - do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs), - size, (unsigned long *) addr, - decode_signedness(insn), asi); - if (unlikely(asi != orig_asi)) { - unsigned long val_in = *(unsigned long *) addr; + reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs); + err = do_int_load(reg_addr, size, + (unsigned long *) addr, + decode_signedness(insn), asi); + if (likely(!err) && unlikely(asi != orig_asi)) { + unsigned long val_in = *reg_addr; switch (size) { case 2: val_in = swab16(val_in); @@ -339,21 +339,24 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u BUG(); break; }; - *(unsigned long *) addr = val_in; + *reg_addr = val_in; } break; case store: - do_int_store(((insn>>25)&0x1f), size, - (unsigned long *) addr, regs, - asi, orig_asi); + err = do_int_store(((insn>>25)&0x1f), size, + (unsigned long *) addr, regs, + asi, orig_asi); break; default: panic("Impossible kernel unaligned trap."); /* Not reached... 
*/ } - advance(regs); + if (unlikely(err)) + kernel_mna_trap_fault(); + else + advance(regs); } } diff --git a/arch/sparc64/kernel/us3_cpufreq.c b/arch/sparc64/kernel/us3_cpufreq.c index 9080e7cd4bb0..0340041f6143 100644 --- a/arch/sparc64/kernel/us3_cpufreq.c +++ b/arch/sparc64/kernel/us3_cpufreq.c @@ -208,7 +208,10 @@ static int __init us3_freq_init(void) impl = ((ver >> 32) & 0xffff); if (manuf == CHEETAH_MANUF && - (impl == CHEETAH_IMPL || impl == CHEETAH_PLUS_IMPL)) { + (impl == CHEETAH_IMPL || + impl == CHEETAH_PLUS_IMPL || + impl == JAGUAR_IMPL || + impl == PANTHER_IMPL)) { struct cpufreq_driver *driver; ret = -ENOMEM; diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S index f47d0be39378..2af0cf0a8640 100644 --- a/arch/sparc64/kernel/vmlinux.lds.S +++ b/arch/sparc64/kernel/vmlinux.lds.S @@ -9,8 +9,7 @@ ENTRY(_start) jiffies = jiffies_64; SECTIONS { - swapper_pmd_dir = 0x0000000000402000; - empty_pg_dir = 0x0000000000403000; + swapper_low_pmd_dir = 0x0000000000402000; . = 0x4000; .text 0x0000000000404000 : { diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S index 99c809a1e5ac..39160926267b 100644 --- a/arch/sparc64/kernel/winfixup.S +++ b/arch/sparc64/kernel/winfixup.S @@ -16,23 +16,14 @@ .text set_pcontext: -cplus_winfixup_insn_1: - sethi %hi(0), %l1 + sethi %hi(sparc64_kern_pri_context), %l1 + ldx [%l1 + %lo(sparc64_kern_pri_context)], %l1 mov PRIMARY_CONTEXT, %g1 - sllx %l1, 32, %l1 -cplus_winfixup_insn_2: - sethi %hi(0), %g2 - or %l1, %g2, %l1 stxa %l1, [%g1] ASI_DMMU flush %g6 retl nop -cplus_wfinsn_1: - sethi %uhi(CTX_CHEETAH_PLUS_NUC), %l1 -cplus_wfinsn_2: - sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2 - .align 32 /* Here are the rules, pay attention. @@ -395,23 +386,3 @@ window_dax_from_user_common: add %sp, PTREGS_OFF, %o0 ba,pt %xcc, rtrap clr %l6 - - - .globl cheetah_plus_patch_winfixup -cheetah_plus_patch_winfixup: - sethi %hi(cplus_wfinsn_1), %o0 - sethi %hi(cplus_winfixup_insn_1), %o2 - lduw [%o0 + %lo(cplus_wfinsn_1)], %o1 - or %o2, %lo(cplus_winfixup_insn_1), %o2 - stw %o1, [%o2] - flush %o2 - - sethi %hi(cplus_wfinsn_2), %o0 - sethi %hi(cplus_winfixup_insn_2), %o2 - lduw [%o0 + %lo(cplus_wfinsn_2)], %o1 - or %o2, %lo(cplus_winfixup_insn_2), %o2 - stw %o1, [%o2] - flush %o2 - - retl - nop diff --git a/arch/sparc64/lib/VISsave.S b/arch/sparc64/lib/VISsave.S index 4e18989bd602..a0ded5c5aa5c 100644 --- a/arch/sparc64/lib/VISsave.S +++ b/arch/sparc64/lib/VISsave.S @@ -59,15 +59,17 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3 be,pn %icc, 9b add %g6, TI_FPREGS, %g2 andcc %o5, FPRS_DL, %g0 - membar #StoreStore | #LoadStore be,pn %icc, 4f add %g6, TI_FPREGS+0x40, %g3 + membar #Sync stda %f0, [%g2 + %g1] ASI_BLK_P stda %f16, [%g3 + %g1] ASI_BLK_P + membar #Sync andcc %o5, FPRS_DU, %g0 be,pn %icc, 5f 4: add %g1, 128, %g1 + membar #Sync stda %f32, [%g2 + %g1] ASI_BLK_P stda %f48, [%g3 + %g1] ASI_BLK_P @@ -87,7 +89,7 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3 sll %g1, 5, %g1 add %g6, TI_FPREGS+0xc0, %g3 wr %g0, FPRS_FEF, %fprs - membar #StoreStore | #LoadStore + membar #Sync stda %f32, [%g2 + %g1] ASI_BLK_P stda %f48, [%g3 + %g1] ASI_BLK_P membar #Sync @@ -128,8 +130,8 @@ VISenterhalf: be,pn %icc, 4f add %g6, TI_FPREGS, %g2 - membar #StoreStore | #LoadStore add %g6, TI_FPREGS+0x40, %g3 + membar #Sync stda %f0, [%g2 + %g1] ASI_BLK_P stda %f16, [%g3 + %g1] ASI_BLK_P membar #Sync diff --git a/arch/sparc64/lib/strncpy_from_user.S b/arch/sparc64/lib/strncpy_from_user.S index 09cbbaa0ebf4..e1264650ca7a 100644 --- 
a/arch/sparc64/lib/strncpy_from_user.S +++ b/arch/sparc64/lib/strncpy_from_user.S @@ -125,15 +125,11 @@ __strncpy_from_user: add %o2, %o3, %o0 .size __strncpy_from_user, .-__strncpy_from_user - .section .fixup,#alloc,#execinstr - .align 4 -4: retl - mov -EFAULT, %o0 - .section __ex_table,#alloc .align 4 - .word 60b, 4b - .word 61b, 4b - .word 62b, 4b - .word 63b, 4b - .word 64b, 4b + .word 60b, __retl_efault + .word 61b, __retl_efault + .word 62b, __retl_efault + .word 63b, __retl_efault + .word 64b, __retl_efault + .previous diff --git a/arch/sparc64/lib/user_fixup.c b/arch/sparc64/lib/user_fixup.c index 0278e34125db..19d1fdb17d0e 100644 --- a/arch/sparc64/lib/user_fixup.c +++ b/arch/sparc64/lib/user_fixup.c @@ -11,61 +11,56 @@ /* Calculating the exact fault address when using * block loads and stores can be very complicated. + * * Instead of trying to be clever and handling all * of the cases, just fix things up simply here. */ +static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset) +{ + unsigned long fault_addr = current_thread_info()->fault_address; + unsigned long end = start + size; + + if (fault_addr < start || fault_addr >= end) { + *offset = 0; + } else { + *offset = start - fault_addr; + size = end - fault_addr; + } + return size; +} + unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size) { - char *dst = to; - const char __user *src = from; + unsigned long offset; - while (size) { - if (__get_user(*dst, src)) - break; - dst++; - src++; - size--; - } - - if (size) - memset(dst, 0, size); + size = compute_size((unsigned long) from, size, &offset); + if (likely(size)) + memset(to + offset, 0, size); return size; } unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size) { - char __user *dst = to; - const char *src = from; + unsigned long offset; - while (size) { - if (__put_user(*src, dst)) - break; - dst++; - src++; - size--; - } - - return size; + return compute_size((unsigned long) to, size, &offset); } unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size) { - char __user *dst = to; - char __user *src = from; + unsigned long fault_addr = current_thread_info()->fault_address; + unsigned long start = (unsigned long) to; + unsigned long end = start + size; - while (size) { - char tmp; + if (fault_addr >= start && fault_addr < end) + return end - fault_addr; - if (__get_user(tmp, src)) - break; - if (__put_user(tmp, dst)) - break; - dst++; - src++; - size--; - } + start = (unsigned long) from; + end = start + size; + if (fault_addr >= start && fault_addr < end) + return end - fault_addr; return size; } diff --git a/arch/sparc64/mm/Makefile b/arch/sparc64/mm/Makefile index cda87333a77b..9d0960e69f48 100644 --- a/arch/sparc64/mm/Makefile +++ b/arch/sparc64/mm/Makefile @@ -5,6 +5,6 @@ EXTRA_AFLAGS := -ansi EXTRA_CFLAGS := -Werror -obj-y := ultra.o tlb.o fault.o init.o generic.o extable.o +obj-y := ultra.o tlb.o fault.o init.o generic.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o diff --git a/arch/sparc64/mm/extable.c b/arch/sparc64/mm/extable.c deleted file mode 100644 index ec334297ff4f..000000000000 --- a/arch/sparc64/mm/extable.c +++ /dev/null @@ -1,80 +0,0 @@ -/* - * linux/arch/sparc64/mm/extable.c - */ - -#include -#include -#include - -extern const struct exception_table_entry __start___ex_table[]; -extern const struct exception_table_entry __stop___ex_table[]; - -void sort_extable(struct exception_table_entry *start, - struct 
exception_table_entry *finish) -{ -} - -/* Caller knows they are in a range if ret->fixup == 0 */ -const struct exception_table_entry * -search_extable(const struct exception_table_entry *start, - const struct exception_table_entry *last, - unsigned long value) -{ - const struct exception_table_entry *walk; - - /* Single insn entries are encoded as: - * word 1: insn address - * word 2: fixup code address - * - * Range entries are encoded as: - * word 1: first insn address - * word 2: 0 - * word 3: last insn address + 4 bytes - * word 4: fixup code address - * - * See asm/uaccess.h for more details. - */ - - /* 1. Try to find an exact match. */ - for (walk = start; walk <= last; walk++) { - if (walk->fixup == 0) { - /* A range entry, skip both parts. */ - walk++; - continue; - } - - if (walk->insn == value) - return walk; - } - - /* 2. Try to find a range match. */ - for (walk = start; walk <= (last - 1); walk++) { - if (walk->fixup) - continue; - - if (walk[0].insn <= value && walk[1].insn > value) - return walk; - - walk++; - } - - return NULL; -} - -/* Special extable search, which handles ranges. Returns fixup */ -unsigned long search_extables_range(unsigned long addr, unsigned long *g2) -{ - const struct exception_table_entry *entry; - - entry = search_exception_tables(addr); - if (!entry) - return 0; - - /* Inside range? Fix g2 and return correct fixup */ - if (!entry->fixup) { - *g2 = (addr - entry->insn) / 4; - return (entry + 1)->fixup; - } - - return entry->fixup; -} diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c index db1e3310e907..31fbc67719a1 100644 --- a/arch/sparc64/mm/fault.c +++ b/arch/sparc64/mm/fault.c @@ -32,8 +32,6 @@ #define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0])) -extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS]; - /* * To debug kernel to catch accesses to certain virtual/physical addresses. * Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints. @@ -71,53 +69,6 @@ void set_brkpt(unsigned long addr, unsigned char mask, int flags, int mode) : "memory"); } -/* Nice, simple, prom library does all the sweating for us. ;) */ -unsigned long __init prom_probe_memory (void) -{ - register struct linux_mlist_p1275 *mlist; - register unsigned long bytes, base_paddr, tally; - register int i; - - i = 0; - mlist = *prom_meminfo()->p1275_available; - bytes = tally = mlist->num_bytes; - base_paddr = mlist->start_adr; - - sp_banks[0].base_addr = base_paddr; - sp_banks[0].num_bytes = bytes; - - while (mlist->theres_more != (void *) 0) { - i++; - mlist = mlist->theres_more; - bytes = mlist->num_bytes; - tally += bytes; - if (i >= SPARC_PHYS_BANKS-1) { - printk ("The machine has more banks than " - "this kernel can support\n" - "Increase the SPARC_PHYS_BANKS " - "setting (currently %d)\n", - SPARC_PHYS_BANKS); - i = SPARC_PHYS_BANKS-1; - break; - } - - sp_banks[i].base_addr = mlist->start_adr; - sp_banks[i].num_bytes = mlist->num_bytes; - } - - i++; - sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL; - sp_banks[i].num_bytes = 0; - - /* Now mask all bank sizes on a page boundary, it is all we can - * use anyways. 
- */ - for (i = 0; sp_banks[i].num_bytes != 0; i++) - sp_banks[i].num_bytes &= PAGE_MASK; - - return tally; -} - static void __kprobes unhandled_fault(unsigned long address, struct task_struct *tsk, struct pt_regs *regs) @@ -242,7 +193,6 @@ static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn) static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code, unsigned int insn, unsigned long address) { - unsigned long g2; unsigned char asi = ASI_P; if ((!insn) && (regs->tstate & TSTATE_PRIV)) @@ -273,11 +223,9 @@ static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code, } } - g2 = regs->u_regs[UREG_G2]; - /* Is this in ex_table? */ if (regs->tstate & TSTATE_PRIV) { - unsigned long fixup; + const struct exception_table_entry *entry; if (asi == ASI_P && (insn & 0xc0800000) == 0xc0800000) { if (insn & 0x2000) @@ -288,10 +236,9 @@ static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code, /* Look in asi.h: All _S asis have LS bit set */ if ((asi & 0x1) && - (fixup = search_extables_range(regs->tpc, &g2))) { - regs->tpc = fixup; + (entry = search_exception_tables(regs->tpc))) { + regs->tpc = entry->fixup; regs->tnpc = regs->tpc + 4; - regs->u_regs[UREG_G2] = g2; return; } } else { @@ -461,7 +408,7 @@ good_area: } up_read(&mm->mmap_sem); - goto fault_done; + return; /* * Something tried to access memory that isn't in our memory map.. @@ -473,8 +420,7 @@ bad_area: handle_kernel_fault: do_kernel_fault(regs, si_code, fault_code, insn, address); - - goto fault_done; + return; /* * We ran out of memory, or some other thing happened to us that made @@ -505,9 +451,4 @@ do_sigbus: /* Kernel mode? Handle exceptions or die */ if (regs->tstate & TSTATE_PRIV) goto handle_kernel_fault; - -fault_done: - /* These values are no longer needed, clear them. */ - set_thread_fault_code(0); - current_thread_info()->fault_address = 0; } diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index fdb1ebb308c9..1e44ee26cee8 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c @@ -20,6 +20,8 @@ #include #include #include +#include +#include #include #include @@ -40,24 +42,80 @@ extern void device_scan(void); -struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS]; +#define MAX_BANKS 32 -unsigned long *sparc64_valid_addr_bitmap; +static struct linux_prom64_registers pavail[MAX_BANKS] __initdata; +static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata; +static int pavail_ents __initdata; +static int pavail_rescan_ents __initdata; + +static int cmp_p64(const void *a, const void *b) +{ + const struct linux_prom64_registers *x = a, *y = b; + + if (x->phys_addr > y->phys_addr) + return 1; + if (x->phys_addr < y->phys_addr) + return -1; + return 0; +} + +static void __init read_obp_memory(const char *property, + struct linux_prom64_registers *regs, + int *num_ents) +{ + int node = prom_finddevice("/memory"); + int prop_size = prom_getproplen(node, property); + int ents, ret, i; + + ents = prop_size / sizeof(struct linux_prom64_registers); + if (ents > MAX_BANKS) { + prom_printf("The machine has more %s property entries than " + "this kernel can support (%d).\n", + property, MAX_BANKS); + prom_halt(); + } + + ret = prom_getproperty(node, property, (char *) regs, prop_size); + if (ret == -1) { + prom_printf("Couldn't get %s property from /memory.\n"); + prom_halt(); + } + + *num_ents = ents; + + /* Sanitize what we got from the firmware, by page aligning + * everything. 
+ */ + for (i = 0; i < ents; i++) { + unsigned long base, size; + + base = regs[i].phys_addr; + size = regs[i].reg_size; + + size &= PAGE_MASK; + if (base & ~PAGE_MASK) { + unsigned long new_base = PAGE_ALIGN(base); + + size -= new_base - base; + if ((long) size < 0L) + size = 0UL; + base = new_base; + } + regs[i].phys_addr = base; + regs[i].reg_size = size; + } + sort(regs, ents, sizeof(struct linux_prom64_registers), + cmp_p64, NULL); +} + +unsigned long *sparc64_valid_addr_bitmap __read_mostly; /* Ugly, but necessary... -DaveM */ -unsigned long phys_base; -unsigned long kern_base; -unsigned long kern_size; -unsigned long pfn_base; - -/* This is even uglier. We have a problem where the kernel may not be - * located at phys_base. However, initial __alloc_bootmem() calls need to - * be adjusted to be within the 4-8Megs that the kernel is mapped to, else - * those page mappings wont work. Things are ok after inherit_prom_mappings - * is called though. Dave says he'll clean this up some other time. - * -- BenC - */ -static unsigned long bootmap_base; +unsigned long phys_base __read_mostly; +unsigned long kern_base __read_mostly; +unsigned long kern_size __read_mostly; +unsigned long pfn_base __read_mostly; /* get_new_mmu_context() uses "cache + 1". */ DEFINE_SPINLOCK(ctx_alloc_lock); @@ -73,7 +131,13 @@ extern unsigned long sparc_ramdisk_image64; extern unsigned int sparc_ramdisk_image; extern unsigned int sparc_ramdisk_size; -struct page *mem_map_zero; +struct page *mem_map_zero __read_mostly; + +unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly; + +unsigned long sparc64_kern_pri_context __read_mostly; +unsigned long sparc64_kern_pri_nuc_bits __read_mostly; +unsigned long sparc64_kern_sec_context __read_mostly; int bigkernel = 0; @@ -179,8 +243,6 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c : "g1", "g7"); } -extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long address, pte_t pte, int code); - void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) { struct page *page; @@ -207,10 +269,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p put_cpu(); } - - if (get_thread_fault_code()) - __update_mmu_cache(CTX_NRBITS(vma->vm_mm->context), - address, pte, get_thread_fault_code()); } void flush_dcache_page(struct page *page) @@ -310,6 +368,11 @@ struct linux_prom_translation { unsigned long data; }; +/* Exported for kernel TLB miss handling in ktlb.S */ +struct linux_prom_translation prom_trans[512] __read_mostly; +unsigned int prom_trans_ents __read_mostly; +unsigned int swapper_pgd_zero __read_mostly; + extern unsigned long prom_boot_page; extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle); extern int prom_get_mmu_ihandle(void); @@ -318,297 +381,162 @@ extern void register_prom_callbacks(void); /* Exported for SMP bootup purposes. */ unsigned long kern_locked_tte_data; -void __init early_pgtable_allocfail(char *type) -{ - prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type); - prom_halt(); -} - -#define BASE_PAGE_SIZE 8192 -static pmd_t *prompmd; - /* * Translate PROM's mapping we capture at boot time into physical address. * The second parameter is only set from prom_callback() invocations. 
*/ unsigned long prom_virt_to_phys(unsigned long promva, int *error) { - pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff); - pte_t *ptep; - unsigned long base; + int i; - if (pmd_none(*pmdp)) { - if (error) - *error = 1; - return(0); + for (i = 0; i < prom_trans_ents; i++) { + struct linux_prom_translation *p = &prom_trans[i]; + + if (promva >= p->virt && + promva < (p->virt + p->size)) { + unsigned long base = p->data & _PAGE_PADDR; + + if (error) + *error = 0; + return base + (promva & (8192 - 1)); + } } - ptep = (pte_t *)__pmd_page(*pmdp) + ((promva >> 13) & 0x3ff); - if (!pte_present(*ptep)) { - if (error) - *error = 1; - return(0); - } - if (error) { - *error = 0; - return(pte_val(*ptep)); - } - base = pte_val(*ptep) & _PAGE_PADDR; - return(base + (promva & (BASE_PAGE_SIZE - 1))); + if (error) + *error = 1; + return 0UL; } -static void inherit_prom_mappings(void) +/* The obp translations are saved based on 8k pagesize, since obp can + * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS -> + * HI_OBP_ADDRESS range are handled in ktlb.S and do not use the vpte + * scheme (also, see rant in inherit_locked_prom_mappings()). + */ +static inline int in_obp_range(unsigned long vaddr) { - struct linux_prom_translation *trans; - unsigned long phys_page, tte_vaddr, tte_data; - void (*remap_func)(unsigned long, unsigned long, int); - pmd_t *pmdp; - pte_t *ptep; - int node, n, i, tsz; - extern unsigned int obp_iaddr_patch[2], obp_daddr_patch[2]; + return (vaddr >= LOW_OBP_ADDRESS && + vaddr < HI_OBP_ADDRESS); +} + +static int cmp_ptrans(const void *a, const void *b) +{ + const struct linux_prom_translation *x = a, *y = b; + + if (x->virt > y->virt) + return 1; + if (x->virt < y->virt) + return -1; + return 0; +} + +/* Read OBP translations property into 'prom_trans[]'. */ +static void __init read_obp_translations(void) +{ + int n, node, ents, first, last, i; node = prom_finddevice("/virtual-memory"); n = prom_getproplen(node, "translations"); - if (n == 0 || n == -1) { - prom_printf("Couldn't get translation property\n"); + if (unlikely(n == 0 || n == -1)) { + prom_printf("prom_mappings: Couldn't get size.\n"); prom_halt(); } - n += 5 * sizeof(struct linux_prom_translation); - for (tsz = 1; tsz < n; tsz <<= 1) - /* empty */; - trans = __alloc_bootmem(tsz, SMP_CACHE_BYTES, bootmap_base); - if (trans == NULL) { - prom_printf("inherit_prom_mappings: Cannot alloc translations.\n"); + if (unlikely(n > sizeof(prom_trans))) { + prom_printf("prom_mappings: Size %Zd is too big.\n", n); prom_halt(); } - memset(trans, 0, tsz); - if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) { - prom_printf("Couldn't get translation property\n"); + if ((n = prom_getproperty(node, "translations", + (char *)&prom_trans[0], + sizeof(prom_trans))) == -1) { + prom_printf("prom_mappings: Couldn't get property.\n"); prom_halt(); } - n = n / sizeof(*trans); - /* - * The obp translations are saved based on 8k pagesize, since obp can - * use a mixture of pagesizes. Misses to the 0xf0000000 - 0x100000000, - * ie obp range, are handled in entry.S and do not use the vpte scheme - * (see rant in inherit_locked_prom_mappings()). 
- */ -#define OBP_PMD_SIZE 2048 - prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, bootmap_base); - if (prompmd == NULL) - early_pgtable_allocfail("pmd"); - memset(prompmd, 0, OBP_PMD_SIZE); - for (i = 0; i < n; i++) { - unsigned long vaddr; + n = n / sizeof(struct linux_prom_translation); - if (trans[i].virt >= LOW_OBP_ADDRESS && trans[i].virt < HI_OBP_ADDRESS) { - for (vaddr = trans[i].virt; - ((vaddr < trans[i].virt + trans[i].size) && - (vaddr < HI_OBP_ADDRESS)); - vaddr += BASE_PAGE_SIZE) { - unsigned long val; + ents = n; - pmdp = prompmd + ((vaddr >> 23) & 0x7ff); - if (pmd_none(*pmdp)) { - ptep = __alloc_bootmem(BASE_PAGE_SIZE, - BASE_PAGE_SIZE, - bootmap_base); - if (ptep == NULL) - early_pgtable_allocfail("pte"); - memset(ptep, 0, BASE_PAGE_SIZE); - pmd_set(pmdp, ptep); - } - ptep = (pte_t *)__pmd_page(*pmdp) + - ((vaddr >> 13) & 0x3ff); + sort(prom_trans, ents, sizeof(struct linux_prom_translation), + cmp_ptrans, NULL); - val = trans[i].data; - - /* Clear diag TTE bits. */ - if (tlb_type == spitfire) - val &= ~0x0003fe0000000000UL; - - set_pte_at(&init_mm, vaddr, - ptep, __pte(val | _PAGE_MODIFIED)); - trans[i].data += BASE_PAGE_SIZE; - } - } + /* Now kick out all the non-OBP entries. */ + for (i = 0; i < ents; i++) { + if (in_obp_range(prom_trans[i].virt)) + break; } - phys_page = __pa(prompmd); - obp_iaddr_patch[0] |= (phys_page >> 10); - obp_iaddr_patch[1] |= (phys_page & 0x3ff); - flushi((long)&obp_iaddr_patch[0]); - obp_daddr_patch[0] |= (phys_page >> 10); - obp_daddr_patch[1] |= (phys_page & 0x3ff); - flushi((long)&obp_daddr_patch[0]); + first = i; + for (; i < ents; i++) { + if (!in_obp_range(prom_trans[i].virt)) + break; + } + last = i; - /* Now fixup OBP's idea about where we really are mapped. */ - prom_printf("Remapping the kernel... "); + for (i = 0; i < (last - first); i++) { + struct linux_prom_translation *src = &prom_trans[i + first]; + struct linux_prom_translation *dest = &prom_trans[i]; - /* Spitfire Errata #32 workaround */ - /* NOTE: Using plain zero for the context value is - * correct here, we are not using the Linux trap - * tables yet so we should not use the special - * UltraSPARC-III+ page size encodings yet. 
- */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); + *dest = *src; + } + for (; i < ents; i++) { + struct linux_prom_translation *dest = &prom_trans[i]; + dest->virt = dest->size = dest->data = 0x0UL; + } - switch (tlb_type) { - default: - case spitfire: - phys_page = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()); - break; - - case cheetah: - case cheetah_plus: - phys_page = cheetah_get_litlb_data(sparc64_highest_locked_tlbent()); - break; - }; - - phys_page &= _PAGE_PADDR; - phys_page += ((unsigned long)&prom_boot_page - - (unsigned long)KERNBASE); + prom_trans_ents = last - first; if (tlb_type == spitfire) { - /* Lock this into i/d tlb entry 59 */ - __asm__ __volatile__( - "stxa %%g0, [%2] %3\n\t" - "stxa %0, [%1] %4\n\t" - "membar #Sync\n\t" - "flush %%g6\n\t" - "stxa %%g0, [%2] %5\n\t" - "stxa %0, [%1] %6\n\t" - "membar #Sync\n\t" - "flush %%g6" - : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP | - _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W), - "r" (59 << 3), "r" (TLB_TAG_ACCESS), - "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), - "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS) - : "memory"); - } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { - /* Lock this into i/d tlb-0 entry 11 */ - __asm__ __volatile__( - "stxa %%g0, [%2] %3\n\t" - "stxa %0, [%1] %4\n\t" - "membar #Sync\n\t" - "flush %%g6\n\t" - "stxa %%g0, [%2] %5\n\t" - "stxa %0, [%1] %6\n\t" - "membar #Sync\n\t" - "flush %%g6" - : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP | - _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W), - "r" ((0 << 16) | (11 << 3)), "r" (TLB_TAG_ACCESS), - "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), - "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS) - : "memory"); - } else { - /* Implement me :-) */ - BUG(); + /* Clear diag TTE bits. */ + for (i = 0; i < prom_trans_ents; i++) + prom_trans[i].data &= ~0x0003fe0000000000UL; } +} + +static void __init remap_kernel(void) +{ + unsigned long phys_page, tte_vaddr, tte_data; + int tlb_ent = sparc64_highest_locked_tlbent(); tte_vaddr = (unsigned long) KERNBASE; - - /* Spitfire Errata #32 workaround */ - /* NOTE: Using plain zero for the context value is - * correct here, we are not using the Linux trap - * tables yet so we should not use the special - * UltraSPARC-III+ page size encodings yet. - */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - - if (tlb_type == spitfire) - tte_data = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()); - else - tte_data = cheetah_get_ldtlb_data(sparc64_highest_locked_tlbent()); + phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; + tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB | + _PAGE_CP | _PAGE_CV | _PAGE_P | + _PAGE_L | _PAGE_W)); kern_locked_tte_data = tte_data; - remap_func = (void *) ((unsigned long) &prom_remap - - (unsigned long) &prom_boot_page); - - - /* Spitfire Errata #32 workaround */ - /* NOTE: Using plain zero for the context value is - * correct here, we are not using the Linux trap - * tables yet so we should not use the special - * UltraSPARC-III+ page size encodings yet. - */ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "flush %%g6" - : /* No outputs */ - : "r" (0), - "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); - - remap_func((tlb_type == spitfire ? 
- (spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR) : - (cheetah_get_litlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR)), - (unsigned long) KERNBASE, - prom_get_mmu_ihandle()); - - if (bigkernel) - remap_func(((tte_data + 0x400000) & _PAGE_PADDR), - (unsigned long) KERNBASE + 0x400000, prom_get_mmu_ihandle()); - - /* Flush out that temporary mapping. */ - spitfire_flush_dtlb_nucleus_page(0x0); - spitfire_flush_itlb_nucleus_page(0x0); - - /* Now lock us back into the TLBs via OBP. */ - prom_dtlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr); - prom_itlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr); + /* Now lock us into the TLBs via OBP. */ + prom_dtlb_load(tlb_ent, tte_data, tte_vaddr); + prom_itlb_load(tlb_ent, tte_data, tte_vaddr); if (bigkernel) { - prom_dtlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000, - tte_vaddr + 0x400000); - prom_itlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000, - tte_vaddr + 0x400000); + tlb_ent -= 1; + prom_dtlb_load(tlb_ent, + tte_data + 0x400000, + tte_vaddr + 0x400000); + prom_itlb_load(tlb_ent, + tte_data + 0x400000, + tte_vaddr + 0x400000); } - - /* Re-read translations property. */ - if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) { - prom_printf("Couldn't get translation property\n"); - prom_halt(); + sparc64_highest_unlocked_tlb_ent = tlb_ent - 1; + if (tlb_type == cheetah_plus) { + sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | + CTX_CHEETAH_PLUS_NUC); + sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC; + sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0; } - n = n / sizeof(*trans); +} - for (i = 0; i < n; i++) { - unsigned long vaddr = trans[i].virt; - unsigned long size = trans[i].size; - if (vaddr < 0xf0000000UL) { - unsigned long avoid_start = (unsigned long) KERNBASE; - unsigned long avoid_end = avoid_start + (4 * 1024 * 1024); - - if (bigkernel) - avoid_end += (4 * 1024 * 1024); - if (vaddr < avoid_start) { - unsigned long top = vaddr + size; - - if (top > avoid_start) - top = avoid_start; - prom_unmap(top - vaddr, vaddr); - } - if ((vaddr + size) > avoid_end) { - unsigned long bottom = vaddr; - - if (bottom < avoid_end) - bottom = avoid_end; - prom_unmap((vaddr + size) - bottom, bottom); - } - } - } +static void __init inherit_prom_mappings(void) +{ + read_obp_translations(); + /* Now fixup OBP's idea about where we really are mapped. */ + prom_printf("Remapping the kernel... "); + remap_kernel(); prom_printf("done.\n"); + prom_printf("Registering callbacks... 
"); register_prom_callbacks(); + prom_printf("done.\n"); } /* The OBP specifications for sun4u mark 0xfffffffc00000000 and @@ -792,8 +720,8 @@ void inherit_locked_prom_mappings(int save_p) } } if (tlb_type == spitfire) { - int high = SPITFIRE_HIGHEST_LOCKED_TLBENT - bigkernel; - for (i = 0; i < high; i++) { + int high = sparc64_highest_unlocked_tlb_ent; + for (i = 0; i <= high; i++) { unsigned long data; /* Spitfire Errata #32 workaround */ @@ -881,9 +809,9 @@ void inherit_locked_prom_mappings(int save_p) } } } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { - int high = CHEETAH_HIGHEST_LOCKED_TLBENT - bigkernel; + int high = sparc64_highest_unlocked_tlb_ent; - for (i = 0; i < high; i++) { + for (i = 0; i <= high; i++) { unsigned long data; data = cheetah_get_ldtlb_data(i); @@ -1276,14 +1204,14 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) int i; #ifdef CONFIG_DEBUG_BOOTMEM - prom_printf("bootmem_init: Scan sp_banks, "); + prom_printf("bootmem_init: Scan pavail, "); #endif bytes_avail = 0UL; - for (i = 0; sp_banks[i].num_bytes != 0; i++) { - end_of_phys_memory = sp_banks[i].base_addr + - sp_banks[i].num_bytes; - bytes_avail += sp_banks[i].num_bytes; + for (i = 0; i < pavail_ents; i++) { + end_of_phys_memory = pavail[i].phys_addr + + pavail[i].reg_size; + bytes_avail += pavail[i].reg_size; if (cmdline_memory_size) { if (bytes_avail > cmdline_memory_size) { unsigned long slack = bytes_avail - cmdline_memory_size; @@ -1291,12 +1219,15 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) bytes_avail -= slack; end_of_phys_memory -= slack; - sp_banks[i].num_bytes -= slack; - if (sp_banks[i].num_bytes == 0) { - sp_banks[i].base_addr = 0xdeadbeef; + pavail[i].reg_size -= slack; + if ((long)pavail[i].reg_size <= 0L) { + pavail[i].phys_addr = 0xdeadbeefUL; + pavail[i].reg_size = 0UL; + pavail_ents = i; } else { - sp_banks[i+1].num_bytes = 0; - sp_banks[i+1].base_addr = 0xdeadbeef; + pavail[i+1].reg_size = 0Ul; + pavail[i+1].phys_addr = 0xdeadbeefUL; + pavail_ents = i + 1; } break; } @@ -1347,17 +1278,15 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) #endif bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn); - bootmap_base = bootmap_pfn << PAGE_SHIFT; - /* Now register the available physical memory with the * allocator. 
*/ - for (i = 0; sp_banks[i].num_bytes != 0; i++) { + for (i = 0; i < pavail_ents; i++) { #ifdef CONFIG_DEBUG_BOOTMEM - prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n", - i, sp_banks[i].base_addr, sp_banks[i].num_bytes); + prom_printf("free_bootmem(pavail:%d): base[%lx] size[%lx]\n", + i, pavail[i].phys_addr, pavail[i].reg_size); #endif - free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes); + free_bootmem(pavail[i].phys_addr, pavail[i].reg_size); } #ifdef CONFIG_BLK_DEV_INITRD @@ -1398,121 +1327,167 @@ unsigned long __init bootmem_init(unsigned long *pages_avail) return end_pfn; } +#ifdef CONFIG_DEBUG_PAGEALLOC +static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot) +{ + unsigned long vstart = PAGE_OFFSET + pstart; + unsigned long vend = PAGE_OFFSET + pend; + unsigned long alloc_bytes = 0UL; + + if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) { + prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n", + vstart, vend); + prom_halt(); + } + + while (vstart < vend) { + unsigned long this_end, paddr = __pa(vstart); + pgd_t *pgd = pgd_offset_k(vstart); + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + pud = pud_offset(pgd, vstart); + if (pud_none(*pud)) { + pmd_t *new; + + new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); + alloc_bytes += PAGE_SIZE; + pud_populate(&init_mm, pud, new); + } + + pmd = pmd_offset(pud, vstart); + if (!pmd_present(*pmd)) { + pte_t *new; + + new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); + alloc_bytes += PAGE_SIZE; + pmd_populate_kernel(&init_mm, pmd, new); + } + + pte = pte_offset_kernel(pmd, vstart); + this_end = (vstart + PMD_SIZE) & PMD_MASK; + if (this_end > vend) + this_end = vend; + + while (vstart < this_end) { + pte_val(*pte) = (paddr | pgprot_val(prot)); + + vstart += PAGE_SIZE; + paddr += PAGE_SIZE; + pte++; + } + } + + return alloc_bytes; +} + +static struct linux_prom64_registers pall[MAX_BANKS] __initdata; +static int pall_ents __initdata; + +extern unsigned int kvmap_linear_patch[1]; + +static void __init kernel_physical_mapping_init(void) +{ + unsigned long i, mem_alloced = 0UL; + + read_obp_memory("reg", &pall[0], &pall_ents); + + for (i = 0; i < pall_ents; i++) { + unsigned long phys_start, phys_end; + + phys_start = pall[i].phys_addr; + phys_end = phys_start + pall[i].reg_size; + mem_alloced += kernel_map_range(phys_start, phys_end, + PAGE_KERNEL); + } + + printk("Allocated %ld bytes for kernel page tables.\n", + mem_alloced); + + kvmap_linear_patch[0] = 0x01000000; /* nop */ + flushi(&kvmap_linear_patch[0]); + + __flush_tlb_all(); +} + +void kernel_map_pages(struct page *page, int numpages, int enable) +{ + unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; + unsigned long phys_end = phys_start + (numpages * PAGE_SIZE); + + kernel_map_range(phys_start, phys_end, + (enable ? PAGE_KERNEL : __pgprot(0))); + + /* we should perform an IPI and flush all tlbs, + * but that can deadlock->flush only current cpu. 
+ */ + __flush_tlb_kernel_range(PAGE_OFFSET + phys_start, + PAGE_OFFSET + phys_end); +} +#endif + +unsigned long __init find_ecache_flush_span(unsigned long size) +{ + int i; + + for (i = 0; i < pavail_ents; i++) { + if (pavail[i].reg_size >= size) + return pavail[i].phys_addr; + } + + return ~0UL; +} + /* paging_init() sets up the page tables */ extern void cheetah_ecache_flush_init(void); static unsigned long last_valid_pfn; +pgd_t swapper_pg_dir[2048]; void __init paging_init(void) { - extern pmd_t swapper_pmd_dir[1024]; - extern unsigned int sparc64_vpte_patchme1[1]; - extern unsigned int sparc64_vpte_patchme2[1]; - unsigned long alias_base = kern_base + PAGE_OFFSET; - unsigned long second_alias_page = 0; - unsigned long pt, flags, end_pfn, pages_avail; - unsigned long shift = alias_base - ((unsigned long)KERNBASE); - unsigned long real_end; + unsigned long end_pfn, pages_avail, shift; + unsigned long real_end, i; + + /* Find available physical memory... */ + read_obp_memory("available", &pavail[0], &pavail_ents); + + phys_base = 0xffffffffffffffffUL; + for (i = 0; i < pavail_ents; i++) + phys_base = min(phys_base, pavail[i].phys_addr); + + pfn_base = phys_base >> PAGE_SHIFT; + + kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; + kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; set_bit(0, mmu_context_bmap); + shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); + real_end = (unsigned long)_end; if ((real_end > ((unsigned long)KERNBASE + 0x400000))) bigkernel = 1; -#ifdef CONFIG_BLK_DEV_INITRD - if (sparc_ramdisk_image || sparc_ramdisk_image64) - real_end = (PAGE_ALIGN(real_end) + PAGE_ALIGN(sparc_ramdisk_size)); -#endif - - /* We assume physical memory starts at some 4mb multiple, - * if this were not true we wouldn't boot up to this point - * anyways. 
- */ - pt = kern_base | _PAGE_VALID | _PAGE_SZ4MB; - pt |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W; - local_irq_save(flags); - if (tlb_type == spitfire) { - __asm__ __volatile__( - " stxa %1, [%0] %3\n" - " stxa %2, [%5] %4\n" - " membar #Sync\n" - " flush %%g6\n" - " nop\n" - " nop\n" - " nop\n" - : /* No outputs */ - : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt), - "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3) - : "memory"); - if (real_end >= KERNBASE + 0x340000) { - second_alias_page = alias_base + 0x400000; - __asm__ __volatile__( - " stxa %1, [%0] %3\n" - " stxa %2, [%5] %4\n" - " membar #Sync\n" - " flush %%g6\n" - " nop\n" - " nop\n" - " nop\n" - : /* No outputs */ - : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000), - "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (60 << 3) - : "memory"); - } - } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { - __asm__ __volatile__( - " stxa %1, [%0] %3\n" - " stxa %2, [%5] %4\n" - " membar #Sync\n" - " flush %%g6\n" - " nop\n" - " nop\n" - " nop\n" - : /* No outputs */ - : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt), - "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (13<<3)) - : "memory"); - if (real_end >= KERNBASE + 0x340000) { - second_alias_page = alias_base + 0x400000; - __asm__ __volatile__( - " stxa %1, [%0] %3\n" - " stxa %2, [%5] %4\n" - " membar #Sync\n" - " flush %%g6\n" - " nop\n" - " nop\n" - " nop\n" - : /* No outputs */ - : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000), - "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (12<<3)) - : "memory"); - } + if ((real_end > ((unsigned long)KERNBASE + 0x800000))) { + prom_printf("paging_init: Kernel > 8MB, too large.\n"); + prom_halt(); } - local_irq_restore(flags); - - /* Now set kernel pgd to upper alias so physical page computations + + /* Set kernel pgd to upper alias so physical page computations * work. */ init_mm.pgd += ((shift) / (sizeof(pgd_t))); - memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir)); + memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir)); /* Now can init the kernel/bad page tables. */ pud_set(pud_offset(&swapper_pg_dir[0], 0), - swapper_pmd_dir + (shift / sizeof(pgd_t))); + swapper_low_pmd_dir + (shift / sizeof(pgd_t))); - sparc64_vpte_patchme1[0] |= - (((unsigned long)pgd_val(init_mm.pgd[0])) >> 10); - sparc64_vpte_patchme2[0] |= - (((unsigned long)pgd_val(init_mm.pgd[0])) & 0x3ff); - flushi((long)&sparc64_vpte_patchme1[0]); + swapper_pgd_zero = pgd_val(swapper_pg_dir[0]); - /* Setup bootmem... */ - pages_avail = 0; - last_valid_pfn = end_pfn = bootmem_init(&pages_avail); - - /* Inherit non-locked OBP mappings. */ inherit_prom_mappings(); /* Ok, we can use our TLB miss and window trap handlers safely. @@ -1527,13 +1502,16 @@ void __init paging_init(void) inherit_locked_prom_mappings(1); - /* We only created DTLB mapping of this stuff. */ - spitfire_flush_dtlb_nucleus_page(alias_base); - if (second_alias_page) - spitfire_flush_dtlb_nucleus_page(second_alias_page); - __flush_tlb_all(); + /* Setup bootmem... 
*/ + pages_avail = 0; + last_valid_pfn = end_pfn = bootmem_init(&pages_avail); + +#ifdef CONFIG_DEBUG_PAGEALLOC + kernel_physical_mapping_init(); +#endif + { unsigned long zones_size[MAX_NR_ZONES]; unsigned long zholes_size[MAX_NR_ZONES]; @@ -1554,128 +1532,35 @@ void __init paging_init(void) device_scan(); } -/* Ok, it seems that the prom can allocate some more memory chunks - * as a side effect of some prom calls we perform during the - * boot sequence. My most likely theory is that it is from the - * prom_set_traptable() call, and OBP is allocating a scratchpad - * for saving client program register state etc. - */ -static void __init sort_memlist(struct linux_mlist_p1275 *thislist) -{ - int swapi = 0; - int i, mitr; - unsigned long tmpaddr, tmpsize; - unsigned long lowest; - - for (i = 0; thislist[i].theres_more != 0; i++) { - lowest = thislist[i].start_adr; - for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++) - if (thislist[mitr].start_adr < lowest) { - lowest = thislist[mitr].start_adr; - swapi = mitr; - } - if (lowest == thislist[i].start_adr) - continue; - tmpaddr = thislist[swapi].start_adr; - tmpsize = thislist[swapi].num_bytes; - for (mitr = swapi; mitr > i; mitr--) { - thislist[mitr].start_adr = thislist[mitr-1].start_adr; - thislist[mitr].num_bytes = thislist[mitr-1].num_bytes; - } - thislist[i].start_adr = tmpaddr; - thislist[i].num_bytes = tmpsize; - } -} - -void __init rescan_sp_banks(void) -{ - struct linux_prom64_registers memlist[64]; - struct linux_mlist_p1275 avail[64], *mlist; - unsigned long bytes, base_paddr; - int num_regs, node = prom_finddevice("/memory"); - int i; - - num_regs = prom_getproperty(node, "available", - (char *) memlist, sizeof(memlist)); - num_regs = (num_regs / sizeof(struct linux_prom64_registers)); - for (i = 0; i < num_regs; i++) { - avail[i].start_adr = memlist[i].phys_addr; - avail[i].num_bytes = memlist[i].reg_size; - avail[i].theres_more = &avail[i + 1]; - } - avail[i - 1].theres_more = NULL; - sort_memlist(avail); - - mlist = &avail[0]; - i = 0; - bytes = mlist->num_bytes; - base_paddr = mlist->start_adr; - - sp_banks[0].base_addr = base_paddr; - sp_banks[0].num_bytes = bytes; - - while (mlist->theres_more != NULL){ - i++; - mlist = mlist->theres_more; - bytes = mlist->num_bytes; - if (i >= SPARC_PHYS_BANKS-1) { - printk ("The machine has more banks than " - "this kernel can support\n" - "Increase the SPARC_PHYS_BANKS " - "setting (currently %d)\n", - SPARC_PHYS_BANKS); - i = SPARC_PHYS_BANKS-1; - break; - } - - sp_banks[i].base_addr = mlist->start_adr; - sp_banks[i].num_bytes = mlist->num_bytes; - } - - i++; - sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL; - sp_banks[i].num_bytes = 0; - - for (i = 0; sp_banks[i].num_bytes != 0; i++) - sp_banks[i].num_bytes &= PAGE_MASK; -} - static void __init taint_real_pages(void) { - struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS]; int i; - for (i = 0; i < SPARC_PHYS_BANKS; i++) { - saved_sp_banks[i].base_addr = - sp_banks[i].base_addr; - saved_sp_banks[i].num_bytes = - sp_banks[i].num_bytes; - } + read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents); - rescan_sp_banks(); - - /* Find changes discovered in the sp_bank rescan and + /* Find changes discovered in the physmem available rescan and * reserve the lost portions in the bootmem maps. 
*/ - for (i = 0; saved_sp_banks[i].num_bytes; i++) { + for (i = 0; i < pavail_ents; i++) { unsigned long old_start, old_end; - old_start = saved_sp_banks[i].base_addr; + old_start = pavail[i].phys_addr; old_end = old_start + - saved_sp_banks[i].num_bytes; + pavail[i].reg_size; while (old_start < old_end) { int n; - for (n = 0; sp_banks[n].num_bytes; n++) { + for (n = 0; pavail_rescan_ents; n++) { unsigned long new_start, new_end; - new_start = sp_banks[n].base_addr; - new_end = new_start + sp_banks[n].num_bytes; + new_start = pavail_rescan[n].phys_addr; + new_end = new_start + + pavail_rescan[n].reg_size; if (new_start <= old_start && new_end >= (old_start + PAGE_SIZE)) { - set_bit (old_start >> 22, - sparc64_valid_addr_bitmap); + set_bit(old_start >> 22, + sparc64_valid_addr_bitmap); goto do_next_page; } } @@ -1695,8 +1580,7 @@ void __init mem_init(void) i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6); i += 1; - sparc64_valid_addr_bitmap = (unsigned long *) - __alloc_bootmem(i << 3, SMP_CACHE_BYTES, bootmap_base); + sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3); if (sparc64_valid_addr_bitmap == NULL) { prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n"); prom_halt(); @@ -1749,7 +1633,7 @@ void __init mem_init(void) cheetah_ecache_flush_init(); } -void free_initmem (void) +void free_initmem(void) { unsigned long addr, initend; diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S index b2ee9b53227f..e4c9151fa116 100644 --- a/arch/sparc64/mm/ultra.S +++ b/arch/sparc64/mm/ultra.S @@ -144,42 +144,29 @@ __flush_icache_page: /* %o0 = phys_page */ #define DTAG_MASK 0x3 + /* This routine is Spitfire specific so the hardcoded + * D-cache size and line-size are OK. + */ .align 64 .globl __flush_dcache_page __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */ sethi %uhi(PAGE_OFFSET), %g1 sllx %g1, 32, %g1 - sub %o0, %g1, %o0 - clr %o4 - srlx %o0, 11, %o0 - sethi %hi(1 << 14), %o2 -1: ldxa [%o4] ASI_DCACHE_TAG, %o3 ! LSU Group - add %o4, (1 << 5), %o4 ! IEU0 - ldxa [%o4] ASI_DCACHE_TAG, %g1 ! LSU Group - add %o4, (1 << 5), %o4 ! IEU0 - ldxa [%o4] ASI_DCACHE_TAG, %g2 ! LSU Group o3 available - add %o4, (1 << 5), %o4 ! IEU0 - andn %o3, DTAG_MASK, %o3 ! IEU1 - ldxa [%o4] ASI_DCACHE_TAG, %g3 ! LSU Group - add %o4, (1 << 5), %o4 ! IEU0 - andn %g1, DTAG_MASK, %g1 ! IEU1 - cmp %o0, %o3 ! IEU1 Group - be,a,pn %xcc, dflush1 ! CTI - sub %o4, (4 << 5), %o4 ! IEU0 (Group) - cmp %o0, %g1 ! IEU1 Group - andn %g2, DTAG_MASK, %g2 ! IEU0 - be,a,pn %xcc, dflush2 ! CTI - sub %o4, (3 << 5), %o4 ! IEU0 (Group) - cmp %o0, %g2 ! IEU1 Group - andn %g3, DTAG_MASK, %g3 ! IEU0 - be,a,pn %xcc, dflush3 ! CTI - sub %o4, (2 << 5), %o4 ! IEU0 (Group) - cmp %o0, %g3 ! IEU1 Group - be,a,pn %xcc, dflush4 ! CTI - sub %o4, (1 << 5), %o4 ! IEU0 -2: cmp %o4, %o2 ! IEU1 Group - bne,pt %xcc, 1b ! CTI - nop ! IEU0 + sub %o0, %g1, %o0 ! physical address + srlx %o0, 11, %o0 ! make D-cache TAG + sethi %hi(1 << 14), %o2 ! D-cache size + sub %o2, (1 << 5), %o2 ! D-cache line size +1: ldxa [%o2] ASI_DCACHE_TAG, %o3 ! load D-cache TAG + andcc %o3, DTAG_MASK, %g0 ! Valid? + be,pn %xcc, 2f ! Nope, branch + andn %o3, DTAG_MASK, %o3 ! Clear valid bits + cmp %o3, %o0 ! TAG match? + bne,pt %xcc, 2f ! Nope, branch + nop + stxa %g0, [%o2] ASI_DCACHE_TAG ! Invalidate TAG + membar #Sync +2: brnz,pt %o2, 1b + sub %o2, (1 << 5), %o2 ! D-cache line size /* The I-cache does not snoop local stores so we * better flush that too when necessary. 
@@ -189,48 +176,9 @@ __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */ retl nop -dflush1:stxa %g0, [%o4] ASI_DCACHE_TAG - add %o4, (1 << 5), %o4 -dflush2:stxa %g0, [%o4] ASI_DCACHE_TAG - add %o4, (1 << 5), %o4 -dflush3:stxa %g0, [%o4] ASI_DCACHE_TAG - add %o4, (1 << 5), %o4 -dflush4:stxa %g0, [%o4] ASI_DCACHE_TAG - add %o4, (1 << 5), %o4 - membar #Sync - ba,pt %xcc, 2b - nop #endif /* DCACHE_ALIASING_POSSIBLE */ - .previous .text - .align 32 -__prefill_dtlb: - rdpr %pstate, %g7 - wrpr %g7, PSTATE_IE, %pstate - mov TLB_TAG_ACCESS, %g1 - stxa %o5, [%g1] ASI_DMMU - stxa %o2, [%g0] ASI_DTLB_DATA_IN - flush %g6 - retl - wrpr %g7, %pstate -__prefill_itlb: - rdpr %pstate, %g7 - wrpr %g7, PSTATE_IE, %pstate - mov TLB_TAG_ACCESS, %g1 - stxa %o5, [%g1] ASI_IMMU - stxa %o2, [%g0] ASI_ITLB_DATA_IN - flush %g6 - retl - wrpr %g7, %pstate - - .globl __update_mmu_cache -__update_mmu_cache: /* %o0=hw_context, %o1=address, %o2=pte, %o3=fault_code */ - srlx %o1, PAGE_SHIFT, %o1 - andcc %o3, FAULT_CODE_DTLB, %g0 - sllx %o1, PAGE_SHIFT, %o5 - bne,pt %xcc, __prefill_dtlb - or %o5, %o0, %o5 - ba,a,pt %xcc, __prefill_itlb + .previous /* Cheetah specific versions, patched at boot time. */ __cheetah_flush_tlb_mm: /* 18 insns */ @@ -283,7 +231,7 @@ __cheetah_flush_tlb_pending: /* 26 insns */ wrpr %g7, 0x0, %pstate #ifdef DCACHE_ALIASING_POSSIBLE -flush_dcpage_cheetah: /* 11 insns */ +__cheetah_flush_dcache_page: /* 11 insns */ sethi %uhi(PAGE_OFFSET), %g1 sllx %g1, 32, %g1 sub %o0, %g1, %o0 @@ -329,8 +277,8 @@ cheetah_patch_cachetlbops: #ifdef DCACHE_ALIASING_POSSIBLE sethi %hi(__flush_dcache_page), %o0 or %o0, %lo(__flush_dcache_page), %o0 - sethi %hi(flush_dcpage_cheetah), %o1 - or %o1, %lo(flush_dcpage_cheetah), %o1 + sethi %hi(__cheetah_flush_dcache_page), %o1 + or %o1, %lo(__cheetah_flush_dcache_page), %o1 call cheetah_patch_one mov 11, %o2 #endif /* DCACHE_ALIASING_POSSIBLE */ @@ -505,22 +453,6 @@ xcall_flush_dcache_page_spitfire: /* %g1 == physical page address nop nop - .globl xcall_promstop -xcall_promstop: - rdpr %pstate, %g2 - wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate - rdpr %pil, %g2 - wrpr %g0, 15, %pil - sethi %hi(109f), %g7 - b,pt %xcc, etrap_irq -109: or %g7, %lo(109b), %g7 - flushw - call prom_stopself - nop - /* We should not return, just spin if we do... */ -1: b,a,pt %xcc, 1b - nop - .data errata32_hwbug: diff --git a/arch/sparc64/prom/Makefile b/arch/sparc64/prom/Makefile index 8f2420d9e9e6..3d33ed27bc27 100644 --- a/arch/sparc64/prom/Makefile +++ b/arch/sparc64/prom/Makefile @@ -6,5 +6,5 @@ EXTRA_AFLAGS := -ansi EXTRA_CFLAGS := -Werror -lib-y := bootstr.o devops.o init.o memory.o misc.o \ - tree.o console.o printf.o p1275.o map.o cif.o +lib-y := bootstr.o devops.o init.o misc.o \ + tree.o console.o printf.o p1275.o cif.o diff --git a/arch/sparc64/prom/console.c b/arch/sparc64/prom/console.c index 028a53fcb1ec..eae5db8dda56 100644 --- a/arch/sparc64/prom/console.c +++ b/arch/sparc64/prom/console.c @@ -67,7 +67,7 @@ prom_putchar(char c) } void -prom_puts(char *s, int len) +prom_puts(const char *s, int len) { p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)| P1275_INOUT(3,1), diff --git a/arch/sparc64/prom/devops.c b/arch/sparc64/prom/devops.c index 2c99b21b6981..4641839eb39a 100644 --- a/arch/sparc64/prom/devops.c +++ b/arch/sparc64/prom/devops.c @@ -16,7 +16,7 @@ * Returns 0 on failure. 
*/ int -prom_devopen(char *dstr) +prom_devopen(const char *dstr) { return p1275_cmd ("open", P1275_ARG(0,P1275_ARG_IN_STRING)| P1275_INOUT(1,1), diff --git a/arch/sparc64/prom/init.c b/arch/sparc64/prom/init.c index 817faae058cd..f3cc2d8578b2 100644 --- a/arch/sparc64/prom/init.c +++ b/arch/sparc64/prom/init.c @@ -27,7 +27,6 @@ int prom_chosen_node; * failure. It gets passed the pointer to the PROM vector. */ -extern void prom_meminit(void); extern void prom_cif_init(void *, void *); void __init prom_init(void *cif_handler, void *cif_stack) @@ -46,7 +45,7 @@ void __init prom_init(void *cif_handler, void *cif_stack) if((prom_root_node == 0) || (prom_root_node == -1)) prom_halt(); - prom_chosen_node = prom_finddevice("/chosen"); + prom_chosen_node = prom_finddevice(prom_chosen_path); if (!prom_chosen_node || prom_chosen_node == -1) prom_halt(); @@ -90,8 +89,6 @@ void __init prom_init(void *cif_handler, void *cif_stack) printk ("PROMLIB: Sun IEEE Boot Prom %s\n", buffer + bufadjust); - prom_meminit(); - /* Initialization successful. */ return; diff --git a/arch/sparc64/prom/map.S b/arch/sparc64/prom/map.S deleted file mode 100644 index 21b3f9c99ea7..000000000000 --- a/arch/sparc64/prom/map.S +++ /dev/null @@ -1,72 +0,0 @@ -/* $Id: map.S,v 1.2 1999/11/19 05:53:02 davem Exp $ - * map.S: Tricky coding required to fixup the kernel OBP maps - * properly. - * - * Copyright (C) 1999 David S. Miller (davem@redhat.com) - */ - - .text - .align 8192 - .globl prom_boot_page -prom_boot_page: -call_method: - .asciz "call-method" - .align 8 -map: - .asciz "map" - .align 8 - - /* When we are invoked, our caller has remapped us to - * page zero, therefore we must use PC relative addressing - * for everything after we begin performing the unmap/map - * calls. - */ - .globl prom_remap -prom_remap: /* %o0 = physpage, %o1 = virtpage, %o2 = mmu_ihandle */ - rd %pc, %g1 - srl %o2, 0, %o2 ! kill sign extension - sethi %hi(p1275buf), %g2 - or %g2, %lo(p1275buf), %g2 - ldx [%g2 + 0x10], %g3 ! prom_cif_stack - save %g3, -(192 + 128), %sp - ldx [%g2 + 0x08], %l0 ! prom_cif_handler - mov %g6, %i3 - mov %g4, %i4 - mov %g5, %i5 - flushw - - sethi %hi(prom_remap - call_method), %g7 - or %g7, %lo(prom_remap - call_method), %g7 - sub %g1, %g7, %l2 ! call-method string - sethi %hi(prom_remap - map), %g7 - or %g7, %lo(prom_remap - map), %g7 - sub %g1, %g7, %l4 ! map string - - /* OK, map the 4MB region we really live at. */ - stx %l2, [%sp + 2047 + 128 + 0x00] ! call-method - mov 7, %l5 - stx %l5, [%sp + 2047 + 128 + 0x08] ! num_args - mov 1, %l5 - stx %l5, [%sp + 2047 + 128 + 0x10] ! num_rets - stx %l4, [%sp + 2047 + 128 + 0x18] ! map - stx %i2, [%sp + 2047 + 128 + 0x20] ! mmu_ihandle - mov -1, %l5 - stx %l5, [%sp + 2047 + 128 + 0x28] ! mode == default - sethi %hi(4 * 1024 * 1024), %l5 - stx %l5, [%sp + 2047 + 128 + 0x30] ! size - stx %i1, [%sp + 2047 + 128 + 0x38] ! vaddr - stx %g0, [%sp + 2047 + 128 + 0x40] ! filler - stx %i0, [%sp + 2047 + 128 + 0x48] ! paddr - call %l0 - add %sp, (2047 + 128), %o0 ! argument array - - /* Restore hard-coded globals. */ - mov %i3, %g6 - mov %i4, %g4 - mov %i5, %g5 - - /* Wheee.... we are done. 
*/ - ret - restore - - .align 8192 diff --git a/arch/sparc64/prom/memory.c b/arch/sparc64/prom/memory.c deleted file mode 100644 index f4a8143e052c..000000000000 --- a/arch/sparc64/prom/memory.c +++ /dev/null @@ -1,152 +0,0 @@ -/* $Id: memory.c,v 1.5 1999/08/31 06:55:04 davem Exp $ - * memory.c: Prom routine for acquiring various bits of information - * about RAM on the machine, both virtual and physical. - * - * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) - * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) - */ - -#include -#include - -#include -#include - -/* This routine, for consistency, returns the ram parameters in the - * V0 prom memory descriptor format. I choose this format because I - * think it was the easiest to work with. I feel the religious - * arguments now... ;) Also, I return the linked lists sorted to - * prevent paging_init() upset stomach as I have not yet written - * the pepto-bismol kernel module yet. - */ - -struct linux_prom64_registers prom_reg_memlist[64]; -struct linux_prom64_registers prom_reg_tmp[64]; - -struct linux_mlist_p1275 prom_phys_total[64]; -struct linux_mlist_p1275 prom_prom_taken[64]; -struct linux_mlist_p1275 prom_phys_avail[64]; - -struct linux_mlist_p1275 *prom_ptot_ptr = prom_phys_total; -struct linux_mlist_p1275 *prom_ptak_ptr = prom_prom_taken; -struct linux_mlist_p1275 *prom_pavl_ptr = prom_phys_avail; - -struct linux_mem_p1275 prom_memlist; - - -/* Internal Prom library routine to sort a linux_mlist_p1275 memory - * list. Used below in initialization. - */ -static void __init -prom_sortmemlist(struct linux_mlist_p1275 *thislist) -{ - int swapi = 0; - int i, mitr; - unsigned long tmpaddr, tmpsize; - unsigned long lowest; - - for(i=0; thislist[i].theres_more; i++) { - lowest = thislist[i].start_adr; - for(mitr = i+1; thislist[mitr-1].theres_more; mitr++) - if(thislist[mitr].start_adr < lowest) { - lowest = thislist[mitr].start_adr; - swapi = mitr; - } - if(lowest == thislist[i].start_adr) continue; - tmpaddr = thislist[swapi].start_adr; - tmpsize = thislist[swapi].num_bytes; - for(mitr = swapi; mitr > i; mitr--) { - thislist[mitr].start_adr = thislist[mitr-1].start_adr; - thislist[mitr].num_bytes = thislist[mitr-1].num_bytes; - } - thislist[i].start_adr = tmpaddr; - thislist[i].num_bytes = tmpsize; - } -} - -/* Initialize the memory lists based upon the prom version. */ -void __init prom_meminit(void) -{ - int node = 0; - unsigned int iter, num_regs; - - node = prom_finddevice("/memory"); - num_regs = prom_getproperty(node, "available", - (char *) prom_reg_memlist, - sizeof(prom_reg_memlist)); - num_regs = (num_regs/sizeof(struct linux_prom64_registers)); - for(iter=0; iter /* Reset and reboot the machine with the command 'bcommand'. */ -void prom_reboot(char *bcommand) +void prom_reboot(const char *bcommand) { p1275_cmd("boot", P1275_ARG(0, P1275_ARG_IN_STRING) | P1275_INOUT(1, 0), bcommand); } /* Forth evaluate the expression contained in 'fstring'. */ -void prom_feval(char *fstring) +void prom_feval(const char *fstring) { if (!fstring || fstring[0] == 0) return; @@ -68,19 +68,11 @@ void prom_cmdline(void) local_irq_restore(flags); } -#ifdef CONFIG_SMP -extern void smp_promstop_others(void); -#endif - /* Drop into the prom, but completely terminate the program. * No chance of continuing. 
*/ void prom_halt(void) { -#ifdef CONFIG_SMP - smp_promstop_others(); - udelay(8000); -#endif again: p1275_cmd("exit", P1275_INOUT(0, 0)); goto again; /* PROM is out to get me -DaveM */ @@ -88,10 +80,6 @@ again: void prom_halt_power_off(void) { -#ifdef CONFIG_SMP - smp_promstop_others(); - udelay(8000); -#endif p1275_cmd("SUNW,power-off", P1275_INOUT(0, 0)); /* if nothing else helps, we just halt */ @@ -148,21 +136,19 @@ void prom_set_trap_table(unsigned long tba) p1275_cmd("SUNW,set-trap-table", P1275_INOUT(1, 0), tba); } -int mmu_ihandle_cache = 0; - int prom_get_mmu_ihandle(void) { int node, ret; - if (mmu_ihandle_cache != 0) - return mmu_ihandle_cache; + if (prom_mmu_ihandle_cache != 0) + return prom_mmu_ihandle_cache; - node = prom_finddevice("/chosen"); - ret = prom_getint(node, "mmu"); + node = prom_finddevice(prom_chosen_path); + ret = prom_getint(node, prom_mmu_name); if (ret == -1 || ret == 0) - mmu_ihandle_cache = -1; + prom_mmu_ihandle_cache = -1; else - mmu_ihandle_cache = ret; + prom_mmu_ihandle_cache = ret; return ret; } @@ -190,7 +176,7 @@ long prom_itlb_load(unsigned long index, unsigned long tte_data, unsigned long vaddr) { - return p1275_cmd("call-method", + return p1275_cmd(prom_callmethod_name, (P1275_ARG(0, P1275_ARG_IN_STRING) | P1275_ARG(2, P1275_ARG_IN_64B) | P1275_ARG(3, P1275_ARG_IN_64B) | @@ -207,7 +193,7 @@ long prom_dtlb_load(unsigned long index, unsigned long tte_data, unsigned long vaddr) { - return p1275_cmd("call-method", + return p1275_cmd(prom_callmethod_name, (P1275_ARG(0, P1275_ARG_IN_STRING) | P1275_ARG(2, P1275_ARG_IN_64B) | P1275_ARG(3, P1275_ARG_IN_64B) | @@ -223,13 +209,13 @@ long prom_dtlb_load(unsigned long index, int prom_map(int mode, unsigned long size, unsigned long vaddr, unsigned long paddr) { - int ret = p1275_cmd("call-method", + int ret = p1275_cmd(prom_callmethod_name, (P1275_ARG(0, P1275_ARG_IN_STRING) | P1275_ARG(3, P1275_ARG_IN_64B) | P1275_ARG(4, P1275_ARG_IN_64B) | P1275_ARG(6, P1275_ARG_IN_64B) | P1275_INOUT(7, 1)), - "map", + prom_map_name, prom_get_mmu_ihandle(), mode, size, @@ -244,12 +230,12 @@ int prom_map(int mode, unsigned long size, void prom_unmap(unsigned long size, unsigned long vaddr) { - p1275_cmd("call-method", + p1275_cmd(prom_callmethod_name, (P1275_ARG(0, P1275_ARG_IN_STRING) | P1275_ARG(2, P1275_ARG_IN_64B) | P1275_ARG(3, P1275_ARG_IN_64B) | P1275_INOUT(4, 0)), - "unmap", + prom_unmap_name, prom_get_mmu_ihandle(), size, vaddr); @@ -258,7 +244,7 @@ void prom_unmap(unsigned long size, unsigned long vaddr) /* Set aside physical memory which is not touched or modified * across soft resets. */ -unsigned long prom_retain(char *name, +unsigned long prom_retain(const char *name, unsigned long pa_low, unsigned long pa_high, long size, long align) { @@ -290,7 +276,7 @@ int prom_getunumber(int syndrome_code, unsigned long phys_addr, char *buf, int buflen) { - return p1275_cmd("call-method", + return p1275_cmd(prom_callmethod_name, (P1275_ARG(0, P1275_ARG_IN_STRING) | P1275_ARG(3, P1275_ARG_OUT_BUF) | P1275_ARG(6, P1275_ARG_IN_64B) | diff --git a/arch/sparc64/prom/p1275.c b/arch/sparc64/prom/p1275.c index 59fe38bba39e..a5a7c5712028 100644 --- a/arch/sparc64/prom/p1275.c +++ b/arch/sparc64/prom/p1275.c @@ -46,7 +46,7 @@ static inline unsigned long spitfire_get_primary_context(void) */ DEFINE_SPINLOCK(prom_entry_lock); -long p1275_cmd (char *service, long fmt, ...) +long p1275_cmd(const char *service, long fmt, ...) 
{ char *p, *q; unsigned long flags; diff --git a/arch/sparc64/prom/printf.c b/arch/sparc64/prom/printf.c index a6df82cafa0d..660943ee4c2a 100644 --- a/arch/sparc64/prom/printf.c +++ b/arch/sparc64/prom/printf.c @@ -34,7 +34,7 @@ prom_write(const char *buf, unsigned int n) } void -prom_printf(char *fmt, ...) +prom_printf(const char *fmt, ...) { va_list args; int i; diff --git a/arch/sparc64/prom/tree.c b/arch/sparc64/prom/tree.c index ccf73258ebf7..b1ff9e87dcc6 100644 --- a/arch/sparc64/prom/tree.c +++ b/arch/sparc64/prom/tree.c @@ -69,7 +69,7 @@ prom_getsibling(int node) * Return -1 on error. */ __inline__ int -prom_getproplen(int node, char *prop) +prom_getproplen(int node, const char *prop) { if((!node) || (!prop)) return -1; return p1275_cmd ("getproplen", @@ -83,20 +83,20 @@ prom_getproplen(int node, char *prop) * was successful the length will be returned, else -1 is returned. */ __inline__ int -prom_getproperty(int node, char *prop, char *buffer, int bufsize) +prom_getproperty(int node, const char *prop, char *buffer, int bufsize) { int plen; plen = prom_getproplen(node, prop); - if((plen > bufsize) || (plen == 0) || (plen == -1)) + if ((plen > bufsize) || (plen == 0) || (plen == -1)) { return -1; - else { + } else { /* Ok, things seem all right. */ - return p1275_cmd ("getprop", - P1275_ARG(1,P1275_ARG_IN_STRING)| - P1275_ARG(2,P1275_ARG_OUT_BUF)| - P1275_INOUT(4, 1), - node, prop, buffer, P1275_SIZE(plen)); + return p1275_cmd(prom_getprop_name, + P1275_ARG(1,P1275_ARG_IN_STRING)| + P1275_ARG(2,P1275_ARG_OUT_BUF)| + P1275_INOUT(4, 1), + node, prop, buffer, P1275_SIZE(plen)); } } @@ -104,7 +104,7 @@ prom_getproperty(int node, char *prop, char *buffer, int bufsize) * on failure. */ __inline__ int -prom_getint(int node, char *prop) +prom_getint(int node, const char *prop) { int intprop; @@ -119,7 +119,7 @@ prom_getint(int node, char *prop) */ int -prom_getintdefault(int node, char *property, int deflt) +prom_getintdefault(int node, const char *property, int deflt) { int retval; @@ -131,7 +131,7 @@ prom_getintdefault(int node, char *property, int deflt) /* Acquire a boolean property, 1=TRUE 0=FALSE. */ int -prom_getbool(int node, char *prop) +prom_getbool(int node, const char *prop) { int retval; @@ -145,7 +145,7 @@ prom_getbool(int node, char *prop) * buffer. */ void -prom_getstring(int node, char *prop, char *user_buf, int ubuf_size) +prom_getstring(int node, const char *prop, char *user_buf, int ubuf_size) { int len; @@ -160,7 +160,7 @@ prom_getstring(int node, char *prop, char *user_buf, int ubuf_size) * YES = 1 NO = 0 */ int -prom_nodematch(int node, char *name) +prom_nodematch(int node, const char *name) { char namebuf[128]; prom_getproperty(node, "name", namebuf, sizeof(namebuf)); @@ -172,7 +172,7 @@ prom_nodematch(int node, char *name) * 'nodename'. Return node if successful, zero if not. */ int -prom_searchsiblings(int node_start, char *nodename) +prom_searchsiblings(int node_start, const char *nodename) { int thisnode, error; @@ -294,7 +294,7 @@ prom_firstprop(int node, char *buffer) * property types for this node. 
*/ __inline__ char * -prom_nextprop(int node, char *oprop, char *buffer) +prom_nextprop(int node, const char *oprop, char *buffer) { char buf[32]; @@ -314,15 +314,17 @@ prom_nextprop(int node, char *oprop, char *buffer) } int -prom_finddevice(char *name) +prom_finddevice(const char *name) { - if(!name) return 0; - return p1275_cmd ("finddevice", P1275_ARG(0,P1275_ARG_IN_STRING)| - P1275_INOUT(1, 1), - name); + if (!name) + return 0; + return p1275_cmd(prom_finddev_name, + P1275_ARG(0,P1275_ARG_IN_STRING)| + P1275_INOUT(1, 1), + name); } -int prom_node_has_property(int node, char *prop) +int prom_node_has_property(int node, const char *prop) { char buf [32]; @@ -339,7 +341,7 @@ int prom_node_has_property(int node, char *prop) * of 'size' bytes. Return the number of bytes the prom accepted. */ int -prom_setprop(int node, char *pname, char *value, int size) +prom_setprop(int node, const char *pname, char *value, int size) { if(size == 0) return 0; if((pname == 0) || (value == 0)) return 0; @@ -364,7 +366,7 @@ prom_inst2pkg(int inst) * FIXME: Should work for v0 as well */ int -prom_pathtoinode(char *path) +prom_pathtoinode(const char *path) { int node, inst; diff --git a/arch/sparc64/solaris/socksys.c b/arch/sparc64/solaris/socksys.c index d7c1c76582cc..fc6669e8dde1 100644 --- a/arch/sparc64/solaris/socksys.c +++ b/arch/sparc64/solaris/socksys.c @@ -49,7 +49,7 @@ IPPROTO_EGP, IPPROTO_PUP, IPPROTO_UDP, IPPROTO_IDP, IPPROTO_RAW, #else -extern void * mykmalloc(size_t s, int gfp); +extern void * mykmalloc(size_t s, gfp_t gfp); extern void mykfree(void *); #endif diff --git a/arch/sparc64/solaris/timod.c b/arch/sparc64/solaris/timod.c index aaad29c35c83..b84e5456b025 100644 --- a/arch/sparc64/solaris/timod.c +++ b/arch/sparc64/solaris/timod.c @@ -39,7 +39,7 @@ static char * page = NULL ; #else -void * mykmalloc(size_t s, int gfp) +void * mykmalloc(size_t s, gfp_t gfp) { static char * page; static size_t free; diff --git a/arch/um/Makefile b/arch/um/Makefile index 5b5af95721ab..e1ffad224605 100644 --- a/arch/um/Makefile +++ b/arch/um/Makefile @@ -28,8 +28,6 @@ SYMLINK_HEADERS := $(foreach header,$(SYMLINK_HEADERS),include/asm-um/$(header)) ARCH_SYMLINKS = include/asm-um/arch $(ARCH_DIR)/include/sysdep $(ARCH_DIR)/os \ $(SYMLINK_HEADERS) $(ARCH_DIR)/include/uml-config.h -GEN_HEADERS += $(ARCH_DIR)/include/task.h $(ARCH_DIR)/include/kern_constants.h - um-modes-$(CONFIG_MODE_TT) += tt um-modes-$(CONFIG_MODE_SKAS) += skas @@ -45,9 +43,7 @@ endif ARCH_INCLUDE := -I$(ARCH_DIR)/include ifneq ($(KBUILD_SRC),) -ARCH_INCLUDE += -I$(ARCH_DIR)/include2 ARCH_INCLUDE += -I$(srctree)/$(ARCH_DIR)/include -MRPROPER_DIRS += $(ARCH_DIR)/include2 endif SYS_DIR := $(ARCH_DIR)/include/sysdep-$(SUBARCH) @@ -87,10 +83,6 @@ CONFIG_KERNEL_HALF_GIGS ?= 0 SIZE = (($(CONFIG_NEST_LEVEL) + $(CONFIG_KERNEL_HALF_GIGS)) * 0x20000000) -ifeq ($(CONFIG_MODE_SKAS), y) -$(SYS_HEADERS) : $(ARCH_DIR)/include/skas_ptregs.h -endif - .PHONY: linux all: linux @@ -111,7 +103,8 @@ else $(shell cd $(ARCH_DIR) && ln -sf Kconfig.$(SUBARCH) Kconfig.arch) endif -archprepare: $(ARCH_SYMLINKS) $(SYS_HEADERS) $(GEN_HEADERS) +archprepare: $(ARCH_SYMLINKS) $(ARCH_DIR)/include/user_constants.h +prepare: $(ARCH_DIR)/include/kern_constants.h LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib @@ -146,22 +139,20 @@ endef #When cleaning we don't include .config, so we don't include #TT or skas makefiles and don't clean skas_ptregs.h. 
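# The offsets headers are now generated in two steps (sketch; the DEFINE()/
# OFFSET() helpers live in common-offsets.h and the sys-$(SUBARCH)
# *-offsets.c files): each *-offsets.c file is compiled with -S only, so
# every entry leaves a "->NAME value comment" marker line in the resulting
# .s file, and the gen-asm-offsets rule below rewrites those markers into
# plain #defines.  For instance, on a host with 4K pages,
# DEFINE(UM_KERN_PAGE_SIZE, PAGE_SIZE) ends up in kernel-offsets.s as
# something like
#
#	->UM_KERN_PAGE_SIZE $4096 PAGE_SIZE
#
# which the sed command turns into
#
#	#define UM_KERN_PAGE_SIZE 4096 /* PAGE_SIZE */
#
# in $(ARCH_DIR)/include/kern_constants.h.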
CLEAN_FILES += linux x.i gmon.out $(ARCH_DIR)/include/uml-config.h \ - $(GEN_HEADERS) $(ARCH_DIR)/include/skas_ptregs.h \ - $(ARCH_DIR)/include/user_constants.h $(ARCH_DIR)/Kconfig.arch + $(ARCH_DIR)/include/user_constants.h \ + $(ARCH_DIR)/include/kern_constants.h $(ARCH_DIR)/Kconfig.arch MRPROPER_FILES += $(SYMLINK_HEADERS) $(ARCH_SYMLINKS) \ $(addprefix $(ARCH_DIR)/kernel/,$(KERN_SYMLINKS)) $(ARCH_DIR)/os archclean: - $(Q)$(MAKE) $(clean)=$(ARCH_DIR)/util - $(Q)$(MAKE) $(clean)=$(ARCH_DIR)/os-$(OS)/util @find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \ -o -name '*.gcov' \) -type f -print | xargs rm -f $(SYMLINK_HEADERS): @echo ' SYMLINK $@' ifneq ($(KBUILD_SRC),) - ln -fsn $(srctree)/include/asm-um/$(basename $(notdir $@))-$(SUBARCH)$(suffix $@) $@ + $(Q)ln -fsn $(srctree)/include/asm-um/$(basename $(notdir $@))-$(SUBARCH)$(suffix $@) $@ else $(Q)cd $(TOPDIR)/$(dir $@) ; \ ln -sf $(basename $(notdir $@))-$(SUBARCH)$(suffix $@) $(notdir $@) @@ -180,9 +171,7 @@ $(ARCH_DIR)/include/sysdep: @echo ' SYMLINK $@' ifneq ($(KBUILD_SRC),) $(Q)mkdir -p $(ARCH_DIR)/include - $(Q)mkdir -p $(ARCH_DIR)/include2 - $(Q)ln -fsn sysdep-$(SUBARCH) $(ARCH_DIR)/include/sysdep - $(Q)ln -fsn $(srctree)/$(ARCH_DIR)/include/sysdep-$(SUBARCH) $(ARCH_DIR)/include2/sysdep + $(Q)ln -fsn $(srctree)/$(ARCH_DIR)/include/sysdep-$(SUBARCH) $(ARCH_DIR)/include/sysdep else $(Q)cd $(ARCH_DIR)/include && ln -sf sysdep-$(SUBARCH) sysdep endif @@ -202,8 +191,6 @@ endef define filechk_gen-asm-offsets (set -e; \ - echo "#ifndef __ASM_OFFSETS_H__"; \ - echo "#define __ASM_OFFSETS_H__"; \ echo "/*"; \ echo " * DO NOT MODIFY."; \ echo " *"; \ @@ -212,8 +199,7 @@ define filechk_gen-asm-offsets echo " */"; \ echo ""; \ sed -ne "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"; \ - echo ""; \ - echo "#endif" ) + echo ""; ) endef $(ARCH_DIR)/include/uml-config.h : include/linux/autoconf.h @@ -222,50 +208,18 @@ $(ARCH_DIR)/include/uml-config.h : include/linux/autoconf.h $(ARCH_DIR)/user-offsets.s: $(ARCH_DIR)/sys-$(SUBARCH)/user-offsets.c $(CC) $(USER_CFLAGS) -S -o $@ $< -$(ARCH_DIR)/user-offsets.h: $(ARCH_DIR)/user-offsets.s +$(ARCH_DIR)/include/user_constants.h: $(ARCH_DIR)/user-offsets.s $(call filechk,gen-asm-offsets) -CLEAN_FILES += $(ARCH_DIR)/user-offsets.s $(ARCH_DIR)/user-offsets.h +CLEAN_FILES += $(ARCH_DIR)/user-offsets.s $(ARCH_DIR)/kernel-offsets.s: $(ARCH_DIR)/sys-$(SUBARCH)/kernel-offsets.c \ - $(ARCH_SYMLINKS) \ - $(SYS_DIR)/sc.h \ - include/asm include/linux/version.h \ - include/config/MARKER \ - $(ARCH_DIR)/include/user_constants.h + archprepare $(CC) $(CFLAGS) $(NOSTDINC_FLAGS) $(CPPFLAGS) -S -o $@ $< -$(ARCH_DIR)/kernel-offsets.h: $(ARCH_DIR)/kernel-offsets.s +$(ARCH_DIR)/include/kern_constants.h: $(ARCH_DIR)/kernel-offsets.s $(call filechk,gen-asm-offsets) -CLEAN_FILES += $(ARCH_DIR)/kernel-offsets.s $(ARCH_DIR)/kernel-offsets.h - -$(ARCH_DIR)/include/task.h: $(ARCH_DIR)/util/mk_task - $(call filechk,gen_header) - -$(ARCH_DIR)/include/user_constants.h: $(ARCH_DIR)/os-$(OS)/util/mk_user_constants - $(call filechk,gen_header) - -$(ARCH_DIR)/include/kern_constants.h: $(ARCH_DIR)/util/mk_constants - $(call filechk,gen_header) - -$(ARCH_DIR)/include/skas_ptregs.h: $(ARCH_DIR)/kernel/skas/util/mk_ptregs - $(call filechk,gen_header) - -$(ARCH_DIR)/os-$(OS)/util/mk_user_constants: $(ARCH_DIR)/os-$(OS)/util FORCE ; - -$(ARCH_DIR)/util/mk_task $(ARCH_DIR)/util/mk_constants: $(ARCH_DIR)/include/user_constants.h $(ARCH_DIR)/util \ - FORCE ; - -$(ARCH_DIR)/kernel/skas/util/mk_ptregs: 
$(ARCH_DIR)/kernel/skas/util FORCE ; - -$(ARCH_DIR)/util: scripts_basic $(SYS_DIR)/sc.h $(ARCH_DIR)/kernel-offsets.h FORCE - $(Q)$(MAKE) $(build)=$@ - -$(ARCH_DIR)/kernel/skas/util: scripts_basic $(ARCH_DIR)/user-offsets.h FORCE - $(Q)$(MAKE) $(build)=$@ - -$(ARCH_DIR)/os-$(OS)/util: scripts_basic $(ARCH_DIR)/user-offsets.h FORCE - $(Q)$(MAKE) $(build)=$@ +CLEAN_FILES += $(ARCH_DIR)/kernel-offsets.s export SUBARCH USER_CFLAGS OS diff --git a/arch/um/Makefile-i386 b/arch/um/Makefile-i386 index 1ab431a53ac3..2ee8a2858117 100644 --- a/arch/um/Makefile-i386 +++ b/arch/um/Makefile-i386 @@ -32,25 +32,3 @@ CFLAGS += -U__$(SUBARCH)__ -U$(SUBARCH) ifneq ($(CONFIG_GPROF),y) ARCH_CFLAGS += -DUM_FASTCALL endif - -SYS_UTIL_DIR := $(ARCH_DIR)/sys-i386/util -SYS_HEADERS := $(SYS_DIR)/sc.h $(SYS_DIR)/thread.h - -prepare: $(SYS_HEADERS) - -$(SYS_DIR)/sc.h: $(SYS_UTIL_DIR)/mk_sc - $(call filechk,gen_header) - -$(SYS_DIR)/thread.h: $(SYS_UTIL_DIR)/mk_thread - $(call filechk,gen_header) - -$(SYS_UTIL_DIR)/mk_sc: scripts_basic $(ARCH_DIR)/user-offsets.h FORCE - $(Q)$(MAKE) $(build)=$(SYS_UTIL_DIR) $@ - -$(SYS_UTIL_DIR)/mk_thread: scripts_basic $(ARCH_DIR)/kernel-offsets.h FORCE - $(Q)$(MAKE) $(build)=$(SYS_UTIL_DIR) $@ - -$(SYS_UTIL_DIR): scripts_basic include/asm FORCE - $(Q)$(MAKE) $(build)=$(SYS_UTIL_DIR) - -CLEAN_FILES += $(SYS_HEADERS) diff --git a/arch/um/Makefile-skas b/arch/um/Makefile-skas index fd18ec572271..ac35de5316a6 100644 --- a/arch/um/Makefile-skas +++ b/arch/um/Makefile-skas @@ -10,5 +10,3 @@ CFLAGS-$(CONFIG_GCOV) += $(GCOV_OPT) CFLAGS-$(CONFIG_GPROF) += $(GPROF_OPT) LINK-$(CONFIG_GCOV) += $(GCOV_OPT) LINK-$(CONFIG_GPROF) += $(GPROF_OPT) - -GEN_HEADERS += $(ARCH_DIR)/include/skas_ptregs.h diff --git a/arch/um/Makefile-x86_64 b/arch/um/Makefile-x86_64 index 436abbba409b..4f118d5cc2ee 100644 --- a/arch/um/Makefile-x86_64 +++ b/arch/um/Makefile-x86_64 @@ -12,24 +12,3 @@ CHECKFLAGS += -m64 ELF_ARCH := i386:x86-64 ELF_FORMAT := elf64-x86-64 - -SYS_UTIL_DIR := $(ARCH_DIR)/sys-x86_64/util -SYS_DIR := $(ARCH_DIR)/include/sysdep-x86_64 - -SYS_HEADERS = $(SYS_DIR)/sc.h $(SYS_DIR)/thread.h - -prepare: $(SYS_HEADERS) - -$(SYS_DIR)/sc.h: $(SYS_UTIL_DIR)/mk_sc - $(call filechk,gen_header) - -$(SYS_DIR)/thread.h: $(SYS_UTIL_DIR)/mk_thread - $(call filechk,gen_header) - -$(SYS_UTIL_DIR)/mk_sc: scripts_basic $(ARCH_DIR)/user-offsets.h FORCE - $(Q)$(MAKE) $(build)=$(SYS_UTIL_DIR) $@ - -$(SYS_UTIL_DIR)/mk_thread: scripts_basic $(GEN_HEADERS) $(ARCH_DIR)/kernel-offsets.h FORCE - $(Q)$(MAKE) $(build)=$(SYS_UTIL_DIR) $@ - -CLEAN_FILES += $(SYS_HEADERS) diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile index 783e18cae090..de17d4c6e02d 100644 --- a/arch/um/drivers/Makefile +++ b/arch/um/drivers/Makefile @@ -13,7 +13,7 @@ mcast-objs := mcast_kern.o mcast_user.o net-objs := net_kern.o net_user.o mconsole-objs := mconsole_kern.o mconsole_user.o hostaudio-objs := hostaudio_kern.o -ubd-objs := ubd_kern.o +ubd-objs := ubd_kern.o ubd_user.o port-objs := port_kern.o port_user.o harddog-objs := harddog_kern.o harddog_user.o diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c index 14a12d6b3df6..16e7dc89f61d 100644 --- a/arch/um/drivers/chan_kern.c +++ b/arch/um/drivers/chan_kern.c @@ -19,18 +19,44 @@ #include "line.h" #include "os.h" -#ifdef CONFIG_NOCONFIG_CHAN +/* XXX: could well be moved to somewhere else, if needed. */ +static int my_printf(const char * fmt, ...) 
+ __attribute__ ((format (printf, 1, 2))); -/* The printk's here are wrong because we are complaining that there is no - * output device, but printk is printing to that output device. The user will - * never see the error. printf would be better, except it can't run on a - * kernel stack because it will overflow it. - * Use printk for now since that will avoid crashing. - */ +static int my_printf(const char * fmt, ...) +{ + /* Yes, can be called on atomic context.*/ + char *buf = kmalloc(4096, GFP_ATOMIC); + va_list args; + int r; + + if (!buf) { + /* We print directly fmt. + * Yes, yes, yes, feel free to complain. */ + r = strlen(fmt); + } else { + va_start(args, fmt); + r = vsprintf(buf, fmt, args); + va_end(args); + fmt = buf; + } + + if (r) + r = os_write_file(1, fmt, r); + return r; + +} + +#ifdef CONFIG_NOCONFIG_CHAN +/* Despite its name, there's no added trailing newline. */ +static int my_puts(const char * buf) +{ + return os_write_file(1, buf, strlen(buf)); +} static void *not_configged_init(char *str, int device, struct chan_opts *opts) { - printk(KERN_ERR "Using a channel type which is configured out of " + my_puts("Using a channel type which is configured out of " "UML\n"); return(NULL); } @@ -38,27 +64,27 @@ static void *not_configged_init(char *str, int device, struct chan_opts *opts) static int not_configged_open(int input, int output, int primary, void *data, char **dev_out) { - printk(KERN_ERR "Using a channel type which is configured out of " + my_puts("Using a channel type which is configured out of " "UML\n"); return(-ENODEV); } static void not_configged_close(int fd, void *data) { - printk(KERN_ERR "Using a channel type which is configured out of " + my_puts("Using a channel type which is configured out of " "UML\n"); } static int not_configged_read(int fd, char *c_out, void *data) { - printk(KERN_ERR "Using a channel type which is configured out of " + my_puts("Using a channel type which is configured out of " "UML\n"); return(-EIO); } static int not_configged_write(int fd, const char *buf, int len, void *data) { - printk(KERN_ERR "Using a channel type which is configured out of " + my_puts("Using a channel type which is configured out of " "UML\n"); return(-EIO); } @@ -66,7 +92,7 @@ static int not_configged_write(int fd, const char *buf, int len, void *data) static int not_configged_console_write(int fd, const char *buf, int len, void *data) { - printk(KERN_ERR "Using a channel type which is configured out of " + my_puts("Using a channel type which is configured out of " "UML\n"); return(-EIO); } @@ -74,14 +100,14 @@ static int not_configged_console_write(int fd, const char *buf, int len, static int not_configged_window_size(int fd, void *data, unsigned short *rows, unsigned short *cols) { - printk(KERN_ERR "Using a channel type which is configured out of " + my_puts("Using a channel type which is configured out of " "UML\n"); return(-ENODEV); } static void not_configged_free(void *data) { - printf(KERN_ERR "Using a channel type which is configured out of " + my_puts("Using a channel type which is configured out of " "UML\n"); } @@ -457,7 +483,7 @@ static struct chan *parse_chan(char *str, int pri, int device, } } if(ops == NULL){ - printk(KERN_ERR "parse_chan couldn't parse \"%s\"\n", + my_printf("parse_chan couldn't parse \"%s\"\n", str); return(NULL); } @@ -465,7 +491,7 @@ static struct chan *parse_chan(char *str, int pri, int device, data = (*ops->init)(str, device, opts); if(data == NULL) return(NULL); - chan = kmalloc(sizeof(*chan), GFP_KERNEL); + chan = 
kmalloc(sizeof(*chan), GFP_ATOMIC); if(chan == NULL) return(NULL); *chan = ((struct chan) { .list = LIST_HEAD_INIT(chan->list), .primary = 1, diff --git a/arch/um/drivers/cow.h b/arch/um/drivers/cow.h index 4fcf3a8d13f4..dc36b222100b 100644 --- a/arch/um/drivers/cow.h +++ b/arch/um/drivers/cow.h @@ -3,15 +3,40 @@ #include -#if defined(__BIG_ENDIAN) -# define ntohll(x) (x) -# define htonll(x) (x) -#elif defined(__LITTLE_ENDIAN) -# define ntohll(x) bswap_64(x) -# define htonll(x) bswap_64(x) +#if defined(__KERNEL__) + +# include + +# if defined(__BIG_ENDIAN) +# define ntohll(x) (x) +# define htonll(x) (x) +# elif defined(__LITTLE_ENDIAN) +# define ntohll(x) be64_to_cpu(x) +# define htonll(x) cpu_to_be64(x) +# else +# error "Could not determine byte order" +# endif + #else -#error "__BYTE_ORDER not defined" +/* For the definition of ntohl, htonl and __BYTE_ORDER */ +#include +#include +#if defined(__BYTE_ORDER) + +# if __BYTE_ORDER == __BIG_ENDIAN +# define ntohll(x) (x) +# define htonll(x) (x) +# elif __BYTE_ORDER == __LITTLE_ENDIAN +# define ntohll(x) bswap_64(x) +# define htonll(x) bswap_64(x) +# else +# error "Could not determine byte order: __BYTE_ORDER uncorrectly defined" +# endif + +#else /* ! defined(__BYTE_ORDER) */ +# error "Could not determine byte order: __BYTE_ORDER not defined" #endif +#endif /* ! defined(__KERNEL__) */ extern int init_cow_file(int fd, char *cow_file, char *backing_file, int sectorsize, int alignment, int *bitmap_offset_out, diff --git a/arch/um/drivers/cow_user.c b/arch/um/drivers/cow_user.c index a8ce6fc3ef26..fbe2217db5dd 100644 --- a/arch/um/drivers/cow_user.c +++ b/arch/um/drivers/cow_user.c @@ -9,7 +9,6 @@ #include #include #include -#include #include "os.h" diff --git a/arch/um/drivers/port_kern.c b/arch/um/drivers/port_kern.c index c41efd207fcc..189839e4f1d4 100644 --- a/arch/um/drivers/port_kern.c +++ b/arch/um/drivers/port_kern.c @@ -7,7 +7,6 @@ #include "linux/sched.h" #include "linux/slab.h" #include "linux/interrupt.h" -#include "linux/irq.h" #include "linux/spinlock.h" #include "linux/errno.h" #include "asm/atomic.h" diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index e77a38da4350..f73134333f64 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -35,7 +35,6 @@ #include "linux/blkpg.h" #include "linux/genhd.h" #include "linux/spinlock.h" -#include "asm/atomic.h" #include "asm/segment.h" #include "asm/uaccess.h" #include "asm/irq.h" @@ -54,21 +53,20 @@ #include "mem.h" #include "mem_kern.h" #include "cow.h" -#include "aio.h" enum ubd_req { UBD_READ, UBD_WRITE }; struct io_thread_req { - enum aio_type op; + enum ubd_req op; int fds[2]; unsigned long offsets[2]; unsigned long long offset; unsigned long length; char *buffer; int sectorsize; - int bitmap_offset; - long bitmap_start; - long bitmap_end; + unsigned long sector_mask; + unsigned long long cow_offset; + unsigned long bitmap_words[2]; int error; }; @@ -82,31 +80,28 @@ extern int create_cow_file(char *cow_file, char *backing_file, unsigned long *bitmap_len_out, int *data_offset_out); extern int read_cow_bitmap(int fd, void *buf, int offset, int len); -extern void do_io(struct io_thread_req *req, struct request *r, - unsigned long *bitmap); +extern void do_io(struct io_thread_req *req); -static inline int ubd_test_bit(__u64 bit, void *data) +static inline int ubd_test_bit(__u64 bit, unsigned char *data) { - unsigned char *buffer = data; __u64 n; int bits, off; - bits = sizeof(buffer[0]) * 8; + bits = sizeof(data[0]) * 8; n = bit / bits; off = bit % 
bits; - return((buffer[n] & (1 << off)) != 0); + return((data[n] & (1 << off)) != 0); } -static inline void ubd_set_bit(__u64 bit, void *data) +static inline void ubd_set_bit(__u64 bit, unsigned char *data) { - unsigned char *buffer = data; __u64 n; int bits, off; - bits = sizeof(buffer[0]) * 8; + bits = sizeof(data[0]) * 8; n = bit / bits; off = bit % bits; - buffer[n] |= (1 << off); + data[n] |= (1 << off); } /*End stuff from ubd_user.h*/ @@ -115,6 +110,8 @@ static inline void ubd_set_bit(__u64 bit, void *data) static DEFINE_SPINLOCK(ubd_io_lock); static DEFINE_SPINLOCK(ubd_lock); +static void (*do_ubd)(void); + static int ubd_open(struct inode * inode, struct file * filp); static int ubd_release(struct inode * inode, struct file * file); static int ubd_ioctl(struct inode * inode, struct file * file, @@ -161,8 +158,6 @@ struct cow { int data_offset; }; -#define MAX_SG 64 - struct ubd { char *file; int count; @@ -173,7 +168,6 @@ struct ubd { int no_cow; struct cow cow; struct platform_device pdev; - struct scatterlist sg[MAX_SG]; }; #define DEFAULT_COW { \ @@ -466,114 +460,81 @@ __uml_help(fakehd, ); static void do_ubd_request(request_queue_t * q); -static int in_ubd; + +/* Only changed by ubd_init, which is an initcall. */ +int thread_fd = -1; /* Changed by ubd_handler, which is serialized because interrupts only * happen on CPU 0. */ int intr_count = 0; -static void ubd_end_request(struct request *req, int bytes, int uptodate) -{ - if (!end_that_request_first(req, uptodate, bytes >> 9)) { - add_disk_randomness(req->rq_disk); - end_that_request_last(req); - } -} - /* call ubd_finish if you need to serialize */ -static void __ubd_finish(struct request *req, int bytes) +static void __ubd_finish(struct request *req, int error) { - if(bytes < 0){ - ubd_end_request(req, 0, 0); - return; - } + int nsect; - ubd_end_request(req, bytes, 1); + if(error){ + end_request(req, 0); + return; + } + nsect = req->current_nr_sectors; + req->sector += nsect; + req->buffer += nsect << 9; + req->errors = 0; + req->nr_sectors -= nsect; + req->current_nr_sectors = 0; + end_request(req, 1); } -static inline void ubd_finish(struct request *req, int bytes) +static inline void ubd_finish(struct request *req, int error) { - spin_lock(&ubd_io_lock); - __ubd_finish(req, bytes); - spin_unlock(&ubd_io_lock); + spin_lock(&ubd_io_lock); + __ubd_finish(req, error); + spin_unlock(&ubd_io_lock); } -struct bitmap_io { - atomic_t count; - struct aio_context aio; -}; +/* Called without ubd_io_lock held */ +static void ubd_handler(void) +{ + struct io_thread_req req; + struct request *rq = elv_next_request(ubd_queue); + int n; -struct ubd_aio { - struct aio_context aio; - struct request *req; - int len; - struct bitmap_io *bitmap; - void *bitmap_buf; -}; - -static int ubd_reply_fd = -1; + do_ubd = NULL; + intr_count++; + n = os_read_file(thread_fd, &req, sizeof(req)); + if(n != sizeof(req)){ + printk(KERN_ERR "Pid %d - spurious interrupt in ubd_handler, " + "err = %d\n", os_getpid(), -n); + spin_lock(&ubd_io_lock); + end_request(rq, 0); + spin_unlock(&ubd_io_lock); + return; + } + + ubd_finish(rq, req.error); + reactivate_fd(thread_fd, UBD_IRQ); + do_ubd_request(ubd_queue); +} static irqreturn_t ubd_intr(int irq, void *dev, struct pt_regs *unused) { - struct aio_thread_reply reply; - struct ubd_aio *aio; - struct request *req; - int err, n, fd = (int) (long) dev; - - while(1){ - err = os_read_file(fd, &reply, sizeof(reply)); - if(err == -EAGAIN) - break; - if(err < 0){ - printk("ubd_aio_handler - read returned err %d\n", - 
-err); - break; - } - - aio = container_of(reply.data, struct ubd_aio, aio); - n = reply.err; - - if(n == 0){ - req = aio->req; - req->nr_sectors -= aio->len >> 9; - - if((aio->bitmap != NULL) && - (atomic_dec_and_test(&aio->bitmap->count))){ - aio->aio = aio->bitmap->aio; - aio->len = 0; - kfree(aio->bitmap); - aio->bitmap = NULL; - submit_aio(&aio->aio); - } - else { - if((req->nr_sectors == 0) && - (aio->bitmap == NULL)){ - int len = req->hard_nr_sectors << 9; - ubd_finish(req, len); - } - - if(aio->bitmap_buf != NULL) - kfree(aio->bitmap_buf); - kfree(aio); - } - } - else if(n < 0){ - ubd_finish(aio->req, n); - if(aio->bitmap != NULL) - kfree(aio->bitmap); - if(aio->bitmap_buf != NULL) - kfree(aio->bitmap_buf); - kfree(aio); - } - } - reactivate_fd(fd, UBD_IRQ); - - do_ubd_request(ubd_queue); - + ubd_handler(); return(IRQ_HANDLED); } +/* Only changed by ubd_init, which is an initcall. */ +static int io_pid = -1; + +void kill_io_thread(void) +{ + if(io_pid != -1) + os_kill_process(io_pid, 1); +} + +__uml_exitcall(kill_io_thread); + static int ubd_file_size(struct ubd *dev, __u64 *size_out) { char *file; @@ -608,7 +569,7 @@ static int ubd_open_dev(struct ubd *dev) &dev->cow.data_offset, create_ptr); if((dev->fd == -ENOENT) && create_cow){ - dev->fd = create_cow_file(dev->file, dev->cow.file, + dev->fd = create_cow_file(dev->file, dev->cow.file, dev->openflags, 1 << 9, PAGE_SIZE, &dev->cow.bitmap_offset, &dev->cow.bitmap_len, @@ -870,10 +831,6 @@ int ubd_init(void) { int i; - ubd_reply_fd = init_aio_irq(UBD_IRQ, "ubd", ubd_intr); - if(ubd_reply_fd < 0) - printk("Setting up ubd AIO failed, err = %d\n", ubd_reply_fd); - devfs_mk_dir("ubd"); if (register_blkdev(MAJOR_NR, "ubd")) return -1; @@ -884,7 +841,6 @@ int ubd_init(void) return -1; } - blk_queue_max_hw_segments(ubd_queue, MAX_SG); if (fake_major != MAJOR_NR) { char name[sizeof("ubd_nnn\0")]; @@ -896,12 +852,40 @@ int ubd_init(void) driver_register(&ubd_driver); for (i = 0; i < MAX_DEV; i++) ubd_add(i); - return 0; } late_initcall(ubd_init); +int ubd_driver_init(void){ + unsigned long stack; + int err; + + /* Set by CONFIG_BLK_DEV_UBD_SYNC or ubd=sync.*/ + if(global_openflags.s){ + printk(KERN_INFO "ubd: Synchronous mode\n"); + /* Letting ubd=sync be like using ubd#s= instead of ubd#= is + * enough. So use anyway the io thread. 
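		 *
		 * Rough picture of the asynchronous path set up below:
		 * alloc_stack() provides a stack, start_io_thread() (in
		 * ubd_user.c) creates a pipe pair and clone()s the I/O
		 * thread on it, and um_request_irq() wires thread_fd to
		 * UBD_IRQ.  do_ubd_request() writes io_thread_req structures
		 * down the pipe, io_thread() does the actual seek/read/write
		 * and writes each request back when done, which makes
		 * thread_fd readable and lets ubd_intr()/ubd_handler()
		 * finish the request.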
*/ + } + stack = alloc_stack(0, 0); + io_pid = start_io_thread(stack + PAGE_SIZE - sizeof(void *), + &thread_fd); + if(io_pid < 0){ + printk(KERN_ERR + "ubd : Failed to start I/O thread (errno = %d) - " + "falling back to synchronous I/O\n", -io_pid); + io_pid = -1; + return(0); + } + err = um_request_irq(UBD_IRQ, thread_fd, IRQ_READ, ubd_intr, + SA_INTERRUPT, "ubd", ubd_dev); + if(err != 0) + printk(KERN_ERR "um_request_irq failed - errno = %d\n", -err); + return(err); +} + +device_initcall(ubd_driver_init); + static int ubd_open(struct inode *inode, struct file *filp) { struct gendisk *disk = inode->i_bdev->bd_disk; @@ -939,55 +923,105 @@ static int ubd_release(struct inode * inode, struct file * file) return(0); } -static void cowify_bitmap(struct io_thread_req *req, unsigned long *bitmap) +static void cowify_bitmap(__u64 io_offset, int length, unsigned long *cow_mask, + __u64 *cow_offset, unsigned long *bitmap, + __u64 bitmap_offset, unsigned long *bitmap_words, + __u64 bitmap_len) { - __u64 sector = req->offset / req->sectorsize; - int i; + __u64 sector = io_offset >> 9; + int i, update_bitmap = 0; - for(i = 0; i < req->length / req->sectorsize; i++){ - if(ubd_test_bit(sector + i, bitmap)) - continue; + for(i = 0; i < length >> 9; i++){ + if(cow_mask != NULL) + ubd_set_bit(i, (unsigned char *) cow_mask); + if(ubd_test_bit(sector + i, (unsigned char *) bitmap)) + continue; - if(req->bitmap_start == -1) - req->bitmap_start = sector + i; - req->bitmap_end = sector + i + 1; + update_bitmap = 1; + ubd_set_bit(sector + i, (unsigned char *) bitmap); + } - ubd_set_bit(sector + i, bitmap); - } + if(!update_bitmap) + return; + + *cow_offset = sector / (sizeof(unsigned long) * 8); + + /* This takes care of the case where we're exactly at the end of the + * device, and *cow_offset + 1 is off the end. So, just back it up + * by one word. Thanks to Lynn Kerby for the fix and James McMechan + * for the original diagnosis. 
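	 *
	 * Worked example (purely illustrative, assuming 32-bit longs): a
	 * write touching sector 1000 gives *cow_offset = 1000 / 32 = 31.
	 * With a 128-byte bitmap the last valid word index is
	 * (128 + 4 - 1) / 4 - 1 = 31, so *cow_offset is backed up to 30 and
	 * bitmap words 30 and 31 are written back to the COW file at offset
	 * bitmap_offset + 30 * sizeof(unsigned long).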
+ */ + if(*cow_offset == ((bitmap_len + sizeof(unsigned long) - 1) / + sizeof(unsigned long) - 1)) + (*cow_offset)--; + + bitmap_words[0] = bitmap[*cow_offset]; + bitmap_words[1] = bitmap[*cow_offset + 1]; + + *cow_offset *= sizeof(unsigned long); + *cow_offset += bitmap_offset; +} + +static void cowify_req(struct io_thread_req *req, unsigned long *bitmap, + __u64 bitmap_offset, __u64 bitmap_len) +{ + __u64 sector = req->offset >> 9; + int i; + + if(req->length > (sizeof(req->sector_mask) * 8) << 9) + panic("Operation too long"); + + if(req->op == UBD_READ) { + for(i = 0; i < req->length >> 9; i++){ + if(ubd_test_bit(sector + i, (unsigned char *) bitmap)) + ubd_set_bit(i, (unsigned char *) + &req->sector_mask); + } + } + else cowify_bitmap(req->offset, req->length, &req->sector_mask, + &req->cow_offset, bitmap, bitmap_offset, + req->bitmap_words, bitmap_len); } /* Called with ubd_io_lock held */ -static int prepare_request(struct request *req, struct io_thread_req *io_req, - unsigned long long offset, int page_offset, - int len, struct page *page) +static int prepare_request(struct request *req, struct io_thread_req *io_req) { struct gendisk *disk = req->rq_disk; struct ubd *dev = disk->private_data; + __u64 offset; + int len; + + if(req->rq_status == RQ_INACTIVE) return(1); /* This should be impossible now */ if((rq_data_dir(req) == WRITE) && !dev->openflags.w){ printk("Write attempted on readonly ubd device %s\n", disk->disk_name); - ubd_end_request(req, 0, 0); + end_request(req, 0); return(1); } + offset = ((__u64) req->sector) << 9; + len = req->current_nr_sectors << 9; + io_req->fds[0] = (dev->cow.file != NULL) ? dev->cow.fd : dev->fd; io_req->fds[1] = dev->fd; + io_req->cow_offset = -1; io_req->offset = offset; io_req->length = len; io_req->error = 0; - io_req->op = (rq_data_dir(req) == READ) ? AIO_READ : AIO_WRITE; + io_req->sector_mask = 0; + + io_req->op = (rq_data_dir(req) == READ) ? 
UBD_READ : UBD_WRITE; io_req->offsets[0] = 0; io_req->offsets[1] = dev->cow.data_offset; - io_req->buffer = page_address(page) + page_offset; + io_req->buffer = req->buffer; io_req->sectorsize = 1 << 9; - io_req->bitmap_offset = dev->cow.bitmap_offset; - io_req->bitmap_start = -1; - io_req->bitmap_end = -1; - if((dev->cow.file != NULL) && (io_req->op == UBD_WRITE)) - cowify_bitmap(io_req, dev->cow.bitmap); + if(dev->cow.file != NULL) + cowify_req(io_req, dev->cow.bitmap, dev->cow.bitmap_offset, + dev->cow.bitmap_len); + return(0); } @@ -996,36 +1030,30 @@ static void do_ubd_request(request_queue_t *q) { struct io_thread_req io_req; struct request *req; - __u64 sector; - int err; + int err, n; - if(in_ubd) - return; - in_ubd = 1; - while((req = elv_next_request(q)) != NULL){ - struct gendisk *disk = req->rq_disk; - struct ubd *dev = disk->private_data; - int n, i; - - blkdev_dequeue_request(req); - - sector = req->sector; - n = blk_rq_map_sg(q, req, dev->sg); - - for(i = 0; i < n; i++){ - struct scatterlist *sg = &dev->sg[i]; - - err = prepare_request(req, &io_req, sector << 9, - sg->offset, sg->length, - sg->page); - if(err) - continue; - - sector += sg->length >> 9; - do_io(&io_req, req, dev->cow.bitmap); + if(thread_fd == -1){ + while((req = elv_next_request(q)) != NULL){ + err = prepare_request(req, &io_req); + if(!err){ + do_io(&io_req); + __ubd_finish(req, io_req.error); + } + } + } + else { + if(do_ubd || (req = elv_next_request(q)) == NULL) + return; + err = prepare_request(req, &io_req); + if(!err){ + do_ubd = ubd_handler; + n = os_write_file(thread_fd, (char *) &io_req, + sizeof(io_req)); + if(n != sizeof(io_req)) + printk("write to io thread failed, " + "errno = %d\n", -n); } } - in_ubd = 0; } static int ubd_ioctl(struct inode * inode, struct file * file, @@ -1241,95 +1269,131 @@ int create_cow_file(char *cow_file, char *backing_file, struct openflags flags, return(err); } -void do_io(struct io_thread_req *req, struct request *r, unsigned long *bitmap) +static int update_bitmap(struct io_thread_req *req) { - struct ubd_aio *aio; - struct bitmap_io *bitmap_io = NULL; - char *buf; - void *bitmap_buf = NULL; - unsigned long len, sector; - int nsectors, start, end, bit, err; - __u64 off; + int n; - if(req->bitmap_start != -1){ - /* Round up to the nearest word */ - int round = sizeof(unsigned long); - len = (req->bitmap_end - req->bitmap_start + - round * 8 - 1) / (round * 8); - len *= round; + if(req->cow_offset == -1) + return(0); - off = req->bitmap_start / (8 * round); - off *= round; + n = os_seek_file(req->fds[1], req->cow_offset); + if(n < 0){ + printk("do_io - bitmap lseek failed : err = %d\n", -n); + return(1); + } - bitmap_io = kmalloc(sizeof(*bitmap_io), GFP_KERNEL); - if(bitmap_io == NULL){ - printk("Failed to kmalloc bitmap IO\n"); - req->error = 1; - return; - } + n = os_write_file(req->fds[1], &req->bitmap_words, + sizeof(req->bitmap_words)); + if(n != sizeof(req->bitmap_words)){ + printk("do_io - bitmap update failed, err = %d fd = %d\n", -n, + req->fds[1]); + return(1); + } - bitmap_buf = kmalloc(len, GFP_KERNEL); - if(bitmap_buf == NULL){ - printk("do_io : kmalloc of bitmap chunk " - "failed\n"); - kfree(bitmap_io); - req->error = 1; - return; - } - memcpy(bitmap_buf, &bitmap[off / sizeof(bitmap[0])], len); - - *bitmap_io = ((struct bitmap_io) - { .count = ATOMIC_INIT(0), - .aio = INIT_AIO(AIO_WRITE, req->fds[1], - bitmap_buf, len, - req->bitmap_offset + off, - ubd_reply_fd) } ); - } - - nsectors = req->length / req->sectorsize; - start = 0; - end = nsectors; - bit 
= 0; - do { - if(bitmap != NULL){ - sector = req->offset / req->sectorsize; - bit = ubd_test_bit(sector + start, bitmap); - end = start; - while((end < nsectors) && - (ubd_test_bit(sector + end, bitmap) == bit)) - end++; - } - - off = req->offsets[bit] + req->offset + - start * req->sectorsize; - len = (end - start) * req->sectorsize; - buf = &req->buffer[start * req->sectorsize]; - - aio = kmalloc(sizeof(*aio), GFP_KERNEL); - if(aio == NULL){ - req->error = 1; - return; - } - - *aio = ((struct ubd_aio) - { .aio = INIT_AIO(req->op, req->fds[bit], buf, - len, off, ubd_reply_fd), - .len = len, - .req = r, - .bitmap = bitmap_io, - .bitmap_buf = bitmap_buf }); - - if(aio->bitmap != NULL) - atomic_inc(&aio->bitmap->count); - - err = submit_aio(&aio->aio); - if(err){ - printk("do_io - submit_aio failed, " - "err = %d\n", err); - req->error = 1; - return; - } - - start = end; - } while(start < nsectors); + return(0); } + +void do_io(struct io_thread_req *req) +{ + char *buf; + unsigned long len; + int n, nsectors, start, end, bit; + int err; + __u64 off; + + nsectors = req->length / req->sectorsize; + start = 0; + do { + bit = ubd_test_bit(start, (unsigned char *) &req->sector_mask); + end = start; + while((end < nsectors) && + (ubd_test_bit(end, (unsigned char *) + &req->sector_mask) == bit)) + end++; + + off = req->offset + req->offsets[bit] + + start * req->sectorsize; + len = (end - start) * req->sectorsize; + buf = &req->buffer[start * req->sectorsize]; + + err = os_seek_file(req->fds[bit], off); + if(err < 0){ + printk("do_io - lseek failed : err = %d\n", -err); + req->error = 1; + return; + } + if(req->op == UBD_READ){ + n = 0; + do { + buf = &buf[n]; + len -= n; + n = os_read_file(req->fds[bit], buf, len); + if (n < 0) { + printk("do_io - read failed, err = %d " + "fd = %d\n", -n, req->fds[bit]); + req->error = 1; + return; + } + } while((n < len) && (n != 0)); + if (n < len) memset(&buf[n], 0, len - n); + } else { + n = os_write_file(req->fds[bit], buf, len); + if(n != len){ + printk("do_io - write failed err = %d " + "fd = %d\n", -n, req->fds[bit]); + req->error = 1; + return; + } + } + + start = end; + } while(start < nsectors); + + req->error = update_bitmap(req); +} + +/* Changed in start_io_thread, which is serialized by being called only + * from ubd_init, which is an initcall. + */ +int kernel_fd = -1; + +/* Only changed by the io thread */ +int io_count = 0; + +int io_thread(void *arg) +{ + struct io_thread_req req; + int n; + + ignore_sigwinch_sig(); + while(1){ + n = os_read_file(kernel_fd, &req, sizeof(req)); + if(n != sizeof(req)){ + if(n < 0) + printk("io_thread - read failed, fd = %d, " + "err = %d\n", kernel_fd, -n); + else { + printk("io_thread - short read, fd = %d, " + "length = %d\n", kernel_fd, n); + } + continue; + } + io_count++; + do_io(&req); + n = os_write_file(kernel_fd, &req, sizeof(req)); + if(n != sizeof(req)) + printk("io_thread - write failed, fd = %d, err = %d\n", + kernel_fd, -n); + } +} + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. 
+ * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/arch/um/drivers/ubd_user.c b/arch/um/drivers/ubd_user.c new file mode 100644 index 000000000000..b94d2bc4fe06 --- /dev/null +++ b/arch/um/drivers/ubd_user.c @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com) + * Copyright (C) 2001 Ridgerun,Inc (glonnon@ridgerun.com) + * Licensed under the GPL + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "asm/types.h" +#include "user_util.h" +#include "kern_util.h" +#include "user.h" +#include "ubd_user.h" +#include "os.h" +#include "cow.h" + +#include +#include + +void ignore_sigwinch_sig(void) +{ + signal(SIGWINCH, SIG_IGN); +} + +int start_io_thread(unsigned long sp, int *fd_out) +{ + int pid, fds[2], err; + + err = os_pipe(fds, 1, 1); + if(err < 0){ + printk("start_io_thread - os_pipe failed, err = %d\n", -err); + goto out; + } + + kernel_fd = fds[0]; + *fd_out = fds[1]; + + pid = clone(io_thread, (void *) sp, CLONE_FILES | CLONE_VM | SIGCHLD, + NULL); + if(pid < 0){ + printk("start_io_thread - clone failed : errno = %d\n", errno); + err = -errno; + goto out_close; + } + + return(pid); + + out_close: + os_close_file(fds[0]); + os_close_file(fds[1]); + kernel_fd = -1; + *fd_out = -1; + out: + return(err); +} + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. + * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/arch/um/include/aio.h b/arch/um/include/aio.h index 83f16877ab08..423bae9153f8 100644 --- a/arch/um/include/aio.h +++ b/arch/um/include/aio.h @@ -14,27 +14,15 @@ struct aio_thread_reply { }; struct aio_context { - enum aio_type type; - int fd; - void *data; - int len; - unsigned long long offset; int reply_fd; struct aio_context *next; }; -#define INIT_AIO(aio_type, aio_fd, aio_data, aio_len, aio_offset, \ - aio_reply_fd) \ - { .type = aio_type, \ - .fd = aio_fd, \ - .data = aio_data, \ - .len = aio_len, \ - .offset = aio_offset, \ - .reply_fd = aio_reply_fd } - #define INIT_AIO_CONTEXT { .reply_fd = -1, \ .next = NULL } -extern int submit_aio(struct aio_context *aio); +extern int submit_aio(enum aio_type type, int fd, char *buf, int len, + unsigned long long offset, int reply_fd, + struct aio_context *aio); #endif diff --git a/arch/um/include/common-offsets.h b/arch/um/include/common-offsets.h index 782ac3a3baf9..356390d1f8b9 100644 --- a/arch/um/include/common-offsets.h +++ b/arch/um/include/common-offsets.h @@ -1,7 +1,7 @@ /* for use by sys-$SUBARCH/kernel-offsets.c */ -OFFSET(TASK_REGS, task_struct, thread.regs); -OFFSET(TASK_PID, task_struct, pid); +OFFSET(HOST_TASK_REGS, task_struct, thread.regs); +OFFSET(HOST_TASK_PID, task_struct, pid); DEFINE(UM_KERN_PAGE_SIZE, PAGE_SIZE); DEFINE(UM_NSEC_PER_SEC, NSEC_PER_SEC); DEFINE_STR(UM_KERN_EMERG, KERN_EMERG); diff --git a/arch/um/include/os.h b/arch/um/include/os.h index 583329d0a539..2e58e304b8be 100644 --- a/arch/um/include/os.h +++ b/arch/um/include/os.h @@ -6,6 +6,7 @@ #ifndef __OS_H__ #define __OS_H__ +#include "uml-config.h" #include "asm/types.h" #include "../os/include/file.h" @@ -157,6 +158,14 @@ extern int os_lock_file(int fd, int excl); extern 
void os_early_checks(void); extern int can_do_skas(void); +/* Make sure they are clear when running in TT mode. Required by + * SEGV_MAYBE_FIXABLE */ +#ifdef UML_CONFIG_MODE_SKAS +#define clear_can_do_skas() do { ptrace_faultinfo = proc_mm = 0; } while (0) +#else +#define clear_can_do_skas() do {} while (0) +#endif + /* mem.c */ extern int create_mem_file(unsigned long len); diff --git a/arch/um/include/registers.h b/arch/um/include/registers.h index 0a35e6d0baa0..4892e5fcef07 100644 --- a/arch/um/include/registers.h +++ b/arch/um/include/registers.h @@ -15,16 +15,6 @@ extern void save_registers(int pid, union uml_pt_regs *regs); extern void restore_registers(int pid, union uml_pt_regs *regs); extern void init_registers(int pid); extern void get_safe_registers(unsigned long * regs); +extern void get_thread_regs(union uml_pt_regs *uml_regs, void *buffer); #endif - -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/include/skas_ptregs.h b/arch/um/include/skas_ptregs.h new file mode 100644 index 000000000000..73db19e9c077 --- /dev/null +++ b/arch/um/include/skas_ptregs.h @@ -0,0 +1,6 @@ +#ifndef __SKAS_PT_REGS_ +#define __SKAS_PT_REGS_ + +#include + +#endif diff --git a/arch/um/include/sysdep-i386/sc.h b/arch/um/include/sysdep-i386/sc.h new file mode 100644 index 000000000000..c57d1780ad37 --- /dev/null +++ b/arch/um/include/sysdep-i386/sc.h @@ -0,0 +1,44 @@ +#ifndef __SYSDEP_I386_SC_H +#define __SYSDEP_I386_SC_H + +#include + +#define SC_OFFSET(sc, field) \ + *((unsigned long *) &(((char *) (sc))[HOST_##field])) +#define SC_FP_OFFSET(sc, field) \ + *((unsigned long *) &(((char *) (SC_FPSTATE(sc)))[HOST_##field])) +#define SC_FP_OFFSET_PTR(sc, field, type) \ + ((type *) &(((char *) (SC_FPSTATE(sc)))[HOST_##field])) + +#define SC_IP(sc) SC_OFFSET(sc, SC_IP) +#define SC_SP(sc) SC_OFFSET(sc, SC_SP) +#define SC_FS(sc) SC_OFFSET(sc, SC_FS) +#define SC_GS(sc) SC_OFFSET(sc, SC_GS) +#define SC_DS(sc) SC_OFFSET(sc, SC_DS) +#define SC_ES(sc) SC_OFFSET(sc, SC_ES) +#define SC_SS(sc) SC_OFFSET(sc, SC_SS) +#define SC_CS(sc) SC_OFFSET(sc, SC_CS) +#define SC_EFLAGS(sc) SC_OFFSET(sc, SC_EFLAGS) +#define SC_EAX(sc) SC_OFFSET(sc, SC_EAX) +#define SC_EBX(sc) SC_OFFSET(sc, SC_EBX) +#define SC_ECX(sc) SC_OFFSET(sc, SC_ECX) +#define SC_EDX(sc) SC_OFFSET(sc, SC_EDX) +#define SC_EDI(sc) SC_OFFSET(sc, SC_EDI) +#define SC_ESI(sc) SC_OFFSET(sc, SC_ESI) +#define SC_EBP(sc) SC_OFFSET(sc, SC_EBP) +#define SC_TRAPNO(sc) SC_OFFSET(sc, SC_TRAPNO) +#define SC_ERR(sc) SC_OFFSET(sc, SC_ERR) +#define SC_CR2(sc) SC_OFFSET(sc, SC_CR2) +#define SC_FPSTATE(sc) SC_OFFSET(sc, SC_FPSTATE) +#define SC_SIGMASK(sc) SC_OFFSET(sc, SC_SIGMASK) +#define SC_FP_CW(sc) SC_FP_OFFSET(sc, SC_FP_CW) +#define SC_FP_SW(sc) SC_FP_OFFSET(sc, SC_FP_SW) +#define SC_FP_TAG(sc) SC_FP_OFFSET(sc, SC_FP_TAG) +#define SC_FP_IPOFF(sc) SC_FP_OFFSET(sc, SC_FP_IPOFF) +#define SC_FP_CSSEL(sc) SC_FP_OFFSET(sc, SC_FP_CSSEL) +#define SC_FP_DATAOFF(sc) SC_FP_OFFSET(sc, SC_FP_DATAOFF) +#define SC_FP_DATASEL(sc) SC_FP_OFFSET(sc, SC_FP_DATASEL) +#define SC_FP_ST(sc) SC_FP_OFFSET_PTR(sc, SC_FP_ST, struct _fpstate) +#define SC_FXSR_ENV(sc) SC_FP_OFFSET_PTR(sc, SC_FXSR_ENV, void) + +#endif diff --git 
a/arch/um/include/sysdep-i386/sigcontext.h b/arch/um/include/sysdep-i386/sigcontext.h index 1fe729265167..23fd2644d7ed 100644 --- a/arch/um/include/sysdep-i386/sigcontext.h +++ b/arch/um/include/sysdep-i386/sigcontext.h @@ -6,6 +6,7 @@ #ifndef __SYS_SIGCONTEXT_I386_H #define __SYS_SIGCONTEXT_I386_H +#include "uml-config.h" #include #define IP_RESTART_SYSCALL(ip) ((ip) -= 2) @@ -26,7 +27,14 @@ #define SC_START_SYSCALL(sc) do SC_EAX(sc) = -ENOSYS; while(0) /* This is Page Fault */ -#define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14) +#define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14) + +/* SKAS3 has no trap_no on i386, but get_skas_faultinfo() sets it to 0. */ +#ifdef UML_CONFIG_MODE_SKAS +#define SEGV_MAYBE_FIXABLE(fi) ((fi)->trap_no == 0 && ptrace_faultinfo) +#else +#define SEGV_MAYBE_FIXABLE(fi) 0 +#endif extern unsigned long *sc_sigmask(void *sc_ptr); extern int sc_get_fpregs(unsigned long buf, void *sc_ptr); diff --git a/arch/um/include/sysdep-i386/thread.h b/arch/um/include/sysdep-i386/thread.h new file mode 100644 index 000000000000..243fed44d780 --- /dev/null +++ b/arch/um/include/sysdep-i386/thread.h @@ -0,0 +1,11 @@ +#ifndef __UM_THREAD_H +#define __UM_THREAD_H + +#include + +#define TASK_DEBUGREGS(task) ((unsigned long *) &(((char *) (task))[HOST_TASK_DEBUGREGS])) +#ifdef UML_CONFIG_MODE_TT +#define TASK_EXTERN_PID(task) *((int *) &(((char *) (task))[HOST_TASK_EXTERN_PID])) +#endif + +#endif diff --git a/arch/um/include/sysdep-x86_64/ptrace.h b/arch/um/include/sysdep-x86_64/ptrace.h index 331aa2d1f3f5..8d353f0feec1 100644 --- a/arch/um/include/sysdep-x86_64/ptrace.h +++ b/arch/um/include/sysdep-x86_64/ptrace.h @@ -183,10 +183,6 @@ struct syscall_args { case RBP: val = UPT_RBP(regs); break; \ case ORIG_RAX: val = UPT_ORIG_RAX(regs); break; \ case CS: val = UPT_CS(regs); break; \ - case DS: val = UPT_DS(regs); break; \ - case ES: val = UPT_ES(regs); break; \ - case FS: val = UPT_FS(regs); break; \ - case GS: val = UPT_GS(regs); break; \ case EFLAGS: val = UPT_EFLAGS(regs); break; \ default : \ panic("Bad register in UPT_REG : %d\n", reg); \ @@ -218,10 +214,6 @@ struct syscall_args { case RBP: UPT_RBP(regs) = __upt_val; break; \ case ORIG_RAX: UPT_ORIG_RAX(regs) = __upt_val; break; \ case CS: UPT_CS(regs) = __upt_val; break; \ - case DS: UPT_DS(regs) = __upt_val; break; \ - case ES: UPT_ES(regs) = __upt_val; break; \ - case FS: UPT_FS(regs) = __upt_val; break; \ - case GS: UPT_GS(regs) = __upt_val; break; \ case EFLAGS: UPT_EFLAGS(regs) = __upt_val; break; \ default : \ panic("Bad register in UPT_SET : %d\n", reg); \ diff --git a/arch/um/include/sysdep-x86_64/sc.h b/arch/um/include/sysdep-x86_64/sc.h new file mode 100644 index 000000000000..a160d9fcc596 --- /dev/null +++ b/arch/um/include/sysdep-x86_64/sc.h @@ -0,0 +1,45 @@ +#ifndef __SYSDEP_X86_64_SC_H +#define __SYSDEP_X86_64_SC_H + +/* Copyright (C) 2003 - 2004 PathScale, Inc + * Released under the GPL + */ + +#include + +#define SC_OFFSET(sc, field) \ + *((unsigned long *) &(((char *) (sc))[HOST_##field])) + +#define SC_RBX(sc) SC_OFFSET(sc, SC_RBX) +#define SC_RCX(sc) SC_OFFSET(sc, SC_RCX) +#define SC_RDX(sc) SC_OFFSET(sc, SC_RDX) +#define SC_RSI(sc) SC_OFFSET(sc, SC_RSI) +#define SC_RDI(sc) SC_OFFSET(sc, SC_RDI) +#define SC_RBP(sc) SC_OFFSET(sc, SC_RBP) +#define SC_RAX(sc) SC_OFFSET(sc, SC_RAX) +#define SC_R8(sc) SC_OFFSET(sc, SC_R8) +#define SC_R9(sc) SC_OFFSET(sc, SC_R9) +#define SC_R10(sc) SC_OFFSET(sc, SC_R10) +#define SC_R11(sc) SC_OFFSET(sc, SC_R11) +#define SC_R12(sc) SC_OFFSET(sc, SC_R12) +#define SC_R13(sc) SC_OFFSET(sc, 
SC_R13) +#define SC_R14(sc) SC_OFFSET(sc, SC_R14) +#define SC_R15(sc) SC_OFFSET(sc, SC_R15) +#define SC_IP(sc) SC_OFFSET(sc, SC_IP) +#define SC_SP(sc) SC_OFFSET(sc, SC_SP) +#define SC_CR2(sc) SC_OFFSET(sc, SC_CR2) +#define SC_ERR(sc) SC_OFFSET(sc, SC_ERR) +#define SC_TRAPNO(sc) SC_OFFSET(sc, SC_TRAPNO) +#define SC_CS(sc) SC_OFFSET(sc, SC_CS) +#define SC_FS(sc) SC_OFFSET(sc, SC_FS) +#define SC_GS(sc) SC_OFFSET(sc, SC_GS) +#define SC_EFLAGS(sc) SC_OFFSET(sc, SC_EFLAGS) +#define SC_SIGMASK(sc) SC_OFFSET(sc, SC_SIGMASK) +#if 0 +#define SC_ORIG_RAX(sc) SC_OFFSET(sc, SC_ORIG_RAX) +#define SC_DS(sc) SC_OFFSET(sc, SC_DS) +#define SC_ES(sc) SC_OFFSET(sc, SC_ES) +#define SC_SS(sc) SC_OFFSET(sc, SC_SS) +#endif + +#endif diff --git a/arch/um/include/sysdep-x86_64/sigcontext.h b/arch/um/include/sysdep-x86_64/sigcontext.h index 2a78260d15a0..41073235e7ad 100644 --- a/arch/um/include/sysdep-x86_64/sigcontext.h +++ b/arch/um/include/sysdep-x86_64/sigcontext.h @@ -31,7 +31,10 @@ #define SC_START_SYSCALL(sc) do SC_RAX(sc) = -ENOSYS; while(0) /* This is Page Fault */ -#define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14) +#define SEGV_IS_FIXABLE(fi) ((fi)->trap_no == 14) + +/* No broken SKAS API, which doesn't pass trap_no, here. */ +#define SEGV_MAYBE_FIXABLE(fi) 0 extern unsigned long *sc_sigmask(void *sc_ptr); diff --git a/arch/um/include/sysdep-x86_64/thread.h b/arch/um/include/sysdep-x86_64/thread.h new file mode 100644 index 000000000000..cbef3e1697f4 --- /dev/null +++ b/arch/um/include/sysdep-x86_64/thread.h @@ -0,0 +1,10 @@ +#ifndef __UM_THREAD_H +#define __UM_THREAD_H + +#include + +#ifdef UML_CONFIG_MODE_TT +#define TASK_EXTERN_PID(task) *((int *) &(((char *) (task))[HOST_TASK_EXTERN_PID])) +#endif + +#endif diff --git a/arch/um/include/task.h b/arch/um/include/task.h new file mode 100644 index 000000000000..6375ba7203c9 --- /dev/null +++ b/arch/um/include/task.h @@ -0,0 +1,9 @@ +#ifndef __TASK_H +#define __TASK_H + +#include + +#define TASK_REGS(task) ((union uml_pt_regs *) &(((char *) (task))[HOST_TASK_REGS])) +#define TASK_PID(task) *((int *) &(((char *) (task))[HOST_TASK_PID])) + +#endif diff --git a/arch/um/include/user.h b/arch/um/include/user.h index 57ee9e261228..0f865ef46918 100644 --- a/arch/um/include/user.h +++ b/arch/um/include/user.h @@ -14,7 +14,9 @@ extern void *um_kmalloc_atomic(int size); extern void kfree(void *ptr); extern int in_aton(char *str); extern int open_gdb_chan(void); -extern int strlcpy(char *, const char *, int); +/* These use size_t, however unsigned long is correct on both i386 and x86_64. 
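 * (size_t is 32 bits on i386 and 64 bits on x86_64, the same width as
 * unsigned long on each, so these prototypes remain ABI-compatible with
 * the libc definitions.)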
*/ +extern unsigned long strlcpy(char *, const char *, unsigned long); +extern unsigned long strlcat(char *, const char *, unsigned long); extern void *um_vmalloc(int size); extern void vfree(void *ptr); diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c index dcd814971995..bbf94bf2921e 100644 --- a/arch/um/kernel/irq.c +++ b/arch/um/kernel/irq.c @@ -9,7 +9,6 @@ #include "linux/kernel.h" #include "linux/module.h" #include "linux/smp.h" -#include "linux/irq.h" #include "linux/kernel_stat.h" #include "linux/interrupt.h" #include "linux/random.h" diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index ea008b031a8f..462cc9d65386 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -252,7 +252,7 @@ void paging_init(void) #endif } -struct page *arch_validate(struct page *page, int mask, int order) +struct page *arch_validate(struct page *page, gfp_t mask, int order) { unsigned long addr, zero = 0; int i; diff --git a/arch/um/kernel/process_kern.c b/arch/um/kernel/process_kern.c index 39cf568ccfaf..0d73ceeece72 100644 --- a/arch/um/kernel/process_kern.c +++ b/arch/um/kernel/process_kern.c @@ -80,9 +80,10 @@ void free_stack(unsigned long stack, int order) unsigned long alloc_stack(int order, int atomic) { unsigned long page; - int flags = GFP_KERNEL; + gfp_t flags = GFP_KERNEL; - if(atomic) flags |= GFP_ATOMIC; + if (atomic) + flags = GFP_ATOMIC; page = __get_free_pages(flags, order); if(page == 0) return(0); diff --git a/arch/um/kernel/sigio_user.c b/arch/um/kernel/sigio_user.c index e89218958f38..a52751108aa1 100644 --- a/arch/um/kernel/sigio_user.c +++ b/arch/um/kernel/sigio_user.c @@ -340,7 +340,7 @@ static int setup_initial_poll(int fd) { struct pollfd *p; - p = um_kmalloc(sizeof(struct pollfd)); + p = um_kmalloc_atomic(sizeof(struct pollfd)); if(p == NULL){ printk("setup_initial_poll : failed to allocate poll\n"); return(-1); diff --git a/arch/um/kernel/skas/Makefile b/arch/um/kernel/skas/Makefile index db36c7c95940..8de471b59c1c 100644 --- a/arch/um/kernel/skas/Makefile +++ b/arch/um/kernel/skas/Makefile @@ -6,8 +6,6 @@ obj-y := clone.o exec_kern.o mem.o mem_user.o mmu.o process.o process_kern.o \ syscall.o tlb.o trap_user.o uaccess.o -subdir- := util - USER_OBJS := process.o clone.o include arch/um/scripts/Makefile.rules diff --git a/arch/um/kernel/skas/util/Makefile b/arch/um/kernel/skas/util/Makefile deleted file mode 100644 index f7b7eba83340..000000000000 --- a/arch/um/kernel/skas/util/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -hostprogs-y := mk_ptregs -always := $(hostprogs-y) - -mk_ptregs-objs := mk_ptregs-$(SUBARCH).o -HOSTCFLAGS_mk_ptregs-$(SUBARCH).o := -I$(objtree)/arch/um diff --git a/arch/um/kernel/skas/util/mk_ptregs-i386.c b/arch/um/kernel/skas/util/mk_ptregs-i386.c deleted file mode 100644 index 1f96e1eeb8a7..000000000000 --- a/arch/um/kernel/skas/util/mk_ptregs-i386.c +++ /dev/null @@ -1,49 +0,0 @@ -#include -#include - -#define SHOW(name) printf("#define %s %d\n", #name, name) - -int main(int argc, char **argv) -{ - printf("/* Automatically generated by " - "arch/um/kernel/skas/util/mk_ptregs */\n"); - printf("\n"); - printf("#ifndef __SKAS_PT_REGS_\n"); - printf("#define __SKAS_PT_REGS_\n"); - printf("\n"); - SHOW(HOST_FRAME_SIZE); - SHOW(HOST_FP_SIZE); - SHOW(HOST_XFP_SIZE); - - SHOW(HOST_IP); - SHOW(HOST_SP); - SHOW(HOST_EFLAGS); - SHOW(HOST_EAX); - SHOW(HOST_EBX); - SHOW(HOST_ECX); - SHOW(HOST_EDX); - SHOW(HOST_ESI); - SHOW(HOST_EDI); - SHOW(HOST_EBP); - SHOW(HOST_CS); - SHOW(HOST_SS); - SHOW(HOST_DS); - SHOW(HOST_FS); - SHOW(HOST_ES); - 
SHOW(HOST_GS); - - printf("\n"); - printf("#endif\n"); - return(0); -} - -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/kernel/skas/util/mk_ptregs-x86_64.c b/arch/um/kernel/skas/util/mk_ptregs-x86_64.c deleted file mode 100644 index 5fccbfe35f78..000000000000 --- a/arch/um/kernel/skas/util/mk_ptregs-x86_64.c +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2003 PathScale, Inc. - * - * Licensed under the GPL - */ - -#include -#include - -#define SHOW(name) \ - printf("#define %s (%d / sizeof(unsigned long))\n", #name, name) - -int main(int argc, char **argv) -{ - printf("/* Automatically generated by " - "arch/um/kernel/skas/util/mk_ptregs */\n"); - printf("\n"); - printf("#ifndef __SKAS_PT_REGS_\n"); - printf("#define __SKAS_PT_REGS_\n"); - SHOW(HOST_FRAME_SIZE); - SHOW(HOST_RBX); - SHOW(HOST_RCX); - SHOW(HOST_RDI); - SHOW(HOST_RSI); - SHOW(HOST_RDX); - SHOW(HOST_RBP); - SHOW(HOST_RAX); - SHOW(HOST_R8); - SHOW(HOST_R9); - SHOW(HOST_R10); - SHOW(HOST_R11); - SHOW(HOST_R12); - SHOW(HOST_R13); - SHOW(HOST_R14); - SHOW(HOST_R15); - SHOW(HOST_ORIG_RAX); - SHOW(HOST_CS); - SHOW(HOST_SS); - SHOW(HOST_EFLAGS); -#if 0 - SHOW(HOST_FS); - SHOW(HOST_GS); - SHOW(HOST_DS); - SHOW(HOST_ES); -#endif - - SHOW(HOST_IP); - SHOW(HOST_SP); - printf("#define HOST_FP_SIZE 0\n"); - printf("#define HOST_XFP_SIZE 0\n"); - printf("\n"); - printf("\n"); - printf("#endif\n"); - return(0); -} - -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c index f80850091e79..b331e970002f 100644 --- a/arch/um/kernel/sysrq.c +++ b/arch/um/kernel/sysrq.c @@ -62,13 +62,7 @@ void show_stack(struct task_struct *task, unsigned long *esp) if (esp == NULL) { if (task != current && task != NULL) { - /* XXX: Isn't this bogus? I.e. isn't this the - * *userspace* stack of this task? If not so, use this - * even when task == current (as in i386). - */ esp = (unsigned long *) KSTK_ESP(task); - /* Which one? 
No actual difference - just coding style.*/ - //esp = (unsigned long *) PT_REGS_IP(&task->thread.regs); } else { esp = (unsigned long *) &esp; } @@ -84,5 +78,5 @@ void show_stack(struct task_struct *task, unsigned long *esp) } printk("Call Trace: \n"); - show_trace(current, esp); + show_trace(task, esp); } diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c index 0a562c3c0fd8..f5b0636f9ad7 100644 --- a/arch/um/kernel/tlb.c +++ b/arch/um/kernel/tlb.c @@ -193,12 +193,12 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr, r = pte_read(*npte); w = pte_write(*npte); x = pte_exec(*npte); - if(!pte_dirty(*npte)) - w = 0; - if(!pte_young(*npte)){ - r = 0; - w = 0; - } + if (!pte_young(*npte)) { + r = 0; + w = 0; + } else if (!pte_dirty(*npte)) { + w = 0; + } if(force || pte_newpage(*npte)){ if(pte_present(*npte)) ret = add_mmap(addr, diff --git a/arch/um/kernel/trap_kern.c b/arch/um/kernel/trap_kern.c index 87cc6fd76ced..95c8f8733baf 100644 --- a/arch/um/kernel/trap_kern.c +++ b/arch/um/kernel/trap_kern.c @@ -18,6 +18,7 @@ #include "asm/a.out.h" #include "asm/current.h" #include "asm/irq.h" +#include "sysdep/sigcontext.h" #include "user_util.h" #include "kern_util.h" #include "kern.h" @@ -25,6 +26,9 @@ #include "mconsole_kern.h" #include "mem.h" #include "mem_kern.h" +#ifdef CONFIG_MODE_SKAS +#include "skas.h" +#endif /* Note this is constrained to return 0, -EFAULT, -EACCESS, -ENOMEM by segv(). */ int handle_page_fault(unsigned long address, unsigned long ip, @@ -39,6 +43,12 @@ int handle_page_fault(unsigned long address, unsigned long ip, int err = -EFAULT; *code_out = SEGV_MAPERR; + + /* If the fault was during atomic operation, don't take the fault, just + * fail. */ + if (in_atomic()) + goto out_nosemaphore; + down_read(&mm->mmap_sem); vma = find_vma(mm, address); if(!vma) @@ -89,6 +99,7 @@ survive: flush_tlb_page(vma, address); out: up_read(&mm->mmap_sem); +out_nosemaphore: return(err); /* @@ -125,7 +136,15 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, void *sc) } else if(current->mm == NULL) panic("Segfault with no mm"); - err = handle_page_fault(address, ip, is_write, is_user, &si.si_code); + + if (SEGV_IS_FIXABLE(&fi) || SEGV_MAYBE_FIXABLE(&fi)) + err = handle_page_fault(address, ip, is_write, is_user, &si.si_code); + else { + err = -EFAULT; + /* A thread accessed NULL, we get a fault, but CR2 is invalid. + * This code is used in __do_copy_from_user() of TT mode. */ + address = 0; + } catcher = current->thread.fault_catcher; if(!err) diff --git a/arch/um/kernel/tt/uaccess_user.c b/arch/um/kernel/tt/uaccess_user.c index f01475512ecb..8c220f054b61 100644 --- a/arch/um/kernel/tt/uaccess_user.c +++ b/arch/um/kernel/tt/uaccess_user.c @@ -22,8 +22,15 @@ int __do_copy_from_user(void *to, const void *from, int n, __do_copy, &faulted); TASK_REGS(get_current())->tt = save; - if(!faulted) return(0); - else return(n - (fault - (unsigned long) from)); + if(!faulted) + return 0; + else if (fault) + return n - (fault - (unsigned long) from); + else + /* In case of a general protection fault, we don't have the + * fault address, so NULL is used instead. Pretend we didn't + * copy anything. 
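		 *
		 * As in copy_from_user(), the value returned is the number of
		 * bytes that were NOT copied: e.g. a fault at from + 60 while
		 * copying n == 100 bytes makes the branch above return 40,
		 * while returning n here claims that nothing was copied.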
*/ + return n; } static void __do_strncpy(void *dst, const void *src, int count) diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c index f0a275947d34..93dc782dc1cc 100644 --- a/arch/um/kernel/um_arch.c +++ b/arch/um/kernel/um_arch.c @@ -334,6 +334,8 @@ int linux_main(int argc, char **argv) add_arg(DEFAULT_COMMAND_LINE); os_early_checks(); + if (force_tt) + clear_can_do_skas(); mode_tt = force_tt ? 1 : !can_do_skas(); #ifndef CONFIG_MODE_TT if (mode_tt) { diff --git a/arch/um/kernel/umid.c b/arch/um/kernel/umid.c index 186c28885016..0b21d59ba0cd 100644 --- a/arch/um/kernel/umid.c +++ b/arch/um/kernel/umid.c @@ -31,6 +31,8 @@ static char *uml_dir = UML_DIR; /* Changed by set_umid */ static int umid_is_random = 1; static int umid_inited = 0; +/* Have we created the files? Should we remove them? */ +static int umid_owned = 0; static int make_umid(int (*printer)(const char *fmt, ...)); @@ -82,20 +84,21 @@ int __init umid_file_name(char *name, char *buf, int len) extern int tracing_pid; -static int __init create_pid_file(void) +static void __init create_pid_file(void) { char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")]; char pid[sizeof("nnnnn\0")]; int fd, n; - if(umid_file_name("pid", file, sizeof(file))) return 0; + if(umid_file_name("pid", file, sizeof(file))) + return; fd = os_open_file(file, of_create(of_excl(of_rdwr(OPENFLAGS()))), 0644); if(fd < 0){ printf("Open of machine pid file \"%s\" failed: %s\n", file, strerror(-fd)); - return 0; + return; } sprintf(pid, "%d\n", os_getpid()); @@ -103,7 +106,6 @@ static int __init create_pid_file(void) if(n != strlen(pid)) printf("Write of pid file failed - err = %d\n", -n); os_close_file(fd); - return 0; } static int actually_do_remove(char *dir) @@ -147,7 +149,8 @@ static int actually_do_remove(char *dir) void remove_umid_dir(void) { char dir[strlen(uml_dir) + UMID_LEN + 1]; - if(!umid_inited) return; + if (!umid_owned) + return; sprintf(dir, "%s%s", uml_dir, umid); actually_do_remove(dir); @@ -155,11 +158,12 @@ void remove_umid_dir(void) char *get_umid(int only_if_set) { - if(only_if_set && umid_is_random) return(NULL); - return(umid); + if(only_if_set && umid_is_random) + return NULL; + return umid; } -int not_dead_yet(char *dir) +static int not_dead_yet(char *dir) { char file[strlen(uml_dir) + UMID_LEN + sizeof("/pid\0")]; char pid[sizeof("nnnnn\0")], *end; @@ -193,7 +197,8 @@ int not_dead_yet(char *dir) (p == CHOOSE_MODE(tracing_pid, os_getpid()))) dead = 1; } - if(!dead) return(1); + if(!dead) + return(1); return(actually_do_remove(dir)); } @@ -232,16 +237,13 @@ static int __init make_uml_dir(void) strlcpy(dir, home, sizeof(dir)); uml_dir++; } + strlcat(dir, uml_dir, sizeof(dir)); len = strlen(dir); - strncat(dir, uml_dir, sizeof(dir) - len); - len = strlen(dir); - if((len > 0) && (len < sizeof(dir) - 1) && (dir[len - 1] != '/')){ - dir[len] = '/'; - dir[len + 1] = '\0'; - } + if (len > 0 && dir[len - 1] != '/') + strlcat(dir, "/", sizeof(dir)); uml_dir = malloc(strlen(dir) + 1); - if(uml_dir == NULL){ + if (uml_dir == NULL) { printf("make_uml_dir : malloc failed, errno = %d\n", errno); exit(1); } @@ -286,6 +288,7 @@ static int __init make_umid(int (*printer)(const char *fmt, ...)) if(errno == EEXIST){ if(not_dead_yet(tmp)){ (*printer)("umid '%s' is in use\n", umid); + umid_owned = 0; return(-1); } err = mkdir(tmp, 0777); @@ -296,7 +299,8 @@ static int __init make_umid(int (*printer)(const char *fmt, ...)) return(-1); } - return(0); + umid_owned = 1; + return 0; } __uml_setup("uml_dir=", set_uml_dir, @@ -309,7 +313,8 @@ 
static int __init make_umid_setup(void) /* one function with the ordering we need ... */ make_uml_dir(); make_umid(printf); - return create_pid_file(); + create_pid_file(); + return 0; } __uml_postsetup(make_umid_setup); diff --git a/arch/um/os-Linux/aio.c b/arch/um/os-Linux/aio.c index f6e64026f995..41cfb0944201 100644 --- a/arch/um/os-Linux/aio.c +++ b/arch/um/os-Linux/aio.c @@ -6,7 +6,6 @@ #include #include #include -#include #include #include #include @@ -17,31 +16,18 @@ #include "user.h" #include "mode.h" +struct aio_thread_req { + enum aio_type type; + int io_fd; + unsigned long long offset; + char *buf; + int len; + struct aio_context *aio; +}; + static int aio_req_fd_r = -1; static int aio_req_fd_w = -1; -static int update_aio(struct aio_context *aio, int res) -{ - if(res < 0) - aio->len = res; - else if((res == 0) && (aio->type == AIO_READ)){ - /* This is the EOF case - we have hit the end of the file - * and it ends in a partial block, so we fill the end of - * the block with zeros and claim success. - */ - memset(aio->data, 0, aio->len); - aio->len = 0; - } - else if(res > 0){ - aio->len -= res; - aio->data += res; - aio->offset += res; - return aio->len; - } - - return 0; -} - #if defined(HAVE_AIO_ABI) #include @@ -80,7 +66,8 @@ static long io_getevents(aio_context_t ctx_id, long min_nr, long nr, * that it now backs the mmapped area. */ -static int do_aio(aio_context_t ctx, struct aio_context *aio) +static int do_aio(aio_context_t ctx, enum aio_type type, int fd, char *buf, + int len, unsigned long long offset, struct aio_context *aio) { struct iocb iocb, *iocbp = &iocb; char c; @@ -88,39 +75,40 @@ static int do_aio(aio_context_t ctx, struct aio_context *aio) iocb = ((struct iocb) { .aio_data = (unsigned long) aio, .aio_reqprio = 0, - .aio_fildes = aio->fd, - .aio_buf = (unsigned long) aio->data, - .aio_nbytes = aio->len, - .aio_offset = aio->offset, + .aio_fildes = fd, + .aio_buf = (unsigned long) buf, + .aio_nbytes = len, + .aio_offset = offset, .aio_reserved1 = 0, .aio_reserved2 = 0, .aio_reserved3 = 0 }); - switch(aio->type){ + switch(type){ case AIO_READ: iocb.aio_lio_opcode = IOCB_CMD_PREAD; + err = io_submit(ctx, 1, &iocbp); break; case AIO_WRITE: iocb.aio_lio_opcode = IOCB_CMD_PWRITE; + err = io_submit(ctx, 1, &iocbp); break; case AIO_MMAP: iocb.aio_lio_opcode = IOCB_CMD_PREAD; iocb.aio_buf = (unsigned long) &c; iocb.aio_nbytes = sizeof(c); + err = io_submit(ctx, 1, &iocbp); break; default: - printk("Bogus op in do_aio - %d\n", aio->type); + printk("Bogus op in do_aio - %d\n", type); err = -EINVAL; - goto out; + break; } - err = io_submit(ctx, 1, &iocbp); if(err > 0) err = 0; else err = -errno; - out: return err; } @@ -129,9 +117,8 @@ static aio_context_t ctx = 0; static int aio_thread(void *arg) { struct aio_thread_reply reply; - struct aio_context *aio; struct io_event event; - int err, n; + int err, n, reply_fd; signal(SIGWINCH, SIG_IGN); @@ -144,22 +131,14 @@ static int aio_thread(void *arg) "errno = %d\n", errno); } else { - /* This is safe as we've just a pointer here. 
*/ - aio = (struct aio_context *) (long) event.data; - if(update_aio(aio, event.res)){ - do_aio(ctx, aio); - continue; - } - reply = ((struct aio_thread_reply) - { .data = aio, - .err = aio->len }); - err = os_write_file(aio->reply_fd, &reply, - sizeof(reply)); + { .data = (void *) (long) event.data, + .err = event.res }); + reply_fd = ((struct aio_context *) reply.data)->reply_fd; + err = os_write_file(reply_fd, &reply, sizeof(reply)); if(err != sizeof(reply)) - printk("aio_thread - write failed, " - "fd = %d, err = %d\n", aio->reply_fd, - -err); + printk("aio_thread - write failed, fd = %d, " + "err = %d\n", aio_req_fd_r, -err); } } return 0; @@ -167,35 +146,35 @@ static int aio_thread(void *arg) #endif -static int do_not_aio(struct aio_context *aio) +static int do_not_aio(struct aio_thread_req *req) { char c; int err; - switch(aio->type){ + switch(req->type){ case AIO_READ: - err = os_seek_file(aio->fd, aio->offset); + err = os_seek_file(req->io_fd, req->offset); if(err) goto out; - err = os_read_file(aio->fd, aio->data, aio->len); + err = os_read_file(req->io_fd, req->buf, req->len); break; case AIO_WRITE: - err = os_seek_file(aio->fd, aio->offset); + err = os_seek_file(req->io_fd, req->offset); if(err) goto out; - err = os_write_file(aio->fd, aio->data, aio->len); + err = os_write_file(req->io_fd, req->buf, req->len); break; case AIO_MMAP: - err = os_seek_file(aio->fd, aio->offset); + err = os_seek_file(req->io_fd, req->offset); if(err) goto out; - err = os_read_file(aio->fd, &c, sizeof(c)); + err = os_read_file(req->io_fd, &c, sizeof(c)); break; default: - printk("do_not_aio - bad request type : %d\n", aio->type); + printk("do_not_aio - bad request type : %d\n", req->type); err = -EINVAL; break; } @@ -206,14 +185,14 @@ static int do_not_aio(struct aio_context *aio) static int not_aio_thread(void *arg) { - struct aio_context *aio; + struct aio_thread_req req; struct aio_thread_reply reply; int err; signal(SIGWINCH, SIG_IGN); while(1){ - err = os_read_file(aio_req_fd_r, &aio, sizeof(aio)); - if(err != sizeof(aio)){ + err = os_read_file(aio_req_fd_r, &req, sizeof(req)); + if(err != sizeof(req)){ if(err < 0) printk("not_aio_thread - read failed, " "fd = %d, err = %d\n", aio_req_fd_r, @@ -224,34 +203,17 @@ static int not_aio_thread(void *arg) } continue; } - again: - err = do_not_aio(aio); - - if(update_aio(aio, err)) - goto again; - - reply = ((struct aio_thread_reply) { .data = aio, - .err = aio->len }); - err = os_write_file(aio->reply_fd, &reply, sizeof(reply)); + err = do_not_aio(&req); + reply = ((struct aio_thread_reply) { .data = req.aio, + .err = err }); + err = os_write_file(req.aio->reply_fd, &reply, sizeof(reply)); if(err != sizeof(reply)) printk("not_aio_thread - write failed, fd = %d, " "err = %d\n", aio_req_fd_r, -err); } } -static int submit_aio_24(struct aio_context *aio) -{ - int err; - - err = os_write_file(aio_req_fd_w, &aio, sizeof(aio)); - if(err == sizeof(aio)) - err = 0; - - return err; -} - static int aio_pid = -1; -static int (*submit_proc)(struct aio_context *aio); static int init_aio_24(void) { @@ -283,33 +245,11 @@ static int init_aio_24(void) #endif printk("2.6 host AIO support not used - falling back to I/O " "thread\n"); - - submit_proc = submit_aio_24; - return 0; } #ifdef HAVE_AIO_ABI #define DEFAULT_24_AIO 0 -static int submit_aio_26(struct aio_context *aio) -{ - struct aio_thread_reply reply; - int err; - - err = do_aio(ctx, aio); - if(err){ - reply = ((struct aio_thread_reply) { .data = aio, - .err = err }); - err = os_write_file(aio->reply_fd, &reply, 
sizeof(reply)); - if(err != sizeof(reply)) - printk("submit_aio_26 - write failed, " - "fd = %d, err = %d\n", aio->reply_fd, -err); - else err = 0; - } - - return err; -} - static int init_aio_26(void) { unsigned long stack; @@ -330,22 +270,39 @@ static int init_aio_26(void) aio_pid = err; printk("Using 2.6 host AIO\n"); - - submit_proc = submit_aio_26; - return 0; } +static int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len, + unsigned long long offset, struct aio_context *aio) +{ + struct aio_thread_reply reply; + int err; + + err = do_aio(ctx, type, io_fd, buf, len, offset, aio); + if(err){ + reply = ((struct aio_thread_reply) { .data = aio, + .err = err }); + err = os_write_file(aio->reply_fd, &reply, sizeof(reply)); + if(err != sizeof(reply)) + printk("submit_aio_26 - write failed, " + "fd = %d, err = %d\n", aio->reply_fd, -err); + else err = 0; + } + + return err; +} + #else #define DEFAULT_24_AIO 1 -static int submit_aio_26(struct aio_context *aio) +static int init_aio_26(void) { return -ENOSYS; } -static int init_aio_26(void) +static int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len, + unsigned long long offset, struct aio_context *aio) { - submit_proc = submit_aio_26; return -ENOSYS; } #endif @@ -412,7 +369,33 @@ static void exit_aio(void) __uml_exitcall(exit_aio); -int submit_aio(struct aio_context *aio) +static int submit_aio_24(enum aio_type type, int io_fd, char *buf, int len, + unsigned long long offset, struct aio_context *aio) { - return (*submit_proc)(aio); + struct aio_thread_req req = { .type = type, + .io_fd = io_fd, + .offset = offset, + .buf = buf, + .len = len, + .aio = aio, + }; + int err; + + err = os_write_file(aio_req_fd_w, &req, sizeof(req)); + if(err == sizeof(req)) + err = 0; + + return err; +} + +int submit_aio(enum aio_type type, int io_fd, char *buf, int len, + unsigned long long offset, int reply_fd, + struct aio_context *aio) +{ + aio->reply_fd = reply_fd; + if(aio_24) + return submit_aio_24(type, io_fd, buf, len, offset, aio); + else { + return submit_aio_26(type, io_fd, buf, len, offset, aio); + } } diff --git a/arch/um/os-Linux/elf_aux.c b/arch/um/os-Linux/elf_aux.c index ab33cb3c74ec..5a99dd3fbed0 100644 --- a/arch/um/os-Linux/elf_aux.c +++ b/arch/um/os-Linux/elf_aux.c @@ -12,7 +12,7 @@ #include "init.h" #include "elf_user.h" #include "mem_user.h" -#include +#include /* Use the one from the kernel - the host may miss it, if having old headers. */ #if UM_ELF_CLASS == UM_ELFCLASS32 diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c index 6af83171ca4e..b99ab414542f 100644 --- a/arch/um/os-Linux/start_up.c +++ b/arch/um/os-Linux/start_up.c @@ -143,11 +143,22 @@ static int __init skas0_cmd_param(char *str, int* add) return 0; } +/* The two __uml_setup would conflict, without this stupid alias. */ + +static int __init mode_skas0_cmd_param(char *str, int* add) + __attribute__((alias("skas0_cmd_param"))); + __uml_setup("skas0", skas0_cmd_param, "skas0\n" " Disables SKAS3 usage, so that SKAS0 is used, unless \n" " you specify mode=tt.\n\n"); +__uml_setup("mode=skas0", mode_skas0_cmd_param, + "mode=skas0\n" + " Disables SKAS3 usage, so that SKAS0 is used, unless you \n" + " specify mode=tt. 
Note that this was recently added - on \n" + " older kernels you must use simply \"skas0\".\n\n"); + static int force_sysemu_disabled = 0; static int __init nosysemu_cmd_param(char *str, int* add) diff --git a/arch/um/os-Linux/sys-i386/registers.c b/arch/um/os-Linux/sys-i386/registers.c index 3125d320722c..aee4812333c6 100644 --- a/arch/um/os-Linux/sys-i386/registers.c +++ b/arch/um/os-Linux/sys-i386/registers.c @@ -5,6 +5,7 @@ #include #include +#include #include "sysdep/ptrace_user.h" #include "sysdep/ptrace.h" #include "uml-config.h" @@ -126,13 +127,11 @@ void get_safe_registers(unsigned long *regs) memcpy(regs, exec_regs, HOST_FRAME_SIZE * sizeof(unsigned long)); } -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ +void get_thread_regs(union uml_pt_regs *uml_regs, void *buffer) +{ + struct __jmp_buf_tag *jmpbuf = buffer; + + UPT_SET(uml_regs, EIP, jmpbuf->__jmpbuf[JB_PC]); + UPT_SET(uml_regs, UESP, jmpbuf->__jmpbuf[JB_SP]); + UPT_SET(uml_regs, EBP, jmpbuf->__jmpbuf[JB_BP]); +} diff --git a/arch/um/os-Linux/sys-x86_64/registers.c b/arch/um/os-Linux/sys-x86_64/registers.c index 44438d15c3d6..4b638dfb52b0 100644 --- a/arch/um/os-Linux/sys-x86_64/registers.c +++ b/arch/um/os-Linux/sys-x86_64/registers.c @@ -5,6 +5,7 @@ #include #include +#include #include "ptrace_user.h" #include "uml-config.h" #include "skas_ptregs.h" @@ -74,13 +75,11 @@ void get_safe_registers(unsigned long *regs) memcpy(regs, exec_regs, HOST_FRAME_SIZE * sizeof(unsigned long)); } -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ +void get_thread_regs(union uml_pt_regs *uml_regs, void *buffer) +{ + struct __jmp_buf_tag *jmpbuf = buffer; + + UPT_SET(uml_regs, RIP, jmpbuf->__jmpbuf[JB_PC]); + UPT_SET(uml_regs, RSP, jmpbuf->__jmpbuf[JB_RSP]); + UPT_SET(uml_regs, RBP, jmpbuf->__jmpbuf[JB_RBP]); +} diff --git a/arch/um/os-Linux/util/Makefile b/arch/um/os-Linux/util/Makefile deleted file mode 100644 index 9778aed0c314..000000000000 --- a/arch/um/os-Linux/util/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -hostprogs-y := mk_user_constants -always := $(hostprogs-y) - -HOSTCFLAGS_mk_user_constants.o := -I$(objtree)/arch/um diff --git a/arch/um/os-Linux/util/mk_user_constants.c b/arch/um/os-Linux/util/mk_user_constants.c deleted file mode 100644 index 4838f30eecf0..000000000000 --- a/arch/um/os-Linux/util/mk_user_constants.c +++ /dev/null @@ -1,23 +0,0 @@ -#include -#include - -int main(int argc, char **argv) -{ - printf("/*\n"); - printf(" * Generated by mk_user_constants\n"); - printf(" */\n"); - printf("\n"); - printf("#ifndef __UM_USER_CONSTANTS_H\n"); - printf("#define __UM_USER_CONSTANTS_H\n"); - printf("\n"); - /* I'd like to use FRAME_SIZE from ptrace.h here, but that's wrong on - * x86_64 (216 vs 168 bytes). user_regs_struct is the correct size on - * both x86_64 and i386. 
- */ - printf("#define UM_FRAME_SIZE %d\n", __UM_FRAME_SIZE); - - printf("\n"); - printf("#endif\n"); - - return(0); -} diff --git a/arch/um/scripts/Makefile.rules b/arch/um/scripts/Makefile.rules index 59a1291f477e..651d9d88b656 100644 --- a/arch/um/scripts/Makefile.rules +++ b/arch/um/scripts/Makefile.rules @@ -7,8 +7,8 @@ USER_SINGLE_OBJS := \ USER_OBJS += $(filter %_user.o,$(obj-y) $(obj-m) $(USER_SINGLE_OBJS)) USER_OBJS := $(foreach file,$(USER_OBJS),$(obj)/$(file)) -$(USER_OBJS) : c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) \ - $(CFLAGS_$(notdir $@)) +$(USER_OBJS) $(USER_OBJS:.o=.i) $(USER_OBJS:.o=.s) $(USER_OBJS:.o=.lst): \ + c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) $(CFLAGS_$(notdir $@)) $(USER_OBJS): cmd_checksrc = $(USER_OBJS): quiet_cmd_checksrc = $(USER_OBJS): cmd_force_checksrc = diff --git a/arch/um/sys-i386/Makefile b/arch/um/sys-i386/Makefile index 4ca2a229da49..6dfeb70f6957 100644 --- a/arch/um/sys-i386/Makefile +++ b/arch/um/sys-i386/Makefile @@ -18,6 +18,4 @@ module.c-dir = kernel $(obj)/stub_segv.o : _c_flags = $(call unprofile,$(CFLAGS)) -subdir- := util - include arch/um/scripts/Makefile.unmap diff --git a/arch/um/sys-i386/kernel-offsets.c b/arch/um/sys-i386/kernel-offsets.c index a1070af2bcd8..35db85057506 100644 --- a/arch/um/sys-i386/kernel-offsets.c +++ b/arch/um/sys-i386/kernel-offsets.c @@ -18,9 +18,9 @@ void foo(void) { - OFFSET(TASK_DEBUGREGS, task_struct, thread.arch.debugregs); + OFFSET(HOST_TASK_DEBUGREGS, task_struct, thread.arch.debugregs); #ifdef CONFIG_MODE_TT - OFFSET(TASK_EXTERN_PID, task_struct, thread.mode.tt.extern_pid); + OFFSET(HOST_TASK_EXTERN_PID, task_struct, thread.mode.tt.extern_pid); #endif #include } diff --git a/arch/um/sys-i386/sysrq.c b/arch/um/sys-i386/sysrq.c index e3706d15c4f5..d5244f070539 100644 --- a/arch/um/sys-i386/sysrq.c +++ b/arch/um/sys-i386/sysrq.c @@ -88,9 +88,7 @@ void show_trace(struct task_struct* task, unsigned long * stack) task = current; if (task != current) { - //ebp = (unsigned long) KSTK_EBP(task); - /* Which one? 
No actual difference - just coding style.*/ - ebp = (unsigned long) PT_REGS_EBP(&task->thread.regs); + ebp = (unsigned long) KSTK_EBP(task); } else { asm ("movl %%ebp, %0" : "=r" (ebp) : ); } @@ -99,15 +97,6 @@ void show_trace(struct task_struct* task, unsigned long * stack) ((unsigned long)stack & (~(THREAD_SIZE - 1))); print_context_stack(context, stack, ebp); - /*while (((long) stack & (THREAD_SIZE-1)) != 0) { - addr = *stack; - if (__kernel_text_address(addr)) { - printk("%08lx: [<%08lx>]", (unsigned long) stack, addr); - print_symbol(" %s", addr); - printk("\n"); - } - stack++; - }*/ printk("\n"); } diff --git a/arch/um/sys-i386/user-offsets.c b/arch/um/sys-i386/user-offsets.c index 3ceaabceb3d7..26b68675053d 100644 --- a/arch/um/sys-i386/user-offsets.c +++ b/arch/um/sys-i386/user-offsets.c @@ -7,47 +7,48 @@ #define DEFINE(sym, val) \ asm volatile("\n->" #sym " %0 " #val : : "i" (val)) +#define DEFINE_LONGS(sym, val) \ + asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long))) + #define OFFSET(sym, str, mem) \ DEFINE(sym, offsetof(struct str, mem)); void foo(void) { - OFFSET(SC_IP, sigcontext, eip); - OFFSET(SC_SP, sigcontext, esp); - OFFSET(SC_FS, sigcontext, fs); - OFFSET(SC_GS, sigcontext, gs); - OFFSET(SC_DS, sigcontext, ds); - OFFSET(SC_ES, sigcontext, es); - OFFSET(SC_SS, sigcontext, ss); - OFFSET(SC_CS, sigcontext, cs); - OFFSET(SC_EFLAGS, sigcontext, eflags); - OFFSET(SC_EAX, sigcontext, eax); - OFFSET(SC_EBX, sigcontext, ebx); - OFFSET(SC_ECX, sigcontext, ecx); - OFFSET(SC_EDX, sigcontext, edx); - OFFSET(SC_EDI, sigcontext, edi); - OFFSET(SC_ESI, sigcontext, esi); - OFFSET(SC_EBP, sigcontext, ebp); - OFFSET(SC_TRAPNO, sigcontext, trapno); - OFFSET(SC_ERR, sigcontext, err); - OFFSET(SC_CR2, sigcontext, cr2); - OFFSET(SC_FPSTATE, sigcontext, fpstate); - OFFSET(SC_SIGMASK, sigcontext, oldmask); - OFFSET(SC_FP_CW, _fpstate, cw); - OFFSET(SC_FP_SW, _fpstate, sw); - OFFSET(SC_FP_TAG, _fpstate, tag); - OFFSET(SC_FP_IPOFF, _fpstate, ipoff); - OFFSET(SC_FP_CSSEL, _fpstate, cssel); - OFFSET(SC_FP_DATAOFF, _fpstate, dataoff); - OFFSET(SC_FP_DATASEL, _fpstate, datasel); - OFFSET(SC_FP_ST, _fpstate, _st); - OFFSET(SC_FXSR_ENV, _fpstate, _fxsr_env); + OFFSET(HOST_SC_IP, sigcontext, eip); + OFFSET(HOST_SC_SP, sigcontext, esp); + OFFSET(HOST_SC_FS, sigcontext, fs); + OFFSET(HOST_SC_GS, sigcontext, gs); + OFFSET(HOST_SC_DS, sigcontext, ds); + OFFSET(HOST_SC_ES, sigcontext, es); + OFFSET(HOST_SC_SS, sigcontext, ss); + OFFSET(HOST_SC_CS, sigcontext, cs); + OFFSET(HOST_SC_EFLAGS, sigcontext, eflags); + OFFSET(HOST_SC_EAX, sigcontext, eax); + OFFSET(HOST_SC_EBX, sigcontext, ebx); + OFFSET(HOST_SC_ECX, sigcontext, ecx); + OFFSET(HOST_SC_EDX, sigcontext, edx); + OFFSET(HOST_SC_EDI, sigcontext, edi); + OFFSET(HOST_SC_ESI, sigcontext, esi); + OFFSET(HOST_SC_EBP, sigcontext, ebp); + OFFSET(HOST_SC_TRAPNO, sigcontext, trapno); + OFFSET(HOST_SC_ERR, sigcontext, err); + OFFSET(HOST_SC_CR2, sigcontext, cr2); + OFFSET(HOST_SC_FPSTATE, sigcontext, fpstate); + OFFSET(HOST_SC_SIGMASK, sigcontext, oldmask); + OFFSET(HOST_SC_FP_CW, _fpstate, cw); + OFFSET(HOST_SC_FP_SW, _fpstate, sw); + OFFSET(HOST_SC_FP_TAG, _fpstate, tag); + OFFSET(HOST_SC_FP_IPOFF, _fpstate, ipoff); + OFFSET(HOST_SC_FP_CSSEL, _fpstate, cssel); + OFFSET(HOST_SC_FP_DATAOFF, _fpstate, dataoff); + OFFSET(HOST_SC_FP_DATASEL, _fpstate, datasel); + OFFSET(HOST_SC_FP_ST, _fpstate, _st); + OFFSET(HOST_SC_FXSR_ENV, _fpstate, _fxsr_env); DEFINE(HOST_FRAME_SIZE, FRAME_SIZE); - DEFINE(HOST_FP_SIZE, - sizeof(struct user_i387_struct) / 
sizeof(unsigned long)); - DEFINE(HOST_XFP_SIZE, - sizeof(struct user_fxsr_struct) / sizeof(unsigned long)); + DEFINE_LONGS(HOST_FP_SIZE, sizeof(struct user_i387_struct)); + DEFINE_LONGS(HOST_XFP_SIZE, sizeof(struct user_fxsr_struct)); DEFINE(HOST_IP, EIP); DEFINE(HOST_SP, UESP); @@ -65,5 +66,5 @@ void foo(void) DEFINE(HOST_FS, FS); DEFINE(HOST_ES, ES); DEFINE(HOST_GS, GS); - DEFINE(__UM_FRAME_SIZE, sizeof(struct user_regs_struct)); + DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct)); } diff --git a/arch/um/sys-i386/util/Makefile b/arch/um/sys-i386/util/Makefile deleted file mode 100644 index bf61afd0b045..000000000000 --- a/arch/um/sys-i386/util/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -hostprogs-y := mk_sc mk_thread -always := $(hostprogs-y) - -HOSTCFLAGS_mk_sc.o := -I$(objtree)/arch/um -HOSTCFLAGS_mk_thread.o := -I$(objtree)/arch/um diff --git a/arch/um/sys-i386/util/mk_sc.c b/arch/um/sys-i386/util/mk_sc.c deleted file mode 100644 index 04c0d73433aa..000000000000 --- a/arch/um/sys-i386/util/mk_sc.c +++ /dev/null @@ -1,51 +0,0 @@ -#include -#include - -#define SC_OFFSET(name, field) \ - printf("#define " #name "(sc) *((unsigned long *) &(((char *) (sc))[%d]))\n",\ - name) - -#define SC_FP_OFFSET(name, field) \ - printf("#define " #name \ - "(sc) *((unsigned long *) &(((char *) (SC_FPSTATE(sc)))[%d]))\n",\ - name) - -#define SC_FP_OFFSET_PTR(name, field, type) \ - printf("#define " #name \ - "(sc) ((" type " *) &(((char *) (SC_FPSTATE(sc)))[%d]))\n",\ - name) - -int main(int argc, char **argv) -{ - SC_OFFSET(SC_IP, eip); - SC_OFFSET(SC_SP, esp); - SC_OFFSET(SC_FS, fs); - SC_OFFSET(SC_GS, gs); - SC_OFFSET(SC_DS, ds); - SC_OFFSET(SC_ES, es); - SC_OFFSET(SC_SS, ss); - SC_OFFSET(SC_CS, cs); - SC_OFFSET(SC_EFLAGS, eflags); - SC_OFFSET(SC_EAX, eax); - SC_OFFSET(SC_EBX, ebx); - SC_OFFSET(SC_ECX, ecx); - SC_OFFSET(SC_EDX, edx); - SC_OFFSET(SC_EDI, edi); - SC_OFFSET(SC_ESI, esi); - SC_OFFSET(SC_EBP, ebp); - SC_OFFSET(SC_TRAPNO, trapno); - SC_OFFSET(SC_ERR, err); - SC_OFFSET(SC_CR2, cr2); - SC_OFFSET(SC_FPSTATE, fpstate); - SC_OFFSET(SC_SIGMASK, oldmask); - SC_FP_OFFSET(SC_FP_CW, cw); - SC_FP_OFFSET(SC_FP_SW, sw); - SC_FP_OFFSET(SC_FP_TAG, tag); - SC_FP_OFFSET(SC_FP_IPOFF, ipoff); - SC_FP_OFFSET(SC_FP_CSSEL, cssel); - SC_FP_OFFSET(SC_FP_DATAOFF, dataoff); - SC_FP_OFFSET(SC_FP_DATASEL, datasel); - SC_FP_OFFSET_PTR(SC_FP_ST, _st, "struct _fpstate"); - SC_FP_OFFSET_PTR(SC_FXSR_ENV, _fxsr_env, "void"); - return(0); -} diff --git a/arch/um/sys-i386/util/mk_thread.c b/arch/um/sys-i386/util/mk_thread.c deleted file mode 100644 index 7470d0dda67e..000000000000 --- a/arch/um/sys-i386/util/mk_thread.c +++ /dev/null @@ -1,22 +0,0 @@ -#include -#include - -int main(int argc, char **argv) -{ - printf("/*\n"); - printf(" * Generated by mk_thread\n"); - printf(" */\n"); - printf("\n"); - printf("#ifndef __UM_THREAD_H\n"); - printf("#define __UM_THREAD_H\n"); - printf("\n"); - printf("#define TASK_DEBUGREGS(task) ((unsigned long *) " - "&(((char *) (task))[%d]))\n", TASK_DEBUGREGS); -#ifdef TASK_EXTERN_PID - printf("#define TASK_EXTERN_PID(task) *((int *) &(((char *) (task))[%d]))\n", - TASK_EXTERN_PID); -#endif - printf("\n"); - printf("#endif\n"); - return(0); -} diff --git a/arch/um/sys-x86_64/Makefile b/arch/um/sys-x86_64/Makefile index f0ab574d1e95..06c3633457a2 100644 --- a/arch/um/sys-x86_64/Makefile +++ b/arch/um/sys-x86_64/Makefile @@ -29,6 +29,4 @@ module.c-dir = kernel $(obj)/stub_segv.o: _c_flags = $(call unprofile,$(CFLAGS)) -subdir- := util - include arch/um/scripts/Makefile.unmap diff --git 
a/arch/um/sys-x86_64/kernel-offsets.c b/arch/um/sys-x86_64/kernel-offsets.c index 998541eade41..bfcb104b846e 100644 --- a/arch/um/sys-x86_64/kernel-offsets.c +++ b/arch/um/sys-x86_64/kernel-offsets.c @@ -19,7 +19,7 @@ void foo(void) { #ifdef CONFIG_MODE_TT - OFFSET(TASK_EXTERN_PID, task_struct, thread.mode.tt.extern_pid); + OFFSET(HOST_TASK_EXTERN_PID, task_struct, thread.mode.tt.extern_pid); #endif #include } diff --git a/arch/um/sys-x86_64/stub_segv.c b/arch/um/sys-x86_64/stub_segv.c index 65a131b362b6..d1e53bdf2e85 100644 --- a/arch/um/sys-x86_64/stub_segv.c +++ b/arch/um/sys-x86_64/stub_segv.c @@ -10,6 +10,22 @@ #include "uml-config.h" #include "sysdep/sigcontext.h" #include "sysdep/faultinfo.h" +#include + +/* Copied from sys-x86_64/signal.c - Can't find an equivalent definition + * in the libc headers anywhere. + */ +struct rt_sigframe +{ + char *pretcode; + struct ucontext uc; + struct siginfo info; +}; + +/* Copied here from - we're userspace. */ +#define container_of(ptr, type, member) ({ \ + const typeof( ((type *)0)->member ) *__mptr = (ptr); \ + (type *)( (char *)__mptr - offsetof(type,member) );}) void __attribute__ ((__section__ (".__syscall_stub"))) stub_segv_handler(int sig) @@ -17,16 +33,19 @@ stub_segv_handler(int sig) struct ucontext *uc; __asm__("movq %%rdx, %0" : "=g" (uc) :); - GET_FAULTINFO_FROM_SC(*((struct faultinfo *) UML_CONFIG_STUB_DATA), - &uc->uc_mcontext); + GET_FAULTINFO_FROM_SC(*((struct faultinfo *) UML_CONFIG_STUB_DATA), + &uc->uc_mcontext); - __asm__("movq %0, %%rax ; syscall": : "g" (__NR_getpid)); + __asm__("movq %0, %%rax ; syscall": : "g" (__NR_getpid)); __asm__("movq %%rax, %%rdi ; movq %0, %%rax ; movq %1, %%rsi ;" - "syscall": : "g" (__NR_kill), "g" (SIGUSR1)); - /* Two popqs to restore the stack to the state just before entering - * the handler, one pops the return address, the other pops the frame - * pointer. + "syscall": : "g" (__NR_kill), "g" (SIGUSR1) : + "%rdi", "%rax", "%rsi"); + /* sys_sigreturn expects that the stack pointer will be 8 bytes into + * the signal frame. So, we use the ucontext pointer, which we know + * already, to get the signal frame pointer, and add 8 to that. 
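+	 * In C terms, roughly the same computation the inline asm below
+	 * performs (a sketch only, using the container_of() copied in above;
+	 * new_rsp is just an illustrative name):
+	 *
+	 *	struct rt_sigframe *frame = container_of(uc, struct rt_sigframe, uc);
+	 *	unsigned long new_rsp = (unsigned long) frame + 8;
+	 *
+	 * new_rsp is the value moved into %rsp before issuing __NR_rt_sigreturn.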
*/ - __asm__("popq %%rax ; popq %%rax ; movq %0, %%rax ; syscall" : : "g" - (__NR_rt_sigreturn)); + __asm__("movq %0, %%rsp": : + "g" ((unsigned long) container_of(uc, struct rt_sigframe, + uc) + 8)); + __asm__("movq %0, %%rax ; syscall" : : "g" (__NR_rt_sigreturn)); } diff --git a/arch/um/sys-x86_64/user-offsets.c b/arch/um/sys-x86_64/user-offsets.c index 513d17ceafd4..5a585bfbb8c2 100644 --- a/arch/um/sys-x86_64/user-offsets.c +++ b/arch/um/sys-x86_64/user-offsets.c @@ -16,71 +16,76 @@ typedef __u32 u32; #define DEFINE(sym, val) \ asm volatile("\n->" #sym " %0 " #val : : "i" (val)) +#define DEFINE_LONGS(sym, val) \ + asm volatile("\n->" #sym " %0 " #val : : "i" (val/sizeof(unsigned long))) + #define OFFSET(sym, str, mem) \ DEFINE(sym, offsetof(struct str, mem)); void foo(void) { - OFFSET(SC_RBX, sigcontext, rbx); - OFFSET(SC_RCX, sigcontext, rcx); - OFFSET(SC_RDX, sigcontext, rdx); - OFFSET(SC_RSI, sigcontext, rsi); - OFFSET(SC_RDI, sigcontext, rdi); - OFFSET(SC_RBP, sigcontext, rbp); - OFFSET(SC_RAX, sigcontext, rax); - OFFSET(SC_R8, sigcontext, r8); - OFFSET(SC_R9, sigcontext, r9); - OFFSET(SC_R10, sigcontext, r10); - OFFSET(SC_R11, sigcontext, r11); - OFFSET(SC_R12, sigcontext, r12); - OFFSET(SC_R13, sigcontext, r13); - OFFSET(SC_R14, sigcontext, r14); - OFFSET(SC_R15, sigcontext, r15); - OFFSET(SC_IP, sigcontext, rip); - OFFSET(SC_SP, sigcontext, rsp); - OFFSET(SC_CR2, sigcontext, cr2); - OFFSET(SC_ERR, sigcontext, err); - OFFSET(SC_TRAPNO, sigcontext, trapno); - OFFSET(SC_CS, sigcontext, cs); - OFFSET(SC_FS, sigcontext, fs); - OFFSET(SC_GS, sigcontext, gs); - OFFSET(SC_EFLAGS, sigcontext, eflags); - OFFSET(SC_SIGMASK, sigcontext, oldmask); + OFFSET(HOST_SC_RBX, sigcontext, rbx); + OFFSET(HOST_SC_RCX, sigcontext, rcx); + OFFSET(HOST_SC_RDX, sigcontext, rdx); + OFFSET(HOST_SC_RSI, sigcontext, rsi); + OFFSET(HOST_SC_RDI, sigcontext, rdi); + OFFSET(HOST_SC_RBP, sigcontext, rbp); + OFFSET(HOST_SC_RAX, sigcontext, rax); + OFFSET(HOST_SC_R8, sigcontext, r8); + OFFSET(HOST_SC_R9, sigcontext, r9); + OFFSET(HOST_SC_R10, sigcontext, r10); + OFFSET(HOST_SC_R11, sigcontext, r11); + OFFSET(HOST_SC_R12, sigcontext, r12); + OFFSET(HOST_SC_R13, sigcontext, r13); + OFFSET(HOST_SC_R14, sigcontext, r14); + OFFSET(HOST_SC_R15, sigcontext, r15); + OFFSET(HOST_SC_IP, sigcontext, rip); + OFFSET(HOST_SC_SP, sigcontext, rsp); + OFFSET(HOST_SC_CR2, sigcontext, cr2); + OFFSET(HOST_SC_ERR, sigcontext, err); + OFFSET(HOST_SC_TRAPNO, sigcontext, trapno); + OFFSET(HOST_SC_CS, sigcontext, cs); + OFFSET(HOST_SC_FS, sigcontext, fs); + OFFSET(HOST_SC_GS, sigcontext, gs); + OFFSET(HOST_SC_EFLAGS, sigcontext, eflags); + OFFSET(HOST_SC_SIGMASK, sigcontext, oldmask); #if 0 - OFFSET(SC_ORIG_RAX, sigcontext, orig_rax); - OFFSET(SC_DS, sigcontext, ds); - OFFSET(SC_ES, sigcontext, es); - OFFSET(SC_SS, sigcontext, ss); + OFFSET(HOST_SC_ORIG_RAX, sigcontext, orig_rax); + OFFSET(HOST_SC_DS, sigcontext, ds); + OFFSET(HOST_SC_ES, sigcontext, es); + OFFSET(HOST_SC_SS, sigcontext, ss); #endif - DEFINE(HOST_FRAME_SIZE, FRAME_SIZE); - DEFINE(HOST_RBX, RBX); - DEFINE(HOST_RCX, RCX); - DEFINE(HOST_RDI, RDI); - DEFINE(HOST_RSI, RSI); - DEFINE(HOST_RDX, RDX); - DEFINE(HOST_RBP, RBP); - DEFINE(HOST_RAX, RAX); - DEFINE(HOST_R8, R8); - DEFINE(HOST_R9, R9); - DEFINE(HOST_R10, R10); - DEFINE(HOST_R11, R11); - DEFINE(HOST_R12, R12); - DEFINE(HOST_R13, R13); - DEFINE(HOST_R14, R14); - DEFINE(HOST_R15, R15); - DEFINE(HOST_ORIG_RAX, ORIG_RAX); - DEFINE(HOST_CS, CS); - DEFINE(HOST_SS, SS); - DEFINE(HOST_EFLAGS, EFLAGS); + 
DEFINE_LONGS(HOST_FRAME_SIZE, FRAME_SIZE); + DEFINE(HOST_FP_SIZE, 0); + DEFINE(HOST_XFP_SIZE, 0); + DEFINE_LONGS(HOST_RBX, RBX); + DEFINE_LONGS(HOST_RCX, RCX); + DEFINE_LONGS(HOST_RDI, RDI); + DEFINE_LONGS(HOST_RSI, RSI); + DEFINE_LONGS(HOST_RDX, RDX); + DEFINE_LONGS(HOST_RBP, RBP); + DEFINE_LONGS(HOST_RAX, RAX); + DEFINE_LONGS(HOST_R8, R8); + DEFINE_LONGS(HOST_R9, R9); + DEFINE_LONGS(HOST_R10, R10); + DEFINE_LONGS(HOST_R11, R11); + DEFINE_LONGS(HOST_R12, R12); + DEFINE_LONGS(HOST_R13, R13); + DEFINE_LONGS(HOST_R14, R14); + DEFINE_LONGS(HOST_R15, R15); + DEFINE_LONGS(HOST_ORIG_RAX, ORIG_RAX); + DEFINE_LONGS(HOST_CS, CS); + DEFINE_LONGS(HOST_SS, SS); + DEFINE_LONGS(HOST_EFLAGS, EFLAGS); #if 0 - DEFINE(HOST_FS, FS); - DEFINE(HOST_GS, GS); - DEFINE(HOST_DS, DS); - DEFINE(HOST_ES, ES); + DEFINE_LONGS(HOST_FS, FS); + DEFINE_LONGS(HOST_GS, GS); + DEFINE_LONGS(HOST_DS, DS); + DEFINE_LONGS(HOST_ES, ES); #endif - DEFINE(HOST_IP, RIP); - DEFINE(HOST_SP, RSP); - DEFINE(__UM_FRAME_SIZE, sizeof(struct user_regs_struct)); + DEFINE_LONGS(HOST_IP, RIP); + DEFINE_LONGS(HOST_SP, RSP); + DEFINE(UM_FRAME_SIZE, sizeof(struct user_regs_struct)); } diff --git a/arch/um/sys-x86_64/util/Makefile b/arch/um/sys-x86_64/util/Makefile deleted file mode 100644 index 75b052cfc206..000000000000 --- a/arch/um/sys-x86_64/util/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -# Copyright 2003 - 2004 Pathscale, Inc -# Released under the GPL - -hostprogs-y := mk_sc mk_thread -always := $(hostprogs-y) - -HOSTCFLAGS_mk_sc.o := -I$(objtree)/arch/um -HOSTCFLAGS_mk_thread.o := -I$(objtree)/arch/um diff --git a/arch/um/sys-x86_64/util/mk_sc.c b/arch/um/sys-x86_64/util/mk_sc.c deleted file mode 100644 index 7619bc377c1f..000000000000 --- a/arch/um/sys-x86_64/util/mk_sc.c +++ /dev/null @@ -1,47 +0,0 @@ -/* Copyright (C) 2003 - 2004 PathScale, Inc - * Released under the GPL - */ - -#include -#include - -#define SC_OFFSET(name) \ - printf("#define " #name \ - "(sc) *((unsigned long *) &(((char *) (sc))[%d]))\n",\ - name) - -int main(int argc, char **argv) -{ - SC_OFFSET(SC_RBX); - SC_OFFSET(SC_RCX); - SC_OFFSET(SC_RDX); - SC_OFFSET(SC_RSI); - SC_OFFSET(SC_RDI); - SC_OFFSET(SC_RBP); - SC_OFFSET(SC_RAX); - SC_OFFSET(SC_R8); - SC_OFFSET(SC_R9); - SC_OFFSET(SC_R10); - SC_OFFSET(SC_R11); - SC_OFFSET(SC_R12); - SC_OFFSET(SC_R13); - SC_OFFSET(SC_R14); - SC_OFFSET(SC_R15); - SC_OFFSET(SC_IP); - SC_OFFSET(SC_SP); - SC_OFFSET(SC_CR2); - SC_OFFSET(SC_ERR); - SC_OFFSET(SC_TRAPNO); - SC_OFFSET(SC_CS); - SC_OFFSET(SC_FS); - SC_OFFSET(SC_GS); - SC_OFFSET(SC_EFLAGS); - SC_OFFSET(SC_SIGMASK); -#if 0 - SC_OFFSET(SC_ORIG_RAX); - SC_OFFSET(SC_DS); - SC_OFFSET(SC_ES); - SC_OFFSET(SC_SS); -#endif - return(0); -} diff --git a/arch/um/sys-x86_64/util/mk_thread.c b/arch/um/sys-x86_64/util/mk_thread.c deleted file mode 100644 index 15517396e9cf..000000000000 --- a/arch/um/sys-x86_64/util/mk_thread.c +++ /dev/null @@ -1,20 +0,0 @@ -#include -#include - -int main(int argc, char **argv) -{ - printf("/*\n"); - printf(" * Generated by mk_thread\n"); - printf(" */\n"); - printf("\n"); - printf("#ifndef __UM_THREAD_H\n"); - printf("#define __UM_THREAD_H\n"); - printf("\n"); -#ifdef TASK_EXTERN_PID - printf("#define TASK_EXTERN_PID(task) *((int *) &(((char *) (task))[%d]))\n", - TASK_EXTERN_PID); -#endif - printf("\n"); - printf("#endif\n"); - return(0); -} diff --git a/arch/um/util/Makefile b/arch/um/util/Makefile deleted file mode 100644 index 4c7551c28033..000000000000 --- a/arch/um/util/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -hostprogs-y := mk_task mk_constants -always := 
$(hostprogs-y) - -HOSTCFLAGS_mk_task.o := -I$(objtree)/arch/um -HOSTCFLAGS_mk_constants.o := -I$(objtree)/arch/um diff --git a/arch/um/util/mk_constants.c b/arch/um/util/mk_constants.c deleted file mode 100644 index ab217becc36a..000000000000 --- a/arch/um/util/mk_constants.c +++ /dev/null @@ -1,32 +0,0 @@ -#include -#include - -#define SHOW_INT(sym) printf("#define %s %d\n", #sym, sym) -#define SHOW_STR(sym) printf("#define %s %s\n", #sym, sym) - -int main(int argc, char **argv) -{ - printf("/*\n"); - printf(" * Generated by mk_constants\n"); - printf(" */\n"); - printf("\n"); - printf("#ifndef __UM_CONSTANTS_H\n"); - printf("#define __UM_CONSTANTS_H\n"); - printf("\n"); - - SHOW_INT(UM_KERN_PAGE_SIZE); - - SHOW_STR(UM_KERN_EMERG); - SHOW_STR(UM_KERN_ALERT); - SHOW_STR(UM_KERN_CRIT); - SHOW_STR(UM_KERN_ERR); - SHOW_STR(UM_KERN_WARNING); - SHOW_STR(UM_KERN_NOTICE); - SHOW_STR(UM_KERN_INFO); - SHOW_STR(UM_KERN_DEBUG); - - SHOW_INT(UM_NSEC_PER_SEC); - printf("\n"); - printf("#endif\n"); - return(0); -} diff --git a/arch/um/util/mk_task.c b/arch/um/util/mk_task.c deleted file mode 100644 index 36c9606505e2..000000000000 --- a/arch/um/util/mk_task.c +++ /dev/null @@ -1,30 +0,0 @@ -#include -#include - -void print_ptr(char *name, char *type, int offset) -{ - printf("#define %s(task) ((%s *) &(((char *) (task))[%d]))\n", name, type, - offset); -} - -void print(char *name, char *type, int offset) -{ - printf("#define %s(task) *((%s *) &(((char *) (task))[%d]))\n", name, type, - offset); -} - -int main(int argc, char **argv) -{ - printf("/*\n"); - printf(" * Generated by mk_task\n"); - printf(" */\n"); - printf("\n"); - printf("#ifndef __TASK_H\n"); - printf("#define __TASK_H\n"); - printf("\n"); - print_ptr("TASK_REGS", "union uml_pt_regs", TASK_REGS); - print("TASK_PID", "int", TASK_PID); - printf("\n"); - printf("#endif\n"); - return(0); -} diff --git a/arch/x86_64/ia32/ia32_signal.c b/arch/x86_64/ia32/ia32_signal.c index 66e2821533db..0903cc1faef2 100644 --- a/arch/x86_64/ia32/ia32_signal.c +++ b/arch/x86_64/ia32/ia32_signal.c @@ -425,7 +425,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) rsp = (unsigned long) ka->sa.sa_restorer; } - return (void __user *)((rsp - frame_size) & -8UL); + rsp -= frame_size; + /* Align the stack pointer according to the i386 ABI, + * i.e. so that on function entry ((sp + 4) & 15) == 0. */ + rsp = ((rsp + 4) & -16ul) - 4; + return (void __user *) rsp; } int ia32_setup_frame(int sig, struct k_sigaction *ka, diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S index 4592bf21fcaf..b92e5f45ed46 100644 --- a/arch/x86_64/kernel/head.S +++ b/arch/x86_64/kernel/head.S @@ -270,26 +270,26 @@ ENTRY(level3_kernel_pgt) .org 0x4000 ENTRY(level2_ident_pgt) /* 40MB for bootup. 
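   Illustrative note on the entries that follow: each .quad is a 2MB
   PSE mapping.  0x183 encodes Present|RW|PSE|Global, while 0x083 clears
   only bit 8 (Global), so the change below simply drops the Global bit
   from the early identity mapping.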
*/ - .quad 0x0000000000000183 - .quad 0x0000000000200183 - .quad 0x0000000000400183 - .quad 0x0000000000600183 - .quad 0x0000000000800183 - .quad 0x0000000000A00183 - .quad 0x0000000000C00183 - .quad 0x0000000000E00183 - .quad 0x0000000001000183 - .quad 0x0000000001200183 - .quad 0x0000000001400183 - .quad 0x0000000001600183 - .quad 0x0000000001800183 - .quad 0x0000000001A00183 - .quad 0x0000000001C00183 - .quad 0x0000000001E00183 - .quad 0x0000000002000183 - .quad 0x0000000002200183 - .quad 0x0000000002400183 - .quad 0x0000000002600183 + .quad 0x0000000000000083 + .quad 0x0000000000200083 + .quad 0x0000000000400083 + .quad 0x0000000000600083 + .quad 0x0000000000800083 + .quad 0x0000000000A00083 + .quad 0x0000000000C00083 + .quad 0x0000000000E00083 + .quad 0x0000000001000083 + .quad 0x0000000001200083 + .quad 0x0000000001400083 + .quad 0x0000000001600083 + .quad 0x0000000001800083 + .quad 0x0000000001A00083 + .quad 0x0000000001C00083 + .quad 0x0000000001E00083 + .quad 0x0000000002000083 + .quad 0x0000000002200083 + .quad 0x0000000002400083 + .quad 0x0000000002600083 /* Temporary mappings for the super early allocator in arch/x86_64/mm/init.c */ .globl temp_boot_pmds temp_boot_pmds: diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c index df08c43276a0..76a28b007be9 100644 --- a/arch/x86_64/kernel/kprobes.c +++ b/arch/x86_64/kernel/kprobes.c @@ -77,9 +77,9 @@ static inline int is_IF_modifier(kprobe_opcode_t *insn) int __kprobes arch_prepare_kprobe(struct kprobe *p) { /* insn: must be on special executable page on x86_64. */ - up(&kprobe_mutex); - p->ainsn.insn = get_insn_slot(); down(&kprobe_mutex); + p->ainsn.insn = get_insn_slot(); + up(&kprobe_mutex); if (!p->ainsn.insn) { return -ENOMEM; } @@ -231,9 +231,9 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { - up(&kprobe_mutex); - free_insn_slot(p->ainsn.insn); down(&kprobe_mutex); + free_insn_slot(p->ainsn.insn); + up(&kprobe_mutex); } static inline void save_previous_kprobe(void) diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c index 08203b07f4bd..69541db5ff2c 100644 --- a/arch/x86_64/kernel/mce.c +++ b/arch/x86_64/kernel/mce.c @@ -54,9 +54,12 @@ void mce_log(struct mce *mce) { unsigned next, entry; mce->finished = 0; - smp_wmb(); + wmb(); for (;;) { entry = rcu_dereference(mcelog.next); + /* The rmb forces the compiler to reload next in each + iteration */ + rmb(); for (;;) { /* When the buffer fills up discard new entries. Assume that the earlier errors are the more interesting. 
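	   (Ordering sketch for this function: the wmb() issued after the
	   memcpy() below, and before ->finished is set to 1, ensures that a
	   reader which observes finished == 1 also sees the fully written
	   record.)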
*/ @@ -69,6 +72,7 @@ void mce_log(struct mce *mce) entry++; continue; } + break; } smp_rmb(); next = entry + 1; @@ -76,9 +80,9 @@ void mce_log(struct mce *mce) break; } memcpy(mcelog.entry + entry, mce, sizeof(struct mce)); - smp_wmb(); + wmb(); mcelog.entry[entry].finished = 1; - smp_wmb(); + wmb(); if (!test_and_set_bit(0, &console_logged)) notify_user = 1; diff --git a/arch/x86_64/kernel/pci-gart.c b/arch/x86_64/kernel/pci-gart.c index cf0a0315d586..88be97c96987 100644 --- a/arch/x86_64/kernel/pci-gart.c +++ b/arch/x86_64/kernel/pci-gart.c @@ -187,7 +187,7 @@ static void flush_gart(struct device *dev) /* Allocate DMA memory on node near device */ noinline -static void *dma_alloc_pages(struct device *dev, unsigned gfp, unsigned order) +static void *dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order) { struct page *page; int node; @@ -204,7 +204,7 @@ static void *dma_alloc_pages(struct device *dev, unsigned gfp, unsigned order) */ void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, - unsigned gfp) + gfp_t gfp) { void *memory; unsigned long dma_mask = 0; diff --git a/arch/x86_64/kernel/pci-nommu.c b/arch/x86_64/kernel/pci-nommu.c index 67d90b89af0b..5a981dca87ff 100644 --- a/arch/x86_64/kernel/pci-nommu.c +++ b/arch/x86_64/kernel/pci-nommu.c @@ -24,7 +24,7 @@ EXPORT_SYMBOL(iommu_sac_force); */ void *dma_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, unsigned gfp) + dma_addr_t *dma_handle, gfp_t gfp) { void *ret; u64 mask; diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index 238f73e1a834..cb28df14ff6f 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -831,8 +831,6 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c) #endif } -#define HWCR 0xc0010015 - static int __init init_amd(struct cpuinfo_x86 *c) { int r; @@ -841,14 +839,18 @@ static int __init init_amd(struct cpuinfo_x86 *c) #ifdef CONFIG_SMP unsigned long value; - // Disable TLB flush filter by setting HWCR.FFDIS: - // bit 6 of msr C001_0015 - // - // Errata 63 for SH-B3 steppings - // Errata 122 for all(?) steppings - rdmsrl(HWCR, value); - value |= 1 << 6; - wrmsrl(HWCR, value); + /* + * Disable TLB flush filter by setting HWCR.FFDIS on K8 + * bit 6 of msr C001_0015 + * + * Errata 63 for SH-B3 steppings + * Errata 122 for all steppings (F+ have it disabled by default) + */ + if (c->x86 == 15) { + rdmsrl(MSR_K8_HWCR, value); + value |= 1 << 6; + wrmsrl(MSR_K8_HWCR, value); + } #endif /* Bit 31 in normal CPUID used for nonstandard 3DNow ID; @@ -965,13 +967,12 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) static void srat_detect_node(void) { #ifdef CONFIG_NUMA - unsigned apicid, node; + unsigned node; int cpu = smp_processor_id(); /* Don't do the funky fallback heuristics the AMD version employs for now. 
*/ - apicid = phys_proc_id[cpu]; - node = apicid_to_node[apicid]; + node = apicid_to_node[hard_smp_processor_id()]; if (node == NUMA_NO_NODE) node = 0; cpu_to_node[cpu] = node; diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c index bd33be24a386..79190891fbc5 100644 --- a/arch/x86_64/kernel/setup64.c +++ b/arch/x86_64/kernel/setup64.c @@ -87,6 +87,10 @@ void __init setup_per_cpu_areas(void) int i; unsigned long size; +#ifdef CONFIG_HOTPLUG_CPU + prefill_possible_map(); +#endif + /* Copy section for each CPU (we discard the original) */ size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES); #ifdef CONFIG_MODULES diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c index e12d7baeb33e..658a81b33f3b 100644 --- a/arch/x86_64/kernel/smpboot.c +++ b/arch/x86_64/kernel/smpboot.c @@ -892,7 +892,7 @@ static __init void disable_smp(void) * those NR_CPUS, hence cpu_possible_map represents entire NR_CPUS range. * - Ashok Raj */ -static void prefill_possible_map(void) +__init void prefill_possible_map(void) { int i; for (i = 0; i < NR_CPUS; i++) @@ -967,10 +967,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus) current_cpu_data = boot_cpu_data; current_thread_info()->cpu = 0; /* needed? */ -#ifdef CONFIG_HOTPLUG_CPU - prefill_possible_map(); -#endif - if (smp_sanity_check(max_cpus) < 0) { printk(KERN_INFO "SMP disabled\n"); disable_smp(); diff --git a/arch/x86_64/kernel/suspend.c b/arch/x86_64/kernel/suspend.c index ebb9abf3ce6d..f066c6ab3618 100644 --- a/arch/x86_64/kernel/suspend.c +++ b/arch/x86_64/kernel/suspend.c @@ -11,6 +11,8 @@ #include #include #include +#include +#include struct saved_context saved_context; @@ -140,4 +142,129 @@ void fix_processor_context(void) } +#ifdef CONFIG_SOFTWARE_SUSPEND +/* Defined in arch/x86_64/kernel/suspend_asm.S */ +extern int restore_image(void); +pgd_t *temp_level4_pgt; + +static void **pages; + +static inline void *__add_page(void) +{ + void **c; + + c = (void **)get_usable_page(GFP_ATOMIC); + if (c) { + *c = pages; + pages = c; + } + return c; +} + +static inline void *__next_page(void) +{ + void **c; + + c = pages; + if (c) { + pages = *c; + *c = NULL; + } + return c; +} + +/* + * Try to allocate as many usable pages as needed and daisy chain them. 
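+ * (The chain lives in the pages themselves: __add_page() stores the
+ * previous list head in the first word of each newly allocated page,
+ * and __next_page() pops them back off in LIFO order.)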
+ * If one allocation fails, free the pages allocated so far + */ +static int alloc_usable_pages(unsigned long n) +{ + void *p; + + pages = NULL; + do + if (!__add_page()) + break; + while (--n); + if (n) { + p = __next_page(); + while (p) { + free_page((unsigned long)p); + p = __next_page(); + } + return -ENOMEM; + } + return 0; +} + +static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end) +{ + long i, j; + + i = pud_index(address); + pud = pud + i; + for (; i < PTRS_PER_PUD; pud++, i++) { + unsigned long paddr; + pmd_t *pmd; + + paddr = address + i*PUD_SIZE; + if (paddr >= end) + break; + + pmd = (pmd_t *)__next_page(); + set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE)); + for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) { + unsigned long pe; + + if (paddr >= end) + break; + pe = _PAGE_NX | _PAGE_PSE | _KERNPG_TABLE | paddr; + pe &= __supported_pte_mask; + set_pmd(pmd, __pmd(pe)); + } + } +} + +static void set_up_temporary_mappings(void) +{ + unsigned long start, end, next; + + temp_level4_pgt = (pgd_t *)__next_page(); + + /* It is safe to reuse the original kernel mapping */ + set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map), + init_level4_pgt[pgd_index(__START_KERNEL_map)]); + + /* Set up the direct mapping from scratch */ + start = (unsigned long)pfn_to_kaddr(0); + end = (unsigned long)pfn_to_kaddr(end_pfn); + + for (; start < end; start = next) { + pud_t *pud = (pud_t *)__next_page(); + next = start + PGDIR_SIZE; + if (next > end) + next = end; + res_phys_pud_init(pud, __pa(start), __pa(next)); + set_pgd(temp_level4_pgt + pgd_index(start), + mk_kernel_pgd(__pa(pud))); + } +} + +int swsusp_arch_resume(void) +{ + unsigned long n; + + n = ((end_pfn << PAGE_SHIFT) + PUD_SIZE - 1) >> PUD_SHIFT; + n += (n + PTRS_PER_PUD - 1) / PTRS_PER_PUD + 1; + pr_debug("swsusp_arch_resume(): pages needed = %lu\n", n); + if (alloc_usable_pages(n)) { + free_eaten_memory(); + return -ENOMEM; + } + /* We have got enough memory and from now on we cannot recover */ + set_up_temporary_mappings(); + restore_image(); + return 0; +} +#endif /* CONFIG_SOFTWARE_SUSPEND */ diff --git a/arch/x86_64/kernel/suspend_asm.S b/arch/x86_64/kernel/suspend_asm.S index 4d659e97df10..320b6fb00cca 100644 --- a/arch/x86_64/kernel/suspend_asm.S +++ b/arch/x86_64/kernel/suspend_asm.S @@ -39,12 +39,13 @@ ENTRY(swsusp_arch_suspend) call swsusp_save ret -ENTRY(swsusp_arch_resume) - /* set up cr3 */ - leaq init_level4_pgt(%rip),%rax - subq $__START_KERNEL_map,%rax - movq %rax,%cr3 - +ENTRY(restore_image) + /* switch to temporary page tables */ + movq $__PAGE_OFFSET, %rdx + movq temp_level4_pgt(%rip), %rax + subq %rdx, %rax + movq %rax, %cr3 + /* Flush TLB */ movq mmu_cr4_features(%rip), %rax movq %rax, %rdx andq $~(1<<7), %rdx # PGE @@ -69,6 +70,10 @@ loop: movq pbe_next(%rdx), %rdx jmp loop done: + /* go back to the original page tables */ + leaq init_level4_pgt(%rip), %rax + subq $__START_KERNEL_map, %rax + movq %rax, %cr3 /* Flush TLB, including "global" things (vmalloc) */ movq mmu_cr4_features(%rip), %rax movq %rax, %rdx diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c index 2373cb8b8625..703acde2a1a5 100644 --- a/arch/x86_64/kernel/time.c +++ b/arch/x86_64/kernel/time.c @@ -959,9 +959,6 @@ static __init int unsynchronized_tsc(void) are handled in the OEM check above. 
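	   (Note on the hunk below: the single-socket shortcut is removed, so
	   multiple cores in one package are no longer assumed to have
	   synchronized TSCs.)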
*/ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) return 0; - /* All in a single socket - should be synchronized */ - if (cpus_weight(cpu_core_map[0]) == num_online_cpus()) - return 0; #endif /* Assume multi socket systems are not synchronized */ return num_online_cpus() > 1; diff --git a/arch/x86_64/mm/numa.c b/arch/x86_64/mm/numa.c index 80a49d9bd8a7..214803821001 100644 --- a/arch/x86_64/mm/numa.c +++ b/arch/x86_64/mm/numa.c @@ -167,18 +167,16 @@ void __init numa_init_array(void) mapping. To avoid this fill in the mapping for all possible CPUs, as the number of CPUs is not known yet. We round robin the existing nodes. */ - rr = 0; + rr = first_node(node_online_map); for (i = 0; i < NR_CPUS; i++) { if (cpu_to_node[i] != NUMA_NO_NODE) continue; + cpu_to_node[i] = rr; rr = next_node(rr, node_online_map); if (rr == MAX_NUMNODES) rr = first_node(node_online_map); - cpu_to_node[i] = rr; - rr++; } - set_bit(0, &node_to_cpumask[cpu_to_node(0)]); } #ifdef CONFIG_NUMA_EMU @@ -266,9 +264,7 @@ void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn) __cpuinit void numa_add_cpu(int cpu) { - /* BP is initialized elsewhere */ - if (cpu) - set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]); + set_bit(cpu, &node_to_cpumask[cpu_to_node(cpu)]); } unsigned long __init numa_free_all_bootmem(void) diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c index 94862e1ec032..b90e8fe9eeb0 100644 --- a/arch/x86_64/mm/pageattr.c +++ b/arch/x86_64/mm/pageattr.c @@ -220,8 +220,6 @@ void global_flush_tlb(void) down_read(&init_mm.mmap_sem); df = xchg(&df_list, NULL); up_read(&init_mm.mmap_sem); - if (!df) - return; flush_map((df && !df->next) ? df->address : 0); for (; df; df = next_df) { next_df = df->next; diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c index 84fde258cf85..1ff82268e8ea 100644 --- a/arch/xtensa/kernel/pci-dma.c +++ b/arch/xtensa/kernel/pci-dma.c @@ -29,7 +29,7 @@ */ void * -dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp) +dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) { void *ret; diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c index 09887c96e9a1..de19501aa809 100644 --- a/arch/xtensa/kernel/pci.c +++ b/arch/xtensa/kernel/pci.c @@ -402,8 +402,8 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, __pci_mmap_set_flags(dev, vma, mmap_state); __pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine); - ret = io_remap_page_range(vma, vma->vm_start, vma->vm_pgoff<vm_end - vma->vm_start, vma->vm_page_prot); + ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start,vma->vm_page_prot); return ret; } diff --git a/arch/xtensa/kernel/platform.c b/arch/xtensa/kernel/platform.c index cf1362784443..03674daabc66 100644 --- a/arch/xtensa/kernel/platform.c +++ b/arch/xtensa/kernel/platform.c @@ -39,7 +39,7 @@ _F(int, pcibios_fixup, (void), { return 0; }); _F(int, get_rtc_time, (time_t* t), { return 0; }); _F(int, set_rtc_time, (time_t t), { return 0; }); -#if CONFIG_XTENSA_CALIBRATE_CCOUNT +#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT _F(void, calibrate_ccount, (void), { printk ("ERROR: Cannot calibrate cpu frequency! 
Assuming 100MHz.\n"); diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index c83bb0d41787..08ef6d82ee51 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -457,7 +457,7 @@ int dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r) { /* see asm/coprocessor.h for this magic number 16 */ -#if TOTAL_CPEXTRA_SIZE > 16 +#if XTENSA_CP_EXTRA_SIZE > 16 do_save_fpregs (r, regs, task); /* For now, bit 16 means some extra state may be present: */ diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 1f5bf5d624e4..513ed8d67766 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c @@ -304,7 +304,7 @@ void __init setup_arch(char **cmdline_p) # endif #endif -#if CONFIG_PCI +#ifdef CONFIG_PCI platform_pcibios_init(); #endif } diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index dc42cede9394..e252b61e45a5 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c @@ -182,7 +182,7 @@ restore_cpextra (struct _cpstate *buf) struct task_struct *tsk = current; release_all_cp(tsk); - return __copy_from_user(tsk->thread.cpextra, buf, TOTAL_CPEXTRA_SIZE); + return __copy_from_user(tsk->thread.cpextra, buf, XTENSA_CP_EXTRA_SIZE); #endif return 0; } diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index 1ac7d5ce7456..8e423d1335ce 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c @@ -68,7 +68,7 @@ void __init time_init(void) * speed for the CALIBRATE. */ -#if CONFIG_XTENSA_CALIBRATE_CCOUNT +#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT printk("Calibrating CPU frequency "); platform_calibrate_ccount(); printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ), diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 56aace84aaeb..5a91d6c9e66d 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -239,7 +239,7 @@ void __init mem_init(void) high_memory = (void *) __va(max_mapnr << PAGE_SHIFT); highmemsize = 0; -#if CONFIG_HIGHMEM +#ifdef CONFIG_HIGHMEM #error HIGHGMEM not implemented in init.c #endif diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c index bfa8b76de403..2dbb1b0f11d5 100644 --- a/drivers/acpi/event.c +++ b/drivers/acpi/event.c @@ -58,9 +58,8 @@ acpi_system_read_event(struct file *file, char __user * buffer, size_t count, return_VALUE(-EAGAIN); result = acpi_bus_receive_event(&event); - if (result) { - return_VALUE(-EIO); - } + if (result) + return_VALUE(result); chars_remaining = sprintf(str, "%s %s %08x %08x\n", event.device_class ? event. 
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index e36c5da2b31a..3937adf4e5e5 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -96,7 +96,7 @@ struct acpi_find_pci_root { static acpi_status do_root_bridge_busnr_callback(struct acpi_resource *resource, void *data) { - int *busnr = (int *)data; + unsigned long *busnr = (unsigned long *)data; struct acpi_resource_address64 address; if (resource->id != ACPI_RSTYPE_ADDRESS16 && @@ -115,13 +115,13 @@ do_root_bridge_busnr_callback(struct acpi_resource *resource, void *data) static int get_root_bridge_busnr(acpi_handle handle) { acpi_status status; - int bus, bbn; + unsigned long bus, bbn; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); status = acpi_evaluate_integer(handle, METHOD_NAME__BBN, NULL, - (unsigned long *)&bbn); + &bbn); if (status == AE_NOT_FOUND) { /* Assume bus = 0 */ printk(KERN_INFO PREFIX @@ -153,7 +153,7 @@ static int get_root_bridge_busnr(acpi_handle handle) } exit: acpi_os_free(buffer.pointer); - return bbn; + return (int)bbn; } static acpi_status diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c index d74a7c5e75dd..4b6bf19c39c0 100644 --- a/drivers/atm/ambassador.c +++ b/drivers/atm/ambassador.c @@ -795,7 +795,7 @@ static void drain_rx_pools (amb_dev * dev) { } static inline void fill_rx_pool (amb_dev * dev, unsigned char pool, - unsigned int __nocast priority) + gfp_t priority) { rx_in rx; amb_rxq * rxq; diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index 58219744f5db..7f7ec288824d 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -1374,8 +1374,7 @@ static void reset_chip (struct fs_dev *dev) } } -static void __devinit *aligned_kmalloc (int size, unsigned int __nocast flags, - int alignment) +static void __devinit *aligned_kmalloc (int size, gfp_t flags, int alignment) { void *t; @@ -1466,7 +1465,7 @@ static inline int nr_buffers_in_freepool (struct fs_dev *dev, struct freepool *f working again after that... 
-- REW */ static void top_off_fp (struct fs_dev *dev, struct freepool *fp, - unsigned int __nocast gfp_flags) + gfp_t gfp_flags) { struct FS_BPENTRY *qe, *ne; struct sk_buff *skb; diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index 2bf723a7b6e6..14f6a6201da3 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -178,14 +178,12 @@ fore200e_irq_itoa(int irq) static void* -fore200e_kmalloc(int size, int flags) +fore200e_kmalloc(int size, gfp_t flags) { - void* chunk = kmalloc(size, flags); + void *chunk = kzalloc(size, flags); - if (chunk) - memset(chunk, 0x00, size); - else - printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags); + if (!chunk) + printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags); return chunk; } diff --git a/drivers/base/class.c b/drivers/base/class.c index 3b112e3542f8..ce23dc8c18c5 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -669,6 +669,7 @@ void class_device_destroy(struct class *cls, dev_t devt) int class_device_rename(struct class_device *class_dev, char *new_name) { int error = 0; + char *old_class_name = NULL, *new_class_name = NULL; class_dev = class_device_get(class_dev); if (!class_dev) @@ -677,12 +678,24 @@ int class_device_rename(struct class_device *class_dev, char *new_name) pr_debug("CLASS: renaming '%s' to '%s'\n", class_dev->class_id, new_name); + if (class_dev->dev) + old_class_name = make_class_name(class_dev); + strlcpy(class_dev->class_id, new_name, KOBJ_NAME_LEN); error = kobject_rename(&class_dev->kobj, new_name); + if (class_dev->dev) { + new_class_name = make_class_name(class_dev); + sysfs_create_link(&class_dev->dev->kobj, &class_dev->kobj, + new_class_name); + sysfs_remove_link(&class_dev->dev->kobj, old_class_name); + } class_device_put(class_dev); + kfree(old_class_name); + kfree(new_class_name); + return error; } diff --git a/drivers/base/dd.c b/drivers/base/dd.c index d5bbce38282f..3565e9795301 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -40,6 +40,9 @@ */ void device_bind_driver(struct device * dev) { + if (klist_node_attached(&dev->knode_driver)) + return; + pr_debug("bound device '%s' to driver '%s'\n", dev->bus_id, dev->driver->name); klist_add_tail(&dev->knode_driver, &dev->driver->klist_devices); diff --git a/drivers/base/dmapool.c b/drivers/base/dmapool.c index 60a7ef6a201b..e2f64f91ed05 100644 --- a/drivers/base/dmapool.c +++ b/drivers/base/dmapool.c @@ -156,7 +156,7 @@ dma_pool_create (const char *name, struct device *dev, static struct dma_page * -pool_alloc_page (struct dma_pool *pool, unsigned int __nocast mem_flags) +pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags) { struct dma_page *page; int mapsize; @@ -262,8 +262,7 @@ dma_pool_destroy (struct dma_pool *pool) * If such a memory block can't be allocated, null is returned. 
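 * A minimal usage sketch for the gfp_t signature below (assumes an
 * existing struct dma_pool *pool; error handling elided):
 *
 *	dma_addr_t dma;
 *	void *vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (vaddr)
 *		dma_pool_free(pool, vaddr, dma);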
*/ void * -dma_pool_alloc (struct dma_pool *pool, unsigned int __nocast mem_flags, - dma_addr_t *handle) +dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle) { unsigned long flags; struct dma_page *page; diff --git a/drivers/block/as-iosched.c b/drivers/block/as-iosched.c index 95c0a3690b0f..4081c36c8c19 100644 --- a/drivers/block/as-iosched.c +++ b/drivers/block/as-iosched.c @@ -98,7 +98,6 @@ struct as_data { struct as_rq *next_arq[2]; /* next in sort order */ sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */ - struct list_head *dispatch; /* driver dispatch queue */ struct list_head *hash; /* request hash */ unsigned long exit_prob; /* probability a task will exit while @@ -239,6 +238,25 @@ static struct io_context *as_get_io_context(void) return ioc; } +static void as_put_io_context(struct as_rq *arq) +{ + struct as_io_context *aic; + + if (unlikely(!arq->io_context)) + return; + + aic = arq->io_context->aic; + + if (arq->is_sync == REQ_SYNC && aic) { + spin_lock(&aic->lock); + set_bit(AS_TASK_IORUNNING, &aic->state); + aic->last_end_request = jiffies; + spin_unlock(&aic->lock); + } + + put_io_context(arq->io_context); +} + /* * the back merge hash support functions */ @@ -261,14 +279,6 @@ static inline void as_del_arq_hash(struct as_rq *arq) __as_del_arq_hash(arq); } -static void as_remove_merge_hints(request_queue_t *q, struct as_rq *arq) -{ - as_del_arq_hash(arq); - - if (q->last_merge == arq->request) - q->last_merge = NULL; -} - static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq) { struct request *rq = arq->request; @@ -312,7 +322,7 @@ static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset) BUG_ON(!arq->on_hash); if (!rq_mergeable(__rq)) { - as_remove_merge_hints(ad->q, arq); + as_del_arq_hash(arq); continue; } @@ -950,23 +960,12 @@ static void as_completed_request(request_queue_t *q, struct request *rq) WARN_ON(!list_empty(&rq->queuelist)); - if (arq->state == AS_RQ_PRESCHED) { - WARN_ON(arq->io_context); - goto out; - } - - if (arq->state == AS_RQ_MERGED) - goto out_ioc; - if (arq->state != AS_RQ_REMOVED) { printk("arq->state %d\n", arq->state); WARN_ON(1); goto out; } - if (!blk_fs_request(rq)) - goto out; - if (ad->changed_batch && ad->nr_dispatched == 1) { kblockd_schedule_work(&ad->antic_work); ad->changed_batch = 0; @@ -1001,21 +1000,7 @@ static void as_completed_request(request_queue_t *q, struct request *rq) } } -out_ioc: - if (!arq->io_context) - goto out; - - if (arq->is_sync == REQ_SYNC) { - struct as_io_context *aic = arq->io_context->aic; - if (aic) { - spin_lock(&aic->lock); - set_bit(AS_TASK_IORUNNING, &aic->state); - aic->last_end_request = jiffies; - spin_unlock(&aic->lock); - } - } - - put_io_context(arq->io_context); + as_put_io_context(arq); out: arq->state = AS_RQ_POSTSCHED; } @@ -1047,72 +1032,10 @@ static void as_remove_queued_request(request_queue_t *q, struct request *rq) ad->next_arq[data_dir] = as_find_next_arq(ad, arq); list_del_init(&arq->fifo); - as_remove_merge_hints(q, arq); + as_del_arq_hash(arq); as_del_arq_rb(ad, arq); } -/* - * as_remove_dispatched_request is called to remove a request which has gone - * to the dispatch list. 
- */ -static void as_remove_dispatched_request(request_queue_t *q, struct request *rq) -{ - struct as_rq *arq = RQ_DATA(rq); - struct as_io_context *aic; - - if (!arq) { - WARN_ON(1); - return; - } - - WARN_ON(arq->state != AS_RQ_DISPATCHED); - WARN_ON(ON_RB(&arq->rb_node)); - if (arq->io_context && arq->io_context->aic) { - aic = arq->io_context->aic; - if (aic) { - WARN_ON(!atomic_read(&aic->nr_dispatched)); - atomic_dec(&aic->nr_dispatched); - } - } -} - -/* - * as_remove_request is called when a driver has finished with a request. - * This should be only called for dispatched requests, but for some reason - * a POWER4 box running hwscan it does not. - */ -static void as_remove_request(request_queue_t *q, struct request *rq) -{ - struct as_rq *arq = RQ_DATA(rq); - - if (unlikely(arq->state == AS_RQ_NEW)) - goto out; - - if (ON_RB(&arq->rb_node)) { - if (arq->state != AS_RQ_QUEUED) { - printk("arq->state %d\n", arq->state); - WARN_ON(1); - goto out; - } - /* - * We'll lose the aliased request(s) here. I don't think this - * will ever happen, but if it does, hopefully someone will - * report it. - */ - WARN_ON(!list_empty(&rq->queuelist)); - as_remove_queued_request(q, rq); - } else { - if (arq->state != AS_RQ_DISPATCHED) { - printk("arq->state %d\n", arq->state); - WARN_ON(1); - goto out; - } - as_remove_dispatched_request(q, rq); - } -out: - arq->state = AS_RQ_REMOVED; -} - /* * as_fifo_expired returns 0 if there are no expired reads on the fifo, * 1 otherwise. It is ratelimited so that we only perform the check once per @@ -1165,7 +1088,6 @@ static inline int as_batch_expired(struct as_data *ad) static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq) { struct request *rq = arq->request; - struct list_head *insert; const int data_dir = arq->is_sync; BUG_ON(!ON_RB(&arq->rb_node)); @@ -1198,13 +1120,13 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq) /* * take it off the sort and fifo list, add to dispatch queue */ - insert = ad->dispatch->prev; - while (!list_empty(&rq->queuelist)) { struct request *__rq = list_entry_rq(rq->queuelist.next); struct as_rq *__arq = RQ_DATA(__rq); - list_move_tail(&__rq->queuelist, ad->dispatch); + list_del(&__rq->queuelist); + + elv_dispatch_add_tail(ad->q, __rq); if (__arq->io_context && __arq->io_context->aic) atomic_inc(&__arq->io_context->aic->nr_dispatched); @@ -1218,7 +1140,8 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq) as_remove_queued_request(ad->q, rq); WARN_ON(arq->state != AS_RQ_QUEUED); - list_add(&rq->queuelist, insert); + elv_dispatch_sort(ad->q, rq); + arq->state = AS_RQ_DISPATCHED; if (arq->io_context && arq->io_context->aic) atomic_inc(&arq->io_context->aic->nr_dispatched); @@ -1230,12 +1153,42 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq) * read/write expire, batch expire, etc, and moves it to the dispatch * queue. Returns 1 if a request was found, 0 otherwise. */ -static int as_dispatch_request(struct as_data *ad) +static int as_dispatch_request(request_queue_t *q, int force) { + struct as_data *ad = q->elevator->elevator_data; struct as_rq *arq; const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]); const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]); + if (unlikely(force)) { + /* + * Forced dispatch, accounting is useless. Reset + * accounting states and dump fifo_lists. Note that + * batch_data_dir is reset to REQ_SYNC to avoid + * screwing write batch accounting as write batch + * accounting occurs on W->R transition. 
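The as_move_to_dispatch() changes above drop the scheduler-private ad->dispatch list: once a request leaves the AS rbtree/FIFO it is handed directly to the core dispatch queue. In schematic form (a sketch only; the alias handling and io_context accounting of the real function are omitted):

static void example_move_to_dispatch(request_queue_t *q, struct request *rq)
{
        /* 1. remove rq from the scheduler's own FIFO, rbtree and hash */

        /* 2. hand ownership to the core dispatch queue */
        elv_dispatch_sort(q, rq);
        /* elv_dispatch_add_tail(q, rq) would append in FIFO order instead */
}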
+ */ + int dispatched = 0; + + ad->batch_data_dir = REQ_SYNC; + ad->changed_batch = 0; + ad->new_batch = 0; + + while (ad->next_arq[REQ_SYNC]) { + as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]); + dispatched++; + } + ad->last_check_fifo[REQ_SYNC] = jiffies; + + while (ad->next_arq[REQ_ASYNC]) { + as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]); + dispatched++; + } + ad->last_check_fifo[REQ_ASYNC] = jiffies; + + return dispatched; + } + /* Signal that the write batch was uncontended, so we can't time it */ if (ad->batch_data_dir == REQ_ASYNC && !reads) { if (ad->current_write_count == 0 || !writes) @@ -1359,20 +1312,6 @@ fifo_expired: return 1; } -static struct request *as_next_request(request_queue_t *q) -{ - struct as_data *ad = q->elevator->elevator_data; - struct request *rq = NULL; - - /* - * if there are still requests on the dispatch queue, grab the first - */ - if (!list_empty(ad->dispatch) || as_dispatch_request(ad)) - rq = list_entry_rq(ad->dispatch->next); - - return rq; -} - /* * Add arq to a list behind alias */ @@ -1404,17 +1343,25 @@ as_add_aliased_request(struct as_data *ad, struct as_rq *arq, struct as_rq *alia /* * Don't want to have to handle merges. */ - as_remove_merge_hints(ad->q, arq); + as_del_arq_hash(arq); } /* * add arq to rbtree and fifo */ -static void as_add_request(struct as_data *ad, struct as_rq *arq) +static void as_add_request(request_queue_t *q, struct request *rq) { + struct as_data *ad = q->elevator->elevator_data; + struct as_rq *arq = RQ_DATA(rq); struct as_rq *alias; int data_dir; + if (arq->state != AS_RQ_PRESCHED) { + printk("arq->state: %d\n", arq->state); + WARN_ON(1); + } + arq->state = AS_RQ_NEW; + if (rq_data_dir(arq->request) == READ || current->flags&PF_SYNCWRITE) arq->is_sync = 1; @@ -1437,12 +1384,8 @@ static void as_add_request(struct as_data *ad, struct as_rq *arq) arq->expires = jiffies + ad->fifo_expire[data_dir]; list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]); - if (rq_mergeable(arq->request)) { + if (rq_mergeable(arq->request)) as_add_arq_hash(ad, arq); - - if (!ad->q->last_merge) - ad->q->last_merge = arq->request; - } as_update_arq(ad, arq); /* keep state machine up to date */ } else { @@ -1463,96 +1406,24 @@ static void as_add_request(struct as_data *ad, struct as_rq *arq) arq->state = AS_RQ_QUEUED; } +static void as_activate_request(request_queue_t *q, struct request *rq) +{ + struct as_rq *arq = RQ_DATA(rq); + + WARN_ON(arq->state != AS_RQ_DISPATCHED); + arq->state = AS_RQ_REMOVED; + if (arq->io_context && arq->io_context->aic) + atomic_dec(&arq->io_context->aic->nr_dispatched); +} + static void as_deactivate_request(request_queue_t *q, struct request *rq) { - struct as_data *ad = q->elevator->elevator_data; struct as_rq *arq = RQ_DATA(rq); - if (arq) { - if (arq->state == AS_RQ_REMOVED) { - arq->state = AS_RQ_DISPATCHED; - if (arq->io_context && arq->io_context->aic) - atomic_inc(&arq->io_context->aic->nr_dispatched); - } - } else - WARN_ON(blk_fs_request(rq) - && (!(rq->flags & (REQ_HARDBARRIER|REQ_SOFTBARRIER))) ); - - /* Stop anticipating - let this request get through */ - as_antic_stop(ad); -} - -/* - * requeue the request. The request has not been completed, nor is it a - * new request, so don't touch accounting. - */ -static void as_requeue_request(request_queue_t *q, struct request *rq) -{ - as_deactivate_request(q, rq); - list_add(&rq->queuelist, &q->queue_head); -} - -/* - * Account a request that is inserted directly onto the dispatch queue. 
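Taken together with the WARN_ON() checks in as_add_request(), as_move_to_dispatch(), as_completed_request() and the activate/deactivate hooks added in this hunk, the per-request state machine now reads as follows (informal summary of the states used in this file):

/*
 * AS_RQ_PRESCHED   --as_add_request()---------> AS_RQ_NEW --> AS_RQ_QUEUED
 * AS_RQ_QUEUED     --as_move_to_dispatch()----> AS_RQ_DISPATCHED
 * AS_RQ_DISPATCHED --as_activate_request()----> AS_RQ_REMOVED     (driver took it)
 * AS_RQ_REMOVED    --as_deactivate_request()--> AS_RQ_DISPATCHED  (requeued)
 * AS_RQ_REMOVED    --as_completed_request()---> AS_RQ_POSTSCHED
 *
 * Requests merged into another request end up in AS_RQ_MERGED instead,
 * which as_put_request() now accepts as a terminal state.
 */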
- * arq->io_context->aic->nr_dispatched should not need to be incremented - * because only new requests should come through here: requeues go through - * our explicit requeue handler. - */ -static void as_account_queued_request(struct as_data *ad, struct request *rq) -{ - if (blk_fs_request(rq)) { - struct as_rq *arq = RQ_DATA(rq); - arq->state = AS_RQ_DISPATCHED; - ad->nr_dispatched++; - } -} - -static void -as_insert_request(request_queue_t *q, struct request *rq, int where) -{ - struct as_data *ad = q->elevator->elevator_data; - struct as_rq *arq = RQ_DATA(rq); - - if (arq) { - if (arq->state != AS_RQ_PRESCHED) { - printk("arq->state: %d\n", arq->state); - WARN_ON(1); - } - arq->state = AS_RQ_NEW; - } - - /* barriers must flush the reorder queue */ - if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER) - && where == ELEVATOR_INSERT_SORT)) { - WARN_ON(1); - where = ELEVATOR_INSERT_BACK; - } - - switch (where) { - case ELEVATOR_INSERT_BACK: - while (ad->next_arq[REQ_SYNC]) - as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]); - - while (ad->next_arq[REQ_ASYNC]) - as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]); - - list_add_tail(&rq->queuelist, ad->dispatch); - as_account_queued_request(ad, rq); - as_antic_stop(ad); - break; - case ELEVATOR_INSERT_FRONT: - list_add(&rq->queuelist, ad->dispatch); - as_account_queued_request(ad, rq); - as_antic_stop(ad); - break; - case ELEVATOR_INSERT_SORT: - BUG_ON(!blk_fs_request(rq)); - as_add_request(ad, arq); - break; - default: - BUG(); - return; - } + WARN_ON(arq->state != AS_RQ_REMOVED); + arq->state = AS_RQ_DISPATCHED; + if (arq->io_context && arq->io_context->aic) + atomic_inc(&arq->io_context->aic->nr_dispatched); } /* @@ -1565,12 +1436,8 @@ static int as_queue_empty(request_queue_t *q) { struct as_data *ad = q->elevator->elevator_data; - if (!list_empty(&ad->fifo_list[REQ_ASYNC]) - || !list_empty(&ad->fifo_list[REQ_SYNC]) - || !list_empty(ad->dispatch)) - return 0; - - return 1; + return list_empty(&ad->fifo_list[REQ_ASYNC]) + && list_empty(&ad->fifo_list[REQ_SYNC]); } static struct request * @@ -1607,15 +1474,6 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio) struct request *__rq; int ret; - /* - * try last_merge to avoid going to hash - */ - ret = elv_try_last_merge(q, bio); - if (ret != ELEVATOR_NO_MERGE) { - __rq = q->last_merge; - goto out_insert; - } - /* * see if the merge hash can satisfy a back merge */ @@ -1644,9 +1502,6 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio) return ELEVATOR_NO_MERGE; out: - if (rq_mergeable(__rq)) - q->last_merge = __rq; -out_insert: if (ret) { if (rq_mergeable(__rq)) as_hot_arq_hash(ad, RQ_DATA(__rq)); @@ -1693,9 +1548,6 @@ static void as_merged_request(request_queue_t *q, struct request *req) * behind the disk head. We currently don't bother adjusting. 
*/ } - - if (arq->on_hash) - q->last_merge = req; } static void @@ -1763,6 +1615,7 @@ as_merged_requests(request_queue_t *q, struct request *req, * kill knowledge of next, this one is a goner */ as_remove_queued_request(q, next); + as_put_io_context(anext); anext->state = AS_RQ_MERGED; } @@ -1782,7 +1635,7 @@ static void as_work_handler(void *data) unsigned long flags; spin_lock_irqsave(q->queue_lock, flags); - if (as_next_request(q)) + if (!as_queue_empty(q)) q->request_fn(q); spin_unlock_irqrestore(q->queue_lock, flags); } @@ -1797,7 +1650,9 @@ static void as_put_request(request_queue_t *q, struct request *rq) return; } - if (arq->state != AS_RQ_POSTSCHED && arq->state != AS_RQ_PRESCHED) { + if (unlikely(arq->state != AS_RQ_POSTSCHED && + arq->state != AS_RQ_PRESCHED && + arq->state != AS_RQ_MERGED)) { printk("arq->state %d\n", arq->state); WARN_ON(1); } @@ -1807,7 +1662,7 @@ static void as_put_request(request_queue_t *q, struct request *rq) } static int as_set_request(request_queue_t *q, struct request *rq, - struct bio *bio, int gfp_mask) + struct bio *bio, gfp_t gfp_mask) { struct as_data *ad = q->elevator->elevator_data; struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask); @@ -1907,7 +1762,6 @@ static int as_init_queue(request_queue_t *q, elevator_t *e) INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]); ad->sort_list[REQ_SYNC] = RB_ROOT; ad->sort_list[REQ_ASYNC] = RB_ROOT; - ad->dispatch = &q->queue_head; ad->fifo_expire[REQ_SYNC] = default_read_expire; ad->fifo_expire[REQ_ASYNC] = default_write_expire; ad->antic_expire = default_antic_expire; @@ -2072,10 +1926,9 @@ static struct elevator_type iosched_as = { .elevator_merge_fn = as_merge, .elevator_merged_fn = as_merged_request, .elevator_merge_req_fn = as_merged_requests, - .elevator_next_req_fn = as_next_request, - .elevator_add_req_fn = as_insert_request, - .elevator_remove_req_fn = as_remove_request, - .elevator_requeue_req_fn = as_requeue_request, + .elevator_dispatch_fn = as_dispatch_request, + .elevator_add_req_fn = as_add_request, + .elevator_activate_req_fn = as_activate_request, .elevator_deactivate_req_fn = as_deactivate_request, .elevator_queue_empty_fn = as_queue_empty, .elevator_completed_req_fn = as_completed_request, diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c index cd056e7e64ec..94690e4d41e0 100644 --- a/drivers/block/cfq-iosched.c +++ b/drivers/block/cfq-iosched.c @@ -84,7 +84,6 @@ static int cfq_max_depth = 2; (node)->rb_left = NULL; \ } while (0) #define RB_CLEAR_ROOT(root) ((root)->rb_node = NULL) -#define ON_RB(node) ((node)->rb_color != RB_NONE) #define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node) #define rq_rb_key(rq) (rq)->sector @@ -271,10 +270,7 @@ CFQ_CFQQ_FNS(expired); #undef CFQ_CFQQ_FNS enum cfq_rq_state_flags { - CFQ_CRQ_FLAG_in_flight = 0, - CFQ_CRQ_FLAG_in_driver, - CFQ_CRQ_FLAG_is_sync, - CFQ_CRQ_FLAG_requeued, + CFQ_CRQ_FLAG_is_sync = 0, }; #define CFQ_CRQ_FNS(name) \ @@ -291,14 +287,11 @@ static inline int cfq_crq_##name(const struct cfq_rq *crq) \ return (crq->crq_flags & (1 << CFQ_CRQ_FLAG_##name)) != 0; \ } -CFQ_CRQ_FNS(in_flight); -CFQ_CRQ_FNS(in_driver); CFQ_CRQ_FNS(is_sync); -CFQ_CRQ_FNS(requeued); #undef CFQ_CRQ_FNS static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short); -static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *); +static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *); static void cfq_put_cfqd(struct cfq_data *cfqd); #define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE) @@ 
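The iosched_as ops table above shows the reshaped interface: elevator_next_req_fn, the add hook's 'where' argument, elevator_remove_req_fn and elevator_requeue_req_fn are gone, replaced by a dispatch hook plus activate/deactivate notifications. A bare-bones FIFO scheduler under the new contract needs little more than the following (hypothetical sketch, all names made up; the init/exit hooks that allocate the list head used as elevator_data, and elv_register() at module init, are omitted):

static int fifo_dispatch(request_queue_t *q, int force)
{
        struct list_head *fifo = q->elevator->elevator_data;
        struct request *rq;

        if (list_empty(fifo))
                return 0;

        rq = list_entry_rq(fifo->next);
        list_del_init(&rq->queuelist);
        elv_dispatch_add_tail(q, rq);           /* the core owns it from here on */
        return 1;
}

static void fifo_add_request(request_queue_t *q, struct request *rq)
{
        struct list_head *fifo = q->elevator->elevator_data;

        list_add_tail(&rq->queuelist, fifo);
}

static int fifo_queue_empty(request_queue_t *q)
{
        struct list_head *fifo = q->elevator->elevator_data;

        return list_empty(fifo);
}

static struct elevator_type elevator_fifo_example = {
        .ops = {
                .elevator_dispatch_fn           = fifo_dispatch,
                .elevator_add_req_fn            = fifo_add_request,
                .elevator_queue_empty_fn        = fifo_queue_empty,
        },
        .elevator_name  = "fifo-example",
        .elevator_owner = THIS_MODULE,
};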
-311,14 +304,6 @@ static inline void cfq_del_crq_hash(struct cfq_rq *crq) hlist_del_init(&crq->hash); } -static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq) -{ - cfq_del_crq_hash(crq); - - if (q->last_merge == crq->request) - q->last_merge = NULL; -} - static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq) { const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request)); @@ -347,18 +332,13 @@ static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset) return NULL; } -static inline int cfq_pending_requests(struct cfq_data *cfqd) -{ - return !list_empty(&cfqd->queue->queue_head) || cfqd->busy_queues; -} - /* * scheduler run of queue, if there are requests pending and no one in the * driver that will restart queueing */ static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) { - if (!cfqd->rq_in_driver && cfq_pending_requests(cfqd)) + if (!cfqd->rq_in_driver && cfqd->busy_queues) kblockd_schedule_work(&cfqd->unplug_work); } @@ -366,7 +346,7 @@ static int cfq_queue_empty(request_queue_t *q) { struct cfq_data *cfqd = q->elevator->elevator_data; - return !cfq_pending_requests(cfqd); + return !cfqd->busy_queues; } /* @@ -386,11 +366,6 @@ cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2) if (crq2 == NULL) return crq1; - if (cfq_crq_requeued(crq1) && !cfq_crq_requeued(crq2)) - return crq1; - else if (cfq_crq_requeued(crq2) && !cfq_crq_requeued(crq1)) - return crq2; - if (cfq_crq_is_sync(crq1) && !cfq_crq_is_sync(crq2)) return crq1; else if (cfq_crq_is_sync(crq2) && !cfq_crq_is_sync(crq1)) @@ -461,10 +436,7 @@ cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq, struct cfq_rq *crq_next = NULL, *crq_prev = NULL; struct rb_node *rbnext, *rbprev; - rbnext = NULL; - if (ON_RB(&last->rb_node)) - rbnext = rb_next(&last->rb_node); - if (!rbnext) { + if (!(rbnext = rb_next(&last->rb_node))) { rbnext = rb_first(&cfqq->sort_list); if (rbnext == &last->rb_node) rbnext = NULL; @@ -545,13 +517,13 @@ static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted) * the pending list according to last request service */ static inline void -cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq, int requeue) +cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) { BUG_ON(cfq_cfqq_on_rr(cfqq)); cfq_mark_cfqq_on_rr(cfqq); cfqd->busy_queues++; - cfq_resort_rr_list(cfqq, requeue); + cfq_resort_rr_list(cfqq, 0); } static inline void @@ -571,22 +543,19 @@ cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) static inline void cfq_del_crq_rb(struct cfq_rq *crq) { struct cfq_queue *cfqq = crq->cfq_queue; + struct cfq_data *cfqd = cfqq->cfqd; + const int sync = cfq_crq_is_sync(crq); - if (ON_RB(&crq->rb_node)) { - struct cfq_data *cfqd = cfqq->cfqd; - const int sync = cfq_crq_is_sync(crq); + BUG_ON(!cfqq->queued[sync]); + cfqq->queued[sync]--; - BUG_ON(!cfqq->queued[sync]); - cfqq->queued[sync]--; + cfq_update_next_crq(crq); - cfq_update_next_crq(crq); + rb_erase(&crq->rb_node, &cfqq->sort_list); + RB_CLEAR_COLOR(&crq->rb_node); - rb_erase(&crq->rb_node, &cfqq->sort_list); - RB_CLEAR_COLOR(&crq->rb_node); - - if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list)) - cfq_del_cfqq_rr(cfqd, cfqq); - } + if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list)) + cfq_del_cfqq_rr(cfqd, cfqq); } static struct cfq_rq * @@ -627,12 +596,12 @@ static void cfq_add_crq_rb(struct cfq_rq *crq) * if that happens, put the alias on the dispatch list */ while ((__alias = __cfq_add_crq_rb(crq)) != 
NULL) - cfq_dispatch_sort(cfqd->queue, __alias); + cfq_dispatch_insert(cfqd->queue, __alias); rb_insert_color(&crq->rb_node, &cfqq->sort_list); if (!cfq_cfqq_on_rr(cfqq)) - cfq_add_cfqq_rr(cfqd, cfqq, cfq_crq_requeued(crq)); + cfq_add_cfqq_rr(cfqd, cfqq); /* * check if this request is a better next-serve candidate @@ -643,10 +612,8 @@ static void cfq_add_crq_rb(struct cfq_rq *crq) static inline void cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq) { - if (ON_RB(&crq->rb_node)) { - rb_erase(&crq->rb_node, &cfqq->sort_list); - cfqq->queued[cfq_crq_is_sync(crq)]--; - } + rb_erase(&crq->rb_node, &cfqq->sort_list); + cfqq->queued[cfq_crq_is_sync(crq)]--; cfq_add_crq_rb(crq); } @@ -676,49 +643,28 @@ out: return NULL; } +static void cfq_activate_request(request_queue_t *q, struct request *rq) +{ + struct cfq_data *cfqd = q->elevator->elevator_data; + + cfqd->rq_in_driver++; +} + static void cfq_deactivate_request(request_queue_t *q, struct request *rq) { struct cfq_data *cfqd = q->elevator->elevator_data; - struct cfq_rq *crq = RQ_DATA(rq); - if (crq) { - struct cfq_queue *cfqq = crq->cfq_queue; - - if (cfq_crq_in_driver(crq)) { - cfq_clear_crq_in_driver(crq); - WARN_ON(!cfqd->rq_in_driver); - cfqd->rq_in_driver--; - } - if (cfq_crq_in_flight(crq)) { - const int sync = cfq_crq_is_sync(crq); - - cfq_clear_crq_in_flight(crq); - WARN_ON(!cfqq->on_dispatch[sync]); - cfqq->on_dispatch[sync]--; - } - cfq_mark_crq_requeued(crq); - } + WARN_ON(!cfqd->rq_in_driver); + cfqd->rq_in_driver--; } -/* - * make sure the service time gets corrected on reissue of this request - */ -static void cfq_requeue_request(request_queue_t *q, struct request *rq) -{ - cfq_deactivate_request(q, rq); - list_add(&rq->queuelist, &q->queue_head); -} - -static void cfq_remove_request(request_queue_t *q, struct request *rq) +static void cfq_remove_request(struct request *rq) { struct cfq_rq *crq = RQ_DATA(rq); - if (crq) { - list_del_init(&rq->queuelist); - cfq_del_crq_rb(crq); - cfq_remove_merge_hints(q, crq); - - } + list_del_init(&rq->queuelist); + cfq_del_crq_rb(crq); + cfq_del_crq_hash(crq); } static int @@ -728,12 +674,6 @@ cfq_merge(request_queue_t *q, struct request **req, struct bio *bio) struct request *__rq; int ret; - ret = elv_try_last_merge(q, bio); - if (ret != ELEVATOR_NO_MERGE) { - __rq = q->last_merge; - goto out_insert; - } - __rq = cfq_find_rq_hash(cfqd, bio->bi_sector); if (__rq && elv_rq_merge_ok(__rq, bio)) { ret = ELEVATOR_BACK_MERGE; @@ -748,8 +688,6 @@ cfq_merge(request_queue_t *q, struct request **req, struct bio *bio) return ELEVATOR_NO_MERGE; out: - q->last_merge = __rq; -out_insert: *req = __rq; return ret; } @@ -762,14 +700,12 @@ static void cfq_merged_request(request_queue_t *q, struct request *req) cfq_del_crq_hash(crq); cfq_add_crq_hash(cfqd, crq); - if (ON_RB(&crq->rb_node) && (rq_rb_key(req) != crq->rb_key)) { + if (rq_rb_key(req) != crq->rb_key) { struct cfq_queue *cfqq = crq->cfq_queue; cfq_update_next_crq(crq); cfq_reposition_crq_rb(cfqq, crq); } - - q->last_merge = req; } static void @@ -785,7 +721,7 @@ cfq_merged_requests(request_queue_t *q, struct request *rq, time_before(next->start_time, rq->start_time)) list_move(&rq->queuelist, &next->queuelist); - cfq_remove_request(q, next); + cfq_remove_request(next); } static inline void @@ -992,53 +928,15 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) return 1; } -/* - * we dispatch cfqd->cfq_quantum requests in total from the rr_list queues, - * this function sector sorts the selected request to 
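cfq_activate_request() and cfq_deactivate_request() above replace the old per-crq in_driver/in_flight flags: the core guarantees one activate call the first time a driver fetches a request and one deactivate call if it is requeued, so rq_in_driver can be a plain counter. In outline (a summary of behaviour defined elsewhere in this patch, not new code):

/*
 *   rq = elv_next_request(q);
 *      -> on the first fetch the core sets REQ_STARTED and calls the
 *         activate hook: cfq_activate_request() does cfqd->rq_in_driver++
 *
 *   elv_requeue_request(q, rq);
 *      -> the core calls the deactivate hook (cfq_deactivate_request(),
 *         cfqd->rq_in_driver--) and clears REQ_STARTED again
 *
 *   completion
 *      -> cfq_completed_request() drops rq_in_driver and the
 *         cfqq->on_dispatch[] count once, with no flag juggling
 */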
minimize seeks. we start - * at cfqd->last_sector, not 0. - */ -static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq) +static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq) { struct cfq_data *cfqd = q->elevator->elevator_data; struct cfq_queue *cfqq = crq->cfq_queue; - struct list_head *head = &q->queue_head, *entry = head; - struct request *__rq; - sector_t last; - - list_del(&crq->request->queuelist); - - last = cfqd->last_sector; - list_for_each_entry_reverse(__rq, head, queuelist) { - struct cfq_rq *__crq = RQ_DATA(__rq); - - if (blk_barrier_rq(__rq)) - break; - if (!blk_fs_request(__rq)) - break; - if (cfq_crq_requeued(__crq)) - break; - - if (__rq->sector <= crq->request->sector) - break; - if (__rq->sector > last && crq->request->sector < last) { - last = crq->request->sector + crq->request->nr_sectors; - break; - } - entry = &__rq->queuelist; - } - - cfqd->last_sector = last; cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq); - - cfq_del_crq_rb(crq); - cfq_remove_merge_hints(q, crq); - - cfq_mark_crq_in_flight(crq); - cfq_clear_crq_requeued(crq); - + cfq_remove_request(crq->request); cfqq->on_dispatch[cfq_crq_is_sync(crq)]++; - list_add_tail(&crq->request->queuelist, entry); + elv_dispatch_sort(q, crq->request); } /* @@ -1159,7 +1057,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, /* * finally, insert request into driver dispatch list */ - cfq_dispatch_sort(cfqd->queue, crq); + cfq_dispatch_insert(cfqd->queue, crq); cfqd->dispatch_slice++; dispatched++; @@ -1194,7 +1092,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, } static int -cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force) +cfq_dispatch_requests(request_queue_t *q, int force) { struct cfq_data *cfqd = q->elevator->elevator_data; struct cfq_queue *cfqq; @@ -1204,12 +1102,25 @@ cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force) cfqq = cfq_select_queue(cfqd, force); if (cfqq) { + int max_dispatch; + + /* + * if idle window is disabled, allow queue buildup + */ + if (!cfq_cfqq_idle_window(cfqq) && + cfqd->rq_in_driver >= cfqd->cfq_max_depth) + return 0; + cfq_clear_cfqq_must_dispatch(cfqq); cfq_clear_cfqq_wait_request(cfqq); del_timer(&cfqd->idle_slice_timer); - if (cfq_class_idle(cfqq)) - max_dispatch = 1; + if (!force) { + max_dispatch = cfqd->cfq_quantum; + if (cfq_class_idle(cfqq)) + max_dispatch = 1; + } else + max_dispatch = INT_MAX; return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch); } @@ -1217,93 +1128,6 @@ cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force) return 0; } -static inline void cfq_account_dispatch(struct cfq_rq *crq) -{ - struct cfq_queue *cfqq = crq->cfq_queue; - struct cfq_data *cfqd = cfqq->cfqd; - - if (unlikely(!blk_fs_request(crq->request))) - return; - - /* - * accounted bit is necessary since some drivers will call - * elv_next_request() many times for the same request (eg ide) - */ - if (cfq_crq_in_driver(crq)) - return; - - cfq_mark_crq_in_driver(crq); - cfqd->rq_in_driver++; -} - -static inline void -cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq) -{ - struct cfq_data *cfqd = cfqq->cfqd; - unsigned long now; - - if (!cfq_crq_in_driver(crq)) - return; - - now = jiffies; - - WARN_ON(!cfqd->rq_in_driver); - cfqd->rq_in_driver--; - - if (!cfq_class_idle(cfqq)) - cfqd->last_end_request = now; - - if (!cfq_cfqq_dispatched(cfqq)) { - if (cfq_cfqq_on_rr(cfqq)) { - cfqq->service_last = now; - cfq_resort_rr_list(cfqq, 0); - } - if 
(cfq_cfqq_expired(cfqq)) { - __cfq_slice_expired(cfqd, cfqq, 0); - cfq_schedule_dispatch(cfqd); - } - } - - if (cfq_crq_is_sync(crq)) - crq->io_context->last_end_request = now; -} - -static struct request *cfq_next_request(request_queue_t *q) -{ - struct cfq_data *cfqd = q->elevator->elevator_data; - struct request *rq; - - if (!list_empty(&q->queue_head)) { - struct cfq_rq *crq; -dispatch: - rq = list_entry_rq(q->queue_head.next); - - crq = RQ_DATA(rq); - if (crq) { - struct cfq_queue *cfqq = crq->cfq_queue; - - /* - * if idle window is disabled, allow queue buildup - */ - if (!cfq_crq_in_driver(crq) && - !cfq_cfqq_idle_window(cfqq) && - !blk_barrier_rq(rq) && - cfqd->rq_in_driver >= cfqd->cfq_max_depth) - return NULL; - - cfq_remove_merge_hints(q, crq); - cfq_account_dispatch(crq); - } - - return rq; - } - - if (cfq_dispatch_requests(q, cfqd->cfq_quantum, 0)) - goto dispatch; - - return NULL; -} - /* * task holds one reference to the queue, dropped when task exits. each crq * in-flight on this queue also holds a reference, dropped when crq is freed. @@ -1422,7 +1246,7 @@ static void cfq_exit_io_context(struct cfq_io_context *cic) } static struct cfq_io_context * -cfq_alloc_io_context(struct cfq_data *cfqd, int gfp_mask) +cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) { struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask); @@ -1517,7 +1341,7 @@ static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio) static struct cfq_queue * cfq_get_queue(struct cfq_data *cfqd, unsigned int key, unsigned short ioprio, - int gfp_mask) + gfp_t gfp_mask) { const int hashval = hash_long(key, CFQ_QHASH_SHIFT); struct cfq_queue *cfqq, *new_cfqq = NULL; @@ -1578,7 +1402,7 @@ out: * cfqq, so we don't need to worry about it disappearing */ static struct cfq_io_context * -cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, int gfp_mask) +cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask) { struct io_context *ioc = NULL; struct cfq_io_context *cic; @@ -1816,8 +1640,9 @@ cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, } } -static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq) +static void cfq_insert_request(request_queue_t *q, struct request *rq) { + struct cfq_data *cfqd = q->elevator->elevator_data; struct cfq_rq *crq = RQ_DATA(rq); struct cfq_queue *cfqq = crq->cfq_queue; @@ -1827,66 +1652,43 @@ static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq) list_add_tail(&rq->queuelist, &cfqq->fifo); - if (rq_mergeable(rq)) { + if (rq_mergeable(rq)) cfq_add_crq_hash(cfqd, crq); - if (!cfqd->queue->last_merge) - cfqd->queue->last_merge = rq; - } - cfq_crq_enqueued(cfqd, cfqq, crq); } -static void -cfq_insert_request(request_queue_t *q, struct request *rq, int where) -{ - struct cfq_data *cfqd = q->elevator->elevator_data; - - switch (where) { - case ELEVATOR_INSERT_BACK: - while (cfq_dispatch_requests(q, INT_MAX, 1)) - ; - list_add_tail(&rq->queuelist, &q->queue_head); - /* - * If we were idling with pending requests on - * inactive cfqqs, force dispatching will - * remove the idle timer and the queue won't - * be kicked by __make_request() afterward. - * Kick it here. 
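cfq_dispatch_requests() above gives the force argument the same meaning it has in AS and deadline: a forced dispatch (used by the core to drain the scheduler, e.g. for a barrier insertion or an elevator switch) must move everything, while a normal dispatch is bounded by the scheduler's own quantum. The common shape is roughly this (pseudocode sketch; example_quantum and example_pick_request() are placeholders, not functions from this patch):

static int example_dispatch(request_queue_t *q, int force)
{
        int max_dispatch = force ? INT_MAX : example_quantum;
        int dispatched = 0;
        struct request *rq;

        /* example_pick_request() stands for the scheduler's policy and is
         * assumed to unlink rq from the scheduler's private structures */
        while (dispatched < max_dispatch && (rq = example_pick_request(q))) {
                elv_dispatch_sort(q, rq);
                dispatched++;
        }

        return dispatched;
}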
- */ - cfq_schedule_dispatch(cfqd); - break; - case ELEVATOR_INSERT_FRONT: - list_add(&rq->queuelist, &q->queue_head); - break; - case ELEVATOR_INSERT_SORT: - BUG_ON(!blk_fs_request(rq)); - cfq_enqueue(cfqd, rq); - break; - default: - printk("%s: bad insert point %d\n", __FUNCTION__,where); - return; - } -} - static void cfq_completed_request(request_queue_t *q, struct request *rq) { struct cfq_rq *crq = RQ_DATA(rq); - struct cfq_queue *cfqq; + struct cfq_queue *cfqq = crq->cfq_queue; + struct cfq_data *cfqd = cfqq->cfqd; + const int sync = cfq_crq_is_sync(crq); + unsigned long now; - if (unlikely(!blk_fs_request(rq))) - return; + now = jiffies; - cfqq = crq->cfq_queue; + WARN_ON(!cfqd->rq_in_driver); + WARN_ON(!cfqq->on_dispatch[sync]); + cfqd->rq_in_driver--; + cfqq->on_dispatch[sync]--; - if (cfq_crq_in_flight(crq)) { - const int sync = cfq_crq_is_sync(crq); + if (!cfq_class_idle(cfqq)) + cfqd->last_end_request = now; - WARN_ON(!cfqq->on_dispatch[sync]); - cfqq->on_dispatch[sync]--; + if (!cfq_cfqq_dispatched(cfqq)) { + if (cfq_cfqq_on_rr(cfqq)) { + cfqq->service_last = now; + cfq_resort_rr_list(cfqq, 0); + } + if (cfq_cfqq_expired(cfqq)) { + __cfq_slice_expired(cfqd, cfqq, 0); + cfq_schedule_dispatch(cfqd); + } } - cfq_account_completion(cfqq, crq); + if (cfq_crq_is_sync(crq)) + crq->io_context->last_end_request = now; } static struct request * @@ -2075,7 +1877,7 @@ static void cfq_put_request(request_queue_t *q, struct request *rq) */ static int cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio, - int gfp_mask) + gfp_t gfp_mask) { struct cfq_data *cfqd = q->elevator->elevator_data; struct task_struct *tsk = current; @@ -2118,9 +1920,6 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio, INIT_HLIST_NODE(&crq->hash); crq->cfq_queue = cfqq; crq->io_context = cic; - cfq_clear_crq_in_flight(crq); - cfq_clear_crq_in_driver(crq); - cfq_clear_crq_requeued(crq); if (rw == READ || process_sync(tsk)) cfq_mark_crq_is_sync(crq); @@ -2201,7 +2000,7 @@ static void cfq_idle_slice_timer(unsigned long data) * only expire and reinvoke request handler, if there are * other queues with pending requests */ - if (!cfq_pending_requests(cfqd)) { + if (!cfqd->busy_queues) { cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end); add_timer(&cfqd->idle_slice_timer); goto out_cont; @@ -2576,10 +2375,9 @@ static struct elevator_type iosched_cfq = { .elevator_merge_fn = cfq_merge, .elevator_merged_fn = cfq_merged_request, .elevator_merge_req_fn = cfq_merged_requests, - .elevator_next_req_fn = cfq_next_request, + .elevator_dispatch_fn = cfq_dispatch_requests, .elevator_add_req_fn = cfq_insert_request, - .elevator_remove_req_fn = cfq_remove_request, - .elevator_requeue_req_fn = cfq_requeue_request, + .elevator_activate_req_fn = cfq_activate_request, .elevator_deactivate_req_fn = cfq_deactivate_request, .elevator_queue_empty_fn = cfq_queue_empty, .elevator_completed_req_fn = cfq_completed_request, diff --git a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c index 52a3ae5289a0..7929471d7df7 100644 --- a/drivers/block/deadline-iosched.c +++ b/drivers/block/deadline-iosched.c @@ -50,7 +50,6 @@ struct deadline_data { * next in sort order. 
read, write or both are NULL */ struct deadline_rq *next_drq[2]; - struct list_head *dispatch; /* driver dispatch queue */ struct list_head *hash; /* request hash */ unsigned int batching; /* number of sequential requests made */ sector_t last_sector; /* head position */ @@ -113,15 +112,6 @@ static inline void deadline_del_drq_hash(struct deadline_rq *drq) __deadline_del_drq_hash(drq); } -static void -deadline_remove_merge_hints(request_queue_t *q, struct deadline_rq *drq) -{ - deadline_del_drq_hash(drq); - - if (q->last_merge == drq->request) - q->last_merge = NULL; -} - static inline void deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq) { @@ -239,10 +229,9 @@ deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq) dd->next_drq[data_dir] = rb_entry_drq(rbnext); } - if (ON_RB(&drq->rb_node)) { - rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq)); - RB_CLEAR(&drq->rb_node); - } + BUG_ON(!ON_RB(&drq->rb_node)); + rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq)); + RB_CLEAR(&drq->rb_node); } static struct request * @@ -286,7 +275,7 @@ deadline_find_first_drq(struct deadline_data *dd, int data_dir) /* * add drq to rbtree and fifo */ -static inline void +static void deadline_add_request(struct request_queue *q, struct request *rq) { struct deadline_data *dd = q->elevator->elevator_data; @@ -301,12 +290,8 @@ deadline_add_request(struct request_queue *q, struct request *rq) drq->expires = jiffies + dd->fifo_expire[data_dir]; list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]); - if (rq_mergeable(rq)) { + if (rq_mergeable(rq)) deadline_add_drq_hash(dd, drq); - - if (!q->last_merge) - q->last_merge = rq; - } } /* @@ -315,14 +300,11 @@ deadline_add_request(struct request_queue *q, struct request *rq) static void deadline_remove_request(request_queue_t *q, struct request *rq) { struct deadline_rq *drq = RQ_DATA(rq); + struct deadline_data *dd = q->elevator->elevator_data; - if (drq) { - struct deadline_data *dd = q->elevator->elevator_data; - - list_del_init(&drq->fifo); - deadline_remove_merge_hints(q, drq); - deadline_del_drq_rb(dd, drq); - } + list_del_init(&drq->fifo); + deadline_del_drq_rb(dd, drq); + deadline_del_drq_hash(drq); } static int @@ -332,15 +314,6 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio) struct request *__rq; int ret; - /* - * try last_merge to avoid going to hash - */ - ret = elv_try_last_merge(q, bio); - if (ret != ELEVATOR_NO_MERGE) { - __rq = q->last_merge; - goto out_insert; - } - /* * see if the merge hash can satisfy a back merge */ @@ -373,8 +346,6 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio) return ELEVATOR_NO_MERGE; out: - q->last_merge = __rq; -out_insert: if (ret) deadline_hot_drq_hash(dd, RQ_DATA(__rq)); *req = __rq; @@ -399,8 +370,6 @@ static void deadline_merged_request(request_queue_t *q, struct request *req) deadline_del_drq_rb(dd, drq); deadline_add_drq_rb(dd, drq); } - - q->last_merge = req; } static void @@ -452,7 +421,7 @@ deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq) request_queue_t *q = drq->request->q; deadline_remove_request(q, drq->request); - list_add_tail(&drq->request->queuelist, dd->dispatch); + elv_dispatch_add_tail(q, drq->request); } /* @@ -502,8 +471,9 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir) * deadline_dispatch_requests selects the best request according to * read/write expire, fifo_batch, etc */ -static int deadline_dispatch_requests(struct deadline_data *dd) +static int 
deadline_dispatch_requests(request_queue_t *q, int force) { + struct deadline_data *dd = q->elevator->elevator_data; const int reads = !list_empty(&dd->fifo_list[READ]); const int writes = !list_empty(&dd->fifo_list[WRITE]); struct deadline_rq *drq; @@ -597,65 +567,12 @@ dispatch_request: return 1; } -static struct request *deadline_next_request(request_queue_t *q) -{ - struct deadline_data *dd = q->elevator->elevator_data; - struct request *rq; - - /* - * if there are still requests on the dispatch queue, grab the first one - */ - if (!list_empty(dd->dispatch)) { -dispatch: - rq = list_entry_rq(dd->dispatch->next); - return rq; - } - - if (deadline_dispatch_requests(dd)) - goto dispatch; - - return NULL; -} - -static void -deadline_insert_request(request_queue_t *q, struct request *rq, int where) -{ - struct deadline_data *dd = q->elevator->elevator_data; - - /* barriers must flush the reorder queue */ - if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER) - && where == ELEVATOR_INSERT_SORT)) - where = ELEVATOR_INSERT_BACK; - - switch (where) { - case ELEVATOR_INSERT_BACK: - while (deadline_dispatch_requests(dd)) - ; - list_add_tail(&rq->queuelist, dd->dispatch); - break; - case ELEVATOR_INSERT_FRONT: - list_add(&rq->queuelist, dd->dispatch); - break; - case ELEVATOR_INSERT_SORT: - BUG_ON(!blk_fs_request(rq)); - deadline_add_request(q, rq); - break; - default: - printk("%s: bad insert point %d\n", __FUNCTION__,where); - return; - } -} - static int deadline_queue_empty(request_queue_t *q) { struct deadline_data *dd = q->elevator->elevator_data; - if (!list_empty(&dd->fifo_list[WRITE]) - || !list_empty(&dd->fifo_list[READ]) - || !list_empty(dd->dispatch)) - return 0; - - return 1; + return list_empty(&dd->fifo_list[WRITE]) + && list_empty(&dd->fifo_list[READ]); } static struct request * @@ -733,7 +650,6 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e) INIT_LIST_HEAD(&dd->fifo_list[WRITE]); dd->sort_list[READ] = RB_ROOT; dd->sort_list[WRITE] = RB_ROOT; - dd->dispatch = &q->queue_head; dd->fifo_expire[READ] = read_expire; dd->fifo_expire[WRITE] = write_expire; dd->writes_starved = writes_starved; @@ -748,15 +664,13 @@ static void deadline_put_request(request_queue_t *q, struct request *rq) struct deadline_data *dd = q->elevator->elevator_data; struct deadline_rq *drq = RQ_DATA(rq); - if (drq) { - mempool_free(drq, dd->drq_pool); - rq->elevator_private = NULL; - } + mempool_free(drq, dd->drq_pool); + rq->elevator_private = NULL; } static int deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio, - int gfp_mask) + gfp_t gfp_mask) { struct deadline_data *dd = q->elevator->elevator_data; struct deadline_rq *drq; @@ -917,9 +831,8 @@ static struct elevator_type iosched_deadline = { .elevator_merge_fn = deadline_merge, .elevator_merged_fn = deadline_merged_request, .elevator_merge_req_fn = deadline_merged_requests, - .elevator_next_req_fn = deadline_next_request, - .elevator_add_req_fn = deadline_insert_request, - .elevator_remove_req_fn = deadline_remove_request, + .elevator_dispatch_fn = deadline_dispatch_requests, + .elevator_add_req_fn = deadline_add_request, .elevator_queue_empty_fn = deadline_queue_empty, .elevator_former_req_fn = deadline_former_request, .elevator_latter_req_fn = deadline_latter_request, diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c index 98f0126a2deb..55621d5c5774 100644 --- a/drivers/block/elevator.c +++ b/drivers/block/elevator.c @@ -34,6 +34,7 @@ #include #include #include +#include #include @@ -83,21 
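deadline's policy itself is untouched by the interface change: requests are stamped with an expiry when they are added and the dispatch hook prefers the sorted lists until a FIFO head overruns its deadline. In miniature (illustrative fragment; ddir and the surrounding batching logic are left out, cf. deadline_check_fifo()):

        /* at insertion time, in deadline_add_request() */
        drq->expires = jiffies + dd->fifo_expire[data_dir];

        /* at dispatch time, simplified */
        drq = list_entry(dd->fifo_list[ddir].next, struct deadline_rq, fifo);
        if (time_after(jiffies, drq->expires))
                /* deadline missed: service the FIFO head before the
                 * sector-sorted rbtree */ ;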
+84,11 @@ inline int elv_try_merge(struct request *__rq, struct bio *bio) } EXPORT_SYMBOL(elv_try_merge); -inline int elv_try_last_merge(request_queue_t *q, struct bio *bio) -{ - if (q->last_merge) - return elv_try_merge(q->last_merge, bio); - - return ELEVATOR_NO_MERGE; -} -EXPORT_SYMBOL(elv_try_last_merge); - static struct elevator_type *elevator_find(const char *name) { struct elevator_type *e = NULL; struct list_head *entry; - spin_lock_irq(&elv_list_lock); list_for_each(entry, &elv_list) { struct elevator_type *__e; @@ -108,7 +99,6 @@ static struct elevator_type *elevator_find(const char *name) break; } } - spin_unlock_irq(&elv_list_lock); return e; } @@ -120,12 +110,15 @@ static void elevator_put(struct elevator_type *e) static struct elevator_type *elevator_get(const char *name) { - struct elevator_type *e = elevator_find(name); + struct elevator_type *e; - if (!e) - return NULL; - if (!try_module_get(e->elevator_owner)) - return NULL; + spin_lock_irq(&elv_list_lock); + + e = elevator_find(name); + if (e && !try_module_get(e->elevator_owner)) + e = NULL; + + spin_unlock_irq(&elv_list_lock); return e; } @@ -139,8 +132,6 @@ static int elevator_attach(request_queue_t *q, struct elevator_type *e, eq->ops = &e->ops; eq->elevator_type = e; - INIT_LIST_HEAD(&q->queue_head); - q->last_merge = NULL; q->elevator = eq; if (eq->ops->elevator_init_fn) @@ -153,11 +144,15 @@ static char chosen_elevator[16]; static void elevator_setup_default(void) { + struct elevator_type *e; + /* * check if default is set and exists */ - if (chosen_elevator[0] && elevator_find(chosen_elevator)) + if (chosen_elevator[0] && (e = elevator_get(chosen_elevator))) { + elevator_put(e); return; + } #if defined(CONFIG_IOSCHED_AS) strcpy(chosen_elevator, "anticipatory"); @@ -186,6 +181,11 @@ int elevator_init(request_queue_t *q, char *name) struct elevator_queue *eq; int ret = 0; + INIT_LIST_HEAD(&q->queue_head); + q->last_merge = NULL; + q->end_sector = 0; + q->boundary_rq = NULL; + elevator_setup_default(); if (!name) @@ -220,9 +220,52 @@ void elevator_exit(elevator_t *e) kfree(e); } +/* + * Insert rq into dispatch queue of q. Queue lock must be held on + * entry. If sort != 0, rq is sort-inserted; otherwise, rq will be + * appended to the dispatch queue. To be used by specific elevators. 
+ */ +void elv_dispatch_sort(request_queue_t *q, struct request *rq) +{ + sector_t boundary; + struct list_head *entry; + + if (q->last_merge == rq) + q->last_merge = NULL; + + boundary = q->end_sector; + + list_for_each_prev(entry, &q->queue_head) { + struct request *pos = list_entry_rq(entry); + + if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED)) + break; + if (rq->sector >= boundary) { + if (pos->sector < boundary) + continue; + } else { + if (pos->sector >= boundary) + break; + } + if (rq->sector >= pos->sector) + break; + } + + list_add(&rq->queuelist, entry); +} + int elv_merge(request_queue_t *q, struct request **req, struct bio *bio) { elevator_t *e = q->elevator; + int ret; + + if (q->last_merge) { + ret = elv_try_merge(q->last_merge, bio); + if (ret != ELEVATOR_NO_MERGE) { + *req = q->last_merge; + return ret; + } + } if (e->ops->elevator_merge_fn) return e->ops->elevator_merge_fn(q, req, bio); @@ -236,6 +279,8 @@ void elv_merged_request(request_queue_t *q, struct request *rq) if (e->ops->elevator_merged_fn) e->ops->elevator_merged_fn(q, rq); + + q->last_merge = rq; } void elv_merge_requests(request_queue_t *q, struct request *rq, @@ -243,20 +288,13 @@ void elv_merge_requests(request_queue_t *q, struct request *rq, { elevator_t *e = q->elevator; - if (q->last_merge == next) - q->last_merge = NULL; - if (e->ops->elevator_merge_req_fn) e->ops->elevator_merge_req_fn(q, rq, next); + + q->last_merge = rq; } -/* - * For careful internal use by the block layer. Essentially the same as - * a requeue in that it tells the io scheduler that this request is not - * active in the driver or hardware anymore, but we don't want the request - * added back to the scheduler. Function is not exported. - */ -void elv_deactivate_request(request_queue_t *q, struct request *rq) +void elv_requeue_request(request_queue_t *q, struct request *rq) { elevator_t *e = q->elevator; @@ -264,19 +302,14 @@ void elv_deactivate_request(request_queue_t *q, struct request *rq) * it already went through dequeue, we need to decrement the * in_flight count again */ - if (blk_account_rq(rq)) + if (blk_account_rq(rq)) { q->in_flight--; + if (blk_sorted_rq(rq) && e->ops->elevator_deactivate_req_fn) + e->ops->elevator_deactivate_req_fn(q, rq); + } rq->flags &= ~REQ_STARTED; - if (e->ops->elevator_deactivate_req_fn) - e->ops->elevator_deactivate_req_fn(q, rq); -} - -void elv_requeue_request(request_queue_t *q, struct request *rq) -{ - elv_deactivate_request(q, rq); - /* * if this is the flush, requeue the original instead and drop the flush */ @@ -285,31 +318,27 @@ void elv_requeue_request(request_queue_t *q, struct request *rq) rq = rq->end_io_data; } - /* - * the request is prepped and may have some resources allocated. - * allowing unprepped requests to pass this one may cause resource - * deadlock. turn on softbarrier. - */ - rq->flags |= REQ_SOFTBARRIER; - - /* - * if iosched has an explicit requeue hook, then use that. 
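The backwards scan in elv_dispatch_sort() above keeps the dispatch queue in a one-way-elevator order that starts at q->end_sector rather than at sector zero: requests at or above the boundary stay sector-sorted, requests below it queue up after the wrap, and barrier or already-started requests are never passed. A worked example (sector numbers invented for illustration):

/*
 * boundary (q->end_sector) = 1000
 *
 *   dispatch queue:      [1012, 1020, 8]
 *   insert sector 1016:  [1012, 1016, 1020, 8]
 *   insert sector 4:     [1012, 1016, 1020, 4, 8]
 */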
otherwise - * just put the request at the front of the queue - */ - if (q->elevator->ops->elevator_requeue_req_fn) - q->elevator->ops->elevator_requeue_req_fn(q, rq); - else - __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0); + __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0); } void __elv_add_request(request_queue_t *q, struct request *rq, int where, int plug) { - /* - * barriers implicitly indicate back insertion - */ - if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER) && - where == ELEVATOR_INSERT_SORT) + if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) { + /* + * barriers implicitly indicate back insertion + */ + if (where == ELEVATOR_INSERT_SORT) + where = ELEVATOR_INSERT_BACK; + + /* + * this request is scheduling boundary, update end_sector + */ + if (blk_fs_request(rq)) { + q->end_sector = rq_end_sector(rq); + q->boundary_rq = rq; + } + } else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT) where = ELEVATOR_INSERT_BACK; if (plug) @@ -317,23 +346,54 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where, rq->q = q; - if (!test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)) { - q->elevator->ops->elevator_add_req_fn(q, rq, where); + switch (where) { + case ELEVATOR_INSERT_FRONT: + rq->flags |= REQ_SOFTBARRIER; - if (blk_queue_plugged(q)) { - int nrq = q->rq.count[READ] + q->rq.count[WRITE] - - q->in_flight; + list_add(&rq->queuelist, &q->queue_head); + break; - if (nrq >= q->unplug_thresh) - __generic_unplug_device(q); - } - } else + case ELEVATOR_INSERT_BACK: + rq->flags |= REQ_SOFTBARRIER; + + while (q->elevator->ops->elevator_dispatch_fn(q, 1)) + ; + list_add_tail(&rq->queuelist, &q->queue_head); /* - * if drain is set, store the request "locally". when the drain - * is finished, the requests will be handed ordered to the io - * scheduler + * We kick the queue here for the following reasons. + * - The elevator might have returned NULL previously + * to delay requests and returned them now. As the + * queue wasn't empty before this request, ll_rw_blk + * won't run the queue on return, resulting in hang. + * - Usually, back inserted requests won't be merged + * with anything. There's no point in delaying queue + * processing. 
*/ - list_add_tail(&rq->queuelist, &q->drain_list); + blk_remove_plug(q); + q->request_fn(q); + break; + + case ELEVATOR_INSERT_SORT: + BUG_ON(!blk_fs_request(rq)); + rq->flags |= REQ_SORTED; + q->elevator->ops->elevator_add_req_fn(q, rq); + if (q->last_merge == NULL && rq_mergeable(rq)) + q->last_merge = rq; + break; + + default: + printk(KERN_ERR "%s: bad insertion point %d\n", + __FUNCTION__, where); + BUG(); + } + + if (blk_queue_plugged(q)) { + int nrq = q->rq.count[READ] + q->rq.count[WRITE] + - q->in_flight; + + if (nrq >= q->unplug_thresh) + __generic_unplug_device(q); + } } void elv_add_request(request_queue_t *q, struct request *rq, int where, @@ -348,13 +408,19 @@ void elv_add_request(request_queue_t *q, struct request *rq, int where, static inline struct request *__elv_next_request(request_queue_t *q) { - struct request *rq = q->elevator->ops->elevator_next_req_fn(q); + struct request *rq; + + if (unlikely(list_empty(&q->queue_head) && + !q->elevator->ops->elevator_dispatch_fn(q, 0))) + return NULL; + + rq = list_entry_rq(q->queue_head.next); /* * if this is a barrier write and the device has to issue a * flush sequence to support it, check how far we are */ - if (rq && blk_fs_request(rq) && blk_barrier_rq(rq)) { + if (blk_fs_request(rq) && blk_barrier_rq(rq)) { BUG_ON(q->ordered == QUEUE_ORDERED_NONE); if (q->ordered == QUEUE_ORDERED_FLUSH && @@ -371,15 +437,30 @@ struct request *elv_next_request(request_queue_t *q) int ret; while ((rq = __elv_next_request(q)) != NULL) { - /* - * just mark as started even if we don't start it, a request - * that has been delayed should not be passed by new incoming - * requests - */ - rq->flags |= REQ_STARTED; + if (!(rq->flags & REQ_STARTED)) { + elevator_t *e = q->elevator; - if (rq == q->last_merge) - q->last_merge = NULL; + /* + * This is the first time the device driver + * sees this request (possibly after + * requeueing). Notify IO scheduler. + */ + if (blk_sorted_rq(rq) && + e->ops->elevator_activate_req_fn) + e->ops->elevator_activate_req_fn(q, rq); + + /* + * just mark as started even if we don't start + * it, a request that has been delayed should + * not be passed by new incoming requests + */ + rq->flags |= REQ_STARTED; + } + + if (!q->boundary_rq || q->boundary_rq == rq) { + q->end_sector = rq_end_sector(rq); + q->boundary_rq = NULL; + } if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn) break; @@ -391,9 +472,9 @@ struct request *elv_next_request(request_queue_t *q) /* * the request may have been (partially) prepped. * we need to keep this request in the front to - * avoid resource deadlock. turn on softbarrier. + * avoid resource deadlock. REQ_STARTED will + * prevent other fs requests from passing this one. */ - rq->flags |= REQ_SOFTBARRIER; rq = NULL; break; } else if (ret == BLKPREP_KILL) { @@ -416,42 +497,32 @@ struct request *elv_next_request(request_queue_t *q) return rq; } -void elv_remove_request(request_queue_t *q, struct request *rq) +void elv_dequeue_request(request_queue_t *q, struct request *rq) { - elevator_t *e = q->elevator; + BUG_ON(list_empty(&rq->queuelist)); + + list_del_init(&rq->queuelist); /* * the time frame between a request being removed from the lists * and to it is freed is accounted as io that is in progress at - * the driver side. note that we only account requests that the - * driver has seen (REQ_STARTED set), to avoid false accounting - * for request-request merges + * the driver side. 
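With the switch above, __elv_add_request() is now the single place that decides whether a request ever reaches the io scheduler. Summarised (informal recap of the three insertion points):

/*
 * ELEVATOR_INSERT_FRONT: mark REQ_SOFTBARRIER and put the request at the
 *      head of q->queue_head; the scheduler never sees it.
 * ELEVATOR_INSERT_BACK:  mark REQ_SOFTBARRIER, force-drain the scheduler
 *      with elevator_dispatch_fn(q, 1), append to q->queue_head, then
 *      kick the queue so the drained requests are actually processed.
 * ELEVATOR_INSERT_SORT:  fs requests only; set REQ_SORTED, hand the
 *      request to elevator_add_req_fn(), and seed q->last_merge if it is
 *      mergeable and nothing is cached yet.
 */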
*/ if (blk_account_rq(rq)) q->in_flight++; - - /* - * the main clearing point for q->last_merge is on retrieval of - * request by driver (it calls elv_next_request()), but it _can_ - * also happen here if a request is added to the queue but later - * deleted without ever being given to driver (merged with another - * request). - */ - if (rq == q->last_merge) - q->last_merge = NULL; - - if (e->ops->elevator_remove_req_fn) - e->ops->elevator_remove_req_fn(q, rq); } int elv_queue_empty(request_queue_t *q) { elevator_t *e = q->elevator; + if (!list_empty(&q->queue_head)) + return 0; + if (e->ops->elevator_queue_empty_fn) return e->ops->elevator_queue_empty_fn(q); - return list_empty(&q->queue_head); + return 1; } struct request *elv_latter_request(request_queue_t *q, struct request *rq) @@ -487,7 +558,7 @@ struct request *elv_former_request(request_queue_t *q, struct request *rq) } int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio, - int gfp_mask) + gfp_t gfp_mask) { elevator_t *e = q->elevator; @@ -523,11 +594,11 @@ void elv_completed_request(request_queue_t *q, struct request *rq) /* * request is released from the driver, io must be done */ - if (blk_account_rq(rq)) + if (blk_account_rq(rq)) { q->in_flight--; - - if (e->ops->elevator_completed_req_fn) - e->ops->elevator_completed_req_fn(q, rq); + if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn) + e->ops->elevator_completed_req_fn(q, rq); + } } int elv_register_queue(struct request_queue *q) @@ -555,10 +626,9 @@ void elv_unregister_queue(struct request_queue *q) int elv_register(struct elevator_type *e) { + spin_lock_irq(&elv_list_lock); if (elevator_find(e->elevator_name)) BUG(); - - spin_lock_irq(&elv_list_lock); list_add_tail(&e->list, &elv_list); spin_unlock_irq(&elv_list_lock); @@ -582,25 +652,36 @@ EXPORT_SYMBOL_GPL(elv_unregister); * switch to new_e io scheduler. be careful not to introduce deadlocks - * we don't free the old io scheduler, before we have allocated what we * need for the new one. this way we have a chance of going back to the old - * one, if the new one fails init for some reason. we also do an intermediate - * switch to noop to ensure safety with stack-allocated requests, since they - * don't originate from the block layer allocator. noop is safe here, because - * it never needs to touch the elevator itself for completion events. DRAIN - * flags will make sure we don't touch it for additions either. + * one, if the new one fails init for some reason. 
*/ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e) { - elevator_t *e = kmalloc(sizeof(elevator_t), GFP_KERNEL); - struct elevator_type *noop_elevator = NULL; - elevator_t *old_elevator; + elevator_t *old_elevator, *e; + /* + * Allocate new elevator + */ + e = kmalloc(sizeof(elevator_t), GFP_KERNEL); if (!e) goto error; /* - * first step, drain requests from the block freelist + * Turn on BYPASS and drain all requests w/ elevator private data */ - blk_wait_queue_drained(q, 0); + spin_lock_irq(q->queue_lock); + + set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); + + while (q->elevator->ops->elevator_dispatch_fn(q, 1)) + ; + + while (q->rq.elvpriv) { + spin_unlock_irq(q->queue_lock); + msleep(10); + spin_lock_irq(q->queue_lock); + } + + spin_unlock_irq(q->queue_lock); /* * unregister old elevator data @@ -608,18 +689,6 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e) elv_unregister_queue(q); old_elevator = q->elevator; - /* - * next step, switch to noop since it uses no private rq structures - * and doesn't allocate any memory for anything. then wait for any - * non-fs requests in-flight - */ - noop_elevator = elevator_get("noop"); - spin_lock_irq(q->queue_lock); - elevator_attach(q, noop_elevator, e); - spin_unlock_irq(q->queue_lock); - - blk_wait_queue_drained(q, 1); - /* * attach and start new elevator */ @@ -630,11 +699,10 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e) goto fail_register; /* - * finally exit old elevator and start queue again + * finally exit old elevator and turn off BYPASS. */ elevator_exit(old_elevator); - blk_finish_queue_drain(q); - elevator_put(noop_elevator); + clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); return; fail_register: @@ -643,13 +711,13 @@ fail_register: * one again (along with re-adding the sysfs dir) */ elevator_exit(e); + e = NULL; fail: q->elevator = old_elevator; elv_register_queue(q); - blk_finish_queue_drain(q); + clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); + kfree(e); error: - if (noop_elevator) - elevator_put(noop_elevator); elevator_put(new_e); printk(KERN_ERR "elevator: switch to %s failed\n",new_e->elevator_name); } @@ -701,11 +769,12 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name) return len; } +EXPORT_SYMBOL(elv_dispatch_sort); EXPORT_SYMBOL(elv_add_request); EXPORT_SYMBOL(__elv_add_request); EXPORT_SYMBOL(elv_requeue_request); EXPORT_SYMBOL(elv_next_request); -EXPORT_SYMBOL(elv_remove_request); +EXPORT_SYMBOL(elv_dequeue_request); EXPORT_SYMBOL(elv_queue_empty); EXPORT_SYMBOL(elv_completed_request); EXPORT_SYMBOL(elevator_exit); diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c index baedac522945..0af73512b9a8 100644 --- a/drivers/block/ll_rw_blk.c +++ b/drivers/block/ll_rw_blk.c @@ -263,8 +263,6 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn) blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); blk_queue_activity_fn(q, NULL, NULL); - - INIT_LIST_HEAD(&q->drain_list); } EXPORT_SYMBOL(blk_queue_make_request); @@ -353,6 +351,8 @@ static void blk_pre_flush_end_io(struct request *flush_rq) struct request *rq = flush_rq->end_io_data; request_queue_t *q = rq->q; + elv_completed_request(q, flush_rq); + rq->flags |= REQ_BAR_PREFLUSH; if (!flush_rq->errors) @@ -369,6 +369,8 @@ static void blk_post_flush_end_io(struct request *flush_rq) struct request *rq = flush_rq->end_io_data; request_queue_t *q = rq->q; + elv_completed_request(q, flush_rq); + rq->flags |= REQ_BAR_POSTFLUSH; q->end_flush_fn(q, 
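QUEUE_FLAG_ELVSWITCH replaces the old queue-drain machinery by making the switch invisible to the scheduler instead of blocking submitters. The pieces, which are spread over several hunks of this patch, fit together as follows (outline, not new code):

/*
 * elevator_switch():
 *      set QUEUE_FLAG_ELVSWITCH, force-dispatch whatever the old
 *      scheduler still holds, then poll (msleep(10)) until
 *      q->rq.elvpriv drops to zero, i.e. no request carrying
 *      scheduler-private data remains in flight;
 *
 * get_request():
 *      priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
 *      requests allocated while the flag is set get no elevator private
 *      data, no REQ_ELVPRIV, and do not bump rl->elvpriv;
 *
 * __elv_add_request():
 *      an INSERT_SORT request without REQ_ELVPRIV is demoted to
 *      INSERT_BACK, so it goes straight to the dispatch queue.
 */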
flush_rq); @@ -408,8 +410,6 @@ struct request *blk_start_pre_flush(request_queue_t *q, struct request *rq) if (!list_empty(&rq->queuelist)) blkdev_dequeue_request(rq); - elv_deactivate_request(q, rq); - flush_rq->end_io_data = rq; flush_rq->end_io = blk_pre_flush_end_io; @@ -1040,6 +1040,7 @@ EXPORT_SYMBOL(blk_queue_invalidate_tags); static char *rq_flags[] = { "REQ_RW", "REQ_FAILFAST", + "REQ_SORTED", "REQ_SOFTBARRIER", "REQ_HARDBARRIER", "REQ_CMD", @@ -1047,6 +1048,7 @@ static char *rq_flags[] = { "REQ_STARTED", "REQ_DONTPREP", "REQ_QUEUED", + "REQ_ELVPRIV", "REQ_PC", "REQ_BLOCK_PC", "REQ_SENSE", @@ -1637,9 +1639,9 @@ static int blk_init_free_list(request_queue_t *q) rl->count[READ] = rl->count[WRITE] = 0; rl->starved[READ] = rl->starved[WRITE] = 0; + rl->elvpriv = 0; init_waitqueue_head(&rl->wait[READ]); init_waitqueue_head(&rl->wait[WRITE]); - init_waitqueue_head(&rl->drain); rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, request_cachep, q->node); @@ -1652,13 +1654,13 @@ static int blk_init_free_list(request_queue_t *q) static int __make_request(request_queue_t *, struct bio *); -request_queue_t *blk_alloc_queue(int gfp_mask) +request_queue_t *blk_alloc_queue(gfp_t gfp_mask) { return blk_alloc_queue_node(gfp_mask, -1); } EXPORT_SYMBOL(blk_alloc_queue); -request_queue_t *blk_alloc_queue_node(int gfp_mask, int node_id) +request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) { request_queue_t *q; @@ -1782,12 +1784,14 @@ EXPORT_SYMBOL(blk_get_queue); static inline void blk_free_request(request_queue_t *q, struct request *rq) { - elv_put_request(q, rq); + if (rq->flags & REQ_ELVPRIV) + elv_put_request(q, rq); mempool_free(rq, q->rq.rq_pool); } static inline struct request * -blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask) +blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, + int priv, gfp_t gfp_mask) { struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); @@ -1800,11 +1804,15 @@ blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask) */ rq->flags = rw; - if (!elv_set_request(q, rq, bio, gfp_mask)) - return rq; + if (priv) { + if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) { + mempool_free(rq, q->rq.rq_pool); + return NULL; + } + rq->flags |= REQ_ELVPRIV; + } - mempool_free(rq, q->rq.rq_pool); - return NULL; + return rq; } /* @@ -1860,22 +1868,18 @@ static void __freed_request(request_queue_t *q, int rw) * A request has just been released. Account for it, update the full and * congestion status, wake up any waiters. Called under q->queue_lock. */ -static void freed_request(request_queue_t *q, int rw) +static void freed_request(request_queue_t *q, int rw, int priv) { struct request_list *rl = &q->rq; rl->count[rw]--; + if (priv) + rl->elvpriv--; __freed_request(q, rw); if (unlikely(rl->starved[rw ^ 1])) __freed_request(q, rw ^ 1); - - if (!rl->count[READ] && !rl->count[WRITE]) { - smp_mb(); - if (unlikely(waitqueue_active(&rl->drain))) - wake_up(&rl->drain); - } } #define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist) @@ -1885,14 +1889,12 @@ static void freed_request(request_queue_t *q, int rw) * Returns !NULL on success, with queue_lock *not held*. 
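blk_alloc_request() and blk_free_request() above keep the scheduler's per-request data strictly paired: only a request that went through elv_set_request() carries REQ_ELVPRIV, only such requests are returned through elv_put_request(), and only they are counted in rl->elvpriv. Condensed (a sketch of the two paths shown in the hunks above):

        /* allocation, inside blk_alloc_request() */
        rq->flags = rw;
        if (priv) {
                if (elv_set_request(q, rq, bio, gfp_mask)) {
                        mempool_free(rq, q->rq.rq_pool);
                        return NULL;
                }
                rq->flags |= REQ_ELVPRIV;
        }

        /* release, inside __blk_put_request() / blk_free_request() */
        int priv = req->flags & REQ_ELVPRIV;

        if (req->flags & REQ_ELVPRIV)
                elv_put_request(q, req);
        mempool_free(req, q->rq.rq_pool);
        freed_request(q, rw, priv);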
*/ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio, - int gfp_mask) + gfp_t gfp_mask) { struct request *rq = NULL; struct request_list *rl = &q->rq; struct io_context *ioc = current_io_context(GFP_ATOMIC); - - if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))) - goto out; + int priv; if (rl->count[rw]+1 >= q->nr_requests) { /* @@ -1937,9 +1939,14 @@ get_rq: rl->starved[rw] = 0; if (rl->count[rw] >= queue_congestion_on_threshold(q)) set_queue_congested(q, rw); + + priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); + if (priv) + rl->elvpriv++; + spin_unlock_irq(q->queue_lock); - rq = blk_alloc_request(q, rw, bio, gfp_mask); + rq = blk_alloc_request(q, rw, bio, priv, gfp_mask); if (!rq) { /* * Allocation failed presumably due to memory. Undo anything @@ -1949,7 +1956,7 @@ get_rq: * wait queue, but this is pretty rare. */ spin_lock_irq(q->queue_lock); - freed_request(q, rw); + freed_request(q, rw, priv); /* * in the very unlikely event that allocation failed and no @@ -2019,7 +2026,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw, return rq; } -struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask) +struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask) { struct request *rq; @@ -2251,7 +2258,7 @@ EXPORT_SYMBOL(blk_rq_unmap_user); * @gfp_mask: memory allocation flags */ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf, - unsigned int len, unsigned int gfp_mask) + unsigned int len, gfp_t gfp_mask) { struct bio *bio; @@ -2433,13 +2440,15 @@ void disk_round_stats(struct gendisk *disk) { unsigned long now = jiffies; - __disk_stat_add(disk, time_in_queue, - disk->in_flight * (now - disk->stamp)); - disk->stamp = now; + if (now == disk->stamp) + return; - if (disk->in_flight) - __disk_stat_add(disk, io_ticks, (now - disk->stamp_idle)); - disk->stamp_idle = now; + if (disk->in_flight) { + __disk_stat_add(disk, time_in_queue, + disk->in_flight * (now - disk->stamp)); + __disk_stat_add(disk, io_ticks, (now - disk->stamp)); + } + disk->stamp = now; } /* @@ -2454,6 +2463,8 @@ static void __blk_put_request(request_queue_t *q, struct request *req) if (unlikely(--req->ref_count)) return; + elv_completed_request(q, req); + req->rq_status = RQ_INACTIVE; req->rl = NULL; @@ -2463,26 +2474,25 @@ static void __blk_put_request(request_queue_t *q, struct request *req) */ if (rl) { int rw = rq_data_dir(req); - - elv_completed_request(q, req); + int priv = req->flags & REQ_ELVPRIV; BUG_ON(!list_empty(&req->queuelist)); blk_free_request(q, req); - freed_request(q, rw); + freed_request(q, rw, priv); } } void blk_put_request(struct request *req) { - /* - * if req->rl isn't set, this request didnt originate from the - * block layer, so it's safe to just disregard it - */ - if (req->rl) { - unsigned long flags; - request_queue_t *q = req->q; + unsigned long flags; + request_queue_t *q = req->q; + /* + * Gee, IDE calls in w/ NULL q. Fix IDE and remove the + * following if (q) test. 
+ */ + if (q) { spin_lock_irqsave(q->queue_lock, flags); __blk_put_request(q, req); spin_unlock_irqrestore(q->queue_lock, flags); @@ -2797,97 +2807,6 @@ static inline void blk_partition_remap(struct bio *bio) } } -void blk_finish_queue_drain(request_queue_t *q) -{ - struct request_list *rl = &q->rq; - struct request *rq; - int requeued = 0; - - spin_lock_irq(q->queue_lock); - clear_bit(QUEUE_FLAG_DRAIN, &q->queue_flags); - - while (!list_empty(&q->drain_list)) { - rq = list_entry_rq(q->drain_list.next); - - list_del_init(&rq->queuelist); - elv_requeue_request(q, rq); - requeued++; - } - - if (requeued) - q->request_fn(q); - - spin_unlock_irq(q->queue_lock); - - wake_up(&rl->wait[0]); - wake_up(&rl->wait[1]); - wake_up(&rl->drain); -} - -static int wait_drain(request_queue_t *q, struct request_list *rl, int dispatch) -{ - int wait = rl->count[READ] + rl->count[WRITE]; - - if (dispatch) - wait += !list_empty(&q->queue_head); - - return wait; -} - -/* - * We rely on the fact that only requests allocated through blk_alloc_request() - * have io scheduler private data structures associated with them. Any other - * type of request (allocated on stack or through kmalloc()) should not go - * to the io scheduler core, but be attached to the queue head instead. - */ -void blk_wait_queue_drained(request_queue_t *q, int wait_dispatch) -{ - struct request_list *rl = &q->rq; - DEFINE_WAIT(wait); - - spin_lock_irq(q->queue_lock); - set_bit(QUEUE_FLAG_DRAIN, &q->queue_flags); - - while (wait_drain(q, rl, wait_dispatch)) { - prepare_to_wait(&rl->drain, &wait, TASK_UNINTERRUPTIBLE); - - if (wait_drain(q, rl, wait_dispatch)) { - __generic_unplug_device(q); - spin_unlock_irq(q->queue_lock); - io_schedule(); - spin_lock_irq(q->queue_lock); - } - - finish_wait(&rl->drain, &wait); - } - - spin_unlock_irq(q->queue_lock); -} - -/* - * block waiting for the io scheduler being started again. - */ -static inline void block_wait_queue_running(request_queue_t *q) -{ - DEFINE_WAIT(wait); - - while (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))) { - struct request_list *rl = &q->rq; - - prepare_to_wait_exclusive(&rl->drain, &wait, - TASK_UNINTERRUPTIBLE); - - /* - * re-check the condition. avoids using prepare_to_wait() - * in the fast path (queue is running) - */ - if (test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)) - io_schedule(); - - finish_wait(&rl->drain, &wait); - } -} - static void handle_bad_sector(struct bio *bio) { char b[BDEVNAME_SIZE]; @@ -2983,8 +2902,6 @@ end_io: if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) goto end_io; - block_wait_queue_running(q); - /* * If this device has partitions, remap block n * of partition p to block n+start(p) of the disk. @@ -3393,7 +3310,7 @@ void exit_io_context(void) * but since the current task itself holds a reference, the context can be * used in general code, so long as it stays within `current` context. */ -struct io_context *current_io_context(int gfp_flags) +struct io_context *current_io_context(gfp_t gfp_flags) { struct task_struct *tsk = current; struct io_context *ret; @@ -3424,7 +3341,7 @@ EXPORT_SYMBOL(current_io_context); * * This is always called in the context of the task which submitted the I/O. 
*/ -struct io_context *get_io_context(int gfp_flags) +struct io_context *get_io_context(gfp_t gfp_flags) { struct io_context *ret; ret = current_io_context(gfp_flags); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index b35e08876dd4..96c664af8d06 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -881,7 +881,7 @@ loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer, static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev) { struct file *filp = lo->lo_backing_file; - int gfp = lo->old_gfp_mask; + gfp_t gfp = lo->old_gfp_mask; if (lo->lo_state != Lo_bound) return -ENXIO; diff --git a/drivers/block/noop-iosched.c b/drivers/block/noop-iosched.c index b1730b62c37e..f56b8edb06e4 100644 --- a/drivers/block/noop-iosched.c +++ b/drivers/block/noop-iosched.c @@ -7,57 +7,19 @@ #include #include -/* - * See if we can find a request that this buffer can be coalesced with. - */ -static int elevator_noop_merge(request_queue_t *q, struct request **req, - struct bio *bio) +static void elevator_noop_add_request(request_queue_t *q, struct request *rq) { - int ret; - - ret = elv_try_last_merge(q, bio); - if (ret != ELEVATOR_NO_MERGE) - *req = q->last_merge; - - return ret; + elv_dispatch_add_tail(q, rq); } -static void elevator_noop_merge_requests(request_queue_t *q, struct request *req, - struct request *next) +static int elevator_noop_dispatch(request_queue_t *q, int force) { - list_del_init(&next->queuelist); -} - -static void elevator_noop_add_request(request_queue_t *q, struct request *rq, - int where) -{ - if (where == ELEVATOR_INSERT_FRONT) - list_add(&rq->queuelist, &q->queue_head); - else - list_add_tail(&rq->queuelist, &q->queue_head); - - /* - * new merges must not precede this barrier - */ - if (rq->flags & REQ_HARDBARRIER) - q->last_merge = NULL; - else if (!q->last_merge) - q->last_merge = rq; -} - -static struct request *elevator_noop_next_request(request_queue_t *q) -{ - if (!list_empty(&q->queue_head)) - return list_entry_rq(q->queue_head.next); - - return NULL; + return 0; } static struct elevator_type elevator_noop = { .ops = { - .elevator_merge_fn = elevator_noop_merge, - .elevator_merge_req_fn = elevator_noop_merge_requests, - .elevator_next_req_fn = elevator_noop_next_request, + .elevator_dispatch_fn = elevator_noop_dispatch, .elevator_add_req_fn = elevator_noop_add_request, }, .elevator_name = "noop", diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 7e22a58926b8..a280e679b1ca 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -229,7 +229,7 @@ static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets) return 1; } -static void *pkt_rb_alloc(unsigned int __nocast gfp_mask, void *data) +static void *pkt_rb_alloc(gfp_t gfp_mask, void *data) { return kmalloc(sizeof(struct pkt_rb_node), gfp_mask); } @@ -2082,7 +2082,7 @@ static int pkt_close(struct inode *inode, struct file *file) } -static void *psd_pool_alloc(unsigned int __nocast gfp_mask, void *data) +static void *psd_pool_alloc(gfp_t gfp_mask, void *data) { return kmalloc(sizeof(struct packet_stacked_data), gfp_mask); } diff --git a/drivers/block/rd.c b/drivers/block/rd.c index 145c1fbffe01..68c60a5bcdab 100644 --- a/drivers/block/rd.c +++ b/drivers/block/rd.c @@ -348,7 +348,7 @@ static int rd_open(struct inode *inode, struct file *filp) struct block_device *bdev = inode->i_bdev; struct address_space *mapping; unsigned bsize; - int gfp_mask; + gfp_t gfp_mask; inode = igrab(bdev->bd_inode); rd_bdev[unit] = bdev; diff --git 
a/drivers/block/scsi_ioctl.c b/drivers/block/scsi_ioctl.c index 079ec344eb47..382dea7b224c 100644 --- a/drivers/block/scsi_ioctl.c +++ b/drivers/block/scsi_ioctl.c @@ -201,15 +201,15 @@ static int verify_command(struct file *file, unsigned char *cmd) return 0; } + /* And root can do any command.. */ + if (capable(CAP_SYS_RAWIO)) + return 0; + if (!type) { cmd_type[cmd[0]] = CMD_WARNED; printk(KERN_WARNING "scsi: unknown opcode 0x%02x\n", cmd[0]); } - /* And root can do any command.. */ - if (capable(CAP_SYS_RAWIO)) - return 0; - /* Otherwise fail it with an "Operation not permitted" */ return -EPERM; } diff --git a/drivers/block/ub.c b/drivers/block/ub.c index aa0bf7ee008d..ed4d5006fe62 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c @@ -172,7 +172,7 @@ struct bulk_cs_wrap { */ struct ub_dev; -#define UB_MAX_REQ_SG 4 +#define UB_MAX_REQ_SG 9 /* cdrecord requires 32KB and maybe a header */ #define UB_MAX_SECTORS 64 /* @@ -387,7 +387,7 @@ struct ub_dev { struct bulk_cs_wrap work_bcs; struct usb_ctrlrequest work_cr; - int sg_stat[UB_MAX_REQ_SG+1]; + int sg_stat[6]; struct ub_scsi_trace tr; }; @@ -525,12 +525,13 @@ static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr, "qlen %d qmax %d\n", sc->cmd_queue.qlen, sc->cmd_queue.qmax); cnt += sprintf(page + cnt, - "sg %d %d %d %d %d\n", + "sg %d %d %d %d %d .. %d\n", sc->sg_stat[0], sc->sg_stat[1], sc->sg_stat[2], sc->sg_stat[3], - sc->sg_stat[4]); + sc->sg_stat[4], + sc->sg_stat[5]); list_for_each (p, &sc->luns) { lun = list_entry(p, struct ub_lun, link); @@ -835,7 +836,7 @@ static int ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, return -1; } cmd->nsg = n_elem; - sc->sg_stat[n_elem]++; + sc->sg_stat[n_elem < 5 ? n_elem : 5]++; /* * build the command @@ -891,7 +892,7 @@ static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, return -1; } cmd->nsg = n_elem; - sc->sg_stat[n_elem]++; + sc->sg_stat[n_elem < 5 ? n_elem : 5]++; memcpy(&cmd->cdb, rq->cmd, rq->cmd_len); cmd->cdb_len = rq->cmd_len; @@ -1010,7 +1011,6 @@ static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd) sc->last_pipe = sc->send_bulk_pipe; usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe, bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc); - sc->work_urb.transfer_flags = 0; /* Fill what we shouldn't be filling, because usb-storage did so. 
*/ sc->work_urb.actual_length = 0; @@ -1019,7 +1019,6 @@ static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd) if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { /* XXX Clear stalls */ - printk("ub: cmd #%d start failed (%d)\n", cmd->tag, rc); /* P3 */ ub_complete(&sc->work_done); return rc; } @@ -1190,11 +1189,9 @@ static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd) return; } if (urb->status != 0) { - printk("ub: cmd #%d cmd status (%d)\n", cmd->tag, urb->status); /* P3 */ goto Bad_End; } if (urb->actual_length != US_BULK_CB_WRAP_LEN) { - printk("ub: cmd #%d xferred %d\n", cmd->tag, urb->actual_length); /* P3 */ /* XXX Must do reset here to unconfuse the device */ goto Bad_End; } @@ -1395,14 +1392,12 @@ static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd) usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, page_address(sg->page) + sg->offset, sg->length, ub_urb_complete, sc); - sc->work_urb.transfer_flags = 0; sc->work_urb.actual_length = 0; sc->work_urb.error_count = 0; sc->work_urb.status = 0; if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { /* XXX Clear stalls */ - printk("ub: data #%d submit failed (%d)\n", cmd->tag, rc); /* P3 */ ub_complete(&sc->work_done); ub_state_done(sc, cmd, rc); return; @@ -1442,7 +1437,6 @@ static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) sc->last_pipe = sc->recv_bulk_pipe; usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe, &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc); - sc->work_urb.transfer_flags = 0; sc->work_urb.actual_length = 0; sc->work_urb.error_count = 0; sc->work_urb.status = 0; @@ -1563,7 +1557,6 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe, (unsigned char*) cr, NULL, 0, ub_urb_complete, sc); - sc->work_urb.transfer_flags = 0; sc->work_urb.actual_length = 0; sc->work_urb.error_count = 0; sc->work_urb.status = 0; @@ -2000,17 +1993,16 @@ static int ub_sync_getmaxlun(struct ub_dev *sc) usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe, (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl); - sc->work_urb.transfer_flags = 0; sc->work_urb.actual_length = 0; sc->work_urb.error_count = 0; sc->work_urb.status = 0; if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) { if (rc == -EPIPE) { - printk("%s: Stall at GetMaxLUN, using 1 LUN\n", + printk("%s: Stall submitting GetMaxLUN, using 1 LUN\n", sc->name); /* P3 */ } else { - printk(KERN_WARNING + printk(KERN_NOTICE "%s: Unable to submit GetMaxLUN (%d)\n", sc->name, rc); } @@ -2028,6 +2020,18 @@ static int ub_sync_getmaxlun(struct ub_dev *sc) del_timer_sync(&timer); usb_kill_urb(&sc->work_urb); + if ((rc = sc->work_urb.status) < 0) { + if (rc == -EPIPE) { + printk("%s: Stall at GetMaxLUN, using 1 LUN\n", + sc->name); /* P3 */ + } else { + printk(KERN_NOTICE + "%s: Error at GetMaxLUN (%d)\n", + sc->name, rc); + } + goto err_io; + } + if (sc->work_urb.actual_length != 1) { printk("%s: GetMaxLUN returned %d bytes\n", sc->name, sc->work_urb.actual_length); /* P3 */ @@ -2048,6 +2052,7 @@ static int ub_sync_getmaxlun(struct ub_dev *sc) kfree(p); return nluns; +err_io: err_submit: kfree(p); err_alloc: @@ -2080,7 +2085,6 @@ static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe) usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe, (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl); - sc->work_urb.transfer_flags = 0; sc->work_urb.actual_length 
= 0; sc->work_urb.error_count = 0; sc->work_urb.status = 0; @@ -2213,8 +2217,10 @@ static int ub_probe(struct usb_interface *intf, * This is needed to clear toggles. It is a problem only if we do * `rmmod ub && modprobe ub` without disconnects, but we like that. */ +#if 0 /* iPod Mini fails if we do this (big white iPod works) */ ub_probe_clear_stall(sc, sc->recv_bulk_pipe); ub_probe_clear_stall(sc, sc->send_bulk_pipe); +#endif /* * The way this is used by the startup code is a little specific. @@ -2241,10 +2247,10 @@ static int ub_probe(struct usb_interface *intf, for (i = 0; i < 3; i++) { if ((rc = ub_sync_getmaxlun(sc)) < 0) { /* - * Some devices (i.e. Iomega Zip100) need this -- - * apparently the bulk pipes get STALLed when the - * GetMaxLUN request is processed. - * XXX I have a ZIP-100, verify it does this. + * This segment is taken from usb-storage. They say + * that ZIP-100 needs this, but my own ZIP-100 works + * fine without this. + * Still, it does not seem to hurt anything. */ if (rc == -EPIPE) { ub_probe_clear_stall(sc, sc->recv_bulk_pipe); @@ -2313,7 +2319,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum) disk->first_minor = lun->id * UB_MINORS_PER_MAJOR; disk->fops = &ub_bd_fops; disk->private_data = lun; - disk->driverfs_dev = &sc->intf->dev; /* XXX Many to one ok? */ + disk->driverfs_dev = &sc->intf->dev; rc = -ENOMEM; if ((q = blk_init_queue(ub_request_fn, &sc->lock)) == NULL) @@ -2466,9 +2472,6 @@ static int __init ub_init(void) { int rc; - /* P3 */ printk("ub: sizeof ub_scsi_cmd %zu ub_dev %zu ub_lun %zu\n", - sizeof(struct ub_scsi_cmd), sizeof(struct ub_dev), sizeof(struct ub_lun)); - if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0) goto err_regblkdev; devfs_mk_dir(DEVFS_NAME); diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c index a1bf8f066c88..4fa85234d8b5 100644 --- a/drivers/bluetooth/bpa10x.c +++ b/drivers/bluetooth/bpa10x.c @@ -308,7 +308,7 @@ unlock: } static inline struct urb *bpa10x_alloc_urb(struct usb_device *udev, unsigned int pipe, - size_t size, unsigned int __nocast flags, void *data) + size_t size, gfp_t flags, void *data) { struct urb *urb; struct usb_ctrlrequest *cr; diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c index 57c48bbf6fe6..6756cb20b753 100644 --- a/drivers/bluetooth/hci_usb.c +++ b/drivers/bluetooth/hci_usb.c @@ -132,7 +132,7 @@ static struct usb_device_id blacklist_ids[] = { { } /* Terminating entry */ }; -static struct _urb *_urb_alloc(int isoc, unsigned int __nocast gfp) +static struct _urb *_urb_alloc(int isoc, gfp_t gfp) { struct _urb *_urb = kmalloc(sizeof(struct _urb) + sizeof(struct usb_iso_packet_descriptor) * isoc, gfp); diff --git a/drivers/char/.gitignore b/drivers/char/.gitignore new file mode 100644 index 000000000000..2b6b1d772ed7 --- /dev/null +++ b/drivers/char/.gitignore @@ -0,0 +1,3 @@ +consolemap_deftbl.c +defkeymap.c + diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c index 6ba48f346fcf..041bb47b5c39 100644 --- a/drivers/char/drm/drm_drv.c +++ b/drivers/char/drm/drm_drv.c @@ -376,7 +376,7 @@ static int __init drm_core_init(void) goto err_p2; } - drm_proc_root = create_proc_entry("dri", S_IFDIR, NULL); + drm_proc_root = proc_mkdir("dri", NULL); if (!drm_proc_root) { DRM_ERROR("Cannot create /proc/dri\n"); ret = -1; diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c index 32d2bb99462c..977961002488 100644 --- a/drivers/char/drm/drm_proc.c +++ b/drivers/char/drm/drm_proc.c @@ -95,7 +95,7 @@ int drm_proc_init(drm_device_t *dev, 
int minor, char name[64]; sprintf(name, "%d", minor); - *dev_root = create_proc_entry(name, S_IFDIR, root); + *dev_root = proc_mkdir(name, root); if (!*dev_root) { DRM_ERROR("Cannot create /proc/dri/%s\n", name); return -1; diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c index 95a976c96eb8..70458cb061c6 100644 --- a/drivers/char/drm/drm_stub.c +++ b/drivers/char/drm/drm_stub.c @@ -47,7 +47,7 @@ MODULE_PARM_DESC(cards_limit, "Maximum number of graphics cards"); MODULE_PARM_DESC(debug, "Enable debug output"); module_param_named(cards_limit, drm_cards_limit, int, 0444); -module_param_named(debug, drm_debug, int, 0666); +module_param_named(debug, drm_debug, int, 0600); drm_head_t **drm_heads; struct drm_sysfs_class *drm_class; diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c index ced4215e2275..39ea96e42c5b 100644 --- a/drivers/char/drm/drm_vm.c +++ b/drivers/char/drm/drm_vm.c @@ -148,7 +148,8 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma, offset = address - vma->vm_start; i = (unsigned long)map->handle + offset; - page = vmalloc_to_page((void *)i); + page = (map->type == _DRM_CONSISTENT) ? + virt_to_page((void *)i) : vmalloc_to_page((void *)i); if (!page) return NOPAGE_OOM; get_page(page); diff --git a/drivers/char/drm/mga_dma.c b/drivers/char/drm/mga_dma.c index fc7d4a594bca..c8e1b6c83636 100644 --- a/drivers/char/drm/mga_dma.c +++ b/drivers/char/drm/mga_dma.c @@ -437,7 +437,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev, drm_mga_dma_bootstrap_t * dma_bs) { drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private; - const unsigned int warp_size = mga_warp_microcode_size(dev_priv); + unsigned int warp_size = mga_warp_microcode_size(dev_priv); int err; unsigned offset; const unsigned secondary_size = dma_bs->secondary_bin_count @@ -499,6 +499,12 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev, return err; } + /* Make drm_addbufs happy by not trying to create a mapping for less + * than a page. + */ + if (warp_size < PAGE_SIZE) + warp_size = PAGE_SIZE; + offset = 0; err = drm_addmap( dev, offset, warp_size, _DRM_AGP, _DRM_READ_ONLY, & dev_priv->warp ); @@ -587,7 +593,7 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev, drm_mga_dma_bootstrap_t * dma_bs) { drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private; - const unsigned int warp_size = mga_warp_microcode_size(dev_priv); + unsigned int warp_size = mga_warp_microcode_size(dev_priv); unsigned int primary_size; unsigned int bin_count; int err; @@ -599,6 +605,12 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev, return DRM_ERR(EFAULT); } + /* Make drm_addbufs happy by not trying to create a mapping for less + * than a page. + */ + if (warp_size < PAGE_SIZE) + warp_size = PAGE_SIZE; + /* The proper alignment is 0x100 for this mapping */ err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT, _DRM_READ_ONLY, &dev_priv->warp); @@ -812,6 +824,10 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init ) } if (! 
dev_priv->used_new_dma_init) { + + dev_priv->dma_access = MGA_PAGPXFER; + dev_priv->wagp_enable = MGA_WAGP_ENABLE; + dev_priv->status = drm_core_findmap(dev, init->status_offset); if (!dev_priv->status) { DRM_ERROR("failed to find status page!\n"); @@ -928,7 +944,7 @@ static int mga_do_cleanup_dma( drm_device_t *dev ) drm_mga_private_t *dev_priv = dev->dev_private; if ((dev_priv->warp != NULL) - && (dev_priv->mmio->type != _DRM_CONSISTENT)) + && (dev_priv->warp->type != _DRM_CONSISTENT)) drm_core_ioremapfree(dev_priv->warp, dev); if ((dev_priv->primary != NULL) diff --git a/drivers/char/drm/mga_drv.h b/drivers/char/drm/mga_drv.h index b22fdbd4f830..6059c5a5b105 100644 --- a/drivers/char/drm/mga_drv.h +++ b/drivers/char/drm/mga_drv.h @@ -227,7 +227,7 @@ static inline u32 _MGA_READ(u32 *addr) #define MGA_EMIT_STATE( dev_priv, dirty ) \ do { \ if ( (dirty) & ~MGA_UPLOAD_CLIPRECTS ) { \ - if ( dev_priv->chipset == MGA_CARD_TYPE_G400 ) { \ + if ( dev_priv->chipset >= MGA_CARD_TYPE_G400 ) { \ mga_g400_emit_state( dev_priv ); \ } else { \ mga_g200_emit_state( dev_priv ); \ diff --git a/drivers/char/drm/mga_state.c b/drivers/char/drm/mga_state.c index 05bbb4719376..6ac5e006226f 100644 --- a/drivers/char/drm/mga_state.c +++ b/drivers/char/drm/mga_state.c @@ -53,7 +53,7 @@ static void mga_emit_clip_rect( drm_mga_private_t *dev_priv, /* Force reset of DWGCTL on G400 (eliminates clip disable bit). */ - if (dev_priv->chipset == MGA_CARD_TYPE_G400) { + if (dev_priv->chipset >= MGA_CARD_TYPE_G400) { DMA_BLOCK(MGA_DWGCTL, ctx->dwgctl, MGA_LEN + MGA_EXEC, 0x80000000, MGA_DWGCTL, ctx->dwgctl, diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c index 6d9080a3ca7e..12ef13ff04ca 100644 --- a/drivers/char/drm/radeon_cp.c +++ b/drivers/char/drm/radeon_cp.c @@ -1133,10 +1133,10 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev, ring_start = (dev_priv->cp_ring->offset - dev->agp->base + dev_priv->gart_vm_start); - } else + } else #endif ring_start = (dev_priv->cp_ring->offset - - dev->sg->handle + - (unsigned long)dev->sg->virtual + dev_priv->gart_vm_start); RADEON_WRITE( RADEON_CP_RB_BASE, ring_start ); @@ -1164,7 +1164,8 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev, drm_sg_mem_t *entry = dev->sg; unsigned long tmp_ofs, page_ofs; - tmp_ofs = dev_priv->ring_rptr->offset - dev->sg->handle; + tmp_ofs = dev_priv->ring_rptr->offset - + (unsigned long)dev->sg->virtual; page_ofs = tmp_ofs >> PAGE_SHIFT; RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR, @@ -1491,8 +1492,8 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init ) else #endif dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset - - dev->sg->handle - + dev_priv->gart_vm_start); + - (unsigned long)dev->sg->virtual + + dev_priv->gart_vm_start); DRM_DEBUG( "dev_priv->gart_size %d\n", dev_priv->gart_size ); diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index de0379b6d502..c055bb630ffc 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -273,7 +273,6 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma) vma->vm_flags |= VM_IO; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - addr = __pa(addr); if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, PAGE_SIZE, vma->vm_page_prot)) { diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 463351d4f942..32fa82c78c73 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -2620,7 +2620,7 @@ void ipmi_smi_msg_received(ipmi_smi_t 
intf, spin_lock_irqsave(&(intf->waiting_msgs_lock), flags); if (!list_empty(&(intf->waiting_msgs))) { list_add_tail(&(msg->link), &(intf->waiting_msgs)); - spin_unlock(&(intf->waiting_msgs_lock)); + spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags); goto out_unlock; } spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags); @@ -2629,9 +2629,9 @@ void ipmi_smi_msg_received(ipmi_smi_t intf, if (rv > 0) { /* Could not handle the message now, just add it to a list to handle later. */ - spin_lock(&(intf->waiting_msgs_lock)); + spin_lock_irqsave(&(intf->waiting_msgs_lock), flags); list_add_tail(&(msg->link), &(intf->waiting_msgs)); - spin_unlock(&(intf->waiting_msgs_lock)); + spin_unlock_irqrestore(&(intf->waiting_msgs_lock), flags); } else if (rv == 0) { ipmi_free_smi_msg(msg); } diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c index e82a96ba396b..f66947722e12 100644 --- a/drivers/char/ipmi/ipmi_poweroff.c +++ b/drivers/char/ipmi/ipmi_poweroff.c @@ -55,7 +55,7 @@ extern void (*pm_power_off)(void); static int poweroff_powercycle; /* parameter definition to allow user to flag power cycle */ -module_param(poweroff_powercycle, int, 0); +module_param(poweroff_powercycle, int, 0644); MODULE_PARM_DESC(poweroff_powercycles, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down."); /* Stuff from the get device id command. */ diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c index 3fa64c631108..c268ee04b2aa 100644 --- a/drivers/char/mbcs.c +++ b/drivers/char/mbcs.c @@ -830,6 +830,9 @@ static int __init mbcs_init(void) { int rv; + if (!ia64_platform_is("sn2")) + return -ENODEV; + // Put driver into chrdevs[]. Get major number. 
rv = register_chrdev(mbcs_major, DEVICE_NAME, &mbcs_ops); if (rv < 0) { diff --git a/drivers/char/n_r3964.c b/drivers/char/n_r3964.c index 2291a87e8ada..853c98cee64f 100644 --- a/drivers/char/n_r3964.c +++ b/drivers/char/n_r3964.c @@ -229,8 +229,8 @@ static int __init r3964_init(void) TRACE_L("line discipline %d registered", N_R3964); TRACE_L("flags=%x num=%x", tty_ldisc_N_R3964.flags, tty_ldisc_N_R3964.num); - TRACE_L("open=%x", (int)tty_ldisc_N_R3964.open); - TRACE_L("tty_ldisc_N_R3964 = %x", (int)&tty_ldisc_N_R3964); + TRACE_L("open=%p", tty_ldisc_N_R3964.open); + TRACE_L("tty_ldisc_N_R3964 = %p", &tty_ldisc_N_R3964); } else { @@ -267,8 +267,8 @@ static void add_tx_queue(struct r3964_info *pInfo, struct r3964_block_header *pH spin_unlock_irqrestore(&pInfo->lock, flags); - TRACE_Q("add_tx_queue %x, length %d, tx_first = %x", - (int)pHeader, pHeader->length, (int)pInfo->tx_first ); + TRACE_Q("add_tx_queue %p, length %d, tx_first = %p", + pHeader, pHeader->length, pInfo->tx_first ); } static void remove_from_tx_queue(struct r3964_info *pInfo, int error_code) @@ -285,10 +285,10 @@ static void remove_from_tx_queue(struct r3964_info *pInfo, int error_code) return; #ifdef DEBUG_QUEUE - printk("r3964: remove_from_tx_queue: %x, length %d - ", - (int)pHeader, (int)pHeader->length ); + printk("r3964: remove_from_tx_queue: %p, length %u - ", + pHeader, pHeader->length ); for(pDump=pHeader;pDump;pDump=pDump->next) - printk("%x ", (int)pDump); + printk("%p ", pDump); printk("\n"); #endif @@ -319,10 +319,10 @@ static void remove_from_tx_queue(struct r3964_info *pInfo, int error_code) spin_unlock_irqrestore(&pInfo->lock, flags); kfree(pHeader); - TRACE_M("remove_from_tx_queue - kfree %x",(int)pHeader); + TRACE_M("remove_from_tx_queue - kfree %p",pHeader); - TRACE_Q("remove_from_tx_queue: tx_first = %x, tx_last = %x", - (int)pInfo->tx_first, (int)pInfo->tx_last ); + TRACE_Q("remove_from_tx_queue: tx_first = %p, tx_last = %p", + pInfo->tx_first, pInfo->tx_last ); } static void add_rx_queue(struct r3964_info *pInfo, struct r3964_block_header *pHeader) @@ -346,9 +346,9 @@ static void add_rx_queue(struct r3964_info *pInfo, struct r3964_block_header *pH spin_unlock_irqrestore(&pInfo->lock, flags); - TRACE_Q("add_rx_queue: %x, length = %d, rx_first = %x, count = %d", - (int)pHeader, pHeader->length, - (int)pInfo->rx_first, pInfo->blocks_in_rx_queue); + TRACE_Q("add_rx_queue: %p, length = %d, rx_first = %p, count = %d", + pHeader, pHeader->length, + pInfo->rx_first, pInfo->blocks_in_rx_queue); } static void remove_from_rx_queue(struct r3964_info *pInfo, @@ -360,10 +360,10 @@ static void remove_from_rx_queue(struct r3964_info *pInfo, if(pHeader==NULL) return; - TRACE_Q("remove_from_rx_queue: rx_first = %x, rx_last = %x, count = %d", - (int)pInfo->rx_first, (int)pInfo->rx_last, pInfo->blocks_in_rx_queue ); - TRACE_Q("remove_from_rx_queue: %x, length %d", - (int)pHeader, (int)pHeader->length ); + TRACE_Q("remove_from_rx_queue: rx_first = %p, rx_last = %p, count = %d", + pInfo->rx_first, pInfo->rx_last, pInfo->blocks_in_rx_queue ); + TRACE_Q("remove_from_rx_queue: %p, length %u", + pHeader, pHeader->length ); spin_lock_irqsave(&pInfo->lock, flags); @@ -401,10 +401,10 @@ static void remove_from_rx_queue(struct r3964_info *pInfo, spin_unlock_irqrestore(&pInfo->lock, flags); kfree(pHeader); - TRACE_M("remove_from_rx_queue - kfree %x",(int)pHeader); + TRACE_M("remove_from_rx_queue - kfree %p",pHeader); - TRACE_Q("remove_from_rx_queue: rx_first = %x, rx_last = %x, count = %d", - (int)pInfo->rx_first, 
(int)pInfo->rx_last, pInfo->blocks_in_rx_queue ); + TRACE_Q("remove_from_rx_queue: rx_first = %p, rx_last = %p, count = %d", + pInfo->rx_first, pInfo->rx_last, pInfo->blocks_in_rx_queue ); } static void put_char(struct r3964_info *pInfo, unsigned char ch) @@ -506,8 +506,8 @@ static void transmit_block(struct r3964_info *pInfo) if(tty->driver->write_room) room=tty->driver->write_room(tty); - TRACE_PS("transmit_block %x, room %d, length %d", - (int)pBlock, room, pBlock->length); + TRACE_PS("transmit_block %p, room %d, length %d", + pBlock, room, pBlock->length); while(pInfo->tx_position < pBlock->length) { @@ -588,7 +588,7 @@ static void on_receive_block(struct r3964_info *pInfo) /* prepare struct r3964_block_header: */ pBlock = kmalloc(length+sizeof(struct r3964_block_header), GFP_KERNEL); - TRACE_M("on_receive_block - kmalloc %x",(int)pBlock); + TRACE_M("on_receive_block - kmalloc %p",pBlock); if(pBlock==NULL) return; @@ -695,7 +695,7 @@ static void receive_char(struct r3964_info *pInfo, const unsigned char c) { TRACE_PE("IDLE - got STX but no space in rx_queue!"); pInfo->state=R3964_WAIT_FOR_RX_BUF; - mod_timer(&pInfo->tmr, R3964_TO_NO_BUF); + mod_timer(&pInfo->tmr, jiffies + R3964_TO_NO_BUF); break; } start_receiving: @@ -705,7 +705,7 @@ start_receiving: pInfo->last_rx = 0; pInfo->flags &= ~R3964_ERROR; pInfo->state=R3964_RECEIVING; - mod_timer(&pInfo->tmr, R3964_TO_ZVZ); + mod_timer(&pInfo->tmr, jiffies + R3964_TO_ZVZ); pInfo->nRetry = 0; put_char(pInfo, DLE); flush(pInfo); @@ -732,7 +732,7 @@ start_receiving: if(pInfo->flags & R3964_BCC) { pInfo->state = R3964_WAIT_FOR_BCC; - mod_timer(&pInfo->tmr, R3964_TO_ZVZ); + mod_timer(&pInfo->tmr, jiffies + R3964_TO_ZVZ); } else { @@ -744,7 +744,7 @@ start_receiving: pInfo->last_rx = c; char_to_buf: pInfo->rx_buf[pInfo->rx_position++] = c; - mod_timer(&pInfo->tmr, R3964_TO_ZVZ); + mod_timer(&pInfo->tmr, jiffies + R3964_TO_ZVZ); } } /* else: overflow-msg? BUF_SIZE>MTU; should not happen? 
*/ @@ -868,11 +868,11 @@ static int enable_signals(struct r3964_info *pInfo, pid_t pid, int arg) if(pMsg) { kfree(pMsg); - TRACE_M("enable_signals - msg kfree %x",(int)pMsg); + TRACE_M("enable_signals - msg kfree %p",pMsg); } } kfree(pClient); - TRACE_M("enable_signals - kfree %x",(int)pClient); + TRACE_M("enable_signals - kfree %p",pClient); return 0; } } @@ -890,7 +890,7 @@ static int enable_signals(struct r3964_info *pInfo, pid_t pid, int arg) { /* add client to client list */ pClient=kmalloc(sizeof(struct r3964_client_info), GFP_KERNEL); - TRACE_M("enable_signals - kmalloc %x",(int)pClient); + TRACE_M("enable_signals - kmalloc %p",pClient); if(pClient==NULL) return -ENOMEM; @@ -954,7 +954,7 @@ static void add_msg(struct r3964_client_info *pClient, int msg_id, int arg, queue_the_message: pMsg = kmalloc(sizeof(struct r3964_message), GFP_KERNEL); - TRACE_M("add_msg - kmalloc %x",(int)pMsg); + TRACE_M("add_msg - kmalloc %p",pMsg); if(pMsg==NULL) { return; } @@ -1067,11 +1067,11 @@ static int r3964_open(struct tty_struct *tty) struct r3964_info *pInfo; TRACE_L("open"); - TRACE_L("tty=%x, PID=%d, disc_data=%x", - (int)tty, current->pid, (int)tty->disc_data); + TRACE_L("tty=%p, PID=%d, disc_data=%p", + tty, current->pid, tty->disc_data); pInfo=kmalloc(sizeof(struct r3964_info), GFP_KERNEL); - TRACE_M("r3964_open - info kmalloc %x",(int)pInfo); + TRACE_M("r3964_open - info kmalloc %p",pInfo); if(!pInfo) { @@ -1080,26 +1080,26 @@ static int r3964_open(struct tty_struct *tty) } pInfo->rx_buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL); - TRACE_M("r3964_open - rx_buf kmalloc %x",(int)pInfo->rx_buf); + TRACE_M("r3964_open - rx_buf kmalloc %p",pInfo->rx_buf); if(!pInfo->rx_buf) { printk(KERN_ERR "r3964: failed to alloc receive buffer\n"); kfree(pInfo); - TRACE_M("r3964_open - info kfree %x",(int)pInfo); + TRACE_M("r3964_open - info kfree %p",pInfo); return -ENOMEM; } pInfo->tx_buf = kmalloc(TX_BUF_SIZE, GFP_KERNEL); - TRACE_M("r3964_open - tx_buf kmalloc %x",(int)pInfo->tx_buf); + TRACE_M("r3964_open - tx_buf kmalloc %p",pInfo->tx_buf); if(!pInfo->tx_buf) { printk(KERN_ERR "r3964: failed to alloc transmit buffer\n"); kfree(pInfo->rx_buf); - TRACE_M("r3964_open - rx_buf kfree %x",(int)pInfo->rx_buf); + TRACE_M("r3964_open - rx_buf kfree %p",pInfo->rx_buf); kfree(pInfo); - TRACE_M("r3964_open - info kfree %x",(int)pInfo); + TRACE_M("r3964_open - info kfree %p",pInfo); return -ENOMEM; } @@ -1154,11 +1154,11 @@ static void r3964_close(struct tty_struct *tty) if(pMsg) { kfree(pMsg); - TRACE_M("r3964_close - msg kfree %x",(int)pMsg); + TRACE_M("r3964_close - msg kfree %p",pMsg); } } kfree(pClient); - TRACE_M("r3964_close - client kfree %x",(int)pClient); + TRACE_M("r3964_close - client kfree %p",pClient); pClient=pNext; } /* Remove jobs from tx_queue: */ @@ -1177,11 +1177,11 @@ static void r3964_close(struct tty_struct *tty) /* Free buffers: */ wake_up_interruptible(&pInfo->read_wait); kfree(pInfo->rx_buf); - TRACE_M("r3964_close - rx_buf kfree %x",(int)pInfo->rx_buf); + TRACE_M("r3964_close - rx_buf kfree %p",pInfo->rx_buf); kfree(pInfo->tx_buf); - TRACE_M("r3964_close - tx_buf kfree %x",(int)pInfo->tx_buf); + TRACE_M("r3964_close - tx_buf kfree %p",pInfo->tx_buf); kfree(pInfo); - TRACE_M("r3964_close - info kfree %x",(int)pInfo); + TRACE_M("r3964_close - info kfree %p",pInfo); } static ssize_t r3964_read(struct tty_struct *tty, struct file *file, @@ -1234,7 +1234,7 @@ repeat: count = sizeof(struct r3964_client_message); kfree(pMsg); - TRACE_M("r3964_read - msg kfree %x",(int)pMsg); + TRACE_M("r3964_read - msg 
kfree %p",pMsg); if (copy_to_user(buf,&theMsg, count)) return -EFAULT; @@ -1279,7 +1279,7 @@ static ssize_t r3964_write(struct tty_struct * tty, struct file * file, * Allocate a buffer for the data and copy it from the buffer with header prepended */ new_data = kmalloc (count+sizeof(struct r3964_block_header), GFP_KERNEL); - TRACE_M("r3964_write - kmalloc %x",(int)new_data); + TRACE_M("r3964_write - kmalloc %p",new_data); if (new_data == NULL) { if (pInfo->flags & R3964_DEBUG) { diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c index c9bdf544ed2c..c556f4d3ccd7 100644 --- a/drivers/char/n_tty.c +++ b/drivers/char/n_tty.c @@ -62,7 +62,7 @@ static inline unsigned char *alloc_buf(void) { - unsigned int prio = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; + gfp_t prio = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; if (PAGE_SIZE != N_TTY_BUF_SIZE) return kmalloc(N_TTY_BUF_SIZE, prio); diff --git a/drivers/char/s3c2410-rtc.c b/drivers/char/s3c2410-rtc.c index ed867db550a9..e1a90d9a8756 100644 --- a/drivers/char/s3c2410-rtc.c +++ b/drivers/char/s3c2410-rtc.c @@ -564,6 +564,7 @@ static int s3c2410_rtc_resume(struct device *dev, u32 level) static struct device_driver s3c2410_rtcdrv = { .name = "s3c2410-rtc", + .owner = THIS_MODULE, .bus = &platform_bus_type, .probe = s3c2410_rtc_probe, .remove = s3c2410_rtc_remove, diff --git a/drivers/char/watchdog/mv64x60_wdt.c b/drivers/char/watchdog/mv64x60_wdt.c index 1436aea3b28f..6d3ff0836c44 100644 --- a/drivers/char/watchdog/mv64x60_wdt.c +++ b/drivers/char/watchdog/mv64x60_wdt.c @@ -87,6 +87,8 @@ static int mv64x60_wdt_open(struct inode *inode, struct file *file) mv64x60_wdt_service(); mv64x60_wdt_handler_enable(); + nonseekable_open(inode, file); + return 0; } @@ -103,12 +105,9 @@ static int mv64x60_wdt_release(struct inode *inode, struct file *file) return 0; } -static ssize_t mv64x60_wdt_write(struct file *file, const char *data, +static ssize_t mv64x60_wdt_write(struct file *file, const char __user *data, size_t len, loff_t * ppos) { - if (*ppos != file->f_pos) - return -ESPIPE; - if (len) mv64x60_wdt_service(); @@ -119,6 +118,7 @@ static int mv64x60_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { int timeout; + void __user *argp = (void __user *)arg; static struct watchdog_info info = { .options = WDIOF_KEEPALIVEPING, .firmware_version = 0, @@ -127,13 +127,13 @@ static int mv64x60_wdt_ioctl(struct inode *inode, struct file *file, switch (cmd) { case WDIOC_GETSUPPORT: - if (copy_to_user((void *)arg, &info, sizeof(info))) + if (copy_to_user(argp, &info, sizeof(info))) return -EFAULT; break; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: - if (put_user(wdt_status, (int *)arg)) + if (put_user(wdt_status, (int __user *)argp)) return -EFAULT; wdt_status &= ~WDIOF_KEEPALIVEPING; break; @@ -154,7 +154,7 @@ static int mv64x60_wdt_ioctl(struct inode *inode, struct file *file, case WDIOC_GETTIMEOUT: timeout = mv64x60_wdt_timeout * HZ; - if (put_user(timeout, (int *)arg)) + if (put_user(timeout, (int __user *)argp)) return -EFAULT; break; diff --git a/drivers/char/watchdog/pcwd_pci.c b/drivers/char/watchdog/pcwd_pci.c index 5a80adbf8032..0b8e493be045 100644 --- a/drivers/char/watchdog/pcwd_pci.c +++ b/drivers/char/watchdog/pcwd_pci.c @@ -50,8 +50,8 @@ #include /* For inb/outb/... 
*/ /* Module and version information */ -#define WATCHDOG_VERSION "1.01" -#define WATCHDOG_DATE "02 Sep 2005" +#define WATCHDOG_VERSION "1.02" +#define WATCHDOG_DATE "03 Sep 2005" #define WATCHDOG_DRIVER_NAME "PCI-PC Watchdog" #define WATCHDOG_NAME "pcwd_pci" #define PFX WATCHDOG_NAME ": " @@ -70,19 +70,30 @@ * These are the defines that describe the control status bits for the * PCI-PC Watchdog card. */ -#define WD_PCI_WTRP 0x01 /* Watchdog Trip status */ -#define WD_PCI_HRBT 0x02 /* Watchdog Heartbeat */ -#define WD_PCI_TTRP 0x04 /* Temperature Trip status */ +/* Port 1 : Control Status #1 */ +#define WD_PCI_WTRP 0x01 /* Watchdog Trip status */ +#define WD_PCI_HRBT 0x02 /* Watchdog Heartbeat */ +#define WD_PCI_TTRP 0x04 /* Temperature Trip status */ +#define WD_PCI_RL2A 0x08 /* Relay 2 Active */ +#define WD_PCI_RL1A 0x10 /* Relay 1 Active */ +#define WD_PCI_R2DS 0x40 /* Relay 2 Disable Temperature-trip/reset */ +#define WD_PCI_RLY2 0x80 /* Activate Relay 2 on the board */ +/* Port 2 : Control Status #2 */ +#define WD_PCI_WDIS 0x10 /* Watchdog Disable */ +#define WD_PCI_ENTP 0x20 /* Enable Temperature Trip Reset */ +#define WD_PCI_WRSP 0x40 /* Watchdog wrote response */ +#define WD_PCI_PCMD 0x80 /* PC has sent command */ /* according to documentation max. time to process a command for the pci * watchdog card is 100 ms, so we give it 150 ms to do it's job */ #define PCI_COMMAND_TIMEOUT 150 /* Watchdog's internal commands */ -#define CMD_GET_STATUS 0x04 -#define CMD_GET_FIRMWARE_VERSION 0x08 -#define CMD_READ_WATCHDOG_TIMEOUT 0x18 -#define CMD_WRITE_WATCHDOG_TIMEOUT 0x19 +#define CMD_GET_STATUS 0x04 +#define CMD_GET_FIRMWARE_VERSION 0x08 +#define CMD_READ_WATCHDOG_TIMEOUT 0x18 +#define CMD_WRITE_WATCHDOG_TIMEOUT 0x19 +#define CMD_GET_CLEAR_RESET_COUNT 0x84 /* We can only use 1 card due to the /dev/watchdog restriction */ static int cards_found; @@ -91,15 +102,22 @@ static int cards_found; static int temp_panic; static unsigned long is_active; static char expect_release; -static struct { - int supports_temp; /* Wether or not the card has a temperature device */ - int boot_status; /* The card's boot status */ - unsigned long io_addr; /* The cards I/O address */ - spinlock_t io_lock; - struct pci_dev *pdev; +static struct { /* this is private data for each PCI-PC watchdog card */ + int supports_temp; /* Wether or not the card has a temperature device */ + int boot_status; /* The card's boot status */ + unsigned long io_addr; /* The cards I/O address */ + spinlock_t io_lock; /* the lock for io operations */ + struct pci_dev *pdev; /* the PCI-device */ } pcipcwd_private; /* module parameters */ +#define QUIET 0 /* Default */ +#define VERBOSE 1 /* Verbose */ +#define DEBUG 2 /* print fancy stuff too */ +static int debug = QUIET; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level: 0=Quiet, 1=Verbose, 2=Debug (default=0)"); + #define WATCHDOG_HEARTBEAT 2 /* 2 sec default heartbeat */ static int heartbeat = WATCHDOG_HEARTBEAT; module_param(heartbeat, int, 0); @@ -117,6 +135,10 @@ static int send_command(int cmd, int *msb, int *lsb) { int got_response, count; + if (debug >= DEBUG) + printk(KERN_DEBUG PFX "sending following data cmd=0x%02x msb=0x%02x lsb=0x%02x\n", + cmd, *msb, *lsb); + spin_lock(&pcipcwd_private.io_lock); /* If a command requires data it should be written first. * Data for commands with 8 bits of data should be written to port 4. 
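For context, the next hunk rewrites send_command()'s response wait in terms of the new WD_PCI_WRSP define (instead of the bare 0x40) and adds the new debug output around it. A minimal sketch of the polling pattern involved — assuming the WD_PCI_WRSP and PCI_COMMAND_TIMEOUT values defined earlier in this patch and the standard kernel inb_p()/mdelay() helpers; the helper name below is invented purely for illustration and is not part of the driver — looks roughly like this:

/*
 * Illustrative sketch only, not part of the patch: poll Control Status #2
 * until the card flags a response (WD_PCI_WRSP) or PCI_COMMAND_TIMEOUT
 * milliseconds have elapsed.
 */
#include <asm/io.h>		/* inb_p() */
#include <linux/delay.h>	/* mdelay() */

static int pcipcwd_wait_response(unsigned long io_addr)
{
	/* Port 2 is Control Status #2; WD_PCI_WRSP means "response ready". */
	int got_response = inb_p(io_addr + 2) & WD_PCI_WRSP;
	int count;

	for (count = 0; count < PCI_COMMAND_TIMEOUT && !got_response; count++) {
		mdelay(1);	/* card may take up to ~100 ms per command */
		got_response = inb_p(io_addr + 2) & WD_PCI_WRSP;
	}

	return got_response;	/* non-zero if the card answered in time */
}

The loop body is the same one send_command() keeps after this change; the hunk only swaps the magic 0x40 for the named status bit and reports how long the card took when debugging is enabled.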
@@ -131,10 +153,19 @@ static int send_command(int cmd, int *msb, int *lsb) /* wait till the pci card processed the command, signaled by * the WRSP bit in port 2 and give it a max. timeout of * PCI_COMMAND_TIMEOUT to process */ - got_response = inb_p(pcipcwd_private.io_addr + 2) & 0x40; + got_response = inb_p(pcipcwd_private.io_addr + 2) & WD_PCI_WRSP; for (count = 0; (count < PCI_COMMAND_TIMEOUT) && (!got_response); count++) { mdelay(1); - got_response = inb_p(pcipcwd_private.io_addr + 2) & 0x40; + got_response = inb_p(pcipcwd_private.io_addr + 2) & WD_PCI_WRSP; + } + + if (debug >= DEBUG) { + if (got_response) { + printk(KERN_DEBUG PFX "time to process command was: %d ms\n", + count); + } else { + printk(KERN_DEBUG PFX "card did not respond on command!\n"); + } } if (got_response) { @@ -144,12 +175,66 @@ static int send_command(int cmd, int *msb, int *lsb) /* clear WRSP bit */ inb_p(pcipcwd_private.io_addr + 6); + + if (debug >= DEBUG) + printk(KERN_DEBUG PFX "received following data for cmd=0x%02x: msb=0x%02x lsb=0x%02x\n", + cmd, *msb, *lsb); } + spin_unlock(&pcipcwd_private.io_lock); return got_response; } +static inline void pcipcwd_check_temperature_support(void) +{ + if (inb_p(pcipcwd_private.io_addr) != 0xF0) + pcipcwd_private.supports_temp = 1; +} + +static int pcipcwd_get_option_switches(void) +{ + int option_switches; + + option_switches = inb_p(pcipcwd_private.io_addr + 3); + return option_switches; +} + +static void pcipcwd_show_card_info(void) +{ + int got_fw_rev, fw_rev_major, fw_rev_minor; + char fw_ver_str[20]; /* The cards firmware version */ + int option_switches; + + got_fw_rev = send_command(CMD_GET_FIRMWARE_VERSION, &fw_rev_major, &fw_rev_minor); + if (got_fw_rev) { + sprintf(fw_ver_str, "%u.%02u", fw_rev_major, fw_rev_minor); + } else { + sprintf(fw_ver_str, ""); + } + + /* Get switch settings */ + option_switches = pcipcwd_get_option_switches(); + + printk(KERN_INFO PFX "Found card at port 0x%04x (Firmware: %s) %s temp option\n", + (int) pcipcwd_private.io_addr, fw_ver_str, + (pcipcwd_private.supports_temp ? "with" : "without")); + + printk(KERN_INFO PFX "Option switches (0x%02x): Temperature Reset Enable=%s, Power On Delay=%s\n", + option_switches, + ((option_switches & 0x10) ? "ON" : "OFF"), + ((option_switches & 0x08) ? 
"ON" : "OFF")); + + if (pcipcwd_private.boot_status & WDIOF_CARDRESET) + printk(KERN_INFO PFX "Previous reset was caused by the Watchdog card\n"); + + if (pcipcwd_private.boot_status & WDIOF_OVERHEAT) + printk(KERN_INFO PFX "Card sensed a CPU Overheat\n"); + + if (pcipcwd_private.boot_status == 0) + printk(KERN_INFO PFX "No previous trip detected - Cold boot or reset\n"); +} + static int pcipcwd_start(void) { int stat_reg; @@ -161,11 +246,14 @@ static int pcipcwd_start(void) stat_reg = inb_p(pcipcwd_private.io_addr + 2); spin_unlock(&pcipcwd_private.io_lock); - if (stat_reg & 0x10) { + if (stat_reg & WD_PCI_WDIS) { printk(KERN_ERR PFX "Card timer not enabled\n"); return -1; } + if (debug >= VERBOSE) + printk(KERN_DEBUG PFX "Watchdog started\n"); + return 0; } @@ -183,18 +271,25 @@ static int pcipcwd_stop(void) stat_reg = inb_p(pcipcwd_private.io_addr + 2); spin_unlock(&pcipcwd_private.io_lock); - if (!(stat_reg & 0x10)) { + if (!(stat_reg & WD_PCI_WDIS)) { printk(KERN_ERR PFX "Card did not acknowledge disable attempt\n"); return -1; } + if (debug >= VERBOSE) + printk(KERN_DEBUG PFX "Watchdog stopped\n"); + return 0; } static int pcipcwd_keepalive(void) { /* Re-trigger watchdog by writing to port 0 */ - outb_p(0x42, pcipcwd_private.io_addr); + outb_p(0x42, pcipcwd_private.io_addr); /* send out any data */ + + if (debug >= DEBUG) + printk(KERN_DEBUG PFX "Watchdog keepalive signal send\n"); + return 0; } @@ -210,29 +305,64 @@ static int pcipcwd_set_heartbeat(int t) send_command(CMD_WRITE_WATCHDOG_TIMEOUT, &t_msb, &t_lsb); heartbeat = t; + if (debug >= VERBOSE) + printk(KERN_DEBUG PFX "New heartbeat: %d\n", + heartbeat); + return 0; } static int pcipcwd_get_status(int *status) { - int new_status; + int control_status; *status=0; - new_status = inb_p(pcipcwd_private.io_addr + 1); - if (new_status & WD_PCI_WTRP) + control_status = inb_p(pcipcwd_private.io_addr + 1); + if (control_status & WD_PCI_WTRP) *status |= WDIOF_CARDRESET; - if (new_status & WD_PCI_TTRP) { + if (control_status & WD_PCI_TTRP) { *status |= WDIOF_OVERHEAT; if (temp_panic) panic(PFX "Temperature overheat trip!\n"); } + if (debug >= DEBUG) + printk(KERN_DEBUG PFX "Control Status #1: 0x%02x\n", + control_status); + return 0; } static int pcipcwd_clear_status(void) { - outb_p(0x01, pcipcwd_private.io_addr + 1); + int control_status; + int msb; + int reset_counter; + + if (debug >= VERBOSE) + printk(KERN_INFO PFX "clearing watchdog trip status & LED\n"); + + control_status = inb_p(pcipcwd_private.io_addr + 1); + + if (debug >= DEBUG) { + printk(KERN_DEBUG PFX "status was: 0x%02x\n", control_status); + printk(KERN_DEBUG PFX "sending: 0x%02x\n", + (control_status & WD_PCI_R2DS) | WD_PCI_WTRP); + } + + /* clear trip status & LED and keep mode of relay 2 */ + outb_p((control_status & WD_PCI_R2DS) | WD_PCI_WTRP, pcipcwd_private.io_addr + 1); + + /* clear reset counter */ + msb=0; + reset_counter=0xff; + send_command(CMD_GET_CLEAR_RESET_COUNT, &msb, &reset_counter); + + if (debug >= DEBUG) { + printk(KERN_DEBUG PFX "reset count was: 0x%02x\n", + reset_counter); + } + return 0; } @@ -242,11 +372,18 @@ static int pcipcwd_get_temperature(int *temperature) if (!pcipcwd_private.supports_temp) return -ENODEV; + *temperature = inb_p(pcipcwd_private.io_addr); + /* * Convert celsius to fahrenheit, since this was * the decided 'standard' for this return value. 
*/ - *temperature = ((inb_p(pcipcwd_private.io_addr)) * 9 / 5) + 32; + *temperature = (*temperature * 9 / 5) + 32; + + if (debug >= DEBUG) { + printk(KERN_DEBUG PFX "temperature is: %d F\n", + *temperature); + } return 0; } @@ -256,7 +393,7 @@ static int pcipcwd_get_temperature(int *temperature) */ static ssize_t pcipcwd_write(struct file *file, const char __user *data, - size_t len, loff_t *ppos) + size_t len, loff_t *ppos) { /* See if we got the magic character 'V' and reload the timer */ if (len) { @@ -381,8 +518,11 @@ static int pcipcwd_ioctl(struct inode *inode, struct file *file, static int pcipcwd_open(struct inode *inode, struct file *file) { /* /dev/watchdog can only be opened once */ - if (test_and_set_bit(0, &is_active)) + if (test_and_set_bit(0, &is_active)) { + if (debug >= VERBOSE) + printk(KERN_ERR PFX "Attempt to open already opened device.\n"); return -EBUSY; + } /* Activate */ pcipcwd_start(); @@ -492,19 +632,10 @@ static struct notifier_block pcipcwd_notifier = { * Init & exit routines */ -static inline void check_temperature_support(void) -{ - if (inb_p(pcipcwd_private.io_addr) != 0xF0) - pcipcwd_private.supports_temp = 1; -} - static int __devinit pcipcwd_card_init(struct pci_dev *pdev, const struct pci_device_id *ent) { int ret = -EIO; - int got_fw_rev, fw_rev_major, fw_rev_minor; - char fw_ver_str[20]; - char option_switches; cards_found++; if (cards_found == 1) @@ -546,36 +677,10 @@ static int __devinit pcipcwd_card_init(struct pci_dev *pdev, pcipcwd_stop(); /* Check whether or not the card supports the temperature device */ - check_temperature_support(); + pcipcwd_check_temperature_support(); - /* Get the Firmware Version */ - got_fw_rev = send_command(CMD_GET_FIRMWARE_VERSION, &fw_rev_major, &fw_rev_minor); - if (got_fw_rev) { - sprintf(fw_ver_str, "%u.%02u", fw_rev_major, fw_rev_minor); - } else { - sprintf(fw_ver_str, ""); - } - - /* Get switch settings */ - option_switches = inb_p(pcipcwd_private.io_addr + 3); - - printk(KERN_INFO PFX "Found card at port 0x%04x (Firmware: %s) %s temp option\n", - (int) pcipcwd_private.io_addr, fw_ver_str, - (pcipcwd_private.supports_temp ? "with" : "without")); - - printk(KERN_INFO PFX "Option switches (0x%02x): Temperature Reset Enable=%s, Power On Delay=%s\n", - option_switches, - ((option_switches & 0x10) ? "ON" : "OFF"), - ((option_switches & 0x08) ? 
"ON" : "OFF")); - - if (pcipcwd_private.boot_status & WDIOF_CARDRESET) - printk(KERN_INFO PFX "Previous reset was caused by the Watchdog card\n"); - - if (pcipcwd_private.boot_status & WDIOF_OVERHEAT) - printk(KERN_INFO PFX "Card sensed a CPU Overheat\n"); - - if (pcipcwd_private.boot_status == 0) - printk(KERN_INFO PFX "No previous trip detected - Cold boot or reset\n"); + /* Show info about the card itself */ + pcipcwd_show_card_info(); /* Check that the heartbeat value is within it's range ; if not reset to the default */ if (pcipcwd_set_heartbeat(heartbeat)) { @@ -656,7 +761,7 @@ static struct pci_driver pcipcwd_driver = { static int __init pcipcwd_init_module(void) { - spin_lock_init (&pcipcwd_private.io_lock); + spin_lock_init(&pcipcwd_private.io_lock); return pci_register_driver(&pcipcwd_driver); } diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c index 966632182e2d..9f2f00d82917 100644 --- a/drivers/connector/cn_queue.c +++ b/drivers/connector/cn_queue.c @@ -31,16 +31,19 @@ #include #include -static void cn_queue_wrapper(void *data) +void cn_queue_wrapper(void *data) { - struct cn_callback_entry *cbq = data; + struct cn_callback_data *d = data; - cbq->cb->callback(cbq->cb->priv); - cbq->destruct_data(cbq->ddata); - cbq->ddata = NULL; + d->callback(d->callback_priv); + + d->destruct_data(d->ddata); + d->ddata = NULL; + + kfree(d->free); } -static struct cn_callback_entry *cn_queue_alloc_callback_entry(struct cn_callback *cb) +static struct cn_callback_entry *cn_queue_alloc_callback_entry(char *name, struct cb_id *id, void (*callback)(void *)) { struct cn_callback_entry *cbq; @@ -50,8 +53,11 @@ static struct cn_callback_entry *cn_queue_alloc_callback_entry(struct cn_callbac return NULL; } - cbq->cb = cb; - INIT_WORK(&cbq->work, &cn_queue_wrapper, cbq); + snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name); + memcpy(&cbq->id.id, id, sizeof(struct cb_id)); + cbq->data.callback = callback; + + INIT_WORK(&cbq->work, &cn_queue_wrapper, &cbq->data); return cbq; } @@ -68,12 +74,12 @@ int cn_cb_equal(struct cb_id *i1, struct cb_id *i2) return ((i1->idx == i2->idx) && (i1->val == i2->val)); } -int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb) +int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *)) { struct cn_callback_entry *cbq, *__cbq; int found = 0; - cbq = cn_queue_alloc_callback_entry(cb); + cbq = cn_queue_alloc_callback_entry(name, id, callback); if (!cbq) return -ENOMEM; @@ -82,7 +88,7 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb) spin_lock_bh(&dev->queue_lock); list_for_each_entry(__cbq, &dev->queue_list, callback_entry) { - if (cn_cb_equal(&__cbq->cb->id, &cb->id)) { + if (cn_cb_equal(&__cbq->id.id, id)) { found = 1; break; } @@ -99,7 +105,7 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb) cbq->nls = dev->nls; cbq->seq = 0; - cbq->group = cbq->cb->id.idx; + cbq->group = cbq->id.id.idx; return 0; } @@ -111,7 +117,7 @@ void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id) spin_lock_bh(&dev->queue_lock); list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry) { - if (cn_cb_equal(&cbq->cb->id, id)) { + if (cn_cb_equal(&cbq->id.id, id)) { list_del(&cbq->callback_entry); found = 1; break; diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index aaf6d468a8b9..505677fb3157 100644 --- a/drivers/connector/connector.c +++ b/drivers/connector/connector.c @@ -69,7 +69,7 @@ int 
cn_already_initialized = 0; * a new message. * */ -int cn_netlink_send(struct cn_msg *msg, u32 __group, int gfp_mask) +int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask) { struct cn_callback_entry *__cbq; unsigned int size; @@ -84,7 +84,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, int gfp_mask) spin_lock_bh(&dev->cbdev->queue_lock); list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) { - if (cn_cb_equal(&__cbq->cb->id, &msg->id)) { + if (cn_cb_equal(&__cbq->id.id, &msg->id)) { found = 1; group = __cbq->group; } @@ -127,42 +127,56 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v { struct cn_callback_entry *__cbq; struct cn_dev *dev = &cdev; - int found = 0; + int err = -ENODEV; spin_lock_bh(&dev->cbdev->queue_lock); list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) { - if (cn_cb_equal(&__cbq->cb->id, &msg->id)) { - /* - * Let's scream if there is some magic and the - * data will arrive asynchronously here. - * [i.e. netlink messages will be queued]. - * After the first warning I will fix it - * quickly, but now I think it is - * impossible. --zbr (2004_04_27). - */ + if (cn_cb_equal(&__cbq->id.id, &msg->id)) { if (likely(!test_bit(0, &__cbq->work.pending) && - __cbq->ddata == NULL)) { - __cbq->cb->priv = msg; + __cbq->data.ddata == NULL)) { + __cbq->data.callback_priv = msg; - __cbq->ddata = data; - __cbq->destruct_data = destruct_data; + __cbq->data.ddata = data; + __cbq->data.destruct_data = destruct_data; if (queue_work(dev->cbdev->cn_queue, &__cbq->work)) - found = 1; + err = 0; } else { - printk("%s: cbq->data=%p, " - "work->pending=%08lx.\n", - __func__, __cbq->ddata, - __cbq->work.pending); - WARN_ON(1); + struct work_struct *w; + struct cn_callback_data *d; + + w = kzalloc(sizeof(*w) + sizeof(*d), GFP_ATOMIC); + if (w) { + d = (struct cn_callback_data *)(w+1); + + d->callback_priv = msg; + d->callback = __cbq->data.callback; + d->ddata = data; + d->destruct_data = destruct_data; + d->free = w; + + INIT_LIST_HEAD(&w->entry); + w->pending = 0; + w->func = &cn_queue_wrapper; + w->data = d; + init_timer(&w->timer); + + if (queue_work(dev->cbdev->cn_queue, w)) + err = 0; + else { + kfree(w); + err = -EINVAL; + } + } else + err = -ENOMEM; } break; } } spin_unlock_bh(&dev->cbdev->queue_lock); - return found ? 
0 : -ENODEV; + return err; } /* @@ -291,22 +305,10 @@ int cn_add_callback(struct cb_id *id, char *name, void (*callback)(void *)) { int err; struct cn_dev *dev = &cdev; - struct cn_callback *cb; - cb = kzalloc(sizeof(*cb), GFP_KERNEL); - if (!cb) - return -ENOMEM; - - scnprintf(cb->name, sizeof(cb->name), "%s", name); - - memcpy(&cb->id, id, sizeof(cb->id)); - cb->callback = callback; - - err = cn_queue_add_callback(dev->cbdev, cb); - if (err) { - kfree(cb); + err = cn_queue_add_callback(dev->cbdev, name, id, callback); + if (err) return err; - } cn_notify(id, 0); diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index e1df376e709e..2ed5c4363b53 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -315,9 +315,9 @@ static void dbs_check_cpu(int cpu) policy = this_dbs_info->cur_policy; if ( init_flag == 0 ) { - for ( /* NULL */; init_flag < NR_CPUS; init_flag++ ) { - dbs_info = &per_cpu(cpu_dbs_info, init_flag); - requested_freq[cpu] = dbs_info->cur_policy->cur; + for_each_online_cpu(j) { + dbs_info = &per_cpu(cpu_dbs_info, j); + requested_freq[j] = dbs_info->cur_policy->cur; } init_flag = 1; } diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c index b66782398258..4f4ba9b6d182 100644 --- a/drivers/firmware/dell_rbu.c +++ b/drivers/firmware/dell_rbu.c @@ -50,7 +50,7 @@ MODULE_AUTHOR("Abhay Salunke "); MODULE_DESCRIPTION("Driver for updating BIOS image on DELL systems"); MODULE_LICENSE("GPL"); -MODULE_VERSION("2.0"); +MODULE_VERSION("3.0"); #define BIOS_SCAN_LIMIT 0xffffffff #define MAX_IMAGE_LENGTH 16 @@ -62,15 +62,16 @@ static struct _rbu_data { int dma_alloc; spinlock_t lock; unsigned long packet_read_count; - unsigned long packet_write_count; unsigned long num_packets; unsigned long packetsize; + unsigned long imagesize; int entry_created; } rbu_data; static char image_type[MAX_IMAGE_LENGTH + 1] = "mono"; module_param_string(image_type, image_type, sizeof (image_type), 0); -MODULE_PARM_DESC(image_type, "BIOS image type. choose- mono or packet"); +MODULE_PARM_DESC(image_type, + "BIOS image type. 
choose- mono or packet or init"); struct packet_data { struct list_head list; @@ -88,55 +89,13 @@ static dma_addr_t dell_rbu_dmaaddr; static void init_packet_head(void) { INIT_LIST_HEAD(&packet_data_head.list); - rbu_data.packet_write_count = 0; rbu_data.packet_read_count = 0; rbu_data.num_packets = 0; rbu_data.packetsize = 0; + rbu_data.imagesize = 0; } -static int fill_last_packet(void *data, size_t length) -{ - struct list_head *ptemp_list; - struct packet_data *packet = NULL; - int packet_count = 0; - - pr_debug("fill_last_packet: entry \n"); - - if (!rbu_data.num_packets) { - pr_debug("fill_last_packet: num_packets=0\n"); - return -ENOMEM; - } - - packet_count = rbu_data.num_packets; - - ptemp_list = (&packet_data_head.list)->prev; - - packet = list_entry(ptemp_list, struct packet_data, list); - - if ((rbu_data.packet_write_count + length) > rbu_data.packetsize) { - pr_debug("dell_rbu:%s: packet size data " - "overrun\n", __FUNCTION__); - return -EINVAL; - } - - pr_debug("fill_last_packet : buffer = %p\n", packet->data); - - memcpy((packet->data + rbu_data.packet_write_count), data, length); - - if ((rbu_data.packet_write_count + length) == rbu_data.packetsize) { - /* - * this was the last data chunk in the packet - * so reinitialize the packet data counter to zero - */ - rbu_data.packet_write_count = 0; - } else - rbu_data.packet_write_count += length; - - pr_debug("fill_last_packet: exit \n"); - return 0; -} - -static int create_packet(size_t length) +static int create_packet(void *data, size_t length) { struct packet_data *newpacket; int ordernum = 0; @@ -186,9 +145,11 @@ static int create_packet(size_t length) INIT_LIST_HEAD(&newpacket->list); list_add_tail(&newpacket->list, &packet_data_head.list); /* - * packets have fixed size + * packets may not have fixed size */ - newpacket->length = rbu_data.packetsize; + newpacket->length = length; + + memcpy(newpacket->data, data, length); pr_debug("create_packet: exit \n"); @@ -198,13 +159,37 @@ static int create_packet(size_t length) static int packetize_data(void *data, size_t length) { int rc = 0; - - if (!rbu_data.packet_write_count) { - if ((rc = create_packet(length))) - return rc; + int done = 0; + int packet_length; + u8 *temp; + u8 *end = (u8 *) data + length; + pr_debug("packetize_data: data length %d\n", length); + if (!rbu_data.packetsize) { + printk(KERN_WARNING + "dell_rbu: packetsize not specified\n"); + return -EIO; } - if ((rc = fill_last_packet(data, length))) - return rc; + + temp = (u8 *) data; + + /* packetize the hunk */ + while (!done) { + if ((temp + rbu_data.packetsize) < end) + packet_length = rbu_data.packetsize; + else { + /* this is the last packet */ + packet_length = end - temp; + done = 1; + } + + if ((rc = create_packet(temp, packet_length))) + return rc; + + pr_debug("%lu:%lu\n", temp, (end - temp)); + temp += packet_length; + } + + rbu_data.imagesize = length; return rc; } @@ -243,7 +228,7 @@ static int do_packet_read(char *data, struct list_head *ptemp_list, return bytes_copied; } -static int packet_read_list(char *data, size_t *pread_length) +static int packet_read_list(char *data, size_t * pread_length) { struct list_head *ptemp_list; int temp_count = 0; @@ -303,10 +288,9 @@ static void packet_empty_list(void) newpacket->ordernum); kfree(newpacket); } - rbu_data.packet_write_count = 0; rbu_data.packet_read_count = 0; rbu_data.num_packets = 0; - rbu_data.packetsize = 0; + rbu_data.imagesize = 0; } /* @@ -425,7 +409,6 @@ static ssize_t read_packet_data(char *buffer, loff_t pos, size_t count) size_t 
bytes_left; size_t data_length; char *ptempBuf = buffer; - unsigned long imagesize; /* check to see if we have something to return */ if (rbu_data.num_packets == 0) { @@ -434,22 +417,20 @@ static ssize_t read_packet_data(char *buffer, loff_t pos, size_t count) goto read_rbu_data_exit; } - imagesize = rbu_data.num_packets * rbu_data.packetsize; - - if (pos > imagesize) { + if (pos > rbu_data.imagesize) { retval = 0; printk(KERN_WARNING "dell_rbu:read_packet_data: " "data underrun\n"); goto read_rbu_data_exit; } - bytes_left = imagesize - pos; + bytes_left = rbu_data.imagesize - pos; data_length = min(bytes_left, count); if ((retval = packet_read_list(ptempBuf, &data_length)) < 0) goto read_rbu_data_exit; - if ((pos + count) > imagesize) { + if ((pos + count) > rbu_data.imagesize) { rbu_data.packet_read_count = 0; /* this was the last copy */ retval = bytes_left; @@ -499,7 +480,7 @@ static ssize_t read_rbu_mono_data(char *buffer, loff_t pos, size_t count) } static ssize_t read_rbu_data(struct kobject *kobj, char *buffer, - loff_t pos, size_t count) + loff_t pos, size_t count) { ssize_t ret_count = 0; @@ -531,13 +512,18 @@ static void callbackfn_rbu(const struct firmware *fw, void *context) memcpy(rbu_data.image_update_buffer, fw->data, fw->size); } else if (!strcmp(image_type, "packet")) { - if (!rbu_data.packetsize) - rbu_data.packetsize = fw->size; - else if (rbu_data.packetsize != fw->size) { + /* + * we need to free previous packets if a + * new hunk of packets needs to be downloaded + */ + packet_empty_list(); + if (packetize_data(fw->data, fw->size)) + /* Incase something goes wrong when we are + * in middle of packetizing the data, we + * need to free up whatever packets might + * have been created before we quit. + */ packet_empty_list(); - rbu_data.packetsize = fw->size; - } - packetize_data(fw->data, fw->size); } else pr_debug("invalid image type specified.\n"); spin_unlock(&rbu_data.lock); @@ -553,7 +539,7 @@ static void callbackfn_rbu(const struct firmware *fw, void *context) } static ssize_t read_rbu_image_type(struct kobject *kobj, char *buffer, - loff_t pos, size_t count) + loff_t pos, size_t count) { int size = 0; if (!pos) @@ -562,7 +548,7 @@ static ssize_t read_rbu_image_type(struct kobject *kobj, char *buffer, } static ssize_t write_rbu_image_type(struct kobject *kobj, char *buffer, - loff_t pos, size_t count) + loff_t pos, size_t count) { int rc = count; int req_firm_rc = 0; @@ -621,25 +607,49 @@ static ssize_t write_rbu_image_type(struct kobject *kobj, char *buffer, return rc; } +static ssize_t read_rbu_packet_size(struct kobject *kobj, char *buffer, + loff_t pos, size_t count) +{ + int size = 0; + if (!pos) { + spin_lock(&rbu_data.lock); + size = sprintf(buffer, "%lu\n", rbu_data.packetsize); + spin_unlock(&rbu_data.lock); + } + return size; +} + +static ssize_t write_rbu_packet_size(struct kobject *kobj, char *buffer, + loff_t pos, size_t count) +{ + unsigned long temp; + spin_lock(&rbu_data.lock); + packet_empty_list(); + sscanf(buffer, "%lu", &temp); + if (temp < 0xffffffff) + rbu_data.packetsize = temp; + + spin_unlock(&rbu_data.lock); + return count; +} + static struct bin_attribute rbu_data_attr = { - .attr = { - .name = "data", - .owner = THIS_MODULE, - .mode = 0444, - }, + .attr = {.name = "data",.owner = THIS_MODULE,.mode = 0444}, .read = read_rbu_data, }; static struct bin_attribute rbu_image_type_attr = { - .attr = { - .name = "image_type", - .owner = THIS_MODULE, - .mode = 0644, - }, + .attr = {.name = "image_type",.owner = THIS_MODULE,.mode = 0644}, .read = 
read_rbu_image_type, .write = write_rbu_image_type, }; +static struct bin_attribute rbu_packet_size_attr = { + .attr = {.name = "packet_size",.owner = THIS_MODULE,.mode = 0644}, + .read = read_rbu_packet_size, + .write = write_rbu_packet_size, +}; + static int __init dcdrbu_init(void) { int rc = 0; @@ -657,6 +667,8 @@ static int __init dcdrbu_init(void) sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_data_attr); sysfs_create_bin_file(&rbu_device->dev.kobj, &rbu_image_type_attr); + sysfs_create_bin_file(&rbu_device->dev.kobj, + &rbu_packet_size_attr); rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, "dell_rbu", &rbu_device->dev, &context, callbackfn_rbu); diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 7e72e922b41c..db358cfa7cbf 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -418,12 +418,11 @@ config SENSORS_HDAPS help This driver provides support for the IBM Hard Drive Active Protection System (hdaps), which provides an accelerometer and other misc. data. - Supported laptops include the IBM ThinkPad T41, T42, T43, and R51. - The accelerometer data is readable via sysfs. + ThinkPads starting with the R50, T41, and X40 are supported. The + accelerometer data is readable via sysfs. - This driver also provides an input class device, allowing the - laptop to act as a pinball machine-esque mouse. This is off by - default but enabled via sysfs or the module parameter "mousedev". + This driver also provides an absolute input class device, allowing + the laptop to act as a pinball machine-esque joystick. Say Y here if you have an applicable laptop and want to experience the awesome power of hdaps. diff --git a/drivers/hwmon/hdaps.c b/drivers/hwmon/hdaps.c index 4c56411f3993..7f0107613827 100644 --- a/drivers/hwmon/hdaps.c +++ b/drivers/hwmon/hdaps.c @@ -4,9 +4,9 @@ * Copyright (C) 2005 Robert Love * Copyright (C) 2005 Jesper Juhl * - * The HardDisk Active Protection System (hdaps) is present in the IBM ThinkPad - * T41, T42, T43, R50, R50p, R51, and X40, at least. It provides a basic - * two-axis accelerometer and other data, such as the device's temperature. + * The HardDisk Active Protection System (hdaps) is present in IBM ThinkPads + * starting with the R40, T41, and X40. It provides a basic two-axis + * accelerometer and other data, such as the device's temperature. * * This driver is based on the document by Mark A. Smith available at * http://www.almaden.ibm.com/cs/people/marksmith/tpaps.html and a lot of trial @@ -487,24 +487,19 @@ static struct attribute_group hdaps_attribute_group = { /* Module stuff */ -/* - * XXX: We should be able to return nonzero and halt the detection process. - * But there is a bug in dmi_check_system() where a nonzero return from the - * first match will result in a return of failure from dmi_check_system(). - * I fixed this; the patch is 2.6-git. Once in a released tree, we can make - * hdaps_dmi_match_invert() return hdaps_dmi_match(), which in turn returns 1. - */ +/* hdaps_dmi_match - found a match. return one, short-circuiting the hunt. */ static int hdaps_dmi_match(struct dmi_system_id *id) { printk(KERN_INFO "hdaps: %s detected.\n", id->ident); - return 0; + return 1; } +/* hdaps_dmi_match_invert - found an inverted match. 
*/ static int hdaps_dmi_match_invert(struct dmi_system_id *id) { hdaps_invert = 1; printk(KERN_INFO "hdaps: inverting axis readings.\n"); - return 0; + return hdaps_dmi_match(id); } #define HDAPS_DMI_MATCH_NORMAL(model) { \ @@ -534,6 +529,7 @@ static int __init hdaps_init(void) HDAPS_DMI_MATCH_INVERT("ThinkPad R50p"), HDAPS_DMI_MATCH_NORMAL("ThinkPad R50"), HDAPS_DMI_MATCH_NORMAL("ThinkPad R51"), + HDAPS_DMI_MATCH_NORMAL("ThinkPad R52"), HDAPS_DMI_MATCH_INVERT("ThinkPad T41p"), HDAPS_DMI_MATCH_NORMAL("ThinkPad T41"), HDAPS_DMI_MATCH_INVERT("ThinkPad T42p"), @@ -541,6 +537,7 @@ static int __init hdaps_init(void) HDAPS_DMI_MATCH_NORMAL("ThinkPad T43"), HDAPS_DMI_MATCH_NORMAL("ThinkPad X40"), HDAPS_DMI_MATCH_NORMAL("ThinkPad X41 Tablet"), + HDAPS_DMI_MATCH_NORMAL("ThinkPad X41"), { .ident = NULL } }; diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 8334496a7e0a..3badfec75b1c 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -245,6 +245,18 @@ config I2C_KEYWEST This support is also available as a module. If so, the module will be called i2c-keywest. +config I2C_PMAC_SMU + tristate "Powermac SMU I2C interface" + depends on I2C && PMAC_SMU + help + This supports the use of the I2C interface in the SMU + chip on recent Apple machines like the iMac G5. It is used + among others by the thermal control driver for those machines. + Say Y if you have such a machine. + + This support is also available as a module. If so, the module + will be called i2c-pmac-smu. + config I2C_MPC tristate "MPC107/824x/85xx/52xx" depends on I2C && PPC32 diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 980b3e983670..f1df00f66c6c 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_I2C_ITE) += i2c-ite.o obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o obj-$(CONFIG_I2C_IXP4XX) += i2c-ixp4xx.o obj-$(CONFIG_I2C_KEYWEST) += i2c-keywest.o +obj-$(CONFIG_I2C_PMAC_SMU) += i2c-pmac-smu.o obj-$(CONFIG_I2C_MPC) += i2c-mpc.o obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o obj-$(CONFIG_I2C_NFORCE2) += i2c-nforce2.o diff --git a/drivers/i2c/busses/i2c-pmac-smu.c b/drivers/i2c/busses/i2c-pmac-smu.c new file mode 100644 index 000000000000..8a9f5648a23d --- /dev/null +++ b/drivers/i2c/busses/i2c-pmac-smu.c @@ -0,0 +1,316 @@ +/* + i2c Support for Apple SMU Controller + + Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp. + + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int probe; + +MODULE_AUTHOR("Benjamin Herrenschmidt "); +MODULE_DESCRIPTION("I2C driver for Apple's SMU"); +MODULE_LICENSE("GPL"); +module_param(probe, bool, 0); + + +/* Physical interface */ +struct smu_iface +{ + struct i2c_adapter adapter; + struct completion complete; + u32 busid; +}; + +static void smu_i2c_done(struct smu_i2c_cmd *cmd, void *misc) +{ + struct smu_iface *iface = misc; + complete(&iface->complete); +} + +/* + * SMBUS-type transfer entrypoint + */ +static s32 smu_smbus_xfer( struct i2c_adapter* adap, + u16 addr, + unsigned short flags, + char read_write, + u8 command, + int size, + union i2c_smbus_data* data) +{ + struct smu_iface *iface = i2c_get_adapdata(adap); + struct smu_i2c_cmd cmd; + int rc = 0; + int read = (read_write == I2C_SMBUS_READ); + + cmd.info.bus = iface->busid; + cmd.info.devaddr = (addr << 1) | (read ? 0x01 : 0x00); + + /* Prepare datas & select mode */ + switch (size) { + case I2C_SMBUS_QUICK: + cmd.info.type = SMU_I2C_TRANSFER_SIMPLE; + cmd.info.datalen = 0; + break; + case I2C_SMBUS_BYTE: + cmd.info.type = SMU_I2C_TRANSFER_SIMPLE; + cmd.info.datalen = 1; + if (!read) + cmd.info.data[0] = data->byte; + break; + case I2C_SMBUS_BYTE_DATA: + cmd.info.type = SMU_I2C_TRANSFER_STDSUB; + cmd.info.datalen = 1; + cmd.info.sublen = 1; + cmd.info.subaddr[0] = command; + cmd.info.subaddr[1] = 0; + cmd.info.subaddr[2] = 0; + if (!read) + cmd.info.data[0] = data->byte; + break; + case I2C_SMBUS_WORD_DATA: + cmd.info.type = SMU_I2C_TRANSFER_STDSUB; + cmd.info.datalen = 2; + cmd.info.sublen = 1; + cmd.info.subaddr[0] = command; + cmd.info.subaddr[1] = 0; + cmd.info.subaddr[2] = 0; + if (!read) { + cmd.info.data[0] = data->byte & 0xff; + cmd.info.data[1] = (data->byte >> 8) & 0xff; + } + break; + /* Note that these are broken vs. the expected smbus API where + * on reads, the lenght is actually returned from the function, + * but I think the current API makes no sense and I don't want + * any driver that I haven't verified for correctness to go + * anywhere near a pmac i2c bus anyway ... 
+ */ + case I2C_SMBUS_BLOCK_DATA: + cmd.info.type = SMU_I2C_TRANSFER_STDSUB; + cmd.info.datalen = data->block[0] + 1; + if (cmd.info.datalen > 6) + return -EINVAL; + if (!read) + memcpy(cmd.info.data, data->block, cmd.info.datalen); + cmd.info.sublen = 1; + cmd.info.subaddr[0] = command; + cmd.info.subaddr[1] = 0; + cmd.info.subaddr[2] = 0; + break; + case I2C_SMBUS_I2C_BLOCK_DATA: + cmd.info.type = SMU_I2C_TRANSFER_STDSUB; + cmd.info.datalen = data->block[0]; + if (cmd.info.datalen > 7) + return -EINVAL; + if (!read) + memcpy(cmd.info.data, &data->block[1], + cmd.info.datalen); + cmd.info.sublen = 1; + cmd.info.subaddr[0] = command; + cmd.info.subaddr[1] = 0; + cmd.info.subaddr[2] = 0; + break; + + default: + return -EINVAL; + } + + /* Turn a standardsub read into a combined mode access */ + if (read_write == I2C_SMBUS_READ && + cmd.info.type == SMU_I2C_TRANSFER_STDSUB) + cmd.info.type = SMU_I2C_TRANSFER_COMBINED; + + /* Finish filling command and submit it */ + cmd.done = smu_i2c_done; + cmd.misc = iface; + rc = smu_queue_i2c(&cmd); + if (rc < 0) + return rc; + wait_for_completion(&iface->complete); + rc = cmd.status; + + if (!read || rc < 0) + return rc; + + switch (size) { + case I2C_SMBUS_BYTE: + case I2C_SMBUS_BYTE_DATA: + data->byte = cmd.info.data[0]; + break; + case I2C_SMBUS_WORD_DATA: + data->word = ((u16)cmd.info.data[1]) << 8; + data->word |= cmd.info.data[0]; + break; + /* Note that these are broken vs. the expected smbus API where + * on reads, the lenght is actually returned from the function, + * but I think the current API makes no sense and I don't want + * any driver that I haven't verified for correctness to go + * anywhere near a pmac i2c bus anyway ... + */ + case I2C_SMBUS_BLOCK_DATA: + case I2C_SMBUS_I2C_BLOCK_DATA: + memcpy(&data->block[0], cmd.info.data, cmd.info.datalen); + break; + } + + return rc; +} + +static u32 +smu_smbus_func(struct i2c_adapter * adapter) +{ + return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | + I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | + I2C_FUNC_SMBUS_BLOCK_DATA; +} + +/* For now, we only handle combined mode (smbus) */ +static struct i2c_algorithm smu_algorithm = { + .smbus_xfer = smu_smbus_xfer, + .functionality = smu_smbus_func, +}; + +static int create_iface(struct device_node *np, struct device *dev) +{ + struct smu_iface* iface; + u32 *reg, busid; + int rc; + + reg = (u32 *)get_property(np, "reg", NULL); + if (reg == NULL) { + printk(KERN_ERR "i2c-pmac-smu: can't find bus number !\n"); + return -ENXIO; + } + busid = *reg; + + iface = kmalloc(sizeof(struct smu_iface), GFP_KERNEL); + if (iface == NULL) { + printk(KERN_ERR "i2c-pmac-smu: can't allocate inteface !\n"); + return -ENOMEM; + } + memset(iface, 0, sizeof(struct smu_iface)); + init_completion(&iface->complete); + iface->busid = busid; + + dev_set_drvdata(dev, iface); + + sprintf(iface->adapter.name, "smu-i2c-%02x", busid); + iface->adapter.algo = &smu_algorithm; + iface->adapter.algo_data = NULL; + iface->adapter.client_register = NULL; + iface->adapter.client_unregister = NULL; + i2c_set_adapdata(&iface->adapter, iface); + iface->adapter.dev.parent = dev; + + rc = i2c_add_adapter(&iface->adapter); + if (rc) { + printk(KERN_ERR "i2c-pamc-smu.c: Adapter %s registration " + "failed\n", iface->adapter.name); + i2c_set_adapdata(&iface->adapter, NULL); + } + + if (probe) { + unsigned char addr; + printk("Probe: "); + for (addr = 0x00; addr <= 0x7f; addr++) { + if (i2c_smbus_xfer(&iface->adapter,addr, + 0,0,0,I2C_SMBUS_QUICK,NULL) >= 0) + printk("%02x ", addr); + } + 
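/*
 * Illustrative sketch (assumed example, not part of this patch): a client
 * driver attached to this adapter would normally go through the generic
 * SMBus helpers, which end up in smu_smbus_xfer() above.  The function
 * name and the register offset 0x01 below are hypothetical.
 */
#include <linux/i2c.h>

static int example_read_register(struct i2c_client *client)
{
	/* Issues an I2C_SMBUS_BYTE_DATA read; for reads, smu_smbus_xfer()
	 * turns the standard-subaddress transfer into a combined-mode
	 * SMU i2c command. */
	s32 val = i2c_smbus_read_byte_data(client, 0x01);

	if (val < 0)
		return val;	/* error reported by the SMU transfer */

	return val & 0xff;
}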
printk("\n"); + } + + printk(KERN_INFO "SMU i2c bus %x registered\n", busid); + + return 0; +} + +static int dispose_iface(struct device *dev) +{ + struct smu_iface *iface = dev_get_drvdata(dev); + int rc; + + rc = i2c_del_adapter(&iface->adapter); + i2c_set_adapdata(&iface->adapter, NULL); + /* We aren't that prepared to deal with this... */ + if (rc) + printk("i2c-pmac-smu.c: Failed to remove bus %s !\n", + iface->adapter.name); + dev_set_drvdata(dev, NULL); + kfree(iface); + + return 0; +} + + +static int create_iface_of_platform(struct of_device* dev, + const struct of_device_id *match) +{ + return create_iface(dev->node, &dev->dev); +} + + +static int dispose_iface_of_platform(struct of_device* dev) +{ + return dispose_iface(&dev->dev); +} + + +static struct of_device_id i2c_smu_match[] = +{ + { + .compatible = "smu-i2c", + }, + {}, +}; +static struct of_platform_driver i2c_smu_of_platform_driver = +{ + .name = "i2c-smu", + .match_table = i2c_smu_match, + .probe = create_iface_of_platform, + .remove = dispose_iface_of_platform +}; + + +static int __init i2c_pmac_smu_init(void) +{ + of_register_driver(&i2c_smu_of_platform_driver); + return 0; +} + + +static void __exit i2c_pmac_smu_cleanup(void) +{ + of_unregister_driver(&i2c_smu_of_platform_driver); +} + +module_init(i2c_pmac_smu_init); +module_exit(i2c_pmac_smu_cleanup); diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 9e9cf1407311..5275cbb1afe9 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -1101,6 +1101,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq) ide_hwif_t *hwif; struct request *rq; ide_startstop_t startstop; + int loops = 0; /* for atari only: POSSIBLY BROKEN HERE(?) */ ide_get_lock(ide_intr, hwgroup); @@ -1153,6 +1154,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq) /* no more work for this hwgroup (for now) */ return; } + again: hwif = HWIF(drive); if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif && @@ -1192,8 +1194,14 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq) * though. I hope that doesn't happen too much, hopefully not * unless the subdriver triggers such a thing in its own PM * state machine. + * + * We count how many times we loop here to make sure we service + * all drives in the hwgroup without looping for ever */ if (drive->blocked && !blk_pm_request(rq) && !(rq->flags & REQ_PREEMPT)) { + drive = drive->next ? drive->next : hwgroup->drive; + if (loops++ < 4 && !blk_queue_plugged(drive->queue)) + goto again; /* We clear busy, there should be no pending ATA command at this point. 
*/ hwgroup->busy = 0; break; diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c index 0ccf85fcee34..a35a58bef1a4 100644 --- a/drivers/ide/legacy/ide-cs.c +++ b/drivers/ide/legacy/ide-cs.c @@ -477,7 +477,7 @@ static struct pcmcia_device_id ide_ids[] = { PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674), PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2 ", 0xe37be2b5, 0x8671043b), - PCMCIA_DEVICE_PROD_ID12(" ", "NinjaATA-", 0x3b6e20c8, 0xebe0bd79), + PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591), PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728), PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1), diff --git a/drivers/ieee1394/amdtp.c b/drivers/ieee1394/amdtp.c index 84ae027b021a..e8e28569a668 100644 --- a/drivers/ieee1394/amdtp.c +++ b/drivers/ieee1394/amdtp.c @@ -1297,4 +1297,3 @@ static void __exit amdtp_exit_module (void) module_init(amdtp_init_module); module_exit(amdtp_exit_module); -MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_AMDTP * 16); diff --git a/drivers/ieee1394/csr1212.h b/drivers/ieee1394/csr1212.h index e6734263a1d3..28c5f4b726e2 100644 --- a/drivers/ieee1394/csr1212.h +++ b/drivers/ieee1394/csr1212.h @@ -37,7 +37,6 @@ #include #include #include -#include #include #include diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c index 4538b0235ca3..e34730c7a874 100644 --- a/drivers/ieee1394/dv1394.c +++ b/drivers/ieee1394/dv1394.c @@ -2660,4 +2660,3 @@ static int __init dv1394_init_module(void) module_init(dv1394_init_module); module_exit(dv1394_exit_module); -MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16); diff --git a/drivers/ieee1394/eth1394.c b/drivers/ieee1394/eth1394.c index cd53c174ced1..c9e92d85c893 100644 --- a/drivers/ieee1394/eth1394.c +++ b/drivers/ieee1394/eth1394.c @@ -89,7 +89,7 @@ #define TRACE() printk(KERN_ERR "%s:%s[%d] ---- TRACE\n", driver_name, __FUNCTION__, __LINE__) static char version[] __devinitdata = - "$Rev: 1264 $ Ben Collins "; + "$Rev: 1312 $ Ben Collins "; struct fragment_info { struct list_head list; @@ -221,9 +221,7 @@ static int ether1394_open (struct net_device *dev) if (priv->bc_state == ETHER1394_BC_ERROR) { /* we'll try again */ priv->iso = hpsb_iso_recv_init(priv->host, - ETHER1394_GASP_BUFFERS * 2 * - (1 << (priv->host->csr.max_rec + - 1)), + ETHER1394_ISO_BUF_SIZE, ETHER1394_GASP_BUFFERS, priv->broadcast_channel, HPSB_ISO_DMA_PACKET_PER_BUFFER, @@ -635,8 +633,8 @@ static void ether1394_add_host (struct hpsb_host *host) * be checked when the eth device is opened. */ priv->broadcast_channel = host->csr.broadcast_channel & 0x3f; - priv->iso = hpsb_iso_recv_init(host, (ETHER1394_GASP_BUFFERS * 2 * - (1 << (host->csr.max_rec + 1))), + priv->iso = hpsb_iso_recv_init(host, + ETHER1394_ISO_BUF_SIZE, ETHER1394_GASP_BUFFERS, priv->broadcast_channel, HPSB_ISO_DMA_PACKET_PER_BUFFER, @@ -1632,7 +1630,7 @@ static void ether1394_complete_cb(void *__ptask) /* Transmit a packet (called by kernel) */ static int ether1394_tx (struct sk_buff *skb, struct net_device *dev) { - int kmflags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; + gfp_t kmflags = in_interrupt() ? 
GFP_ATOMIC : GFP_KERNEL; struct eth1394hdr *eth; struct eth1394_priv *priv = netdev_priv(dev); int proto; @@ -1770,7 +1768,7 @@ fail: static void ether1394_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strcpy (info->driver, driver_name); - strcpy (info->version, "$Rev: 1264 $"); + strcpy (info->version, "$Rev: 1312 $"); /* FIXME XXX provide sane businfo */ strcpy (info->bus_info, "ieee1394"); } diff --git a/drivers/ieee1394/eth1394.h b/drivers/ieee1394/eth1394.h index ed8f1c4b7fd8..a77213cfc483 100644 --- a/drivers/ieee1394/eth1394.h +++ b/drivers/ieee1394/eth1394.h @@ -44,6 +44,12 @@ #define ETHER1394_GASP_BUFFERS 16 +/* rawiso buffer size - due to a limitation in rawiso, we must limit each + * GASP buffer to be less than PAGE_SIZE. */ +#define ETHER1394_ISO_BUF_SIZE ETHER1394_GASP_BUFFERS * \ + min((unsigned int)PAGE_SIZE, \ + 2 * (1U << (priv->host->csr.max_rec + 1))) + /* Node set == 64 */ #define NODE_SET (ALL_NODES + 1) diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c index c502c6e9c440..aeeaeb670d03 100644 --- a/drivers/ieee1394/hosts.c +++ b/drivers/ieee1394/hosts.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "csr1212.h" #include "ieee1394.h" @@ -217,7 +218,7 @@ int hpsb_update_config_rom_image(struct hpsb_host *host) /* IEEE 1394a-2000 prohibits using the same generation number * twice in a 60 second period. */ - if (jiffies - host->csr.gen_timestamp[next_gen] < 60 * HZ) + if (time_before(jiffies, host->csr.gen_timestamp[next_gen] + 60 * HZ)) /* Wait 60 seconds from the last time this generation number was * used. */ reset_delay = (60 * HZ) + host->csr.gen_timestamp[next_gen] - jiffies; diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h index 739e76840d51..38f42112dff0 100644 --- a/drivers/ieee1394/hosts.h +++ b/drivers/ieee1394/hosts.h @@ -135,17 +135,17 @@ enum isoctl_cmd { enum reset_types { /* 166 microsecond reset -- only type of reset available on - non-1394a capable IEEE 1394 controllers */ + non-1394a capable controllers */ LONG_RESET, /* Short (arbitrated) reset -- only available on 1394a capable - IEEE 1394 capable controllers */ + controllers */ SHORT_RESET, - /* Variants, that set force_root before issueing the bus reset */ + /* Variants that set force_root before issueing the bus reset */ LONG_RESET_FORCE_ROOT, SHORT_RESET_FORCE_ROOT, - /* Variants, that clear force_root before issueing the bus reset */ + /* Variants that clear force_root before issueing the bus reset */ LONG_RESET_NO_FORCE_ROOT, SHORT_RESET_NO_FORCE_ROOT }; diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c index d633770fac8e..32a1e016c85e 100644 --- a/drivers/ieee1394/ieee1394_core.c +++ b/drivers/ieee1394/ieee1394_core.c @@ -70,7 +70,7 @@ const char *hpsb_speedto_str[] = { "S100", "S200", "S400", "S800", "S1600", "S32 struct class *hpsb_protocol_class; #ifdef CONFIG_IEEE1394_VERBOSEDEBUG -static void dump_packet(const char *text, quadlet_t *data, int size) +static void dump_packet(const char *text, quadlet_t *data, int size, int speed) { int i; @@ -78,12 +78,15 @@ static void dump_packet(const char *text, quadlet_t *data, int size) size = (size > 4 ? 
4 : size); printk(KERN_DEBUG "ieee1394: %s", text); + if (speed > -1 && speed < 6) + printk(" at %s", hpsb_speedto_str[speed]); + printk(":"); for (i = 0; i < size; i++) printk(" %08x", data[i]); printk("\n"); } #else -#define dump_packet(x,y,z) +#define dump_packet(a,b,c,d) #endif static void abort_requests(struct hpsb_host *host); @@ -544,8 +547,7 @@ int hpsb_send_packet(struct hpsb_packet *packet) if (packet->data_size) memcpy(((u8*)data) + packet->header_size, packet->data, packet->data_size); - dump_packet("send packet local:", packet->header, - packet->header_size); + dump_packet("send packet local", packet->header, packet->header_size, -1); hpsb_packet_sent(host, packet, packet->expect_response ? ACK_PENDING : ACK_COMPLETE); hpsb_packet_received(host, data, size, 0); @@ -561,21 +563,7 @@ int hpsb_send_packet(struct hpsb_packet *packet) + NODEID_TO_NODE(packet->node_id)]; } -#ifdef CONFIG_IEEE1394_VERBOSEDEBUG - switch (packet->speed_code) { - case 2: - dump_packet("send packet 400:", packet->header, - packet->header_size); - break; - case 1: - dump_packet("send packet 200:", packet->header, - packet->header_size); - break; - default: - dump_packet("send packet 100:", packet->header, - packet->header_size); - } -#endif + dump_packet("send packet", packet->header, packet->header_size, packet->speed_code); return host->driver->transmit_packet(host, packet); } @@ -636,7 +624,7 @@ static void handle_packet_response(struct hpsb_host *host, int tcode, if (packet == NULL) { HPSB_DEBUG("unsolicited response packet received - no tlabel match"); - dump_packet("contents:", data, 16); + dump_packet("contents", data, 16, -1); spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags); return; } @@ -677,7 +665,7 @@ static void handle_packet_response(struct hpsb_host *host, int tcode, if (!tcode_match) { spin_unlock_irqrestore(&host->pending_packet_queue.lock, flags); HPSB_INFO("unsolicited response packet received - tcode mismatch"); - dump_packet("contents:", data, 16); + dump_packet("contents", data, 16, -1); return; } @@ -914,7 +902,7 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size, return; } - dump_packet("received packet:", data, size); + dump_packet("received packet", data, size, -1); tcode = (data[0] >> 4) & 0xf; diff --git a/drivers/ieee1394/nodemgr.c b/drivers/ieee1394/nodemgr.c index b23322523ef5..347ece6b583c 100644 --- a/drivers/ieee1394/nodemgr.c +++ b/drivers/ieee1394/nodemgr.c @@ -64,10 +64,10 @@ static int nodemgr_bus_read(struct csr1212_csr *csr, u64 addr, u16 length, struct nodemgr_csr_info *ci = (struct nodemgr_csr_info*)__ci; int i, ret = 0; - for (i = 0; i < 3; i++) { + for (i = 1; ; i++) { ret = hpsb_read(ci->host, ci->nodeid, ci->generation, addr, buffer, length); - if (!ret) + if (!ret || i == 3) break; if (msleep_interruptible(334)) @@ -1438,9 +1438,13 @@ static int nodemgr_do_irm_duties(struct hpsb_host *host, int cycles) if (host->busmgr_id == 0xffff && host->node_count > 1) { u16 root_node = host->node_count - 1; - struct node_entry *ne = find_entry_by_nodeid(host, root_node | LOCAL_BUS); - if (ne && ne->busopt.cmc) + /* get cycle master capability flag from root node */ + if (host->is_cycmst || + (!hpsb_read(host, LOCAL_BUS | root_node, get_hpsb_generation(host), + (CSR_REGISTER_BASE + CSR_CONFIG_ROM + 2 * sizeof(quadlet_t)), + &bc, sizeof(quadlet_t)) && + be32_to_cpu(bc) & 1 << CSR_CMC_SHIFT)) hpsb_send_phy_config(host, root_node, -1); else { HPSB_DEBUG("The root node is not cycle master capable; " @@ -1557,24 +1561,19 @@ static 
int nodemgr_host_thread(void *__hi) } } - if (!nodemgr_check_irm_capability(host, reset_cycles)) { + if (!nodemgr_check_irm_capability(host, reset_cycles) || + !nodemgr_do_irm_duties(host, reset_cycles)) { reset_cycles++; up(&nodemgr_serialize); continue; } + reset_cycles = 0; /* Scan our nodes to get the bus options and create node * entries. This does not do the sysfs stuff, since that * would trigger hotplug callbacks and such, which is a * bad idea at this point. */ nodemgr_node_scan(hi, generation); - if (!nodemgr_do_irm_duties(host, reset_cycles)) { - reset_cycles++; - up(&nodemgr_serialize); - continue; - } - - reset_cycles = 0; /* This actually does the full probe, with sysfs * registration. */ diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c index 27018c8efc24..4cf9b8f3e336 100644 --- a/drivers/ieee1394/ohci1394.c +++ b/drivers/ieee1394/ohci1394.c @@ -162,7 +162,7 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args) printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args) static char version[] __devinitdata = - "$Rev: 1299 $ Ben Collins "; + "$Rev: 1313 $ Ben Collins "; /* Module Parameters */ static int phys_dma = 1; @@ -1084,7 +1084,7 @@ static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg) initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1); if (printk_ratelimit()) - PRINT(KERN_ERR, "IR legacy activated"); + DBGMSG("IR legacy activated"); } spin_lock_irqsave(&ohci->IR_channel_lock, flags); @@ -2283,8 +2283,9 @@ static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci, { struct ohci1394_iso_tasklet *t; unsigned long mask; + unsigned long flags; - spin_lock(&ohci->iso_tasklet_list_lock); + spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags); list_for_each_entry(t, &ohci->iso_tasklet_list, link) { mask = 1 << t->context; @@ -2295,8 +2296,7 @@ static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci, tasklet_schedule(&t->tasklet); } - spin_unlock(&ohci->iso_tasklet_list_lock); - + spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags); } static irqreturn_t ohci_irq_handler(int irq, void *dev_id, diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c index b4fa14793fe5..0470f77a9cd1 100644 --- a/drivers/ieee1394/raw1394.c +++ b/drivers/ieee1394/raw1394.c @@ -98,7 +98,7 @@ static struct hpsb_address_ops arm_ops = { static void queue_complete_cb(struct pending_request *req); -static struct pending_request *__alloc_pending_request(unsigned int __nocast flags) +static struct pending_request *__alloc_pending_request(gfp_t flags) { struct pending_request *req; @@ -412,6 +412,7 @@ static void fcp_request(struct hpsb_host *host, int nodeid, int direction, static ssize_t raw1394_read(struct file *file, char __user * buffer, size_t count, loff_t * offset_is_ignored) { + unsigned long flags; struct file_info *fi = (struct file_info *)file->private_data; struct list_head *lh; struct pending_request *req; @@ -435,10 +436,10 @@ static ssize_t raw1394_read(struct file *file, char __user * buffer, } } - spin_lock_irq(&fi->reqlists_lock); + spin_lock_irqsave(&fi->reqlists_lock, flags); lh = fi->req_complete.next; list_del(lh); - spin_unlock_irq(&fi->reqlists_lock); + spin_unlock_irqrestore(&fi->reqlists_lock, flags); req = list_entry(lh, struct pending_request, list); @@ -486,6 +487,7 @@ static int state_opened(struct file_info *fi, struct pending_request *req) static int state_initialized(struct file_info *fi, struct pending_request *req) { + unsigned long flags; struct host_info 
*hi; struct raw1394_khost_list *khl; @@ -499,7 +501,7 @@ static int state_initialized(struct file_info *fi, struct pending_request *req) switch (req->req.type) { case RAW1394_REQ_LIST_CARDS: - spin_lock_irq(&host_info_lock); + spin_lock_irqsave(&host_info_lock, flags); khl = kmalloc(sizeof(struct raw1394_khost_list) * host_count, SLAB_ATOMIC); @@ -513,7 +515,7 @@ static int state_initialized(struct file_info *fi, struct pending_request *req) khl++; } } - spin_unlock_irq(&host_info_lock); + spin_unlock_irqrestore(&host_info_lock, flags); if (khl != NULL) { req->req.error = RAW1394_ERROR_NONE; @@ -528,7 +530,7 @@ static int state_initialized(struct file_info *fi, struct pending_request *req) break; case RAW1394_REQ_SET_CARD: - spin_lock_irq(&host_info_lock); + spin_lock_irqsave(&host_info_lock, flags); if (req->req.misc < host_count) { list_for_each_entry(hi, &host_info_list, list) { if (!req->req.misc--) @@ -550,7 +552,7 @@ static int state_initialized(struct file_info *fi, struct pending_request *req) } else { req->req.error = RAW1394_ERROR_INVALID_ARG; } - spin_unlock_irq(&host_info_lock); + spin_unlock_irqrestore(&host_info_lock, flags); req->req.length = 0; break; @@ -569,7 +571,6 @@ static void handle_iso_listen(struct file_info *fi, struct pending_request *req) { int channel = req->req.misc; - spin_lock_irq(&host_info_lock); if ((channel > 63) || (channel < -64)) { req->req.error = RAW1394_ERROR_INVALID_ARG; } else if (channel >= 0) { @@ -601,7 +602,6 @@ static void handle_iso_listen(struct file_info *fi, struct pending_request *req) req->req.length = 0; queue_complete_req(req); - spin_unlock_irq(&host_info_lock); } static void handle_fcp_listen(struct file_info *fi, struct pending_request *req) @@ -627,6 +627,7 @@ static void handle_fcp_listen(struct file_info *fi, struct pending_request *req) static int handle_async_request(struct file_info *fi, struct pending_request *req, int node) { + unsigned long flags; struct hpsb_packet *packet = NULL; u64 addr = req->req.address & 0xffffffffffffULL; @@ -761,9 +762,9 @@ static int handle_async_request(struct file_info *fi, hpsb_set_packet_complete_task(packet, (void (*)(void *))queue_complete_cb, req); - spin_lock_irq(&fi->reqlists_lock); + spin_lock_irqsave(&fi->reqlists_lock, flags); list_add_tail(&req->list, &fi->req_pending); - spin_unlock_irq(&fi->reqlists_lock); + spin_unlock_irqrestore(&fi->reqlists_lock, flags); packet->generation = req->req.generation; @@ -779,6 +780,7 @@ static int handle_async_request(struct file_info *fi, static int handle_iso_send(struct file_info *fi, struct pending_request *req, int channel) { + unsigned long flags; struct hpsb_packet *packet; packet = hpsb_make_isopacket(fi->host, req->req.length, channel & 0x3f, @@ -804,9 +806,9 @@ static int handle_iso_send(struct file_info *fi, struct pending_request *req, (void (*)(void *))queue_complete_req, req); - spin_lock_irq(&fi->reqlists_lock); + spin_lock_irqsave(&fi->reqlists_lock, flags); list_add_tail(&req->list, &fi->req_pending); - spin_unlock_irq(&fi->reqlists_lock); + spin_unlock_irqrestore(&fi->reqlists_lock, flags); /* Update the generation of the packet just before sending. 
*/ packet->generation = req->req.generation; @@ -821,6 +823,7 @@ static int handle_iso_send(struct file_info *fi, struct pending_request *req, static int handle_async_send(struct file_info *fi, struct pending_request *req) { + unsigned long flags; struct hpsb_packet *packet; int header_length = req->req.misc & 0xffff; int expect_response = req->req.misc >> 16; @@ -867,9 +870,9 @@ static int handle_async_send(struct file_info *fi, struct pending_request *req) hpsb_set_packet_complete_task(packet, (void (*)(void *))queue_complete_cb, req); - spin_lock_irq(&fi->reqlists_lock); + spin_lock_irqsave(&fi->reqlists_lock, flags); list_add_tail(&req->list, &fi->req_pending); - spin_unlock_irq(&fi->reqlists_lock); + spin_unlock_irqrestore(&fi->reqlists_lock, flags); /* Update the generation of the packet just before sending. */ packet->generation = req->req.generation; @@ -885,6 +888,7 @@ static int handle_async_send(struct file_info *fi, struct pending_request *req) static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer, u64 addr, size_t length, u16 flags) { + unsigned long irqflags; struct pending_request *req; struct host_info *hi; struct file_info *fi = NULL; @@ -899,7 +903,7 @@ static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer, "addr: %4.4x %8.8x length: %Zu", nodeid, (u16) ((addr >> 32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF), length); - spin_lock(&host_info_lock); + spin_lock_irqsave(&host_info_lock, irqflags); hi = find_host_info(host); /* search address-entry */ if (hi != NULL) { list_for_each_entry(fi, &hi->file_info_list, list) { @@ -924,7 +928,7 @@ static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer, if (!found) { printk(KERN_ERR "raw1394: arm_read FAILED addr_entry not found" " -> rcode_address_error\n"); - spin_unlock(&host_info_lock); + spin_unlock_irqrestore(&host_info_lock, irqflags); return (RCODE_ADDRESS_ERROR); } else { DBGMSG("arm_read addr_entry FOUND"); @@ -954,7 +958,7 @@ static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer, req = __alloc_pending_request(SLAB_ATOMIC); if (!req) { DBGMSG("arm_read -> rcode_conflict_error"); - spin_unlock(&host_info_lock); + spin_unlock_irqrestore(&host_info_lock, irqflags); return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected. The request may be retried */ } @@ -974,7 +978,7 @@ static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer, if (!(req->data)) { free_pending_request(req); DBGMSG("arm_read -> rcode_conflict_error"); - spin_unlock(&host_info_lock); + spin_unlock_irqrestore(&host_info_lock, irqflags); return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected. 
The request may be retried */ } @@ -1031,13 +1035,14 @@ static int arm_read(struct hpsb_host *host, int nodeid, quadlet_t * buffer, sizeof(struct arm_request)); queue_complete_req(req); } - spin_unlock(&host_info_lock); + spin_unlock_irqrestore(&host_info_lock, irqflags); return (rcode); } static int arm_write(struct hpsb_host *host, int nodeid, int destid, quadlet_t * data, u64 addr, size_t length, u16 flags) { + unsigned long irqflags; struct pending_request *req; struct host_info *hi; struct file_info *fi = NULL; @@ -1052,7 +1057,7 @@ static int arm_write(struct hpsb_host *host, int nodeid, int destid, "addr: %4.4x %8.8x length: %Zu", nodeid, (u16) ((addr >> 32) & 0xFFFF), (u32) (addr & 0xFFFFFFFF), length); - spin_lock(&host_info_lock); + spin_lock_irqsave(&host_info_lock, irqflags); hi = find_host_info(host); /* search address-entry */ if (hi != NULL) { list_for_each_entry(fi, &hi->file_info_list, list) { @@ -1077,7 +1082,7 @@ static int arm_write(struct hpsb_host *host, int nodeid, int destid, if (!found) { printk(KERN_ERR "raw1394: arm_write FAILED addr_entry not found" " -> rcode_address_error\n"); - spin_unlock(&host_info_lock); + spin_unlock_irqrestore(&host_info_lock, irqflags); return (RCODE_ADDRESS_ERROR); } else { DBGMSG("arm_write addr_entry FOUND"); @@ -1106,7 +1111,7 @@ static int arm_write(struct hpsb_host *host, int nodeid, int destid, req = __alloc_pending_request(SLAB_ATOMIC); if (!req) { DBGMSG("arm_write -> rcode_conflict_error"); - spin_unlock(&host_info_lock); + spin_unlock_irqrestore(&host_info_lock, irqflags); return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected. The request my be retried */ } @@ -1118,7 +1123,7 @@ static int arm_write(struct hpsb_host *host, int nodeid, int destid, if (!(req->data)) { free_pending_request(req); DBGMSG("arm_write -> rcode_conflict_error"); - spin_unlock(&host_info_lock); + spin_unlock_irqrestore(&host_info_lock, irqflags); return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected. 
The request may be retried */ } @@ -1165,7 +1170,7 @@ static int arm_write(struct hpsb_host *host, int nodeid, int destid, sizeof(struct arm_request)); queue_complete_req(req); } - spin_unlock(&host_info_lock); + spin_unlock_irqrestore(&host_info_lock, irqflags); return (rcode); } @@ -1173,6 +1178,7 @@ static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store, u64 addr, quadlet_t data, quadlet_t arg, int ext_tcode, u16 flags) { + unsigned long irqflags; struct pending_request *req; struct host_info *hi; struct file_info *fi = NULL; @@ -1198,7 +1204,7 @@ static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store, (u32) (addr & 0xFFFFFFFF), ext_tcode & 0xFF, be32_to_cpu(data), be32_to_cpu(arg)); } - spin_lock(&host_info_lock); + spin_lock_irqsave(&host_info_lock, irqflags); hi = find_host_info(host); /* search address-entry */ if (hi != NULL) { list_for_each_entry(fi, &hi->file_info_list, list) { @@ -1224,7 +1230,7 @@ static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store, if (!found) { printk(KERN_ERR "raw1394: arm_lock FAILED addr_entry not found" " -> rcode_address_error\n"); - spin_unlock(&host_info_lock); + spin_unlock_irqrestore(&host_info_lock, irqflags); return (RCODE_ADDRESS_ERROR); } else { DBGMSG("arm_lock addr_entry FOUND"); @@ -1307,7 +1313,7 @@ static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store, req = __alloc_pending_request(SLAB_ATOMIC); if (!req) { DBGMSG("arm_lock -> rcode_conflict_error"); - spin_unlock(&host_info_lock); + spin_unlock_irqrestore(&host_info_lock, irqflags); return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected. The request may be retried */ } @@ -1316,7 +1322,7 @@ static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store, if (!(req->data)) { free_pending_request(req); DBGMSG("arm_lock -> rcode_conflict_error"); - spin_unlock(&host_info_lock); + spin_unlock_irqrestore(&host_info_lock, irqflags); return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected. 
The request may be retried */ } @@ -1382,7 +1388,7 @@ static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store, sizeof(struct arm_response) + 2 * sizeof(*store)); queue_complete_req(req); } - spin_unlock(&host_info_lock); + spin_unlock_irqrestore(&host_info_lock, irqflags); return (rcode); } @@ -1390,6 +1396,7 @@ static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store, u64 addr, octlet_t data, octlet_t arg, int ext_tcode, u16 flags) { + unsigned long irqflags; struct pending_request *req; struct host_info *hi; struct file_info *fi = NULL; @@ -1422,7 +1429,7 @@ static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store, (u32) ((be64_to_cpu(arg) >> 32) & 0xFFFFFFFF), (u32) (be64_to_cpu(arg) & 0xFFFFFFFF)); } - spin_lock(&host_info_lock); + spin_lock_irqsave(&host_info_lock, irqflags); hi = find_host_info(host); /* search addressentry in file_info's for host */ if (hi != NULL) { list_for_each_entry(fi, &hi->file_info_list, list) { @@ -1449,7 +1456,7 @@ static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store, printk(KERN_ERR "raw1394: arm_lock64 FAILED addr_entry not found" " -> rcode_address_error\n"); - spin_unlock(&host_info_lock); + spin_unlock_irqrestore(&host_info_lock, irqflags); return (RCODE_ADDRESS_ERROR); } else { DBGMSG("arm_lock64 addr_entry FOUND"); @@ -1533,7 +1540,7 @@ static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store, DBGMSG("arm_lock64 -> entering notification-section"); req = __alloc_pending_request(SLAB_ATOMIC); if (!req) { - spin_unlock(&host_info_lock); + spin_unlock_irqrestore(&host_info_lock, irqflags); DBGMSG("arm_lock64 -> rcode_conflict_error"); return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected. The request may be retried */ @@ -1542,7 +1549,7 @@ static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store, req->data = kmalloc(size, SLAB_ATOMIC); if (!(req->data)) { free_pending_request(req); - spin_unlock(&host_info_lock); + spin_unlock_irqrestore(&host_info_lock, irqflags); DBGMSG("arm_lock64 -> rcode_conflict_error"); return (RCODE_CONFLICT_ERROR); /* A resource conflict was detected. 
The request may be retried */ @@ -1609,7 +1616,7 @@ static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store, sizeof(struct arm_response) + 2 * sizeof(*store)); queue_complete_req(req); } - spin_unlock(&host_info_lock); + spin_unlock_irqrestore(&host_info_lock, irqflags); return (rcode); } @@ -1980,6 +1987,7 @@ static int write_phypacket(struct file_info *fi, struct pending_request *req) struct hpsb_packet *packet = NULL; int retval = 0; quadlet_t data; + unsigned long flags; data = be32_to_cpu((u32) req->req.sendb); DBGMSG("write_phypacket called - quadlet 0x%8.8x ", data); @@ -1990,9 +1998,9 @@ static int write_phypacket(struct file_info *fi, struct pending_request *req) req->packet = packet; hpsb_set_packet_complete_task(packet, (void (*)(void *))queue_complete_cb, req); - spin_lock_irq(&fi->reqlists_lock); + spin_lock_irqsave(&fi->reqlists_lock, flags); list_add_tail(&req->list, &fi->req_pending); - spin_unlock_irq(&fi->reqlists_lock); + spin_unlock_irqrestore(&fi->reqlists_lock, flags); packet->generation = req->req.generation; retval = hpsb_send_packet(packet); DBGMSG("write_phypacket send_packet called => retval: %d ", retval); @@ -2659,14 +2667,15 @@ static unsigned int raw1394_poll(struct file *file, poll_table * pt) { struct file_info *fi = file->private_data; unsigned int mask = POLLOUT | POLLWRNORM; + unsigned long flags; poll_wait(file, &fi->poll_wait_complete, pt); - spin_lock_irq(&fi->reqlists_lock); + spin_lock_irqsave(&fi->reqlists_lock, flags); if (!list_empty(&fi->req_complete)) { mask |= POLLIN | POLLRDNORM; } - spin_unlock_irq(&fi->reqlists_lock); + spin_unlock_irqrestore(&fi->reqlists_lock, flags); return mask; } @@ -2710,6 +2719,7 @@ static int raw1394_release(struct inode *inode, struct file *file) struct arm_addr *arm_addr = NULL; int another_host; int csr_mod = 0; + unsigned long flags; if (fi->iso_state != RAW1394_ISO_INACTIVE) raw1394_iso_shutdown(fi); @@ -2720,13 +2730,11 @@ static int raw1394_release(struct inode *inode, struct file *file) } } - spin_lock_irq(&host_info_lock); + spin_lock_irqsave(&host_info_lock, flags); fi->listen_channels = 0; - spin_unlock_irq(&host_info_lock); fail = 0; /* set address-entries invalid */ - spin_lock_irq(&host_info_lock); while (!list_empty(&fi->addr_list)) { another_host = 0; @@ -2777,14 +2785,14 @@ static int raw1394_release(struct inode *inode, struct file *file) vfree(addr->addr_space_buffer); kfree(addr); } /* while */ - spin_unlock_irq(&host_info_lock); + spin_unlock_irqrestore(&host_info_lock, flags); if (fail > 0) { printk(KERN_ERR "raw1394: during addr_list-release " "error(s) occurred \n"); } while (!done) { - spin_lock_irq(&fi->reqlists_lock); + spin_lock_irqsave(&fi->reqlists_lock, flags); while (!list_empty(&fi->req_complete)) { lh = fi->req_complete.next; @@ -2798,7 +2806,7 @@ static int raw1394_release(struct inode *inode, struct file *file) if (list_empty(&fi->req_pending)) done = 1; - spin_unlock_irq(&fi->reqlists_lock); + spin_unlock_irqrestore(&fi->reqlists_lock, flags); if (!done) down_interruptible(&fi->complete_sem); @@ -2828,9 +2836,9 @@ static int raw1394_release(struct inode *inode, struct file *file) fi->host->id); if (fi->state == connected) { - spin_lock_irq(&host_info_lock); + spin_lock_irqsave(&host_info_lock, flags); list_del(&fi->list); - spin_unlock_irq(&host_info_lock); + spin_unlock_irqrestore(&host_info_lock, flags); put_device(&fi->host->device); } @@ -2958,4 +2966,3 @@ static void __exit cleanup_raw1394(void) module_init(init_raw1394); module_exit(cleanup_raw1394); 
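/*
 * Illustrative sketch (assumed example, not part of this patch): the
 * raw1394 hunks above repeatedly convert spin_lock_irq()/spin_lock()
 * pairs to spin_lock_irqsave()/spin_unlock_irqrestore(), so these paths
 * stay correct when entered with interrupts already disabled.  The lock,
 * list, and function names below are hypothetical.
 */
#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_pending);

static void example_queue(struct list_head *entry)
{
	unsigned long flags;

	/* Save the caller's IRQ state and restore it on unlock, instead
	 * of unconditionally re-enabling interrupts the way
	 * spin_unlock_irq() would. */
	spin_lock_irqsave(&example_lock, flags);
	list_add_tail(entry, &example_pending);
	spin_unlock_irqrestore(&example_lock, flags);
}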
MODULE_LICENSE("GPL"); -MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16); diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index de88218ef7cc..12cec7c4a342 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c @@ -97,16 +97,18 @@ static char version[] __devinitdata = */ static int max_speed = IEEE1394_SPEED_MAX; module_param(max_speed, int, 0644); -MODULE_PARM_DESC(max_speed, "Force max speed (3 = 800mb, 2 = 400mb default, 1 = 200mb, 0 = 100mb)"); +MODULE_PARM_DESC(max_speed, "Force max speed (3 = 800mb, 2 = 400mb, 1 = 200mb, 0 = 100mb)"); /* * Set serialize_io to 1 if you'd like only one scsi command sent * down to us at a time (debugging). This might be necessary for very * badly behaved sbp2 devices. + * + * TODO: Make this configurable per device. */ -static int serialize_io; +static int serialize_io = 1; module_param(serialize_io, int, 0444); -MODULE_PARM_DESC(serialize_io, "Serialize all I/O coming down from the scsi drivers (default = 0)"); +MODULE_PARM_DESC(serialize_io, "Serialize I/O coming from scsi drivers (default = 1, faster = 0)"); /* * Bump up max_sectors if you'd like to support very large sized @@ -596,6 +598,14 @@ static void sbp2util_mark_command_completed(struct scsi_id_instance_data *scsi_i spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags); } +/* + * Is scsi_id valid? Is the 1394 node still present? + */ +static inline int sbp2util_node_is_available(struct scsi_id_instance_data *scsi_id) +{ + return scsi_id && scsi_id->ne && !scsi_id->ne->in_limbo; +} + /********************************************* @@ -631,11 +641,23 @@ static int sbp2_remove(struct device *dev) { struct unit_directory *ud; struct scsi_id_instance_data *scsi_id; + struct scsi_device *sdev; SBP2_DEBUG("sbp2_remove"); ud = container_of(dev, struct unit_directory, device); scsi_id = ud->device.driver_data; + if (!scsi_id) + return 0; + + /* Trigger shutdown functions in scsi's highlevel. */ + if (scsi_id->scsi_host) + scsi_unblock_requests(scsi_id->scsi_host); + sdev = scsi_id->sdev; + if (sdev) { + scsi_id->sdev = NULL; + scsi_remove_device(sdev); + } sbp2_logout_device(scsi_id); sbp2_remove_device(scsi_id); @@ -2473,37 +2495,26 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt, struct scsi_id_instance_data *scsi_id = (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0]; struct sbp2scsi_host_info *hi; + int result = DID_NO_CONNECT << 16; SBP2_DEBUG("sbp2scsi_queuecommand"); - /* - * If scsi_id is null, it means there is no device in this slot, - * so we should return selection timeout. 
- */ - if (!scsi_id) { - SCpnt->result = DID_NO_CONNECT << 16; - done (SCpnt); - return 0; - } + if (!sbp2util_node_is_available(scsi_id)) + goto done; hi = scsi_id->hi; if (!hi) { SBP2_ERR("sbp2scsi_host_info is NULL - this is bad!"); - SCpnt->result = DID_NO_CONNECT << 16; - done (SCpnt); - return(0); + goto done; } /* * Until we handle multiple luns, just return selection time-out * to any IO directed at non-zero LUNs */ - if (SCpnt->device->lun) { - SCpnt->result = DID_NO_CONNECT << 16; - done (SCpnt); - return(0); - } + if (SCpnt->device->lun) + goto done; /* * Check for request sense command, and handle it here @@ -2514,7 +2525,7 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt, memcpy(SCpnt->request_buffer, SCpnt->sense_buffer, SCpnt->request_bufflen); memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer)); sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_GOOD, SCpnt, done); - return(0); + return 0; } /* @@ -2522,9 +2533,8 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt, */ if (!hpsb_node_entry_valid(scsi_id->ne)) { SBP2_ERR("Bus reset in progress - rejecting command"); - SCpnt->result = DID_BUS_BUSY << 16; - done (SCpnt); - return(0); + result = DID_BUS_BUSY << 16; + goto done; } /* @@ -2535,8 +2545,12 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt, sbp2scsi_complete_command(scsi_id, SBP2_SCSI_STATUS_SELECTION_TIMEOUT, SCpnt, done); } + return 0; - return(0); +done: + SCpnt->result = result; + done(SCpnt); + return 0; } /* @@ -2683,14 +2697,27 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id, } -static int sbp2scsi_slave_configure (struct scsi_device *sdev) +static int sbp2scsi_slave_alloc(struct scsi_device *sdev) +{ + ((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = sdev; + return 0; +} + + +static int sbp2scsi_slave_configure(struct scsi_device *sdev) { blk_queue_dma_alignment(sdev->request_queue, (512 - 1)); - return 0; } +static void sbp2scsi_slave_destroy(struct scsi_device *sdev) +{ + ((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = NULL; + return; +} + + /* * Called by scsi stack when something has really gone wrong. Usually * called when a command has timed-out for some reason. @@ -2705,7 +2732,7 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt) SBP2_ERR("aborting sbp2 command"); scsi_print_command(SCpnt); - if (scsi_id) { + if (sbp2util_node_is_available(scsi_id)) { /* * Right now, just return any matching command structures @@ -2742,31 +2769,24 @@ static int sbp2scsi_abort(struct scsi_cmnd *SCpnt) /* * Called by scsi stack when something has really gone wrong. 
*/ -static int __sbp2scsi_reset(struct scsi_cmnd *SCpnt) +static int sbp2scsi_reset(struct scsi_cmnd *SCpnt) { struct scsi_id_instance_data *scsi_id = (struct scsi_id_instance_data *)SCpnt->device->host->hostdata[0]; + unsigned long flags; SBP2_ERR("reset requested"); - if (scsi_id) { + spin_lock_irqsave(SCpnt->device->host->host_lock, flags); + + if (sbp2util_node_is_available(scsi_id)) { SBP2_ERR("Generating sbp2 fetch agent reset"); sbp2_agent_reset(scsi_id, 0); } - return(SUCCESS); -} - -static int sbp2scsi_reset(struct scsi_cmnd *SCpnt) -{ - unsigned long flags; - int rc; - - spin_lock_irqsave(SCpnt->device->host->host_lock, flags); - rc = __sbp2scsi_reset(SCpnt); spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags); - return rc; + return SUCCESS; } static const char *sbp2scsi_info (struct Scsi_Host *host) @@ -2817,7 +2837,9 @@ static struct scsi_host_template scsi_driver_template = { .eh_device_reset_handler = sbp2scsi_reset, .eh_bus_reset_handler = sbp2scsi_reset, .eh_host_reset_handler = sbp2scsi_reset, + .slave_alloc = sbp2scsi_slave_alloc, .slave_configure = sbp2scsi_slave_configure, + .slave_destroy = sbp2scsi_slave_destroy, .this_id = -1, .sg_tablesize = SG_ALL, .use_clustering = ENABLE_CLUSTERING, @@ -2837,7 +2859,8 @@ static int sbp2_module_init(void) /* Module load debug option to force one command at a time (serializing I/O) */ if (serialize_io) { - SBP2_ERR("Driver forced to serialize I/O (serialize_io = 1)"); + SBP2_INFO("Driver forced to serialize I/O (serialize_io=1)"); + SBP2_INFO("Try serialize_io=0 for better performance"); scsi_driver_template.can_queue = 1; scsi_driver_template.cmd_per_lun = 1; } diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c index 9d6facf2f78f..11be9c9c82a8 100644 --- a/drivers/ieee1394/video1394.c +++ b/drivers/ieee1394/video1394.c @@ -1571,4 +1571,3 @@ static int __init video1394_init_module (void) module_init(video1394_init_module); module_exit(video1394_exit_module); -MODULE_ALIAS_CHARDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_VIDEO1394 * 16); diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index a4a4d9c1eef3..a14ca87fda18 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -783,7 +783,7 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, u32 remote_qpn, u16 pkey_index, struct ib_ah *ah, int rmpp_active, int hdr_len, int data_len, - unsigned int __nocast gfp_mask) + gfp_t gfp_mask) { struct ib_mad_agent_private *mad_agent_priv; struct ib_mad_send_buf *send_buf; diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 78de2dd1a4f2..262618210c1c 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -574,7 +574,7 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query) int ib_sa_path_rec_get(struct ib_device *device, u8 port_num, struct ib_sa_path_rec *rec, ib_sa_comp_mask comp_mask, - int timeout_ms, unsigned int __nocast gfp_mask, + int timeout_ms, gfp_t gfp_mask, void (*callback)(int status, struct ib_sa_path_rec *resp, void *context), @@ -676,7 +676,7 @@ static void ib_sa_service_rec_release(struct ib_sa_query *sa_query) int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method, struct ib_sa_service_rec *rec, ib_sa_comp_mask comp_mask, - int timeout_ms, unsigned int __nocast gfp_mask, + int timeout_ms, gfp_t gfp_mask, void (*callback)(int status, struct ib_sa_service_rec *resp, void *context), @@ -759,7 +759,7 @@ int 
ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num, u8 method, struct ib_sa_mcmember_rec *rec, ib_sa_comp_mask comp_mask, - int timeout_ms, unsigned int __nocast gfp_mask, + int timeout_ms, gfp_t gfp_mask, void (*callback)(int status, struct ib_sa_mcmember_rec *resp, void *context), diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index b1897bed14ad..cc124344dd2c 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -69,6 +69,7 @@ struct ib_uverbs_event_file { struct ib_uverbs_file { struct kref ref; + struct semaphore mutex; struct ib_uverbs_device *device; struct ib_ucontext *ucontext; struct ib_event_handler event_handler; diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index e91ebde46481..562445165d2b 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -76,8 +76,9 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, struct ib_uverbs_get_context_resp resp; struct ib_udata udata; struct ib_device *ibdev = file->device->ib_dev; + struct ib_ucontext *ucontext; int i; - int ret = in_len; + int ret; if (out_len < sizeof resp) return -ENOSPC; @@ -85,45 +86,56 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, if (copy_from_user(&cmd, buf, sizeof cmd)) return -EFAULT; + down(&file->mutex); + + if (file->ucontext) { + ret = -EINVAL; + goto err; + } + INIT_UDATA(&udata, buf + sizeof cmd, (unsigned long) cmd.response + sizeof resp, in_len - sizeof cmd, out_len - sizeof resp); - file->ucontext = ibdev->alloc_ucontext(ibdev, &udata); - if (IS_ERR(file->ucontext)) { - ret = PTR_ERR(file->ucontext); - file->ucontext = NULL; - return ret; - } + ucontext = ibdev->alloc_ucontext(ibdev, &udata); + if (IS_ERR(ucontext)) + return PTR_ERR(file->ucontext); - file->ucontext->device = ibdev; - INIT_LIST_HEAD(&file->ucontext->pd_list); - INIT_LIST_HEAD(&file->ucontext->mr_list); - INIT_LIST_HEAD(&file->ucontext->mw_list); - INIT_LIST_HEAD(&file->ucontext->cq_list); - INIT_LIST_HEAD(&file->ucontext->qp_list); - INIT_LIST_HEAD(&file->ucontext->srq_list); - INIT_LIST_HEAD(&file->ucontext->ah_list); - spin_lock_init(&file->ucontext->lock); + ucontext->device = ibdev; + INIT_LIST_HEAD(&ucontext->pd_list); + INIT_LIST_HEAD(&ucontext->mr_list); + INIT_LIST_HEAD(&ucontext->mw_list); + INIT_LIST_HEAD(&ucontext->cq_list); + INIT_LIST_HEAD(&ucontext->qp_list); + INIT_LIST_HEAD(&ucontext->srq_list); + INIT_LIST_HEAD(&ucontext->ah_list); resp.async_fd = file->async_file.fd; for (i = 0; i < file->device->num_comp; ++i) if (copy_to_user((void __user *) (unsigned long) cmd.cq_fd_tab + i * sizeof (__u32), - &file->comp_file[i].fd, sizeof (__u32))) - goto err; + &file->comp_file[i].fd, sizeof (__u32))) { + ret = -EFAULT; + goto err_free; + } if (copy_to_user((void __user *) (unsigned long) cmd.response, - &resp, sizeof resp)) - goto err; + &resp, sizeof resp)) { + ret = -EFAULT; + goto err_free; + } + + file->ucontext = ucontext; + up(&file->mutex); return in_len; -err: - ibdev->dealloc_ucontext(file->ucontext); - file->ucontext = NULL; +err_free: + ibdev->dealloc_ucontext(ucontext); - return -EFAULT; +err: + up(&file->mutex); + return ret; } ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, @@ -352,9 +364,9 @@ retry: if (ret) goto err_pd; - spin_lock_irq(&file->ucontext->lock); + down(&file->mutex); list_add_tail(&uobj->list, &file->ucontext->pd_list); - spin_unlock_irq(&file->ucontext->lock); + up(&file->mutex); memset(&resp, 0, sizeof 
resp); resp.pd_handle = uobj->id; @@ -368,9 +380,9 @@ retry: return in_len; err_list: - spin_lock_irq(&file->ucontext->lock); + down(&file->mutex); list_del(&uobj->list); - spin_unlock_irq(&file->ucontext->lock); + up(&file->mutex); down(&ib_uverbs_idr_mutex); idr_remove(&ib_uverbs_pd_idr, uobj->id); @@ -410,9 +422,9 @@ ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file, idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle); - spin_lock_irq(&file->ucontext->lock); + down(&file->mutex); list_del(&uobj->list); - spin_unlock_irq(&file->ucontext->lock); + up(&file->mutex); kfree(uobj); @@ -512,9 +524,9 @@ retry: resp.mr_handle = obj->uobject.id; - spin_lock_irq(&file->ucontext->lock); + down(&file->mutex); list_add_tail(&obj->uobject.list, &file->ucontext->mr_list); - spin_unlock_irq(&file->ucontext->lock); + up(&file->mutex); if (copy_to_user((void __user *) (unsigned long) cmd.response, &resp, sizeof resp)) { @@ -527,9 +539,9 @@ retry: return in_len; err_list: - spin_lock_irq(&file->ucontext->lock); + down(&file->mutex); list_del(&obj->uobject.list); - spin_unlock_irq(&file->ucontext->lock); + up(&file->mutex); err_unreg: ib_dereg_mr(mr); @@ -570,9 +582,9 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file, idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle); - spin_lock_irq(&file->ucontext->lock); + down(&file->mutex); list_del(&memobj->uobject.list); - spin_unlock_irq(&file->ucontext->lock); + up(&file->mutex); ib_umem_release(file->device->ib_dev, &memobj->umem); kfree(memobj); @@ -647,9 +659,9 @@ retry: if (ret) goto err_cq; - spin_lock_irq(&file->ucontext->lock); + down(&file->mutex); list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list); - spin_unlock_irq(&file->ucontext->lock); + up(&file->mutex); memset(&resp, 0, sizeof resp); resp.cq_handle = uobj->uobject.id; @@ -664,9 +676,9 @@ retry: return in_len; err_list: - spin_lock_irq(&file->ucontext->lock); + down(&file->mutex); list_del(&uobj->uobject.list); - spin_unlock_irq(&file->ucontext->lock); + up(&file->mutex); down(&ib_uverbs_idr_mutex); idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id); @@ -712,9 +724,9 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file, idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle); - spin_lock_irq(&file->ucontext->lock); + down(&file->mutex); list_del(&uobj->uobject.list); - spin_unlock_irq(&file->ucontext->lock); + up(&file->mutex); spin_lock_irq(&file->comp_file[0].lock); list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) { @@ -847,9 +859,9 @@ retry: resp.qp_handle = uobj->uobject.id; - spin_lock_irq(&file->ucontext->lock); + down(&file->mutex); list_add_tail(&uobj->uobject.list, &file->ucontext->qp_list); - spin_unlock_irq(&file->ucontext->lock); + up(&file->mutex); if (copy_to_user((void __user *) (unsigned long) cmd.response, &resp, sizeof resp)) { @@ -862,9 +874,9 @@ retry: return in_len; err_list: - spin_lock_irq(&file->ucontext->lock); + down(&file->mutex); list_del(&uobj->uobject.list); - spin_unlock_irq(&file->ucontext->lock); + up(&file->mutex); err_destroy: ib_destroy_qp(qp); @@ -989,9 +1001,9 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file, idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle); - spin_lock_irq(&file->ucontext->lock); + down(&file->mutex); list_del(&uobj->uobject.list); - spin_unlock_irq(&file->ucontext->lock); + up(&file->mutex); spin_lock_irq(&file->async_file.lock); list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) { @@ -1136,9 +1148,9 @@ retry: resp.srq_handle = uobj->uobject.id; - spin_lock_irq(&file->ucontext->lock); + down(&file->mutex); 
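The repeated spin_lock_irq(&file->ucontext->lock) to down(&file->mutex) conversions above and below trade the per-context spinlock for a single per-file semaphore, presumably so the same lock can also be held across operations that may sleep (allocation, copy_to_user), as the get_context hunk already does. A condensed sketch of object creation under the new scheme; example_idr stands in for the per-type IDRs, and the idr_pre_get()/-EAGAIN retry loop of the real handlers is elided:

static DEFINE_IDR(example_idr);		/* stand-in for ib_uverbs_pd_idr etc. */

/* Sketch: IDR updates under ib_uverbs_idr_mutex, ucontext lists under
 * the new per-file semaphore. */
static int sketch_create_obj(struct ib_uverbs_file *file,
			     struct ib_uobject *uobj)
{
	int ret;

	down(&ib_uverbs_idr_mutex);
	ret = idr_get_new(&example_idr, uobj, &uobj->id);
	up(&ib_uverbs_idr_mutex);
	if (ret)			/* real handlers retry on -EAGAIN */
		return ret;

	down(&file->mutex);		/* may sleep; we are in process context */
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	up(&file->mutex);

	return 0;
}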
list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list); - spin_unlock_irq(&file->ucontext->lock); + up(&file->mutex); if (copy_to_user((void __user *) (unsigned long) cmd.response, &resp, sizeof resp)) { @@ -1151,9 +1163,9 @@ retry: return in_len; err_list: - spin_lock_irq(&file->ucontext->lock); + down(&file->mutex); list_del(&uobj->uobject.list); - spin_unlock_irq(&file->ucontext->lock); + up(&file->mutex); err_destroy: ib_destroy_srq(srq); @@ -1227,9 +1239,9 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle); - spin_lock_irq(&file->ucontext->lock); + down(&file->mutex); list_del(&uobj->uobject.list); - spin_unlock_irq(&file->ucontext->lock); + up(&file->mutex); spin_lock_irq(&file->async_file.lock); list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) { diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index ce5bdb7af306..12511808de21 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -448,7 +448,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, if (hdr.in_words * 4 != count) return -EINVAL; - if (hdr.command < 0 || hdr.command >= ARRAY_SIZE(uverbs_cmd_table)) + if (hdr.command < 0 || + hdr.command >= ARRAY_SIZE(uverbs_cmd_table) || + !uverbs_cmd_table[hdr.command]) return -EINVAL; if (!file->ucontext && @@ -484,27 +486,29 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp) file = kmalloc(sizeof *file + (dev->num_comp - 1) * sizeof (struct ib_uverbs_event_file), GFP_KERNEL); - if (!file) - return -ENOMEM; + if (!file) { + ret = -ENOMEM; + goto err; + } file->device = dev; kref_init(&file->ref); + init_MUTEX(&file->mutex); file->ucontext = NULL; + kref_get(&file->ref); ret = ib_uverbs_event_init(&file->async_file, file); if (ret) - goto err; + goto err_kref; file->async_file.is_async = 1; - kref_get(&file->ref); - for (i = 0; i < dev->num_comp; ++i) { + kref_get(&file->ref); ret = ib_uverbs_event_init(&file->comp_file[i], file); if (ret) goto err_async; - kref_get(&file->ref); file->comp_file[i].is_async = 0; } @@ -524,9 +528,16 @@ err_async: ib_uverbs_event_release(&file->async_file); -err: +err_kref: + /* + * One extra kref_put() because we took a reference before the + * event file creation that failed and got us here. 
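The reordering in ib_uverbs_open() above keeps the kref arithmetic predictable: kref_init() accounts for the reference owned by open() itself, and each event file takes its kref_get() before its ib_uverbs_event_init(), so a failure leaves a known number of references to drop. Schematically, using the same calls as the hunk (flattened, with the intermediate steps elided):

kref_init(&file->ref);			/* ref == 1: held by this open()           */
init_MUTEX(&file->mutex);

kref_get(&file->ref);			/* ref == 2: owned by the async event file */
ret = ib_uverbs_event_init(&file->async_file, file);
if (ret)
	goto err_kref;
/* ... per-CQ completion files follow the same get-then-init order ... */

err_kref:
	kref_put(&file->ref, ib_uverbs_release_file);	/* the failed event file's ref */
	kref_put(&file->ref, ib_uverbs_release_file);	/* open()'s own ref            */
err:
	module_put(dev->ib_dev->owner);
	return ret;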
+ */ + kref_put(&file->ref, ib_uverbs_release_file); kref_put(&file->ref, ib_uverbs_release_file); +err: + module_put(dev->ib_dev->owner); return ret; } diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index cc758a2d2bc6..378646b5a1b8 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c @@ -524,7 +524,7 @@ void mthca_cmd_use_polling(struct mthca_dev *dev) } struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev, - unsigned int gfp_mask) + gfp_t gfp_mask) { struct mthca_mailbox *mailbox; @@ -605,7 +605,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm, err = -EINVAL; goto out; } - for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i, ++nent) { + for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i) { if (virt != -1) { pages[nent * 2] = cpu_to_be64(virt); virt += 1 << lg; @@ -616,7 +616,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm, ts += 1 << (lg - 10); ++tc; - if (nent == MTHCA_MAILBOX_SIZE / 16) { + if (++nent == MTHCA_MAILBOX_SIZE / 16) { err = mthca_cmd(dev, mailbox->dma, nent, 0, op, CMD_TIME_CLASS_B, status); if (err || *status) diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h index 65f976a13e02..18175bec84c2 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.h +++ b/drivers/infiniband/hw/mthca/mthca_cmd.h @@ -248,7 +248,7 @@ void mthca_cmd_event(struct mthca_dev *dev, u16 token, u8 status, u64 out_param); struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev, - unsigned int gfp_mask); + gfp_t gfp_mask); void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox); int mthca_SYS_EN(struct mthca_dev *dev, u8 *status); diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c index 78152a8ad17d..8dfafda5ed24 100644 --- a/drivers/infiniband/hw/mthca/mthca_eq.c +++ b/drivers/infiniband/hw/mthca/mthca_eq.c @@ -396,20 +396,21 @@ static irqreturn_t mthca_tavor_interrupt(int irq, void *dev_ptr, struct pt_regs writel(dev->eq_table.clr_mask, dev->eq_table.clr_int); ecr = readl(dev->eq_regs.tavor.ecr_base + 4); - if (ecr) { - writel(ecr, dev->eq_regs.tavor.ecr_base + - MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4); + if (!ecr) + return IRQ_NONE; - for (i = 0; i < MTHCA_NUM_EQ; ++i) - if (ecr & dev->eq_table.eq[i].eqn_mask && - mthca_eq_int(dev, &dev->eq_table.eq[i])) { + writel(ecr, dev->eq_regs.tavor.ecr_base + + MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4); + + for (i = 0; i < MTHCA_NUM_EQ; ++i) + if (ecr & dev->eq_table.eq[i].eqn_mask) { + if (mthca_eq_int(dev, &dev->eq_table.eq[i])) tavor_set_eq_ci(dev, &dev->eq_table.eq[i], dev->eq_table.eq[i].cons_index); - tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn); - } - } + tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn); + } - return IRQ_RETVAL(ecr); + return IRQ_HANDLED; } static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr, @@ -836,7 +837,7 @@ int __devinit mthca_init_eq_table(struct mthca_dev *dev) dev->eq_table.clr_mask = swab32(1 << (dev->eq_table.inta_pin & 31)); dev->eq_table.clr_int = dev->clr_base + - (dev->eq_table.inta_pin < 31 ? 4 : 0); + (dev->eq_table.inta_pin < 32 ? 
4 : 0); } dev->eq_table.arm_mask = 0; diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index ffbcd40418d5..23a3f56c7899 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c @@ -503,6 +503,25 @@ err_free_aux: return err; } +static void mthca_free_icms(struct mthca_dev *mdev) +{ + u8 status; + + mthca_free_icm_table(mdev, mdev->mcg_table.table); + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) + mthca_free_icm_table(mdev, mdev->srq_table.table); + mthca_free_icm_table(mdev, mdev->cq_table.table); + mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); + mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); + mthca_free_icm_table(mdev, mdev->qp_table.qp_table); + mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); + mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); + mthca_unmap_eq_icm(mdev); + + mthca_UNMAP_ICM_AUX(mdev, &status); + mthca_free_icm(mdev, mdev->fw.arbel.aux_icm); +} + static int __devinit mthca_init_arbel(struct mthca_dev *mdev) { struct mthca_dev_lim dev_lim; @@ -580,18 +599,7 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev) return 0; err_free_icm: - if (mdev->mthca_flags & MTHCA_FLAG_SRQ) - mthca_free_icm_table(mdev, mdev->srq_table.table); - mthca_free_icm_table(mdev, mdev->cq_table.table); - mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); - mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); - mthca_free_icm_table(mdev, mdev->qp_table.qp_table); - mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); - mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); - mthca_unmap_eq_icm(mdev); - - mthca_UNMAP_ICM_AUX(mdev, &status); - mthca_free_icm(mdev, mdev->fw.arbel.aux_icm); + mthca_free_icms(mdev); err_stop_fw: mthca_UNMAP_FA(mdev, &status); @@ -611,18 +619,7 @@ static void mthca_close_hca(struct mthca_dev *mdev) mthca_CLOSE_HCA(mdev, 0, &status); if (mthca_is_memfree(mdev)) { - if (mdev->mthca_flags & MTHCA_FLAG_SRQ) - mthca_free_icm_table(mdev, mdev->srq_table.table); - mthca_free_icm_table(mdev, mdev->cq_table.table); - mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); - mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); - mthca_free_icm_table(mdev, mdev->qp_table.qp_table); - mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); - mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); - mthca_unmap_eq_icm(mdev); - - mthca_UNMAP_ICM_AUX(mdev, &status); - mthca_free_icm(mdev, mdev->fw.arbel.aux_icm); + mthca_free_icms(mdev); mthca_UNMAP_FA(mdev, &status); mthca_free_icm(mdev, mdev->fw.arbel.fw_icm); diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c index 1827400f189b..9ad8b3b6cfef 100644 --- a/drivers/infiniband/hw/mthca/mthca_memfree.c +++ b/drivers/infiniband/hw/mthca/mthca_memfree.c @@ -82,7 +82,7 @@ void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm) } struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, - unsigned int gfp_mask) + gfp_t gfp_mask) { struct mthca_icm *icm; struct mthca_icm_chunk *chunk = NULL; @@ -290,7 +290,7 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev, int i; u8 status; - num_icm = obj_size * nobj / MTHCA_TABLE_CHUNK_SIZE; + num_icm = (obj_size * nobj + MTHCA_TABLE_CHUNK_SIZE - 1) / MTHCA_TABLE_CHUNK_SIZE; table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL); if (!table) @@ -529,12 +529,25 @@ int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db) goto found; } + for (i = start; i != 
end; i += dir) + if (!dev->db_tab->page[i].db_rec) { + page = dev->db_tab->page + i; + goto alloc; + } + if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) { ret = -ENOMEM; goto out; } + if (group == 0) + ++dev->db_tab->max_group1; + else + --dev->db_tab->min_group2; + page = dev->db_tab->page + end; + +alloc: page->db_rec = dma_alloc_coherent(&dev->pdev->dev, 4096, &page->mapping, GFP_KERNEL); if (!page->db_rec) { @@ -554,10 +567,6 @@ int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db) } bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE); - if (group == 0) - ++dev->db_tab->max_group1; - else - --dev->db_tab->min_group2; found: j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE); diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.h b/drivers/infiniband/hw/mthca/mthca_memfree.h index bafa51544aa3..29433f295253 100644 --- a/drivers/infiniband/hw/mthca/mthca_memfree.h +++ b/drivers/infiniband/hw/mthca/mthca_memfree.h @@ -77,7 +77,7 @@ struct mthca_icm_iter { struct mthca_dev; struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, - unsigned int gfp_mask); + gfp_t gfp_mask); void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm); struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev, diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 1c1c2e230871..3f5319a46577 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -84,7 +84,7 @@ static int mthca_query_device(struct ib_device *ibdev, props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & 0xffffff; props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30)); - props->hw_ver = be16_to_cpup((__be16 *) (out_mad->data + 32)); + props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32)); memcpy(&props->sys_image_guid, out_mad->data + 4, 8); memcpy(&props->node_guid, out_mad->data + 12, 8); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 704f48e0b6a7..6c5bf07489f4 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -474,7 +474,7 @@ err: spin_unlock(&priv->lock); } -static void path_lookup(struct sk_buff *skb, struct net_device *dev) +static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(skb->dev); @@ -569,7 +569,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) if (skb->dst && skb->dst->neighbour) { if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) { - path_lookup(skb, dev); + ipoib_path_lookup(skb, dev); goto out; } diff --git a/drivers/input/input.c b/drivers/input/input.c index 88636a204525..14ae5583e198 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -308,6 +308,7 @@ static struct input_device_id *input_match_device(struct input_device_id *id, st MATCH_BIT(ledbit, LED_MAX); MATCH_BIT(sndbit, SND_MAX); MATCH_BIT(ffbit, FF_MAX); + MATCH_BIT(swbit, SW_MAX); return id; } diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 444f7756fee6..571a68691a4a 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -93,7 +93,7 @@ config KEYBOARD_LKKBD config KEYBOARD_LOCOMO tristate "LoCoMo Keyboard Support" - depends on SHARP_LOCOMO + depends on SHARP_LOCOMO && INPUT_KEYBOARD help Say Y here if you are running Linux on a Sharp Zaurus Collie or Poodle based PDA diff 
--git a/drivers/input/keyboard/spitzkbd.c b/drivers/input/keyboard/spitzkbd.c index 1714045a182b..344f46005401 100644 --- a/drivers/input/keyboard/spitzkbd.c +++ b/drivers/input/keyboard/spitzkbd.c @@ -53,7 +53,7 @@ static unsigned char spitzkbd_keycode[NR_SCANCODES] = { KEY_LEFTCTRL, KEY_1, KEY_3, KEY_5, KEY_6, KEY_7, KEY_9, KEY_0, KEY_BACKSPACE, SPITZ_KEY_EXOK, SPITZ_KEY_EXCANCEL, 0, 0, 0, 0, 0, /* 1-16 */ 0, KEY_2, KEY_4, KEY_R, KEY_Y, KEY_8, KEY_I, KEY_O, KEY_P, SPITZ_KEY_EXJOGDOWN, SPITZ_KEY_EXJOGUP, 0, 0, 0, 0, 0, /* 17-32 */ KEY_TAB, KEY_Q, KEY_E, KEY_T, KEY_G, KEY_U, KEY_J, KEY_K, 0, 0, 0, 0, 0, 0, 0, 0, /* 33-48 */ - SPITZ_KEY_CALENDER, KEY_W, KEY_S, KEY_F, KEY_V, KEY_H, KEY_M, KEY_L, 0, 0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, /* 49-64 */ + SPITZ_KEY_CALENDER, KEY_W, KEY_S, KEY_F, KEY_V, KEY_H, KEY_M, KEY_L, 0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0, /* 49-64 */ SPITZ_KEY_ADDRESS, KEY_A, KEY_D, KEY_C, KEY_B, KEY_N, KEY_DOT, 0, KEY_ENTER, KEY_LEFTSHIFT, 0, 0, 0, 0, 0, 0, /* 65-80 */ SPITZ_KEY_MAIL, KEY_Z, KEY_X, KEY_MINUS, KEY_SPACE, KEY_COMMA, 0, KEY_UP, 0, 0, SPITZ_KEY_FN, 0, 0, 0, 0, 0, /* 81-96 */ KEY_SYSRQ, SPITZ_KEY_JAP1, SPITZ_KEY_JAP2, SPITZ_KEY_CANCEL, SPITZ_KEY_OK, SPITZ_KEY_MENU, KEY_LEFT, KEY_DOWN, KEY_RIGHT, 0, 0, 0, 0, 0, 0, 0 /* 97-112 */ diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index d5c5b32045af..4015a91f4b6e 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -90,11 +90,11 @@ static inline int uinput_request_reserve_slot(struct uinput_device *udev, struct static void uinput_request_done(struct uinput_device *udev, struct uinput_request *request) { - complete(&request->done); - /* Mark slot as available */ udev->requests[request->id] = NULL; wake_up_interruptible(&udev->requests_waitq); + + complete(&request->done); } static int uinput_request_submit(struct input_dev *dev, struct uinput_request *request) diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c index e1f0d87de0eb..0b0ea26023e5 100644 --- a/drivers/isdn/divert/divert_procfs.c +++ b/drivers/isdn/divert/divert_procfs.c @@ -287,12 +287,12 @@ divert_dev_init(void) init_waitqueue_head(&rd_queue); #ifdef CONFIG_PROC_FS - isdn_proc_entry = create_proc_entry("isdn", S_IFDIR | S_IRUGO | S_IXUGO, proc_net); + isdn_proc_entry = proc_mkdir("net/isdn", NULL); if (!isdn_proc_entry) return (-1); isdn_divert_entry = create_proc_entry("divert", S_IFREG | S_IRUGO, isdn_proc_entry); if (!isdn_divert_entry) { - remove_proc_entry("isdn", proc_net); + remove_proc_entry("net/isdn", NULL); return (-1); } isdn_divert_entry->proc_fops = &isdn_fops; @@ -312,7 +312,7 @@ divert_dev_deinit(void) #ifdef CONFIG_PROC_FS remove_proc_entry("divert", isdn_proc_entry); - remove_proc_entry("isdn", proc_net); + remove_proc_entry("net/isdn", NULL); #endif /* CONFIG_PROC_FS */ return (0); diff --git a/drivers/isdn/hardware/eicon/diva_didd.c b/drivers/isdn/hardware/eicon/diva_didd.c index 7fdf8ae5be52..27204f4b111a 100644 --- a/drivers/isdn/hardware/eicon/diva_didd.c +++ b/drivers/isdn/hardware/eicon/diva_didd.c @@ -30,8 +30,6 @@ static char *DRIVERNAME = static char *DRIVERLNAME = "divadidd"; char *DRIVERRELEASE_DIDD = "2.0"; -static char *main_proc_dir = "eicon"; - MODULE_DESCRIPTION("DIDD table driver for diva drivers"); MODULE_AUTHOR("Cytronics & Melware, Eicon Networks"); MODULE_SUPPORTED_DEVICE("Eicon diva drivers"); @@ -89,7 +87,7 @@ proc_read(char *page, char **start, off_t off, int count, int *eof, static int DIVA_INIT_FUNCTION create_proc(void) { - 
proc_net_eicon = create_proc_entry(main_proc_dir, S_IFDIR, proc_net); + proc_net_eicon = proc_mkdir("net/eicon", NULL); if (proc_net_eicon) { if ((proc_didd = @@ -105,7 +103,7 @@ static int DIVA_INIT_FUNCTION create_proc(void) static void DIVA_EXIT_FUNCTION remove_proc(void) { remove_proc_entry(DRIVERLNAME, proc_net_eicon); - remove_proc_entry(main_proc_dir, proc_net); + remove_proc_entry("net/eicon", NULL); } static int DIVA_INIT_FUNCTION divadidd_init(void) diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c index b6435589d459..c12efa6f8429 100644 --- a/drivers/isdn/hardware/eicon/divasproc.c +++ b/drivers/isdn/hardware/eicon/divasproc.c @@ -381,7 +381,7 @@ int create_adapter_proc(diva_os_xdi_adapter_t * a) char tmp[16]; sprintf(tmp, "%s%d", adapter_dir_name, a->controller); - if (!(de = create_proc_entry(tmp, S_IFDIR, proc_net_eicon))) + if (!(de = proc_mkdir(tmp, proc_net_eicon))) return (0); a->proc_adapter_dir = (void *) de; diff --git a/drivers/isdn/hisax/st5481_b.c b/drivers/isdn/hisax/st5481_b.c index 0a2536d62402..657817a591fe 100644 --- a/drivers/isdn/hisax/st5481_b.c +++ b/drivers/isdn/hisax/st5481_b.c @@ -209,9 +209,7 @@ static void st5481B_mode(struct st5481_bcs *bcs, int mode) bcs->mode = mode; // Cancel all USB transfers on this B channel - b_out->urb[0]->transfer_flags |= URB_ASYNC_UNLINK; usb_unlink_urb(b_out->urb[0]); - b_out->urb[1]->transfer_flags |= URB_ASYNC_UNLINK; usb_unlink_urb(b_out->urb[1]); b_out->busy = 0; diff --git a/drivers/isdn/hisax/st5481_usb.c b/drivers/isdn/hisax/st5481_usb.c index ffd5b2d45552..89fbeb58485d 100644 --- a/drivers/isdn/hisax/st5481_usb.c +++ b/drivers/isdn/hisax/st5481_usb.c @@ -645,9 +645,7 @@ void st5481_in_mode(struct st5481_in *in, int mode) in->mode = mode; - in->urb[0]->transfer_flags |= URB_ASYNC_UNLINK; usb_unlink_urb(in->urb[0]); - in->urb[1]->transfer_flags |= URB_ASYNC_UNLINK; usb_unlink_urb(in->urb[1]); if (in->mode != L1_MODE_NULL) { diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c index 5da507e532fc..639582f61f41 100644 --- a/drivers/isdn/hysdn/hysdn_procconf.c +++ b/drivers/isdn/hysdn/hysdn_procconf.c @@ -394,7 +394,7 @@ hysdn_procconf_init(void) hysdn_card *card; uchar conf_name[20]; - hysdn_proc_entry = create_proc_entry(PROC_SUBDIR_NAME, S_IFDIR | S_IRUGO | S_IXUGO, proc_net); + hysdn_proc_entry = proc_mkdir(PROC_SUBDIR_NAME, proc_net); if (!hysdn_proc_entry) { printk(KERN_ERR "HYSDN: unable to create hysdn subdir\n"); return (-1); diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index fb535737d17d..9b38674fbf75 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -8,21 +8,15 @@ */ /* - * For now, this driver includes: - * - RTC get & set - * - reboot & shutdown commands - * all synchronous with IRQ disabled (ugh) - * * TODO: - * rework in a way the PMU driver works, that is asynchronous - * with a queue of commands. I'll do that as soon as I have an - * SMU based machine at hand. Some more cleanup is needed too, - * like maybe fitting it into a platform device, etc... - * Also check what's up with cache coherency, and if we really - * can't do better than flushing the cache, maybe build a table - * of command len/reply len like the PMU driver to only flush - * what is actually necessary. - * --BenH. + * - maybe add timeout to commands ? 
+ * - blocking version of time functions + * - polling version of i2c commands (including timer that works with + * interrutps off) + * - maybe avoid some data copies with i2c by directly using the smu cmd + * buffer and a lower level internal interface + * - understand SMU -> CPU events and implement reception of them via + * the userland interface */ #include @@ -36,6 +30,11 @@ #include #include #include +#include +#include +#include +#include +#include #include #include @@ -45,8 +44,13 @@ #include #include #include +#include +#include -#define DEBUG_SMU 1 +#define VERSION "0.6" +#define AUTHOR "(c) 2005 Benjamin Herrenschmidt, IBM Corp." + +#undef DEBUG_SMU #ifdef DEBUG_SMU #define DPRINTK(fmt, args...) do { printk(KERN_DEBUG fmt , ##args); } while (0) @@ -57,20 +61,30 @@ /* * This is the command buffer passed to the SMU hardware */ +#define SMU_MAX_DATA 254 + struct smu_cmd_buf { u8 cmd; u8 length; - u8 data[0x0FFE]; + u8 data[SMU_MAX_DATA]; }; struct smu_device { spinlock_t lock; struct device_node *of_node; - int db_ack; /* doorbell ack GPIO */ - int db_req; /* doorbell req GPIO */ + struct of_device *of_dev; + int doorbell; /* doorbell gpio */ u32 __iomem *db_buf; /* doorbell buffer */ + int db_irq; + int msg; + int msg_irq; struct smu_cmd_buf *cmd_buf; /* command buffer virtual */ u32 cmd_buf_abs; /* command buffer absolute */ + struct list_head cmd_list; + struct smu_cmd *cmd_cur; /* pending command */ + struct list_head cmd_i2c_list; + struct smu_i2c_cmd *cmd_i2c_cur; /* pending i2c command */ + struct timer_list i2c_timer; }; /* @@ -79,113 +93,245 @@ struct smu_device { */ static struct smu_device *smu; + /* - * SMU low level communication stuff + * SMU driver low level stuff */ -static inline int smu_cmd_stat(struct smu_cmd_buf *cmd_buf, u8 cmd_ack) -{ - rmb(); - return cmd_buf->cmd == cmd_ack && cmd_buf->length != 0; -} -static inline u8 smu_save_ack_cmd(struct smu_cmd_buf *cmd_buf) +static void smu_start_cmd(void) { - return (~cmd_buf->cmd) & 0xff; -} + unsigned long faddr, fend; + struct smu_cmd *cmd; -static void smu_send_cmd(struct smu_device *dev) -{ - /* SMU command buf is currently cacheable, we need a physical - * address. This isn't exactly a DMA mapping here, I suspect + if (list_empty(&smu->cmd_list)) + return; + + /* Fetch first command in queue */ + cmd = list_entry(smu->cmd_list.next, struct smu_cmd, link); + smu->cmd_cur = cmd; + list_del(&cmd->link); + + DPRINTK("SMU: starting cmd %x, %d bytes data\n", cmd->cmd, + cmd->data_len); + DPRINTK("SMU: data buffer: %02x %02x %02x %02x ...\n", + ((u8 *)cmd->data_buf)[0], ((u8 *)cmd->data_buf)[1], + ((u8 *)cmd->data_buf)[2], ((u8 *)cmd->data_buf)[3]); + + /* Fill the SMU command buffer */ + smu->cmd_buf->cmd = cmd->cmd; + smu->cmd_buf->length = cmd->data_len; + memcpy(smu->cmd_buf->data, cmd->data_buf, cmd->data_len); + + /* Flush command and data to RAM */ + faddr = (unsigned long)smu->cmd_buf; + fend = faddr + smu->cmd_buf->length + 2; + flush_inval_dcache_range(faddr, fend); + + /* This isn't exactly a DMA mapping here, I suspect * the SMU is actually communicating with us via i2c to the * northbridge or the CPU to access RAM. 
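Under the queued model being built here, SMU users no longer spin with interrupts off inside the driver: they fill in a struct smu_cmd, queue it, and get a completion callback. A sketch of a synchronous caller written on top of the helpers this patch adds further down (smu_queue_cmd(), smu_done_complete()); the command byte and buffer are purely illustrative:

/* Sketch only: synchronous use of the asynchronous queue.  Most real
 * callers go through smu_queue_simple()/smu_spinwait_simple() instead. */
static int sketch_smu_call(u8 command, u8 *buf, unsigned int len)
{
	struct smu_cmd cmd;
	DECLARE_COMPLETION(comp);
	int rc;

	cmd.cmd = command;
	cmd.data_buf = buf;
	cmd.data_len = len;
	cmd.reply_buf = buf;		/* reply overwrites the request here */
	cmd.reply_len = len;
	cmd.done = smu_done_complete;	/* complete()s 'comp' from the doorbell irq */
	cmd.misc = &comp;

	rc = smu_queue_cmd(&cmd);	/* -EINVAL if len exceeds SMU_MAX_DATA */
	if (rc)
		return rc;
	wait_for_completion(&comp);

	return cmd.status;		/* 0 on success, -EIO on a bad ack */
}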
*/ - writel(dev->cmd_buf_abs, dev->db_buf); + writel(smu->cmd_buf_abs, smu->db_buf); /* Ring the SMU doorbell */ - pmac_do_feature_call(PMAC_FTR_WRITE_GPIO, NULL, dev->db_req, 4); - pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, dev->db_req, 4); + pmac_do_feature_call(PMAC_FTR_WRITE_GPIO, NULL, smu->doorbell, 4); } -static int smu_cmd_done(struct smu_device *dev) + +static irqreturn_t smu_db_intr(int irq, void *arg, struct pt_regs *regs) { - unsigned long wait = 0; - int gpio; + unsigned long flags; + struct smu_cmd *cmd; + void (*done)(struct smu_cmd *cmd, void *misc) = NULL; + void *misc = NULL; + u8 gpio; + int rc = 0; - /* Check the SMU doorbell */ - do { - gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, - NULL, dev->db_ack); - if ((gpio & 7) == 7) - return 0; - udelay(100); - } while(++wait < 10000); + /* SMU completed the command, well, we hope, let's make sure + * of it + */ + spin_lock_irqsave(&smu->lock, flags); - printk(KERN_ERR "SMU timeout !\n"); - return -ENXIO; + gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, smu->doorbell); + if ((gpio & 7) != 7) { + spin_unlock_irqrestore(&smu->lock, flags); + return IRQ_HANDLED; + } + + cmd = smu->cmd_cur; + smu->cmd_cur = NULL; + if (cmd == NULL) + goto bail; + + if (rc == 0) { + unsigned long faddr; + int reply_len; + u8 ack; + + /* CPU might have brought back the cache line, so we need + * to flush again before peeking at the SMU response. We + * flush the entire buffer for now as we haven't read the + * reply lenght (it's only 2 cache lines anyway) + */ + faddr = (unsigned long)smu->cmd_buf; + flush_inval_dcache_range(faddr, faddr + 256); + + /* Now check ack */ + ack = (~cmd->cmd) & 0xff; + if (ack != smu->cmd_buf->cmd) { + DPRINTK("SMU: incorrect ack, want %x got %x\n", + ack, smu->cmd_buf->cmd); + rc = -EIO; + } + reply_len = rc == 0 ? smu->cmd_buf->length : 0; + DPRINTK("SMU: reply len: %d\n", reply_len); + if (reply_len > cmd->reply_len) { + printk(KERN_WARNING "SMU: reply buffer too small," + "got %d bytes for a %d bytes buffer\n", + reply_len, cmd->reply_len); + reply_len = cmd->reply_len; + } + cmd->reply_len = reply_len; + if (cmd->reply_buf && reply_len) + memcpy(cmd->reply_buf, smu->cmd_buf->data, reply_len); + } + + /* Now complete the command. Write status last in order as we lost + * ownership of the command structure as soon as it's no longer -1 + */ + done = cmd->done; + misc = cmd->misc; + mb(); + cmd->status = rc; + bail: + /* Start next command if any */ + smu_start_cmd(); + spin_unlock_irqrestore(&smu->lock, flags); + + /* Call command completion handler if any */ + if (done) + done(cmd, misc); + + /* It's an edge interrupt, nothing to do */ + return IRQ_HANDLED; } -static int smu_do_cmd(struct smu_device *dev) + +static irqreturn_t smu_msg_intr(int irq, void *arg, struct pt_regs *regs) { - int rc; - u8 cmd_ack; + /* I don't quite know what to do with this one, we seem to never + * receive it, so I suspect we have to arm it someway in the SMU + * to start getting events that way. + */ - DPRINTK("SMU do_cmd %02x len=%d %02x\n", - dev->cmd_buf->cmd, dev->cmd_buf->length, - dev->cmd_buf->data[0]); + printk(KERN_INFO "SMU: message interrupt !\n"); - cmd_ack = smu_save_ack_cmd(dev->cmd_buf); - - /* Clear cmd_buf cache lines */ - flush_inval_dcache_range((unsigned long)dev->cmd_buf, - ((unsigned long)dev->cmd_buf) + - sizeof(struct smu_cmd_buf)); - smu_send_cmd(dev); - rc = smu_cmd_done(dev); - if (rc == 0) - rc = smu_cmd_stat(dev->cmd_buf, cmd_ack) ? 
0 : -1; - - DPRINTK("SMU do_cmd %02x len=%d %02x => %d (%02x)\n", - dev->cmd_buf->cmd, dev->cmd_buf->length, - dev->cmd_buf->data[0], rc, cmd_ack); - - return rc; + /* It's an edge interrupt, nothing to do */ + return IRQ_HANDLED; } + +/* + * Queued command management. + * + */ + +int smu_queue_cmd(struct smu_cmd *cmd) +{ + unsigned long flags; + + if (smu == NULL) + return -ENODEV; + if (cmd->data_len > SMU_MAX_DATA || + cmd->reply_len > SMU_MAX_DATA) + return -EINVAL; + + cmd->status = 1; + spin_lock_irqsave(&smu->lock, flags); + list_add_tail(&cmd->link, &smu->cmd_list); + if (smu->cmd_cur == NULL) + smu_start_cmd(); + spin_unlock_irqrestore(&smu->lock, flags); + + return 0; +} +EXPORT_SYMBOL(smu_queue_cmd); + + +int smu_queue_simple(struct smu_simple_cmd *scmd, u8 command, + unsigned int data_len, + void (*done)(struct smu_cmd *cmd, void *misc), + void *misc, ...) +{ + struct smu_cmd *cmd = &scmd->cmd; + va_list list; + int i; + + if (data_len > sizeof(scmd->buffer)) + return -EINVAL; + + memset(scmd, 0, sizeof(*scmd)); + cmd->cmd = command; + cmd->data_len = data_len; + cmd->data_buf = scmd->buffer; + cmd->reply_len = sizeof(scmd->buffer); + cmd->reply_buf = scmd->buffer; + cmd->done = done; + cmd->misc = misc; + + va_start(list, misc); + for (i = 0; i < data_len; ++i) + scmd->buffer[i] = (u8)va_arg(list, int); + va_end(list); + + return smu_queue_cmd(cmd); +} +EXPORT_SYMBOL(smu_queue_simple); + + +void smu_poll(void) +{ + u8 gpio; + + if (smu == NULL) + return; + + gpio = pmac_do_feature_call(PMAC_FTR_READ_GPIO, NULL, smu->doorbell); + if ((gpio & 7) == 7) + smu_db_intr(smu->db_irq, smu, NULL); +} +EXPORT_SYMBOL(smu_poll); + + +void smu_done_complete(struct smu_cmd *cmd, void *misc) +{ + struct completion *comp = misc; + + complete(comp); +} +EXPORT_SYMBOL(smu_done_complete); + + +void smu_spinwait_cmd(struct smu_cmd *cmd) +{ + while(cmd->status == 1) + smu_poll(); +} +EXPORT_SYMBOL(smu_spinwait_cmd); + + /* RTC low level commands */ static inline int bcd2hex (int n) { return (((n & 0xf0) >> 4) * 10) + (n & 0xf); } + static inline int hex2bcd (int n) { return ((n / 10) << 4) + (n % 10); } -#if 0 -static inline void smu_fill_set_pwrup_timer_cmd(struct smu_cmd_buf *cmd_buf) -{ - cmd_buf->cmd = 0x8e; - cmd_buf->length = 8; - cmd_buf->data[0] = 0x00; - memset(cmd_buf->data + 1, 0, 7); -} - -static inline void smu_fill_get_pwrup_timer_cmd(struct smu_cmd_buf *cmd_buf) -{ - cmd_buf->cmd = 0x8e; - cmd_buf->length = 1; - cmd_buf->data[0] = 0x01; -} - -static inline void smu_fill_dis_pwrup_timer_cmd(struct smu_cmd_buf *cmd_buf) -{ - cmd_buf->cmd = 0x8e; - cmd_buf->length = 1; - cmd_buf->data[0] = 0x02; -} -#endif static inline void smu_fill_set_rtc_cmd(struct smu_cmd_buf *cmd_buf, struct rtc_time *time) @@ -202,100 +348,96 @@ static inline void smu_fill_set_rtc_cmd(struct smu_cmd_buf *cmd_buf, cmd_buf->data[7] = hex2bcd(time->tm_year - 100); } -static inline void smu_fill_get_rtc_cmd(struct smu_cmd_buf *cmd_buf) -{ - cmd_buf->cmd = 0x8e; - cmd_buf->length = 1; - cmd_buf->data[0] = 0x81; -} -static void smu_parse_get_rtc_reply(struct smu_cmd_buf *cmd_buf, - struct rtc_time *time) +int smu_get_rtc_time(struct rtc_time *time, int spinwait) { - time->tm_sec = bcd2hex(cmd_buf->data[0]); - time->tm_min = bcd2hex(cmd_buf->data[1]); - time->tm_hour = bcd2hex(cmd_buf->data[2]); - time->tm_wday = bcd2hex(cmd_buf->data[3]); - time->tm_mday = bcd2hex(cmd_buf->data[4]); - time->tm_mon = bcd2hex(cmd_buf->data[5]) - 1; - time->tm_year = bcd2hex(cmd_buf->data[6]) + 100; -} - -int smu_get_rtc_time(struct 
rtc_time *time) -{ - unsigned long flags; + struct smu_simple_cmd cmd; int rc; if (smu == NULL) return -ENODEV; memset(time, 0, sizeof(struct rtc_time)); - spin_lock_irqsave(&smu->lock, flags); - smu_fill_get_rtc_cmd(smu->cmd_buf); - rc = smu_do_cmd(smu); - if (rc == 0) - smu_parse_get_rtc_reply(smu->cmd_buf, time); - spin_unlock_irqrestore(&smu->lock, flags); + rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 1, NULL, NULL, + SMU_CMD_RTC_GET_DATETIME); + if (rc) + return rc; + smu_spinwait_simple(&cmd); - return rc; + time->tm_sec = bcd2hex(cmd.buffer[0]); + time->tm_min = bcd2hex(cmd.buffer[1]); + time->tm_hour = bcd2hex(cmd.buffer[2]); + time->tm_wday = bcd2hex(cmd.buffer[3]); + time->tm_mday = bcd2hex(cmd.buffer[4]); + time->tm_mon = bcd2hex(cmd.buffer[5]) - 1; + time->tm_year = bcd2hex(cmd.buffer[6]) + 100; + + return 0; } -int smu_set_rtc_time(struct rtc_time *time) + +int smu_set_rtc_time(struct rtc_time *time, int spinwait) { - unsigned long flags; + struct smu_simple_cmd cmd; int rc; if (smu == NULL) return -ENODEV; - spin_lock_irqsave(&smu->lock, flags); - smu_fill_set_rtc_cmd(smu->cmd_buf, time); - rc = smu_do_cmd(smu); - spin_unlock_irqrestore(&smu->lock, flags); + rc = smu_queue_simple(&cmd, SMU_CMD_RTC_COMMAND, 8, NULL, NULL, + SMU_CMD_RTC_SET_DATETIME, + hex2bcd(time->tm_sec), + hex2bcd(time->tm_min), + hex2bcd(time->tm_hour), + time->tm_wday, + hex2bcd(time->tm_mday), + hex2bcd(time->tm_mon) + 1, + hex2bcd(time->tm_year - 100)); + if (rc) + return rc; + smu_spinwait_simple(&cmd); - return rc; + return 0; } + void smu_shutdown(void) { - const unsigned char *command = "SHUTDOWN"; - unsigned long flags; + struct smu_simple_cmd cmd; if (smu == NULL) return; - spin_lock_irqsave(&smu->lock, flags); - smu->cmd_buf->cmd = 0xaa; - smu->cmd_buf->length = strlen(command); - strcpy(smu->cmd_buf->data, command); - smu_do_cmd(smu); + if (smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 9, NULL, NULL, + 'S', 'H', 'U', 'T', 'D', 'O', 'W', 'N', 0)) + return; + smu_spinwait_simple(&cmd); for (;;) ; - spin_unlock_irqrestore(&smu->lock, flags); } + void smu_restart(void) { - const unsigned char *command = "RESTART"; - unsigned long flags; + struct smu_simple_cmd cmd; if (smu == NULL) return; - spin_lock_irqsave(&smu->lock, flags); - smu->cmd_buf->cmd = 0xaa; - smu->cmd_buf->length = strlen(command); - strcpy(smu->cmd_buf->data, command); - smu_do_cmd(smu); + if (smu_queue_simple(&cmd, SMU_CMD_POWER_COMMAND, 8, NULL, NULL, + 'R', 'E', 'S', 'T', 'A', 'R', 'T', 0)) + return; + smu_spinwait_simple(&cmd); for (;;) ; - spin_unlock_irqrestore(&smu->lock, flags); } + int smu_present(void) { return smu != NULL; } +EXPORT_SYMBOL(smu_present); int smu_init (void) @@ -307,6 +449,8 @@ int smu_init (void) if (np == NULL) return -ENODEV; + printk(KERN_INFO "SMU driver %s %s\n", VERSION, AUTHOR); + if (smu_cmdbuf_abs == 0) { printk(KERN_ERR "SMU: Command buffer not allocated !\n"); return -EINVAL; @@ -318,7 +462,13 @@ int smu_init (void) memset(smu, 0, sizeof(*smu)); spin_lock_init(&smu->lock); + INIT_LIST_HEAD(&smu->cmd_list); + INIT_LIST_HEAD(&smu->cmd_i2c_list); smu->of_node = np; + smu->db_irq = NO_IRQ; + smu->msg_irq = NO_IRQ; + init_timer(&smu->i2c_timer); + /* smu_cmdbuf_abs is in the low 2G of RAM, can be converted to a * 32 bits value safely */ @@ -331,8 +481,8 @@ int smu_init (void) goto fail; } data = (u32 *)get_property(np, "reg", NULL); - of_node_put(np); if (data == NULL) { + of_node_put(np); printk(KERN_ERR "SMU: Can't find doorbell GPIO address !\n"); goto fail; } @@ -341,8 +491,31 @@ int 
smu_init (void) * and ack. GPIOs are at 0x50, best would be to find that out * in the device-tree though. */ - smu->db_req = 0x50 + *data; - smu->db_ack = 0x50 + *data; + smu->doorbell = *data; + if (smu->doorbell < 0x50) + smu->doorbell += 0x50; + if (np->n_intrs > 0) + smu->db_irq = np->intrs[0].line; + + of_node_put(np); + + /* Now look for the smu-interrupt GPIO */ + do { + np = of_find_node_by_name(NULL, "smu-interrupt"); + if (np == NULL) + break; + data = (u32 *)get_property(np, "reg", NULL); + if (data == NULL) { + of_node_put(np); + break; + } + smu->msg = *data; + if (smu->msg < 0x50) + smu->msg += 0x50; + if (np->n_intrs > 0) + smu->msg_irq = np->intrs[0].line; + of_node_put(np); + } while(0); /* Doorbell buffer is currently hard-coded, I didn't find a proper * device-tree entry giving the address. Best would probably to use @@ -362,3 +535,584 @@ int smu_init (void) return -ENXIO; } + + +static int smu_late_init(void) +{ + if (!smu) + return 0; + + /* + * Try to request the interrupts + */ + + if (smu->db_irq != NO_IRQ) { + if (request_irq(smu->db_irq, smu_db_intr, + SA_SHIRQ, "SMU doorbell", smu) < 0) { + printk(KERN_WARNING "SMU: can't " + "request interrupt %d\n", + smu->db_irq); + smu->db_irq = NO_IRQ; + } + } + + if (smu->msg_irq != NO_IRQ) { + if (request_irq(smu->msg_irq, smu_msg_intr, + SA_SHIRQ, "SMU message", smu) < 0) { + printk(KERN_WARNING "SMU: can't " + "request interrupt %d\n", + smu->msg_irq); + smu->msg_irq = NO_IRQ; + } + } + + return 0; +} +arch_initcall(smu_late_init); + +/* + * sysfs visibility + */ + +static void smu_expose_childs(void *unused) +{ + struct device_node *np; + + for (np = NULL; (np = of_get_next_child(smu->of_node, np)) != NULL;) { + if (device_is_compatible(np, "smu-i2c")) { + char name[32]; + u32 *reg = (u32 *)get_property(np, "reg", NULL); + + if (reg == NULL) + continue; + sprintf(name, "smu-i2c-%02x", *reg); + of_platform_device_create(np, name, &smu->of_dev->dev); + } + } + +} + +static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs, NULL); + +static int smu_platform_probe(struct of_device* dev, + const struct of_device_id *match) +{ + if (!smu) + return -ENODEV; + smu->of_dev = dev; + + /* + * Ok, we are matched, now expose all i2c busses. We have to defer + * that unfortunately or it would deadlock inside the device model + */ + schedule_work(&smu_expose_childs_work); + + return 0; +} + +static struct of_device_id smu_platform_match[] = +{ + { + .type = "smu", + }, + {}, +}; + +static struct of_platform_driver smu_of_platform_driver = +{ + .name = "smu", + .match_table = smu_platform_match, + .probe = smu_platform_probe, +}; + +static int __init smu_init_sysfs(void) +{ + int rc; + + /* + * Due to sysfs bogosity, a sysdev is not a real device, so + * we should in fact create both if we want sysdev semantics + * for power management. 
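smu_platform_probe() above does not create the i2c child devices itself; it schedules smu_expose_childs() on a workqueue because, per the comment in the code, doing it from within probe would deadlock inside the driver model. The general shape of that deferral, with the 2.6-era three-argument DECLARE_WORK(); the names here are illustrative:

static void sketch_expose_children(void *unused)
{
	/* runs from keventd, outside the locks held during probe() */
	/* ... of_platform_device_create() calls go here ... */
}

static DECLARE_WORK(sketch_work, sketch_expose_children, NULL);

static int sketch_probe(struct of_device *dev,
			const struct of_device_id *match)
{
	/* stash 'dev' for later use, then defer the child creation */
	schedule_work(&sketch_work);
	return 0;
}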
+ * For now, we don't power manage machines with an SMU chip, + * I'm a bit too far from figuring out how that works with those + * new chipsets, but that will come back and bite us + */ + rc = of_register_driver(&smu_of_platform_driver); + return 0; +} + +device_initcall(smu_init_sysfs); + +struct of_device *smu_get_ofdev(void) +{ + if (!smu) + return NULL; + return smu->of_dev; +} + +EXPORT_SYMBOL_GPL(smu_get_ofdev); + +/* + * i2c interface + */ + +static void smu_i2c_complete_command(struct smu_i2c_cmd *cmd, int fail) +{ + void (*done)(struct smu_i2c_cmd *cmd, void *misc) = cmd->done; + void *misc = cmd->misc; + unsigned long flags; + + /* Check for read case */ + if (!fail && cmd->read) { + if (cmd->pdata[0] < 1) + fail = 1; + else + memcpy(cmd->info.data, &cmd->pdata[1], + cmd->info.datalen); + } + + DPRINTK("SMU: completing, success: %d\n", !fail); + + /* Update status and mark no pending i2c command with lock + * held so nobody comes in while we dequeue an eventual + * pending next i2c command + */ + spin_lock_irqsave(&smu->lock, flags); + smu->cmd_i2c_cur = NULL; + wmb(); + cmd->status = fail ? -EIO : 0; + + /* Is there another i2c command waiting ? */ + if (!list_empty(&smu->cmd_i2c_list)) { + struct smu_i2c_cmd *newcmd; + + /* Fetch it, new current, remove from list */ + newcmd = list_entry(smu->cmd_i2c_list.next, + struct smu_i2c_cmd, link); + smu->cmd_i2c_cur = newcmd; + list_del(&cmd->link); + + /* Queue with low level smu */ + list_add_tail(&cmd->scmd.link, &smu->cmd_list); + if (smu->cmd_cur == NULL) + smu_start_cmd(); + } + spin_unlock_irqrestore(&smu->lock, flags); + + /* Call command completion handler if any */ + if (done) + done(cmd, misc); + +} + + +static void smu_i2c_retry(unsigned long data) +{ + struct smu_i2c_cmd *cmd = (struct smu_i2c_cmd *)data; + + DPRINTK("SMU: i2c failure, requeuing...\n"); + + /* requeue command simply by resetting reply_len */ + cmd->pdata[0] = 0xff; + cmd->scmd.reply_len = 0x10; + smu_queue_cmd(&cmd->scmd); +} + + +static void smu_i2c_low_completion(struct smu_cmd *scmd, void *misc) +{ + struct smu_i2c_cmd *cmd = misc; + int fail = 0; + + DPRINTK("SMU: i2c compl. 
stage=%d status=%x pdata[0]=%x rlen: %x\n", + cmd->stage, scmd->status, cmd->pdata[0], scmd->reply_len); + + /* Check for possible status */ + if (scmd->status < 0) + fail = 1; + else if (cmd->read) { + if (cmd->stage == 0) + fail = cmd->pdata[0] != 0; + else + fail = cmd->pdata[0] >= 0x80; + } else { + fail = cmd->pdata[0] != 0; + } + + /* Handle failures by requeuing command, after 5ms interval + */ + if (fail && --cmd->retries > 0) { + DPRINTK("SMU: i2c failure, starting timer...\n"); + smu->i2c_timer.function = smu_i2c_retry; + smu->i2c_timer.data = (unsigned long)cmd; + smu->i2c_timer.expires = jiffies + msecs_to_jiffies(5); + add_timer(&smu->i2c_timer); + return; + } + + /* If failure or stage 1, command is complete */ + if (fail || cmd->stage != 0) { + smu_i2c_complete_command(cmd, fail); + return; + } + + DPRINTK("SMU: going to stage 1\n"); + + /* Ok, initial command complete, now poll status */ + scmd->reply_buf = cmd->pdata; + scmd->reply_len = 0x10; + scmd->data_buf = cmd->pdata; + scmd->data_len = 1; + cmd->pdata[0] = 0; + cmd->stage = 1; + cmd->retries = 20; + smu_queue_cmd(scmd); +} + + +int smu_queue_i2c(struct smu_i2c_cmd *cmd) +{ + unsigned long flags; + + if (smu == NULL) + return -ENODEV; + + /* Fill most fields of scmd */ + cmd->scmd.cmd = SMU_CMD_I2C_COMMAND; + cmd->scmd.done = smu_i2c_low_completion; + cmd->scmd.misc = cmd; + cmd->scmd.reply_buf = cmd->pdata; + cmd->scmd.reply_len = 0x10; + cmd->scmd.data_buf = (u8 *)(char *)&cmd->info; + cmd->scmd.status = 1; + cmd->stage = 0; + cmd->pdata[0] = 0xff; + cmd->retries = 20; + cmd->status = 1; + + /* Check transfer type, sanitize some "info" fields + * based on transfer type and do more checking + */ + cmd->info.caddr = cmd->info.devaddr; + cmd->read = cmd->info.devaddr & 0x01; + switch(cmd->info.type) { + case SMU_I2C_TRANSFER_SIMPLE: + memset(&cmd->info.sublen, 0, 4); + break; + case SMU_I2C_TRANSFER_COMBINED: + cmd->info.devaddr &= 0xfe; + case SMU_I2C_TRANSFER_STDSUB: + if (cmd->info.sublen > 3) + return -EINVAL; + break; + default: + return -EINVAL; + } + + /* Finish setting up command based on transfer direction + */ + if (cmd->read) { + if (cmd->info.datalen > SMU_I2C_READ_MAX) + return -EINVAL; + memset(cmd->info.data, 0xff, cmd->info.datalen); + cmd->scmd.data_len = 9; + } else { + if (cmd->info.datalen > SMU_I2C_WRITE_MAX) + return -EINVAL; + cmd->scmd.data_len = 9 + cmd->info.datalen; + } + + DPRINTK("SMU: i2c enqueuing command\n"); + DPRINTK("SMU: %s, len=%d bus=%x addr=%x sub0=%x type=%x\n", + cmd->read ? 
"read" : "write", cmd->info.datalen, + cmd->info.bus, cmd->info.caddr, + cmd->info.subaddr[0], cmd->info.type); + + + /* Enqueue command in i2c list, and if empty, enqueue also in + * main command list + */ + spin_lock_irqsave(&smu->lock, flags); + if (smu->cmd_i2c_cur == NULL) { + smu->cmd_i2c_cur = cmd; + list_add_tail(&cmd->scmd.link, &smu->cmd_list); + if (smu->cmd_cur == NULL) + smu_start_cmd(); + } else + list_add_tail(&cmd->link, &smu->cmd_i2c_list); + spin_unlock_irqrestore(&smu->lock, flags); + + return 0; +} + + + +/* + * Userland driver interface + */ + + +static LIST_HEAD(smu_clist); +static DEFINE_SPINLOCK(smu_clist_lock); + +enum smu_file_mode { + smu_file_commands, + smu_file_events, + smu_file_closing +}; + +struct smu_private +{ + struct list_head list; + enum smu_file_mode mode; + int busy; + struct smu_cmd cmd; + spinlock_t lock; + wait_queue_head_t wait; + u8 buffer[SMU_MAX_DATA]; +}; + + +static int smu_open(struct inode *inode, struct file *file) +{ + struct smu_private *pp; + unsigned long flags; + + pp = kmalloc(sizeof(struct smu_private), GFP_KERNEL); + if (pp == 0) + return -ENOMEM; + memset(pp, 0, sizeof(struct smu_private)); + spin_lock_init(&pp->lock); + pp->mode = smu_file_commands; + init_waitqueue_head(&pp->wait); + + spin_lock_irqsave(&smu_clist_lock, flags); + list_add(&pp->list, &smu_clist); + spin_unlock_irqrestore(&smu_clist_lock, flags); + file->private_data = pp; + + return 0; +} + + +static void smu_user_cmd_done(struct smu_cmd *cmd, void *misc) +{ + struct smu_private *pp = misc; + + wake_up_all(&pp->wait); +} + + +static ssize_t smu_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct smu_private *pp = file->private_data; + unsigned long flags; + struct smu_user_cmd_hdr hdr; + int rc = 0; + + if (pp->busy) + return -EBUSY; + else if (copy_from_user(&hdr, buf, sizeof(hdr))) + return -EFAULT; + else if (hdr.cmdtype == SMU_CMDTYPE_WANTS_EVENTS) { + pp->mode = smu_file_events; + return 0; + } else if (hdr.cmdtype != SMU_CMDTYPE_SMU) + return -EINVAL; + else if (pp->mode != smu_file_commands) + return -EBADFD; + else if (hdr.data_len > SMU_MAX_DATA) + return -EINVAL; + + spin_lock_irqsave(&pp->lock, flags); + if (pp->busy) { + spin_unlock_irqrestore(&pp->lock, flags); + return -EBUSY; + } + pp->busy = 1; + pp->cmd.status = 1; + spin_unlock_irqrestore(&pp->lock, flags); + + if (copy_from_user(pp->buffer, buf + sizeof(hdr), hdr.data_len)) { + pp->busy = 0; + return -EFAULT; + } + + pp->cmd.cmd = hdr.cmd; + pp->cmd.data_len = hdr.data_len; + pp->cmd.reply_len = SMU_MAX_DATA; + pp->cmd.data_buf = pp->buffer; + pp->cmd.reply_buf = pp->buffer; + pp->cmd.done = smu_user_cmd_done; + pp->cmd.misc = pp; + rc = smu_queue_cmd(&pp->cmd); + if (rc < 0) + return rc; + return count; +} + + +static ssize_t smu_read_command(struct file *file, struct smu_private *pp, + char __user *buf, size_t count) +{ + DECLARE_WAITQUEUE(wait, current); + struct smu_user_reply_hdr hdr; + unsigned long flags; + int size, rc = 0; + + if (!pp->busy) + return 0; + if (count < sizeof(struct smu_user_reply_hdr)) + return -EOVERFLOW; + spin_lock_irqsave(&pp->lock, flags); + if (pp->cmd.status == 1) { + if (file->f_flags & O_NONBLOCK) + return -EAGAIN; + add_wait_queue(&pp->wait, &wait); + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + rc = 0; + if (pp->cmd.status != 1) + break; + rc = -ERESTARTSYS; + if (signal_pending(current)) + break; + spin_unlock_irqrestore(&pp->lock, flags); + schedule(); + spin_lock_irqsave(&pp->lock, flags); + } + 
set_current_state(TASK_RUNNING); + remove_wait_queue(&pp->wait, &wait); + } + spin_unlock_irqrestore(&pp->lock, flags); + if (rc) + return rc; + if (pp->cmd.status != 0) + pp->cmd.reply_len = 0; + size = sizeof(hdr) + pp->cmd.reply_len; + if (count < size) + size = count; + rc = size; + hdr.status = pp->cmd.status; + hdr.reply_len = pp->cmd.reply_len; + if (copy_to_user(buf, &hdr, sizeof(hdr))) + return -EFAULT; + size -= sizeof(hdr); + if (size && copy_to_user(buf + sizeof(hdr), pp->buffer, size)) + return -EFAULT; + pp->busy = 0; + + return rc; +} + + +static ssize_t smu_read_events(struct file *file, struct smu_private *pp, + char __user *buf, size_t count) +{ + /* Not implemented */ + msleep_interruptible(1000); + return 0; +} + + +static ssize_t smu_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct smu_private *pp = file->private_data; + + if (pp->mode == smu_file_commands) + return smu_read_command(file, pp, buf, count); + if (pp->mode == smu_file_events) + return smu_read_events(file, pp, buf, count); + + return -EBADFD; +} + +static unsigned int smu_fpoll(struct file *file, poll_table *wait) +{ + struct smu_private *pp = file->private_data; + unsigned int mask = 0; + unsigned long flags; + + if (pp == 0) + return 0; + + if (pp->mode == smu_file_commands) { + poll_wait(file, &pp->wait, wait); + + spin_lock_irqsave(&pp->lock, flags); + if (pp->busy && pp->cmd.status != 1) + mask |= POLLIN; + spin_unlock_irqrestore(&pp->lock, flags); + } if (pp->mode == smu_file_events) { + /* Not yet implemented */ + } + return mask; +} + +static int smu_release(struct inode *inode, struct file *file) +{ + struct smu_private *pp = file->private_data; + unsigned long flags; + unsigned int busy; + + if (pp == 0) + return 0; + + file->private_data = NULL; + + /* Mark file as closing to avoid races with new request */ + spin_lock_irqsave(&pp->lock, flags); + pp->mode = smu_file_closing; + busy = pp->busy; + + /* Wait for any pending request to complete */ + if (busy && pp->cmd.status == 1) { + DECLARE_WAITQUEUE(wait, current); + + add_wait_queue(&pp->wait, &wait); + for (;;) { + set_current_state(TASK_UNINTERRUPTIBLE); + if (pp->cmd.status != 1) + break; + spin_lock_irqsave(&pp->lock, flags); + schedule(); + spin_unlock_irqrestore(&pp->lock, flags); + } + set_current_state(TASK_RUNNING); + remove_wait_queue(&pp->wait, &wait); + } + spin_unlock_irqrestore(&pp->lock, flags); + + spin_lock_irqsave(&smu_clist_lock, flags); + list_del(&pp->list); + spin_unlock_irqrestore(&smu_clist_lock, flags); + kfree(pp); + + return 0; +} + + +static struct file_operations smu_device_fops __pmacdata = { + .llseek = no_llseek, + .read = smu_read, + .write = smu_write, + .poll = smu_fpoll, + .open = smu_open, + .release = smu_release, +}; + +static struct miscdevice pmu_device __pmacdata = { + MISC_DYNAMIC_MINOR, "smu", &smu_device_fops +}; + +static int smu_device_init(void) +{ + if (!smu) + return -ENODEV; + if (misc_register(&pmu_device) < 0) + printk(KERN_ERR "via-pmu: cannot register misc device.\n"); + return 0; +} +device_initcall(smu_device_init); diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c index c9ca1118e449..f38696622eb4 100644 --- a/drivers/macintosh/therm_adt746x.c +++ b/drivers/macintosh/therm_adt746x.c @@ -599,7 +599,7 @@ thermostat_init(void) sensor_location[2] = "?"; } - of_dev = of_platform_device_create(np, "temperatures"); + of_dev = of_platform_device_create(np, "temperatures", NULL); if (of_dev == NULL) { printk(KERN_ERR "Can't 
register temperatures device !\n"); diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c index 703e31973314..cc507ceef153 100644 --- a/drivers/macintosh/therm_pm72.c +++ b/drivers/macintosh/therm_pm72.c @@ -2051,7 +2051,7 @@ static int __init therm_pm72_init(void) return -ENODEV; } } - of_dev = of_platform_device_create(np, "temperature"); + of_dev = of_platform_device_create(np, "temperature", NULL); if (of_dev == NULL) { printk(KERN_ERR "Can't register FCU platform device !\n"); return -ENODEV; diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c index cbb72eb0426d..6aaa1df1a64e 100644 --- a/drivers/macintosh/therm_windtunnel.c +++ b/drivers/macintosh/therm_windtunnel.c @@ -504,7 +504,7 @@ g4fan_init( void ) } if( !(np=of_find_node_by_name(NULL, "fan")) ) return -ENODEV; - x.of_dev = of_platform_device_create( np, "temperature" ); + x.of_dev = of_platform_device_create(np, "temperature", NULL); of_node_put( np ); if( !x.of_dev ) { diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 2fba2bbe72d8..01654fcabc52 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -91,7 +91,7 @@ int bitmap_active(struct bitmap *bitmap) #define WRITE_POOL_SIZE 256 /* mempool for queueing pending writes on the bitmap file */ -static void *write_pool_alloc(unsigned int gfp_flags, void *data) +static void *write_pool_alloc(gfp_t gfp_flags, void *data) { return kmalloc(sizeof(struct page_list), gfp_flags); } diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index b82bc3150476..28c1a628621f 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -96,7 +96,7 @@ static kmem_cache_t *_crypt_io_pool; /* * Mempool alloc and free functions for the page */ -static void *mempool_alloc_page(unsigned int __nocast gfp_mask, void *data) +static void *mempool_alloc_page(gfp_t gfp_mask, void *data) { return alloc_page(gfp_mask); } @@ -331,7 +331,7 @@ crypt_alloc_buffer(struct crypt_config *cc, unsigned int size, { struct bio *bio; unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; - int gfp_mask = GFP_NOIO | __GFP_HIGHMEM; + gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM; unsigned int i; /* diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 9de000131a8a..4809b209fbb1 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -32,7 +32,7 @@ struct io { static unsigned _num_ios; static mempool_t *_io_pool; -static void *alloc_io(unsigned int __nocast gfp_mask, void *pool_data) +static void *alloc_io(gfp_t gfp_mask, void *pool_data) { return kmalloc(sizeof(struct io), gfp_mask); } diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 200a0688f717..54ec737195e0 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -230,11 +230,20 @@ static int dm_hash_insert(const char *name, const char *uuid, struct mapped_devi static void __hash_remove(struct hash_cell *hc) { + struct dm_table *table; + /* remove from the dev hash */ list_del(&hc->uuid_list); list_del(&hc->name_list); unregister_with_devfs(hc); dm_set_mdptr(hc->md, NULL); + + table = dm_get_table(hc->md); + if (table) { + dm_table_event(table); + dm_table_put(table); + } + dm_put(hc->md); if (hc->new_map) dm_table_put(hc->new_map); diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 785806bdb248..f9b7b32d5d5c 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -329,13 +329,17 @@ static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio, /* * If we run out of usable paths, should we queue I/O or error 
it? */ -static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path) +static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path, + unsigned save_old_value) { unsigned long flags; spin_lock_irqsave(&m->lock, flags); - m->saved_queue_if_no_path = m->queue_if_no_path; + if (save_old_value) + m->saved_queue_if_no_path = m->queue_if_no_path; + else + m->saved_queue_if_no_path = queue_if_no_path; m->queue_if_no_path = queue_if_no_path; if (!m->queue_if_no_path && m->queue_size) queue_work(kmultipathd, &m->process_queued_ios); @@ -677,7 +681,7 @@ static int parse_features(struct arg_set *as, struct multipath *m, return 0; if (!strnicmp(shift(as), MESG_STR("queue_if_no_path"))) - return queue_if_no_path(m, 1); + return queue_if_no_path(m, 1, 0); else { ti->error = "Unrecognised multipath feature request"; return -EINVAL; @@ -1077,7 +1081,7 @@ static void multipath_presuspend(struct dm_target *ti) { struct multipath *m = (struct multipath *) ti->private; - queue_if_no_path(m, 0); + queue_if_no_path(m, 0, 1); } /* @@ -1222,9 +1226,9 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv) if (argc == 1) { if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) - return queue_if_no_path(m, 1); + return queue_if_no_path(m, 1, 0); else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) - return queue_if_no_path(m, 0); + return queue_if_no_path(m, 0, 0); } if (argc != 2) diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 863282513753..2375709a392c 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -122,7 +122,7 @@ static inline sector_t region_to_sector(struct region_hash *rh, region_t region) /* FIXME move this */ static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw); -static void *region_alloc(unsigned int __nocast gfp_mask, void *pool_data) +static void *region_alloc(gfp_t gfp_mask, void *pool_data) { return kmalloc(sizeof(struct region), gfp_mask); } diff --git a/drivers/md/md.c b/drivers/md/md.c index 2897df90df44..2a8a5696bf8a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -3063,6 +3063,7 @@ static int md_thread(void * arg) * many dirty RAID5 blocks. 
*/ + allow_signal(SIGKILL); complete(thread->event); while (!kthread_should_stop()) { void (*run)(mddev_t *); @@ -3111,7 +3112,7 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, thread->mddev = mddev; thread->name = name; thread->timeout = MAX_SCHEDULE_TIMEOUT; - thread->tsk = kthread_run(md_thread, thread, mdname(thread->mddev)); + thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev)); if (IS_ERR(thread->tsk)) { kfree(thread); return NULL; @@ -3567,8 +3568,10 @@ static void md_do_sync(mddev_t *mddev) mddev->curr_resync = 2; try_again: - if (signal_pending(current)) { + if (signal_pending(current) || + kthread_should_stop()) { flush_signals(current); + set_bit(MD_RECOVERY_INTR, &mddev->recovery); goto skip; } ITERATE_MDDEV(mddev2,tmp) { @@ -3588,8 +3591,9 @@ static void md_do_sync(mddev_t *mddev) */ continue; prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); - if (!signal_pending(current) - && mddev2->curr_resync >= mddev->curr_resync) { + if (!signal_pending(current) && + !kthread_should_stop() && + mddev2->curr_resync >= mddev->curr_resync) { printk(KERN_INFO "md: delaying resync of %s" " until %s has finished resync (they" " share one or more physical units)\n", @@ -3695,7 +3699,7 @@ static void md_do_sync(mddev_t *mddev) } - if (signal_pending(current)) { + if (signal_pending(current) || kthread_should_stop()) { /* * got a signal, exit. */ diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 286342375fb7..1151c3ed3006 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -38,7 +38,7 @@ static mdk_personality_t multipath_personality; -static void *mp_pool_alloc(unsigned int __nocast gfp_flags, void *data) +static void *mp_pool_alloc(gfp_t gfp_flags, void *data) { struct multipath_bh *mpb; mpb = kmalloc(sizeof(*mpb), gfp_flags); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index a93ca478142a..0e1f148dd41d 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -52,7 +52,7 @@ static mdk_personality_t raid1_personality; static void unplug_slaves(mddev_t *mddev); -static void * r1bio_pool_alloc(unsigned int __nocast gfp_flags, void *data) +static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) { struct pool_info *pi = data; r1bio_t *r1_bio; @@ -79,7 +79,7 @@ static void r1bio_pool_free(void *r1_bio, void *data) #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) #define RESYNC_WINDOW (2048*1024) -static void * r1buf_pool_alloc(unsigned int __nocast gfp_flags, void *data) +static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) { struct pool_info *pi = data; struct page *page; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 5bd1e9ec899d..28dd028415e4 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -47,7 +47,7 @@ static void unplug_slaves(mddev_t *mddev); -static void * r10bio_pool_alloc(unsigned int __nocast gfp_flags, void *data) +static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) { conf_t *conf = data; r10bio_t *r10_bio; @@ -81,7 +81,7 @@ static void r10bio_pool_free(void *r10_bio, void *data) * one for write (we recover only one drive per r10buf) * */ -static void * r10buf_pool_alloc(unsigned int __nocast gfp_flags, void *data) +static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) { conf_t *conf = data; struct page *page; diff --git a/drivers/media/dvb/frontends/tda10021.c b/drivers/media/dvb/frontends/tda10021.c index 87d5f4d8790f..eaf130e666d8 100644 --- a/drivers/media/dvb/frontends/tda10021.c +++ 
b/drivers/media/dvb/frontends/tda10021.c @@ -100,8 +100,8 @@ static u8 tda10021_readreg (struct tda10021_state* state, u8 reg) ret = i2c_transfer (state->i2c, msg, 2); if (ret != 2) - printk("DVB: TDA10021(%d): %s: readreg error (ret == %i)\n", - state->frontend.dvb->num, __FUNCTION__, ret); + printk("DVB: TDA10021: %s: readreg error (ret == %i)\n", + __FUNCTION__, ret); return b1[0]; } diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c index 022913da8c59..9b0406318f2d 100644 --- a/drivers/media/radio/radio-cadet.c +++ b/drivers/media/radio/radio-cadet.c @@ -543,7 +543,7 @@ static int cadet_probe(void) for(i=0;i<8;i++) { io=iovals[i]; - if(request_region(io,2, "cadet-probe")>=0) { + if (request_region(io, 2, "cadet-probe")) { cadet_setfreq(1410); if(cadet_getfreq()==1410) { release_region(io, 2); diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig index 93570355819a..bbb989df4cf0 100644 --- a/drivers/media/video/Kconfig +++ b/drivers/media/video/Kconfig @@ -262,7 +262,6 @@ config VIDEO_SAA7134_DVB depends on VIDEO_SAA7134 && DVB_CORE select VIDEO_BUF_DVB select DVB_MT352 - select DVB_CX22702 select DVB_TDA1004X ---help--- This adds support for DVB cards based on the diff --git a/drivers/media/video/bttv-cards.c b/drivers/media/video/bttv-cards.c index 190977a1e549..0881a17d5226 100644 --- a/drivers/media/video/bttv-cards.c +++ b/drivers/media/video/bttv-cards.c @@ -2393,12 +2393,12 @@ struct tvcard bttv_tvcards[] = { .tuner = 0, .tuner_type = TUNER_LG_TDVS_H062F, .tuner_addr = ADDR_UNSET, - .video_inputs = 2, + .video_inputs = 3, .audio_inputs = 1, .svhs = 2, - .muxsel = { 2, 3 }, + .muxsel = { 2, 3, 1 }, .gpiomask = 0x00e00007, - .audiomux = { 0x00400005, 0, 0, 0, 0, 0 }, + .audiomux = { 0x00400005, 0, 0x00000001, 0, 0x00c00007, 0 }, .no_msp34xx = 1, .no_tda9875 = 1, .no_tda7432 = 1, diff --git a/drivers/media/video/bttv-driver.c b/drivers/media/video/bttv-driver.c index a564321db2f0..c062a017491e 100644 --- a/drivers/media/video/bttv-driver.c +++ b/drivers/media/video/bttv-driver.c @@ -763,21 +763,21 @@ static void set_pll(struct bttv *btv) /* no PLL needed */ if (btv->pll.pll_current == 0) return; - vprintk(KERN_INFO "bttv%d: PLL can sleep, using XTAL (%d).\n", - btv->c.nr,btv->pll.pll_ifreq); + bttv_printk(KERN_INFO "bttv%d: PLL can sleep, using XTAL (%d).\n", + btv->c.nr,btv->pll.pll_ifreq); btwrite(0x00,BT848_TGCTRL); btwrite(0x00,BT848_PLL_XCI); btv->pll.pll_current = 0; return; } - vprintk(KERN_INFO "bttv%d: PLL: %d => %d ",btv->c.nr, - btv->pll.pll_ifreq, btv->pll.pll_ofreq); + bttv_printk(KERN_INFO "bttv%d: PLL: %d => %d ",btv->c.nr, + btv->pll.pll_ifreq, btv->pll.pll_ofreq); set_pll_freq(btv, btv->pll.pll_ifreq, btv->pll.pll_ofreq); for (i=0; i<10; i++) { /* Let other people run while the PLL stabilizes */ - vprintk("."); + bttv_printk("."); msleep(10); if (btread(BT848_DSTATUS) & BT848_DSTATUS_PLOCK) { @@ -785,12 +785,12 @@ static void set_pll(struct bttv *btv) } else { btwrite(0x08,BT848_TGCTRL); btv->pll.pll_current = btv->pll.pll_ofreq; - vprintk(" ok\n"); + bttv_printk(" ok\n"); return; } } btv->pll.pll_current = -1; - vprintk("failed\n"); + bttv_printk("failed\n"); return; } diff --git a/drivers/media/video/bttvp.h b/drivers/media/video/bttvp.h index 9b0b7ca035f8..7a312f79340a 100644 --- a/drivers/media/video/bttvp.h +++ b/drivers/media/video/bttvp.h @@ -221,7 +221,7 @@ extern void bttv_gpio_tracking(struct bttv *btv, char *comment); extern int init_bttv_i2c(struct bttv *btv); extern int fini_bttv_i2c(struct bttv *btv); 
-#define vprintk if (bttv_verbose) printk +#define bttv_printk if (bttv_verbose) printk #define dprintk if (bttv_debug >= 1) printk #define d2printk if (bttv_debug >= 2) printk diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c index 8c08b7f1ad23..b7ec9bf45085 100644 --- a/drivers/media/video/cpia.c +++ b/drivers/media/video/cpia.c @@ -1397,7 +1397,7 @@ static void destroy_proc_cpia_cam(struct cam_data *cam) static void proc_cpia_create(void) { - cpia_proc_root = create_proc_entry("cpia", S_IFDIR, NULL); + cpia_proc_root = proc_mkdir("cpia", NULL); if (cpia_proc_root) cpia_proc_root->owner = THIS_MODULE; diff --git a/drivers/media/video/rds.h b/drivers/media/video/rds.h index 30337d0f1a87..0d30eb744e61 100644 --- a/drivers/media/video/rds.h +++ b/drivers/media/video/rds.h @@ -31,7 +31,7 @@ struct rds_command { unsigned int block_count; int result; - unsigned char *buffer; + unsigned char __user *buffer; struct file *instance; poll_table *event_list; }; diff --git a/drivers/media/video/saa6588.c b/drivers/media/video/saa6588.c index 1a657a70ff43..72b70eb5da1d 100644 --- a/drivers/media/video/saa6588.c +++ b/drivers/media/video/saa6588.c @@ -157,7 +157,7 @@ static struct i2c_client client_template; /* ---------------------------------------------------------------------- */ -static int block_to_user_buf(struct saa6588 *s, unsigned char *user_buf) +static int block_to_user_buf(struct saa6588 *s, unsigned char __user *user_buf) { int i; @@ -191,7 +191,7 @@ static void read_from_buf(struct saa6588 *s, struct rds_command *a) { unsigned long flags; - unsigned char *buf_ptr = a->buffer; /* This is a user space buffer! */ + unsigned char __user *buf_ptr = a->buffer; unsigned int i; unsigned int rd_blocks; diff --git a/drivers/media/video/vpx3220.c b/drivers/media/video/vpx3220.c index 4437bdebe24f..137b58f2c666 100644 --- a/drivers/media/video/vpx3220.c +++ b/drivers/media/video/vpx3220.c @@ -203,7 +203,7 @@ static const unsigned short init_ntsc[] = { 0x8c, 640, /* Horizontal length */ 0x8d, 640, /* Number of pixels */ 0x8f, 0xc00, /* Disable window 2 */ - 0xf0, 0x173, /* 13.5 MHz transport, Forced + 0xf0, 0x73, /* 13.5 MHz transport, Forced * mode, latch windows */ 0xf2, 0x13, /* NTSC M, composite input */ 0xe7, 0x1e1, /* Enable vertical standard @@ -212,38 +212,36 @@ static const unsigned short init_ntsc[] = { static const unsigned short init_pal[] = { 0x88, 23, /* Window 1 vertical begin */ - 0x89, 288 + 16, /* Vertical lines in (16 lines + 0x89, 288, /* Vertical lines in (16 lines * skipped by the VFE) */ - 0x8a, 288 + 16, /* Vertical lines out (16 lines + 0x8a, 288, /* Vertical lines out (16 lines * skipped by the VFE) */ 0x8b, 16, /* Horizontal begin */ 0x8c, 768, /* Horizontal length */ 0x8d, 784, /* Number of pixels * Must be >= Horizontal begin + Horizontal length */ 0x8f, 0xc00, /* Disable window 2 */ - 0xf0, 0x177, /* 13.5 MHz transport, Forced + 0xf0, 0x77, /* 13.5 MHz transport, Forced * mode, latch windows */ 0xf2, 0x3d1, /* PAL B,G,H,I, composite input */ - 0xe7, 0x261, /* PAL/SECAM set to 288 + 16 lines - * change to 0x241 for 288 lines */ + 0xe7, 0x241, /* PAL/SECAM set to 288 lines */ }; static const unsigned short init_secam[] = { - 0x88, 23 - 16, /* Window 1 vertical begin */ - 0x89, 288 + 16, /* Vertical lines in (16 lines + 0x88, 23, /* Window 1 vertical begin */ + 0x89, 288, /* Vertical lines in (16 lines * skipped by the VFE) */ - 0x8a, 288 + 16, /* Vertical lines out (16 lines + 0x8a, 288, /* Vertical lines out (16 lines * skipped by the VFE) */ 0x8b, 16, 
/* Horizontal begin */ 0x8c, 768, /* Horizontal length */ 0x8d, 784, /* Number of pixels * Must be >= Horizontal begin + Horizontal length */ 0x8f, 0xc00, /* Disable window 2 */ - 0xf0, 0x177, /* 13.5 MHz transport, Forced + 0xf0, 0x77, /* 13.5 MHz transport, Forced * mode, latch windows */ 0xf2, 0x3d5, /* SECAM, composite input */ - 0xe7, 0x261, /* PAL/SECAM set to 288 + 16 lines - * change to 0x241 for 288 lines */ + 0xe7, 0x241, /* PAL/SECAM set to 288 lines */ }; static const unsigned char init_common[] = { @@ -410,6 +408,12 @@ vpx3220_command (struct i2c_client *client, case DECODER_SET_NORM: { int *iarg = arg, data; + int temp_input; + + /* Here we back up the input selection because it gets + overwritten when we fill the registers with the + choosen video norm */ + temp_input = vpx3220_fp_read(client, 0xf2); dprintk(1, KERN_DEBUG "%s: DECODER_SET_NORM %d\n", I2C_NAME(client), *iarg); @@ -449,6 +453,10 @@ vpx3220_command (struct i2c_client *client, } decoder->norm = *iarg; + + /* And here we set the backed up video input again */ + vpx3220_fp_write(client, 0xf2, temp_input | 0x0010); + udelay(10); } break; diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 429820e48c69..7de19a84dc74 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -257,8 +257,8 @@ static void mptsas_print_device_pg0(SasDevicePage0_t *pg0) printk("SAS Address=0x%llX\n", le64_to_cpu(sas_address)); printk("Target ID=0x%X\n", pg0->TargetID); printk("Bus=0x%X\n", pg0->Bus); - printk("PhyNum=0x%X\n", pg0->PhyNum); - printk("AccessStatus=0x%X\n", le16_to_cpu(pg0->AccessStatus)); + printk("Parent Phy Num=0x%X\n", pg0->PhyNum); + printk("Access Status=0x%X\n", le16_to_cpu(pg0->AccessStatus)); printk("Device Info=0x%X\n", le32_to_cpu(pg0->DeviceInfo)); printk("Flags=0x%X\n", le16_to_cpu(pg0->Flags)); printk("Physical Port=0x%X\n", pg0->PhysicalPort); @@ -270,7 +270,7 @@ static void mptsas_print_expander_pg1(SasExpanderPage1_t *pg1) printk("---- SAS EXPANDER PAGE 1 ------------\n"); printk("Physical Port=0x%X\n", pg1->PhysicalPort); - printk("PHY Identifier=0x%X\n", pg1->Phy); + printk("PHY Identifier=0x%X\n", pg1->PhyIdentifier); printk("Negotiated Link Rate=0x%X\n", pg1->NegotiatedLinkRate); printk("Programmed Link Rate=0x%X\n", pg1->ProgrammedLinkRate); printk("Hardware Link Rate=0x%X\n", pg1->HwLinkRate); @@ -604,7 +604,7 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, mptsas_print_expander_pg1(buffer); /* save config data */ - phy_info->phy_id = buffer->Phy; + phy_info->phy_id = buffer->PhyIdentifier; phy_info->port_id = buffer->PhysicalPort; phy_info->negotiated_link_rate = buffer->NegotiatedLinkRate; phy_info->programmed_link_rate = buffer->ProgrammedLinkRate; @@ -825,6 +825,8 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc, int *index) mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify, (MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE << MPI_SAS_DEVICE_PGAD_FORM_SHIFT), handle); + port_info->phy_info[i].identify.phy_id = + port_info->phy_info[i].phy_id; handle = port_info->phy_info[i].identify.handle; if (port_info->phy_info[i].attached.handle) { @@ -881,6 +883,8 @@ mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle, int *index) (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << MPI_SAS_DEVICE_PGAD_FORM_SHIFT), port_info->phy_info[i].identify.handle); + port_info->phy_info[i].identify.phy_id = + port_info->phy_info[i].phy_id; } if (port_info->phy_info[i].attached.handle) { diff --git a/drivers/mfd/ucb1x00-core.c 
b/drivers/mfd/ucb1x00-core.c index 10f6ce1bc0ab..e335d54c4659 100644 --- a/drivers/mfd/ucb1x00-core.c +++ b/drivers/mfd/ucb1x00-core.c @@ -457,6 +457,17 @@ static int ucb1x00_detect_irq(struct ucb1x00 *ucb) return probe_irq_off(mask); } +static void ucb1x00_release(struct class_device *dev) +{ + struct ucb1x00 *ucb = classdev_to_ucb1x00(dev); + kfree(ucb); +} + +static struct class ucb1x00_class = { + .name = "ucb1x00", + .release = ucb1x00_release, +}; + static int ucb1x00_probe(struct mcp *mcp) { struct ucb1x00 *ucb; @@ -546,17 +557,6 @@ static void ucb1x00_remove(struct mcp *mcp) class_device_unregister(&ucb->cdev); } -static void ucb1x00_release(struct class_device *dev) -{ - struct ucb1x00 *ucb = classdev_to_ucb1x00(dev); - kfree(ucb); -} - -static struct class ucb1x00_class = { - .name = "ucb1x00", - .release = ucb1x00_release, -}; - int ucb1x00_register_driver(struct ucb1x00_driver *drv) { struct ucb1x00 *ucb; @@ -642,8 +642,6 @@ static void __exit ucb1x00_exit(void) module_init(ucb1x00_init); module_exit(ucb1x00_exit); -EXPORT_SYMBOL(ucb1x00_class); - EXPORT_SYMBOL(ucb1x00_io_set_dir); EXPORT_SYMBOL(ucb1x00_io_write); EXPORT_SYMBOL(ucb1x00_io_read); diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c index a851d65c7cfe..a260f83bcb02 100644 --- a/drivers/mfd/ucb1x00-ts.c +++ b/drivers/mfd/ucb1x00-ts.c @@ -48,8 +48,8 @@ struct ucb1x00_ts { u16 x_res; u16 y_res; - int restart:1; - int adcsync:1; + unsigned int restart:1; + unsigned int adcsync:1; }; static int adcsync; diff --git a/drivers/mfd/ucb1x00.h b/drivers/mfd/ucb1x00.h index 6b632644f59a..9c9a647d8b7b 100644 --- a/drivers/mfd/ucb1x00.h +++ b/drivers/mfd/ucb1x00.h @@ -106,8 +106,6 @@ struct ucb1x00_irq { void (*fn)(int, void *); }; -extern struct class ucb1x00_class; - struct ucb1x00 { spinlock_t lock; struct mcp *mcp; diff --git a/drivers/mtd/devices/docecc.c b/drivers/mtd/devices/docecc.c index 9a087c1fb0b7..24f670b5a4f3 100644 --- a/drivers/mtd/devices/docecc.c +++ b/drivers/mtd/devices/docecc.c @@ -40,7 +40,7 @@ #include #include -#define DEBUG 0 +#define DEBUG_ECC 0 /* need to undef it (from asm/termbits.h) */ #undef B0 @@ -249,7 +249,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1], lambda[j] ^= Alpha_to[modnn(u + tmp)]; } } -#if DEBUG >= 1 +#if DEBUG_ECC >= 1 /* Test code that verifies the erasure locator polynomial just constructed Needed only for decoder debugging. 
*/ @@ -276,7 +276,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1], count = -1; goto finish; } -#if DEBUG >= 2 +#if DEBUG_ECC >= 2 printf("\n Erasure positions as determined by roots of Eras Loc Poly:\n"); for (i = 0; i < count; i++) printf("%d ", loc[i]); @@ -409,7 +409,7 @@ eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1], den ^= Alpha_to[modnn(lambda[i+1] + i * root[j])]; } if (den == 0) { -#if DEBUG >= 1 +#if DEBUG_ECC >= 1 printf("\n ERROR: denominator = 0\n"); #endif /* Convert to dual- basis */ diff --git a/drivers/mtd/maps/bast-flash.c b/drivers/mtd/maps/bast-flash.c index 0c45464e3f7b..0ba0ff7d43b9 100644 --- a/drivers/mtd/maps/bast-flash.c +++ b/drivers/mtd/maps/bast-flash.c @@ -39,7 +39,6 @@ #include #include -#include #include #include diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c index 3e94b616743d..a9f86c7fbd52 100644 --- a/drivers/mtd/maps/ixp2000.c +++ b/drivers/mtd/maps/ixp2000.c @@ -30,7 +30,6 @@ #include #include -#include #include #include diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c index 5afe660aa2c4..3fcc32884074 100644 --- a/drivers/mtd/maps/ixp4xx.c +++ b/drivers/mtd/maps/ixp4xx.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include @@ -254,6 +253,6 @@ module_init(ixp4xx_flash_init); module_exit(ixp4xx_flash_exit); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems") +MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems"); MODULE_AUTHOR("Deepak Saxena"); diff --git a/drivers/mtd/maps/omap_nor.c b/drivers/mtd/maps/omap_nor.c index 8cc71409a328..b17bca657daf 100644 --- a/drivers/mtd/maps/omap_nor.c +++ b/drivers/mtd/maps/omap_nor.c @@ -42,7 +42,6 @@ #include #include -#include #include #include diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c index 52385705da09..8dcaa357b4bb 100644 --- a/drivers/mtd/maps/sa1100-flash.c +++ b/drivers/mtd/maps/sa1100-flash.c @@ -21,7 +21,6 @@ #include #include -#include #include #include #include diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c index 891e3a1b9110..b47ebcb31e0f 100644 --- a/drivers/mtd/nand/s3c2410.c +++ b/drivers/mtd/nand/s3c2410.c @@ -58,7 +58,6 @@ #include #include -#include #include #include diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index bc537440ca02..f822cd3025ff 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c @@ -1027,8 +1027,7 @@ static void cp_reset_hw (struct cp_private *cp) if (!(cpr8(Cmd) & CmdReset)) return; - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(10); + schedule_timeout_uninterruptible(10); } printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name); @@ -1575,6 +1574,7 @@ static struct ethtool_ops cp_ethtool_ops = { .set_wol = cp_set_wol, .get_strings = cp_get_strings, .get_ethtool_stats = cp_get_ethtool_stats, + .get_perm_addr = ethtool_op_get_perm_addr, }; static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) @@ -1773,6 +1773,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) for (i = 0; i < 3; i++) ((u16 *) (dev->dev_addr))[i] = le16_to_cpu (read_eeprom (regs, i + 7, addr_len)); + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); dev->open = cp_open; dev->stop = cp_close; diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index 4c2cf7bbd252..30bee11c48bd 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c @@ -552,7 +552,8 @@ const static struct { { "RTL-8100B/8139D", HW_REVID(1, 1, 1, 0, 1, 0, 1), - 
HasLWake, + HasHltClk /* XXX undocumented? */ + | HasLWake, }, { "RTL-8101", @@ -970,6 +971,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev, for (i = 0; i < 3; i++) ((u16 *) (dev->dev_addr))[i] = le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len)); + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); /* The Rtl8139-specific entries in the device structure. */ dev->open = rtl8139_open; @@ -2465,6 +2467,7 @@ static struct ethtool_ops rtl8139_ethtool_ops = { .get_strings = rtl8139_get_strings, .get_stats_count = rtl8139_get_stats_count, .get_ethtool_stats = rtl8139_get_ethtool_stats, + .get_perm_addr = ethtool_op_get_perm_addr, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/8390.c b/drivers/net/8390.c index 6d76f3a99b17..f87027420081 100644 --- a/drivers/net/8390.c +++ b/drivers/net/8390.c @@ -1094,7 +1094,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length, outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD); - if (inb_p(e8390_base) & E8390_TRANS) + if (inb_p(e8390_base + E8390_CMD) & E8390_TRANS) { printk(KERN_WARNING "%s: trigger_send() called with the transmitter busy.\n", dev->name); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index d8adb4222ca3..b39cba36d15e 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -475,6 +475,14 @@ config SGI_IOC3_ETH_HW_TX_CSUM the moment only acceleration of IPv4 is supported. This option enables offloading for checksums on transmit. If unsure, say Y. +config MIPS_SIM_NET + tristate "MIPS simulator Network device (EXPERIMENTAL)" + depends on NETDEVICES && MIPS_SIM && EXPERIMENTAL + help + The MIPSNET device is a simple Ethernet network device which is + emulated by the MIPS Simulator. + If you are not using a MIPSsim or are unsure, say N. + config SGI_O2MACE_ETH tristate "SGI O2 MACE Fast Ethernet support" depends on NET_ETHERNET && SGI_IP32=y @@ -548,6 +556,14 @@ config SUNGEM Support for the Sun GEM chip, aka Sun GigabitEthernet/P 2.0. See also . +config CASSINI + tristate "Sun Cassini support" + depends on NET_ETHERNET && PCI + select CRC32 + help + Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also + + config NET_VENDOR_3COM bool "3COM cards" depends on NET_ETHERNET && (ISA || EISA || MCA || PCI) @@ -1647,7 +1663,7 @@ config LAN_SAA9730 config NET_POCKET bool "Pocket and portable adapters" - depends on NET_ETHERNET && ISA + depends on NET_ETHERNET && PARPORT ---help--- Cute little network (Ethernet) devices which attach to the parallel port ("pocket adapters"), commonly used with laptops. If you have @@ -1671,7 +1687,7 @@ config NET_POCKET config ATP tristate "AT-LAN-TEC/RealTek pocket adapter support" - depends on NET_POCKET && ISA && X86 + depends on NET_POCKET && PARPORT && X86 select CRC32 ---help--- This is a network (Ethernet) device which attaches to your parallel @@ -1686,7 +1702,7 @@ config ATP config DE600 tristate "D-Link DE600 pocket adapter support" - depends on NET_POCKET && ISA + depends on NET_POCKET && PARPORT ---help--- This is a network (Ethernet) device which attaches to your parallel port. Read as well as the @@ -1701,7 +1717,7 @@ config DE600 config DE620 tristate "D-Link DE620 pocket adapter support" - depends on NET_POCKET && ISA + depends on NET_POCKET && PARPORT ---help--- This is a network (Ethernet) device which attaches to your parallel port. 
Read as well as the @@ -2093,6 +2109,7 @@ config SPIDER_NET config GIANFAR tristate "Gianfar Ethernet" depends on 85xx || 83xx + select PHYLIB help This driver supports the Gigabit TSEC on the MPC85xx family of chips, and the FEC on the 8540 @@ -2253,6 +2270,20 @@ config ISERIES_VETH tristate "iSeries Virtual Ethernet driver support" depends on PPC_ISERIES +config RIONET + tristate "RapidIO Ethernet over messaging driver support" + depends on NETDEVICES && RAPIDIO + +config RIONET_TX_SIZE + int "Number of outbound queue entries" + depends on RIONET + default "128" + +config RIONET_RX_SIZE + int "Number of inbound queue entries" + depends on RIONET + default "128" + config FDDI bool "FDDI driver support" depends on (PCI || EISA) diff --git a/drivers/net/Makefile b/drivers/net/Makefile index c849afc25c00..8b3403c7f5e0 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -13,7 +13,7 @@ obj-$(CONFIG_CHELSIO_T1) += chelsio/ obj-$(CONFIG_BONDING) += bonding/ obj-$(CONFIG_GIANFAR) += gianfar_driver.o -gianfar_driver-objs := gianfar.o gianfar_ethtool.o gianfar_phy.o +gianfar_driver-objs := gianfar.o gianfar_ethtool.o gianfar_mii.o # # link order important here @@ -28,6 +28,7 @@ obj-$(CONFIG_SUNQE) += sunqe.o obj-$(CONFIG_SUNBMAC) += sunbmac.o obj-$(CONFIG_MYRI_SBUS) += myri_sbus.o obj-$(CONFIG_SUNGEM) += sungem.o sungem_phy.o +obj-$(CONFIG_CASSINI) += cassini.o obj-$(CONFIG_MACE) += mace.o obj-$(CONFIG_BMAC) += bmac.o @@ -64,6 +65,7 @@ obj-$(CONFIG_SKFP) += skfp/ obj-$(CONFIG_VIA_RHINE) += via-rhine.o obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o +obj-$(CONFIG_RIONET) += rionet.o # # end link order section @@ -166,6 +168,7 @@ obj-$(CONFIG_EQUALIZER) += eql.o obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o obj-$(CONFIG_MIPS_GT96100ETH) += gt96100eth.o obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o +obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o obj-$(CONFIG_DECLANCE) += declance.o obj-$(CONFIG_ATARILANCE) += atarilance.o diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c index 9b659e3c8d67..c56d86d371a9 100644 --- a/drivers/net/arm/am79c961a.c +++ b/drivers/net/arm/am79c961a.c @@ -15,16 +15,13 @@ */ #include #include -#include #include #include -#include #include #include #include #include #include -#include #include #include #include @@ -33,7 +30,6 @@ #include #include #include -#include #define TX_BUFFERS 15 #define RX_BUFFERS 25 @@ -85,7 +81,7 @@ static inline unsigned short read_ireg(u_long base_addr, u_int reg) u_short v; __asm__( "str%?h %1, [%2] @ NAT_RAP\n\t" - "str%?h %0, [%2, #8] @ NET_IDP\n\t" + "ldr%?h %0, [%2, #8] @ NET_IDP\n\t" : "=r" (v) : "r" (reg), "r" (ISAIO_BASE + 0x0464)); return v; @@ -288,7 +284,7 @@ static void am79c961_timer(unsigned long data) else if (!lnkstat && carrier) netif_carrier_off(dev); - mod_timer(&priv->timer, jiffies + 5*HZ); + mod_timer(&priv->timer, jiffies + msecs_to_jiffies(500)); } /* @@ -709,13 +705,9 @@ static int __init am79c961_init(void) goto release; am79c961_banner(); - printk(KERN_INFO "%s: ether address ", dev->name); - /* Retrive and print the ethernet address. */ - for (i = 0; i < 6; i++) { + for (i = 0; i < 6; i++) dev->dev_addr[i] = inb(dev->base_addr + i * 2) & 0xff; - printk (i == 5 ? 
"%02x\n" : "%02x:", dev->dev_addr[i]); - } spin_lock_init(&priv->chip_lock); init_timer(&priv->timer); @@ -736,8 +728,14 @@ static int __init am79c961_init(void) #endif ret = register_netdev(dev); - if (ret == 0) + if (ret == 0) { + printk(KERN_INFO "%s: ether address ", dev->name); + + for (i = 0; i < 6; i++) + printk (i == 5 ? "%02x\n" : "%02x:", dev->dev_addr[i]); + return 0; + } release: release_region(dev->base_addr, 0x18); diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index c82b9cd1c924..78506911d656 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c @@ -151,13 +151,6 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES]; SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ SUPPORTED_Autoneg -static char *phy_link[] = -{ "unknown", - "10Base2", "10BaseT", - "AUI", - "100BaseT", "100BaseTX", "100BaseFX" -}; - int bcm_5201_init(struct net_device *dev, int phy_addr) { s16 data; @@ -785,6 +778,7 @@ static struct mii_chip_info { {"Broadcom BCM5201 10/100 BaseT PHY",0x0040,0x6212, &bcm_5201_ops,0}, {"Broadcom BCM5221 10/100 BaseT PHY",0x0040,0x61e4, &bcm_5201_ops,0}, {"Broadcom BCM5222 10/100 BaseT PHY",0x0040,0x6322, &bcm_5201_ops,1}, + {"NS DP83847 PHY", 0x2000, 0x5c30, &bcm_5201_ops ,0}, {"AMD 79C901 HomePNA PHY",0x0000,0x35c8, &am79c901_ops,0}, {"AMD 79C874 10/100 BaseT PHY",0x0022,0x561b, &am79c874_ops,0}, {"LSI 80227 10/100 BaseT PHY",0x0016,0xf840, &lsi_80227_ops,0}, @@ -1045,7 +1039,7 @@ found: #endif if (aup->mii->chip_info == NULL) { - printk(KERN_ERR "%s: Au1x No MII transceivers found!\n", + printk(KERN_ERR "%s: Au1x No known MII transceivers found!\n", dev->name); return -1; } @@ -1546,6 +1540,9 @@ au1000_probe(u32 ioaddr, int irq, int port_num) printk(KERN_ERR "%s: out of memory\n", dev->name); goto err_out; } + aup->mii->next = NULL; + aup->mii->chip_info = NULL; + aup->mii->status = 0; aup->mii->mii_control_reg = 0; aup->mii->mii_data_reg = 0; diff --git a/drivers/net/b44.c b/drivers/net/b44.c index 94939f570f78..282ebd15f011 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c @@ -106,6 +106,29 @@ static int b44_poll(struct net_device *dev, int *budget); static void b44_poll_controller(struct net_device *dev); #endif +static int dma_desc_align_mask; +static int dma_desc_sync_size; + +static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev, + dma_addr_t dma_base, + unsigned long offset, + enum dma_data_direction dir) +{ + dma_sync_single_range_for_device(&pdev->dev, dma_base, + offset & dma_desc_align_mask, + dma_desc_sync_size, dir); +} + +static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev, + dma_addr_t dma_base, + unsigned long offset, + enum dma_data_direction dir) +{ + dma_sync_single_range_for_cpu(&pdev->dev, dma_base, + offset & dma_desc_align_mask, + dma_desc_sync_size, dir); +} + static inline unsigned long br32(const struct b44 *bp, unsigned long reg) { return readl(bp->regs + reg); @@ -668,6 +691,11 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) dp->ctrl = cpu_to_le32(ctrl); dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset); + if (bp->flags & B44_FLAG_RX_RING_HACK) + b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma, + dest_idx * sizeof(dp), + DMA_BIDIRECTIONAL); + return RX_PKT_BUF_SZ; } @@ -692,6 +720,11 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) pci_unmap_addr_set(dest_map, mapping, pci_unmap_addr(src_map, mapping)); + if (bp->flags & B44_FLAG_RX_RING_HACK) + b44_sync_dma_desc_for_cpu(bp->pdev, 
bp->rx_ring_dma, + src_idx * sizeof(src_desc), + DMA_BIDIRECTIONAL); + ctrl = src_desc->ctrl; if (dest_idx == (B44_RX_RING_SIZE - 1)) ctrl |= cpu_to_le32(DESC_CTRL_EOT); @@ -700,8 +733,14 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) dest_desc->ctrl = ctrl; dest_desc->addr = src_desc->addr; + src_map->skb = NULL; + if (bp->flags & B44_FLAG_RX_RING_HACK) + b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma, + dest_idx * sizeof(dest_desc), + DMA_BIDIRECTIONAL); + pci_dma_sync_single_for_device(bp->pdev, src_desc->addr, RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE); @@ -959,6 +998,11 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev) bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl); bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset); + if (bp->flags & B44_FLAG_TX_RING_HACK) + b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma, + entry * sizeof(bp->tx_ring[0]), + DMA_TO_DEVICE); + entry = NEXT_TX(entry); bp->tx_prod = entry; @@ -1064,6 +1108,16 @@ static void b44_init_rings(struct b44 *bp) memset(bp->rx_ring, 0, B44_RX_RING_BYTES); memset(bp->tx_ring, 0, B44_TX_RING_BYTES); + if (bp->flags & B44_FLAG_RX_RING_HACK) + dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma, + DMA_TABLE_BYTES, + PCI_DMA_BIDIRECTIONAL); + + if (bp->flags & B44_FLAG_TX_RING_HACK) + dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma, + DMA_TABLE_BYTES, + PCI_DMA_TODEVICE); + for (i = 0; i < bp->rx_pending; i++) { if (b44_alloc_rx_skb(bp, -1, i) < 0) break; @@ -1085,14 +1139,28 @@ static void b44_free_consistent(struct b44 *bp) bp->tx_buffers = NULL; } if (bp->rx_ring) { - pci_free_consistent(bp->pdev, DMA_TABLE_BYTES, - bp->rx_ring, bp->rx_ring_dma); + if (bp->flags & B44_FLAG_RX_RING_HACK) { + dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma, + DMA_TABLE_BYTES, + DMA_BIDIRECTIONAL); + kfree(bp->rx_ring); + } else + pci_free_consistent(bp->pdev, DMA_TABLE_BYTES, + bp->rx_ring, bp->rx_ring_dma); bp->rx_ring = NULL; + bp->flags &= ~B44_FLAG_RX_RING_HACK; } if (bp->tx_ring) { - pci_free_consistent(bp->pdev, DMA_TABLE_BYTES, - bp->tx_ring, bp->tx_ring_dma); + if (bp->flags & B44_FLAG_TX_RING_HACK) { + dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma, + DMA_TABLE_BYTES, + DMA_TO_DEVICE); + kfree(bp->tx_ring); + } else + pci_free_consistent(bp->pdev, DMA_TABLE_BYTES, + bp->tx_ring, bp->tx_ring_dma); bp->tx_ring = NULL; + bp->flags &= ~B44_FLAG_TX_RING_HACK; } } @@ -1118,12 +1186,56 @@ static int b44_alloc_consistent(struct b44 *bp) size = DMA_TABLE_BYTES; bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma); - if (!bp->rx_ring) - goto out_err; + if (!bp->rx_ring) { + /* Allocation may have failed due to pci_alloc_consistent + insisting on use of GFP_DMA, which is more restrictive + than necessary... */ + struct dma_desc *rx_ring; + dma_addr_t rx_ring_dma; + + if (!(rx_ring = (struct dma_desc *)kmalloc(size, GFP_KERNEL))) + goto out_err; + + memset(rx_ring, 0, size); + rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring, + DMA_TABLE_BYTES, + DMA_BIDIRECTIONAL); + + if (rx_ring_dma + size > B44_DMA_MASK) { + kfree(rx_ring); + goto out_err; + } + + bp->rx_ring = rx_ring; + bp->rx_ring_dma = rx_ring_dma; + bp->flags |= B44_FLAG_RX_RING_HACK; + } bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma); - if (!bp->tx_ring) - goto out_err; + if (!bp->tx_ring) { + /* Allocation may have failed due to pci_alloc_consistent + insisting on use of GFP_DMA, which is more restrictive + than necessary... 
*/ + struct dma_desc *tx_ring; + dma_addr_t tx_ring_dma; + + if (!(tx_ring = (struct dma_desc *)kmalloc(size, GFP_KERNEL))) + goto out_err; + + memset(tx_ring, 0, size); + tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring, + DMA_TABLE_BYTES, + DMA_TO_DEVICE); + + if (tx_ring_dma + size > B44_DMA_MASK) { + kfree(tx_ring); + goto out_err; + } + + bp->tx_ring = tx_ring; + bp->tx_ring_dma = tx_ring_dma; + bp->flags |= B44_FLAG_TX_RING_HACK; + } return 0; @@ -1676,6 +1788,7 @@ static struct ethtool_ops b44_ethtool_ops = { .set_pauseparam = b44_set_pauseparam, .get_msglevel = b44_get_msglevel, .set_msglevel = b44_set_msglevel, + .get_perm_addr = ethtool_op_get_perm_addr, }; static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) @@ -1718,6 +1831,7 @@ static int __devinit b44_get_invariants(struct b44 *bp) bp->dev->dev_addr[3] = eeprom[80]; bp->dev->dev_addr[4] = eeprom[83]; bp->dev->dev_addr[5] = eeprom[82]; + memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len); bp->phy_addr = eeprom[90] & 0x1f; @@ -1971,6 +2085,12 @@ static struct pci_driver b44_driver = { static int __init b44_init(void) { + unsigned int dma_desc_align_size = dma_get_cache_alignment(); + + /* Setup paramaters for syncing RX/TX DMA descriptors */ + dma_desc_align_mask = ~(dma_desc_align_size - 1); + dma_desc_sync_size = max(dma_desc_align_size, sizeof(struct dma_desc)); + return pci_module_init(&b44_driver); } diff --git a/drivers/net/b44.h b/drivers/net/b44.h index 11c40a2e71c7..593cb0ad4100 100644 --- a/drivers/net/b44.h +++ b/drivers/net/b44.h @@ -400,6 +400,8 @@ struct b44 { #define B44_FLAG_ADV_100HALF 0x04000000 #define B44_FLAG_ADV_100FULL 0x08000000 #define B44_FLAG_INTERNAL_PHY 0x10000000 +#define B44_FLAG_RX_RING_HACK 0x20000000 +#define B44_FLAG_TX_RING_HACK 0x40000000 u32 rx_offset; diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c index 8dc657fc8afb..60dba4a1ca5c 100644 --- a/drivers/net/bmac.c +++ b/drivers/net/bmac.c @@ -218,7 +218,7 @@ void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data ) static inline -volatile unsigned short bmread(struct net_device *dev, unsigned long reg_offset ) +unsigned short bmread(struct net_device *dev, unsigned long reg_offset ) { return in_le16((void __iomem *)dev->base_addr + reg_offset); } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 90449a0f2a6c..8032126fd589 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -487,6 +487,8 @@ * * Added xmit_hash_policy_layer34() * - Modified by Jay Vosburgh to also support mode 4. * Set version to 2.6.3. + * 2005/09/26 - Jay Vosburgh + * - Removed backwards compatibility for old ifenslaves. Version 2.6.4. */ //#define BONDING_DEBUG 1 @@ -595,14 +597,7 @@ static int arp_ip_count = 0; static int bond_mode = BOND_MODE_ROUNDROBIN; static int xmit_hashtype= BOND_XMIT_POLICY_LAYER2; static int lacp_fast = 0; -static int app_abi_ver = 0; -static int orig_app_abi_ver = -1; /* This is used to save the first ABI version - * we receive from the application. Once set, - * it won't be changed, and the module will - * refuse to enslave/release interfaces if the - * command comes from an application using - * another ABI version. 
- */ + struct bond_parm_tbl { char *modename; int mode; @@ -1294,12 +1289,13 @@ static void bond_mc_list_destroy(struct bonding *bond) /* * Copy all the Multicast addresses from src to the bonding device dst */ -static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond, int gpf_flag) +static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond, + gfp_t gfp_flag) { struct dev_mc_list *dmi, *new_dmi; for (dmi = mc_list; dmi; dmi = dmi->next) { - new_dmi = kmalloc(sizeof(struct dev_mc_list), gpf_flag); + new_dmi = kmalloc(sizeof(struct dev_mc_list), gfp_flag); if (!new_dmi) { /* FIXME: Potential memory leak !!! */ @@ -1653,7 +1649,8 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de int old_features = bond_dev->features; int res = 0; - if (slave_dev->do_ioctl == NULL) { + if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL && + slave_dev->do_ioctl == NULL) { printk(KERN_WARNING DRV_NAME ": Warning : no link monitoring support for %s\n", slave_dev->name); @@ -1701,51 +1698,29 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de } } - if (app_abi_ver >= 1) { - /* The application is using an ABI, which requires the - * slave interface to be closed. - */ - if ((slave_dev->flags & IFF_UP)) { - printk(KERN_ERR DRV_NAME - ": Error: %s is up\n", - slave_dev->name); - res = -EPERM; - goto err_undo_flags; - } + /* + * Old ifenslave binaries are no longer supported. These can + * be identified with moderate accurary by the state of the slave: + * the current ifenslave will set the interface down prior to + * enslaving it; the old ifenslave will not. + */ + if ((slave_dev->flags & IFF_UP)) { + printk(KERN_ERR DRV_NAME ": %s is up. " + "This may be due to an out of date ifenslave.\n", + slave_dev->name); + res = -EPERM; + goto err_undo_flags; + } - if (slave_dev->set_mac_address == NULL) { - printk(KERN_ERR DRV_NAME - ": Error: The slave device you specified does " - "not support setting the MAC address.\n"); - printk(KERN_ERR - "Your kernel likely does not support slave " - "devices.\n"); + if (slave_dev->set_mac_address == NULL) { + printk(KERN_ERR DRV_NAME + ": Error: The slave device you specified does " + "not support setting the MAC address.\n"); + printk(KERN_ERR + "Your kernel likely does not support slave devices.\n"); - res = -EOPNOTSUPP; - goto err_undo_flags; - } - } else { - /* The application is not using an ABI, which requires the - * slave interface to be open. 
- */ - if (!(slave_dev->flags & IFF_UP)) { - printk(KERN_ERR DRV_NAME - ": Error: %s is not running\n", - slave_dev->name); - res = -EINVAL; - goto err_undo_flags; - } - - if ((bond->params.mode == BOND_MODE_8023AD) || - (bond->params.mode == BOND_MODE_TLB) || - (bond->params.mode == BOND_MODE_ALB)) { - printk(KERN_ERR DRV_NAME - ": Error: to use %s mode, you must upgrade " - "ifenslave.\n", - bond_mode_name(bond->params.mode)); - res = -EOPNOTSUPP; - goto err_undo_flags; - } + res = -EOPNOTSUPP; + goto err_undo_flags; } new_slave = kmalloc(sizeof(struct slave), GFP_KERNEL); @@ -1761,41 +1736,36 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de */ new_slave->original_flags = slave_dev->flags; - if (app_abi_ver >= 1) { - /* save slave's original ("permanent") mac address for - * modes that needs it, and for restoring it upon release, - * and then set it to the master's address - */ - memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN); + /* + * Save slave's original ("permanent") mac address for modes + * that need it, and for restoring it upon release, and then + * set it to the master's address + */ + memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN); - /* set slave to master's mac address - * The application already set the master's - * mac address to that of the first slave - */ - memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len); - addr.sa_family = slave_dev->type; - res = dev_set_mac_address(slave_dev, &addr); - if (res) { - dprintk("Error %d calling set_mac_address\n", res); - goto err_free; - } + /* + * Set slave to master's mac address. The application already + * set the master's mac address to that of the first slave + */ + memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len); + addr.sa_family = slave_dev->type; + res = dev_set_mac_address(slave_dev, &addr); + if (res) { + dprintk("Error %d calling set_mac_address\n", res); + goto err_free; + } - /* open the slave since the application closed it */ - res = dev_open(slave_dev); - if (res) { - dprintk("Openning slave %s failed\n", slave_dev->name); - goto err_restore_mac; - } + /* open the slave since the application closed it */ + res = dev_open(slave_dev); + if (res) { + dprintk("Openning slave %s failed\n", slave_dev->name); + goto err_restore_mac; } res = netdev_set_master(slave_dev, bond_dev); if (res) { dprintk("Error %d calling netdev_set_master\n", res); - if (app_abi_ver < 1) { - goto err_free; - } else { - goto err_close; - } + goto err_close; } new_slave->dev = slave_dev; @@ -1996,39 +1966,6 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de write_unlock_bh(&bond->lock); - if (app_abi_ver < 1) { - /* - * !!! This is to support old versions of ifenslave. - * We can remove this in 2.5 because our ifenslave takes - * care of this for us. - * We check to see if the master has a mac address yet. - * If not, we'll give it the mac address of our slave device. - */ - int ndx = 0; - - for (ndx = 0; ndx < bond_dev->addr_len; ndx++) { - dprintk("Checking ndx=%d of bond_dev->dev_addr\n", - ndx); - if (bond_dev->dev_addr[ndx] != 0) { - dprintk("Found non-zero byte at ndx=%d\n", - ndx); - break; - } - } - - if (ndx == bond_dev->addr_len) { - /* - * We got all the way through the address and it was - * all 0's. - */ - dprintk("%s doesn't have a MAC address yet. 
\n", - bond_dev->name); - dprintk("Going to give assign it from %s.\n", - slave_dev->name); - bond_sethwaddr(bond_dev, slave_dev); - } - } - printk(KERN_INFO DRV_NAME ": %s: enslaving %s as a%s interface with a%s link.\n", bond_dev->name, slave_dev->name, @@ -2226,12 +2163,10 @@ static int bond_release(struct net_device *bond_dev, struct net_device *slave_de /* close slave before restoring its mac address */ dev_close(slave_dev); - if (app_abi_ver >= 1) { - /* restore original ("permanent") mac address */ - memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN); - addr.sa_family = slave_dev->type; - dev_set_mac_address(slave_dev, &addr); - } + /* restore original ("permanent") mac address */ + memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN); + addr.sa_family = slave_dev->type; + dev_set_mac_address(slave_dev, &addr); /* restore the original state of the * IFF_NOARP flag that might have been @@ -2319,12 +2254,10 @@ static int bond_release_all(struct net_device *bond_dev) /* close slave before restoring its mac address */ dev_close(slave_dev); - if (app_abi_ver >= 1) { - /* restore original ("permanent") mac address*/ - memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN); - addr.sa_family = slave_dev->type; - dev_set_mac_address(slave_dev, &addr); - } + /* restore original ("permanent") mac address*/ + memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN); + addr.sa_family = slave_dev->type; + dev_set_mac_address(slave_dev, &addr); /* restore the original state of the IFF_NOARP flag that might have * been set by bond_set_slave_inactive_flags() @@ -2422,57 +2355,6 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi return res; } -static int bond_ethtool_ioctl(struct net_device *bond_dev, struct ifreq *ifr) -{ - struct ethtool_drvinfo info; - void __user *addr = ifr->ifr_data; - uint32_t cmd; - - if (get_user(cmd, (uint32_t __user *)addr)) { - return -EFAULT; - } - - switch (cmd) { - case ETHTOOL_GDRVINFO: - if (copy_from_user(&info, addr, sizeof(info))) { - return -EFAULT; - } - - if (strcmp(info.driver, "ifenslave") == 0) { - int new_abi_ver; - char *endptr; - - new_abi_ver = simple_strtoul(info.fw_version, - &endptr, 0); - if (*endptr) { - printk(KERN_ERR DRV_NAME - ": Error: got invalid ABI " - "version from application\n"); - - return -EINVAL; - } - - if (orig_app_abi_ver == -1) { - orig_app_abi_ver = new_abi_ver; - } - - app_abi_ver = new_abi_ver; - } - - strncpy(info.driver, DRV_NAME, 32); - strncpy(info.version, DRV_VERSION, 32); - snprintf(info.fw_version, 32, "%d", BOND_ABI_VERSION); - - if (copy_to_user(addr, &info, sizeof(info))) { - return -EFAULT; - } - - return 0; - default: - return -EOPNOTSUPP; - } -} - static int bond_info_query(struct net_device *bond_dev, struct ifbond *info) { struct bonding *bond = bond_dev->priv; @@ -2775,7 +2657,7 @@ static u32 bond_glean_dev_ip(struct net_device *dev) return 0; rcu_read_lock(); - idev = __in_dev_get(dev); + idev = __in_dev_get_rcu(dev); if (!idev) goto out; @@ -3441,16 +3323,11 @@ static void bond_info_show_slave(struct seq_file *seq, const struct slave *slave seq_printf(seq, "Link Failure Count: %d\n", slave->link_failure_count); - if (app_abi_ver >= 1) { - seq_printf(seq, - "Permanent HW addr: %02x:%02x:%02x:%02x:%02x:%02x\n", - slave->perm_hwaddr[0], - slave->perm_hwaddr[1], - slave->perm_hwaddr[2], - slave->perm_hwaddr[3], - slave->perm_hwaddr[4], - slave->perm_hwaddr[5]); - } + seq_printf(seq, + "Permanent HW addr: %02x:%02x:%02x:%02x:%02x:%02x\n", + slave->perm_hwaddr[0], slave->perm_hwaddr[1], + 
slave->perm_hwaddr[2], slave->perm_hwaddr[3], + slave->perm_hwaddr[4], slave->perm_hwaddr[5]); if (bond->params.mode == BOND_MODE_8023AD) { const struct aggregator *agg @@ -4009,15 +3886,12 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd struct ifslave k_sinfo; struct ifslave __user *u_sinfo = NULL; struct mii_ioctl_data *mii = NULL; - int prev_abi_ver = orig_app_abi_ver; int res = 0; dprintk("bond_ioctl: master=%s, cmd=%d\n", bond_dev->name, cmd); switch (cmd) { - case SIOCETHTOOL: - return bond_ethtool_ioctl(bond_dev, ifr); case SIOCGMIIPHY: mii = if_mii(ifr); if (!mii) { @@ -4089,21 +3963,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd return -EPERM; } - if (orig_app_abi_ver == -1) { - /* no orig_app_abi_ver was provided yet, so we'll use the - * current one from now on, even if it's 0 - */ - orig_app_abi_ver = app_abi_ver; - - } else if (orig_app_abi_ver != app_abi_ver) { - printk(KERN_ERR DRV_NAME - ": Error: already using ifenslave ABI version %d; to " - "upgrade ifenslave to version %d, you must first " - "reload bonding.\n", - orig_app_abi_ver, app_abi_ver); - return -EINVAL; - } - slave_dev = dev_get_by_name(ifr->ifr_slave); dprintk("slave_dev=%p: \n", slave_dev); @@ -4136,14 +3995,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd dev_put(slave_dev); } - if (res < 0) { - /* The ioctl failed, so there's no point in changing the - * orig_app_abi_ver. We'll restore it's value just in case - * we've changed it earlier in this function. - */ - orig_app_abi_ver = prev_abi_ver; - } - return res; } @@ -4390,6 +4241,43 @@ out: return 0; } +static void bond_activebackup_xmit_copy(struct sk_buff *skb, + struct bonding *bond, + struct slave *slave) +{ + struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC); + struct ethhdr *eth_data; + u8 *hwaddr; + int res; + + if (!skb2) { + printk(KERN_ERR DRV_NAME ": Error: " + "bond_activebackup_xmit_copy(): skb_copy() failed\n"); + return; + } + + skb2->mac.raw = (unsigned char *)skb2->data; + eth_data = eth_hdr(skb2); + + /* Pick an appropriate source MAC address + * -- use slave's perm MAC addr, unless used by bond + * -- otherwise, borrow active slave's perm MAC addr + * since that will not be used + */ + hwaddr = slave->perm_hwaddr; + if (!memcmp(eth_data->h_source, hwaddr, ETH_ALEN)) + hwaddr = bond->curr_active_slave->perm_hwaddr; + + /* Set source MAC address appropriately */ + memcpy(eth_data->h_source, hwaddr, ETH_ALEN); + + res = bond_dev_queue_xmit(bond, skb2, slave->dev); + if (res) + dev_kfree_skb(skb2); + + return; +} + /* * in active-backup mode, we know that bond->curr_active_slave is always valid if * the bond has a usable interface. 
@@ -4406,10 +4294,26 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d goto out; } - if (bond->curr_active_slave) { /* one usable interface */ - res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev); + if (!bond->curr_active_slave) + goto out; + + /* Xmit IGMP frames on all slaves to ensure rapid fail-over + for multicast traffic on snooping switches */ + if (skb->protocol == __constant_htons(ETH_P_IP) && + skb->nh.iph->protocol == IPPROTO_IGMP) { + struct slave *slave, *active_slave; + int i; + + active_slave = bond->curr_active_slave; + bond_for_each_slave_from_to(bond, slave, i, active_slave->next, + active_slave->prev) + if (IS_UP(slave->dev) && + (slave->link == BOND_LINK_UP)) + bond_activebackup_xmit_copy(skb, bond, slave); } + res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev); + out: if (res) { /* no suitable interface, frame not sent */ @@ -4577,9 +4481,18 @@ static inline void bond_set_mode_ops(struct bonding *bond, int mode) } } +static void bond_ethtool_get_drvinfo(struct net_device *bond_dev, + struct ethtool_drvinfo *drvinfo) +{ + strncpy(drvinfo->driver, DRV_NAME, 32); + strncpy(drvinfo->version, DRV_VERSION, 32); + snprintf(drvinfo->fw_version, 32, "%d", BOND_ABI_VERSION); +} + static struct ethtool_ops bond_ethtool_ops = { .get_tx_csum = ethtool_op_get_tx_csum, .get_sg = ethtool_op_get_sg, + .get_drvinfo = bond_ethtool_get_drvinfo, }; /* diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 388196980862..bbf9da8af624 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -40,8 +40,8 @@ #include "bond_3ad.h" #include "bond_alb.h" -#define DRV_VERSION "2.6.3" -#define DRV_RELDATE "June 8, 2005" +#define DRV_VERSION "2.6.4" +#define DRV_RELDATE "September 26, 2005" #define DRV_NAME "bonding" #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c new file mode 100644 index 000000000000..50f43dbf31ae --- /dev/null +++ b/drivers/net/cassini.c @@ -0,0 +1,5237 @@ +/* cassini.c: Sun Microsystems Cassini(+) ethernet driver. + * + * Copyright (C) 2004 Sun Microsystems Inc. + * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + * + * This driver uses the sungem driver (c) David Miller + * (davem@redhat.com) as its basis. 
+ * + * The cassini chip has a number of features that distinguish it from + * the gem chip: + * 4 transmit descriptor rings that are used for either QoS (VLAN) or + * load balancing (non-VLAN mode) + * batching of multiple packets + * multiple CPU dispatching + * page-based RX descriptor engine with separate completion rings + * Gigabit support (GMII and PCS interface) + * MIF link up/down detection works + * + * RX is handled by page sized buffers that are attached as fragments to + * the skb. here's what's done: + * -- driver allocates pages at a time and keeps reference counts + * on them. + * -- the upper protocol layers assume that the header is in the skb + * itself. as a result, cassini will copy a small amount (64 bytes) + * to make them happy. + * -- driver appends the rest of the data pages as frags to skbuffs + * and increments the reference count + * -- on page reclamation, the driver swaps the page with a spare page. + * if that page is still in use, it frees its reference to that page, + * and allocates a new page for use. otherwise, it just recycles the + * the page. + * + * NOTE: cassini can parse the header. however, it's not worth it + * as long as the network stack requires a header copy. + * + * TX has 4 queues. currently these queues are used in a round-robin + * fashion for load balancing. They can also be used for QoS. for that + * to work, however, QoS information needs to be exposed down to the driver + * level so that subqueues get targetted to particular transmit rings. + * alternatively, the queues can be configured via use of the all-purpose + * ioctl. + * + * RX DATA: the rx completion ring has all the info, but the rx desc + * ring has all of the data. RX can conceivably come in under multiple + * interrupts, but the INT# assignment needs to be set up properly by + * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do + * that. also, the two descriptor rings are designed to distinguish between + * encrypted and non-encrypted packets, but we use them for buffering + * instead. + * + * by default, the selective clear mask is set up to process rx packets. + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#define cas_page_map(x) kmap_atomic((x), KM_SKB_DATA_SOFTIRQ) +#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ) +#define CAS_NCPUS num_online_cpus() + +#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL) +#define USE_NAPI +#define cas_skb_release(x) netif_receive_skb(x) +#else +#define cas_skb_release(x) netif_rx(x) +#endif + +/* select which firmware to use */ +#define USE_HP_WORKAROUND +#define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */ +#define CAS_HP_ALT_FIRMWARE cas_prog_null /* alternate firmware */ + +#include "cassini.h" + +#define USE_TX_COMPWB /* use completion writeback registers */ +#define USE_CSMA_CD_PROTO /* standard CSMA/CD */ +#define USE_RX_BLANK /* hw interrupt mitigation */ +#undef USE_ENTROPY_DEV /* don't test for entropy device */ + +/* NOTE: these aren't useable unless PCI interrupts can be assigned. + * also, we need to make cp->lock finer-grained. 
+ */ +#undef USE_PCI_INTB +#undef USE_PCI_INTC +#undef USE_PCI_INTD +#undef USE_QOS + +#undef USE_VPD_DEBUG /* debug vpd information if defined */ + +/* rx processing options */ +#define USE_PAGE_ORDER /* specify to allocate large rx pages */ +#define RX_DONT_BATCH 0 /* if 1, don't batch flows */ +#define RX_COPY_ALWAYS 0 /* if 0, use frags */ +#define RX_COPY_MIN 64 /* copy a little to make upper layers happy */ +#undef RX_COUNT_BUFFERS /* define to calculate RX buffer stats */ + +#define DRV_MODULE_NAME "cassini" +#define PFX DRV_MODULE_NAME ": " +#define DRV_MODULE_VERSION "1.4" +#define DRV_MODULE_RELDATE "1 July 2004" + +#define CAS_DEF_MSG_ENABLE \ + (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) + +/* length of time before we decide the hardware is borked, + * and dev->tx_timeout() should be called to fix the problem + */ +#define CAS_TX_TIMEOUT (HZ) +#define CAS_LINK_TIMEOUT (22*HZ/10) +#define CAS_LINK_FAST_TIMEOUT (1) + +/* timeout values for state changing. these specify the number + * of 10us delays to be used before giving up. + */ +#define STOP_TRIES_PHY 1000 +#define STOP_TRIES 5000 + +/* specify a minimum frame size to deal with some fifo issues + * max mtu == 2 * page size - ethernet header - 64 - swivel = + * 2 * page_size - 0x50 + */ +#define CAS_MIN_FRAME 97 +#define CAS_1000MB_MIN_FRAME 255 +#define CAS_MIN_MTU 60 +#define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000) + +#if 1 +/* + * Eliminate these and use separate atomic counters for each, to + * avoid a race condition. + */ +#else +#define CAS_RESET_MTU 1 +#define CAS_RESET_ALL 2 +#define CAS_RESET_SPARE 3 +#endif + +static char version[] __devinitdata = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + +MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)"); +MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver"); +MODULE_LICENSE("GPL"); +MODULE_PARM(cassini_debug, "i"); +MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value"); +MODULE_PARM(link_mode, "i"); +MODULE_PARM_DESC(link_mode, "default link mode"); + +/* + * Work around for a PCS bug in which the link goes down due to the chip + * being confused and never showing a link status of "up." + */ +#define DEFAULT_LINKDOWN_TIMEOUT 5 +/* + * Value in seconds, for user input. + */ +static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT; +MODULE_PARM(linkdown_timeout, "i"); +MODULE_PARM_DESC(linkdown_timeout, +"min reset interval in sec. for PCS linkdown issue; disabled if not positive"); + +/* + * value in 'ticks' (units used by jiffies). Set when we init the + * module because 'HZ' in actually a function call on some flavors of + * Linux. This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ. 
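/*
 * Editor's illustration -- not part of the patch.  CAS_MAX_MTU above is
 * "twice the RX page size minus 0x50 (ethernet header + 64-byte copy area
 * + swivel), capped at 9000".  This standalone snippet just evaluates that
 * expression for the page sizes the chip supports.
 */
#include <stdio.h>

static int cas_max_mtu(int page_size)
{
	int raw = (page_size << 1) - 0x50;

	return raw < 9000 ? raw : 9000;	/* same min() as CAS_MAX_MTU */
}

int main(void)
{
	int sizes[] = { 0x800, 0x1000, 0x2000, 0x4000 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("page %#x -> max MTU %d\n", sizes[i], cas_max_mtu(sizes[i]));
	return 0;
}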
+ */ +static int link_transition_timeout; + + +static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */ +static int link_mode; + +static u16 link_modes[] __devinitdata = { + BMCR_ANENABLE, /* 0 : autoneg */ + 0, /* 1 : 10bt half duplex */ + BMCR_SPEED100, /* 2 : 100bt half duplex */ + BMCR_FULLDPLX, /* 3 : 10bt full duplex */ + BMCR_SPEED100|BMCR_FULLDPLX, /* 4 : 100bt full duplex */ + CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */ +}; + +static struct pci_device_id cas_pci_tbl[] __devinitdata = { + { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, cas_pci_tbl); + +static void cas_set_link_modes(struct cas *cp); + +static inline void cas_lock_tx(struct cas *cp) +{ + int i; + + for (i = 0; i < N_TX_RINGS; i++) + spin_lock(&cp->tx_lock[i]); +} + +static inline void cas_lock_all(struct cas *cp) +{ + spin_lock_irq(&cp->lock); + cas_lock_tx(cp); +} + +/* WTZ: QA was finding deadlock problems with the previous + * versions after long test runs with multiple cards per machine. + * See if replacing cas_lock_all with safer versions helps. The + * symptoms QA is reporting match those we'd expect if interrupts + * aren't being properly restored, and we fixed a previous deadlock + * with similar symptoms by using save/restore versions in other + * places. + */ +#define cas_lock_all_save(cp, flags) \ +do { \ + struct cas *xxxcp = (cp); \ + spin_lock_irqsave(&xxxcp->lock, flags); \ + cas_lock_tx(xxxcp); \ +} while (0) + +static inline void cas_unlock_tx(struct cas *cp) +{ + int i; + + for (i = N_TX_RINGS; i > 0; i--) + spin_unlock(&cp->tx_lock[i - 1]); +} + +static inline void cas_unlock_all(struct cas *cp) +{ + cas_unlock_tx(cp); + spin_unlock_irq(&cp->lock); +} + +#define cas_unlock_all_restore(cp, flags) \ +do { \ + struct cas *xxxcp = (cp); \ + cas_unlock_tx(xxxcp); \ + spin_unlock_irqrestore(&xxxcp->lock, flags); \ +} while (0) + +static void cas_disable_irq(struct cas *cp, const int ring) +{ + /* Make sure we won't get any more interrupts */ + if (ring == 0) { + writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK); + return; + } + + /* disable completion interrupts and selectively mask */ + if (cp->cas_flags & CAS_FLAG_REG_PLUS) { + switch (ring) { +#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD) +#ifdef USE_PCI_INTB + case 1: +#endif +#ifdef USE_PCI_INTC + case 2: +#endif +#ifdef USE_PCI_INTD + case 3: +#endif + writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN, + cp->regs + REG_PLUS_INTRN_MASK(ring)); + break; +#endif + default: + writel(INTRN_MASK_CLEAR_ALL, cp->regs + + REG_PLUS_INTRN_MASK(ring)); + break; + } + } +} + +static inline void cas_mask_intr(struct cas *cp) +{ + int i; + + for (i = 0; i < N_RX_COMP_RINGS; i++) + cas_disable_irq(cp, i); +} + +static void cas_enable_irq(struct cas *cp, const int ring) +{ + if (ring == 0) { /* all but TX_DONE */ + writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK); + return; + } + + if (cp->cas_flags & CAS_FLAG_REG_PLUS) { + switch (ring) { +#if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD) +#ifdef USE_PCI_INTB + case 1: +#endif +#ifdef USE_PCI_INTC + case 2: +#endif +#ifdef USE_PCI_INTD + case 3: +#endif + writel(INTRN_MASK_RX_EN, cp->regs + + REG_PLUS_INTRN_MASK(ring)); + break; +#endif + default: + break; + } + } +} + +static inline void cas_unmask_intr(struct cas *cp) +{ + int i; + + for (i = 0; i < 
N_RX_COMP_RINGS; i++) + cas_enable_irq(cp, i); +} + +static inline void cas_entropy_gather(struct cas *cp) +{ +#ifdef USE_ENTROPY_DEV + if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0) + return; + + batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV), + readl(cp->regs + REG_ENTROPY_IV), + sizeof(uint64_t)*8); +#endif +} + +static inline void cas_entropy_reset(struct cas *cp) +{ +#ifdef USE_ENTROPY_DEV + if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0) + return; + + writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT, + cp->regs + REG_BIM_LOCAL_DEV_EN); + writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET); + writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG); + + /* if we read back 0x0, we don't have an entropy device */ + if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0) + cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV; +#endif +} + +/* access to the phy. the following assumes that we've initialized the MIF to + * be in frame rather than bit-bang mode + */ +static u16 cas_phy_read(struct cas *cp, int reg) +{ + u32 cmd; + int limit = STOP_TRIES_PHY; + + cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ; + cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr); + cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg); + cmd |= MIF_FRAME_TURN_AROUND_MSB; + writel(cmd, cp->regs + REG_MIF_FRAME); + + /* poll for completion */ + while (limit-- > 0) { + udelay(10); + cmd = readl(cp->regs + REG_MIF_FRAME); + if (cmd & MIF_FRAME_TURN_AROUND_LSB) + return (cmd & MIF_FRAME_DATA_MASK); + } + return 0xFFFF; /* -1 */ +} + +static int cas_phy_write(struct cas *cp, int reg, u16 val) +{ + int limit = STOP_TRIES_PHY; + u32 cmd; + + cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE; + cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr); + cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg); + cmd |= MIF_FRAME_TURN_AROUND_MSB; + cmd |= val & MIF_FRAME_DATA_MASK; + writel(cmd, cp->regs + REG_MIF_FRAME); + + /* poll for completion */ + while (limit-- > 0) { + udelay(10); + cmd = readl(cp->regs + REG_MIF_FRAME); + if (cmd & MIF_FRAME_TURN_AROUND_LSB) + return 0; + } + return -1; +} + +static void cas_phy_powerup(struct cas *cp) +{ + u16 ctl = cas_phy_read(cp, MII_BMCR); + + if ((ctl & BMCR_PDOWN) == 0) + return; + ctl &= ~BMCR_PDOWN; + cas_phy_write(cp, MII_BMCR, ctl); +} + +static void cas_phy_powerdown(struct cas *cp) +{ + u16 ctl = cas_phy_read(cp, MII_BMCR); + + if (ctl & BMCR_PDOWN) + return; + ctl |= BMCR_PDOWN; + cas_phy_write(cp, MII_BMCR, ctl); +} + +/* cp->lock held. note: the last put_page will free the buffer */ +static int cas_page_free(struct cas *cp, cas_page_t *page) +{ + pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size, + PCI_DMA_FROMDEVICE); + __free_pages(page->buffer, cp->page_order); + kfree(page); + return 0; +} + +#ifdef RX_COUNT_BUFFERS +#define RX_USED_ADD(x, y) ((x)->used += (y)) +#define RX_USED_SET(x, y) ((x)->used = (y)) +#else +#define RX_USED_ADD(x, y) +#define RX_USED_SET(x, y) +#endif + +/* local page allocation routines for the receive buffers. jumbo pages + * require at least 8K contiguous and 8K aligned buffers. 
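/*
 * Editor's illustration -- not part of the patch.  The PHY accessors above
 * poll the MIF frame register a bounded number of times (10us apart) and
 * return 0xFFFF -- all ones, i.e. "-1" in 16 bits -- on timeout.  The same
 * bounded-poll shape against a stubbed register read; mif_read_stub() and
 * the bit values are made up for the demo.
 */
#include <stdio.h>
#include <stdint.h>

#define TURN_AROUND_LSB	0x10000u
#define DATA_MASK	0x0ffffu

/* stand-in for readl(REG_MIF_FRAME): pretend the frame completes on poll 3 */
static uint32_t mif_read_stub(void)
{
	static int calls;

	return (++calls >= 3) ? (TURN_AROUND_LSB | 0x1234) : 0;
}

static uint16_t phy_read_sketch(void)
{
	int limit = 1000;			/* STOP_TRIES_PHY */

	while (limit-- > 0) {
		uint32_t cmd = mif_read_stub();	/* udelay(10) omitted */

		if (cmd & TURN_AROUND_LSB)
			return cmd & DATA_MASK;
	}
	return 0xFFFF;				/* timeout == -1 */
}

int main(void)
{
	printf("phy read -> 0x%04x\n", phy_read_sketch());
	return 0;
}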
+ */ +static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags) +{ + cas_page_t *page; + + page = kmalloc(sizeof(cas_page_t), flags); + if (!page) + return NULL; + + INIT_LIST_HEAD(&page->list); + RX_USED_SET(page, 0); + page->buffer = alloc_pages(flags, cp->page_order); + if (!page->buffer) + goto page_err; + page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0, + cp->page_size, PCI_DMA_FROMDEVICE); + return page; + +page_err: + kfree(page); + return NULL; +} + +/* initialize spare pool of rx buffers, but allocate during the open */ +static void cas_spare_init(struct cas *cp) +{ + spin_lock(&cp->rx_inuse_lock); + INIT_LIST_HEAD(&cp->rx_inuse_list); + spin_unlock(&cp->rx_inuse_lock); + + spin_lock(&cp->rx_spare_lock); + INIT_LIST_HEAD(&cp->rx_spare_list); + cp->rx_spares_needed = RX_SPARE_COUNT; + spin_unlock(&cp->rx_spare_lock); +} + +/* used on close. free all the spare buffers. */ +static void cas_spare_free(struct cas *cp) +{ + struct list_head list, *elem, *tmp; + + /* free spare buffers */ + INIT_LIST_HEAD(&list); + spin_lock(&cp->rx_spare_lock); + list_splice(&cp->rx_spare_list, &list); + INIT_LIST_HEAD(&cp->rx_spare_list); + spin_unlock(&cp->rx_spare_lock); + list_for_each_safe(elem, tmp, &list) { + cas_page_free(cp, list_entry(elem, cas_page_t, list)); + } + + INIT_LIST_HEAD(&list); +#if 1 + /* + * Looks like Adrian had protected this with a different + * lock than used everywhere else to manipulate this list. + */ + spin_lock(&cp->rx_inuse_lock); + list_splice(&cp->rx_inuse_list, &list); + INIT_LIST_HEAD(&cp->rx_inuse_list); + spin_unlock(&cp->rx_inuse_lock); +#else + spin_lock(&cp->rx_spare_lock); + list_splice(&cp->rx_inuse_list, &list); + INIT_LIST_HEAD(&cp->rx_inuse_list); + spin_unlock(&cp->rx_spare_lock); +#endif + list_for_each_safe(elem, tmp, &list) { + cas_page_free(cp, list_entry(elem, cas_page_t, list)); + } +} + +/* replenish spares if needed */ +static void cas_spare_recover(struct cas *cp, const gfp_t flags) +{ + struct list_head list, *elem, *tmp; + int needed, i; + + /* check inuse list. if we don't need any more free buffers, + * just free it + */ + + /* make a local copy of the list */ + INIT_LIST_HEAD(&list); + spin_lock(&cp->rx_inuse_lock); + list_splice(&cp->rx_inuse_list, &list); + INIT_LIST_HEAD(&cp->rx_inuse_list); + spin_unlock(&cp->rx_inuse_lock); + + list_for_each_safe(elem, tmp, &list) { + cas_page_t *page = list_entry(elem, cas_page_t, list); + + if (page_count(page->buffer) > 1) + continue; + + list_del(elem); + spin_lock(&cp->rx_spare_lock); + if (cp->rx_spares_needed > 0) { + list_add(elem, &cp->rx_spare_list); + cp->rx_spares_needed--; + spin_unlock(&cp->rx_spare_lock); + } else { + spin_unlock(&cp->rx_spare_lock); + cas_page_free(cp, page); + } + } + + /* put any inuse buffers back on the list */ + if (!list_empty(&list)) { + spin_lock(&cp->rx_inuse_lock); + list_splice(&list, &cp->rx_inuse_list); + spin_unlock(&cp->rx_inuse_lock); + } + + spin_lock(&cp->rx_spare_lock); + needed = cp->rx_spares_needed; + spin_unlock(&cp->rx_spare_lock); + if (!needed) + return; + + /* we still need spares, so try to allocate some */ + INIT_LIST_HEAD(&list); + i = 0; + while (i < needed) { + cas_page_t *spare = cas_page_alloc(cp, flags); + if (!spare) + break; + list_add(&spare->list, &list); + i++; + } + + spin_lock(&cp->rx_spare_lock); + list_splice(&list, &cp->rx_spare_list); + cp->rx_spares_needed -= i; + spin_unlock(&cp->rx_spare_lock); +} + +/* pull a page from the list. 
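/*
 * Editor's illustration -- not part of the patch.  The spare-pool code
 * above keys everything off the page reference count: a page whose count
 * has dropped back to 1 is owned only by the driver and can become a spare
 * (or be freed once enough spares exist); a higher count means some skb
 * still holds a fragment reference, so the page stays on the in-use list.
 * The struct and counts below are made up.
 */
#include <stdio.h>

struct fake_page { int refcount; };

enum disposition { KEEP_INUSE, MAKE_SPARE, FREE_PAGE };

static enum disposition classify(const struct fake_page *p, int spares_needed)
{
	if (p->refcount > 1)
		return KEEP_INUSE;	/* an skb frag still references it */
	return spares_needed > 0 ? MAKE_SPARE : FREE_PAGE;
}

int main(void)
{
	struct fake_page busy = { 2 }, idle = { 1 };

	printf("busy page -> %d (0 = keep in use)\n", classify(&busy, 4));
	printf("idle page -> %d (1 = recycle as spare)\n", classify(&idle, 4));
	printf("idle page -> %d (2 = free)\n", classify(&idle, 0));
	return 0;
}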
*/ +static cas_page_t *cas_page_dequeue(struct cas *cp) +{ + struct list_head *entry; + int recover; + + spin_lock(&cp->rx_spare_lock); + if (list_empty(&cp->rx_spare_list)) { + /* try to do a quick recovery */ + spin_unlock(&cp->rx_spare_lock); + cas_spare_recover(cp, GFP_ATOMIC); + spin_lock(&cp->rx_spare_lock); + if (list_empty(&cp->rx_spare_list)) { + if (netif_msg_rx_err(cp)) + printk(KERN_ERR "%s: no spare buffers " + "available.\n", cp->dev->name); + spin_unlock(&cp->rx_spare_lock); + return NULL; + } + } + + entry = cp->rx_spare_list.next; + list_del(entry); + recover = ++cp->rx_spares_needed; + spin_unlock(&cp->rx_spare_lock); + + /* trigger the timer to do the recovery */ + if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) { +#if 1 + atomic_inc(&cp->reset_task_pending); + atomic_inc(&cp->reset_task_pending_spare); + schedule_work(&cp->reset_task); +#else + atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE); + schedule_work(&cp->reset_task); +#endif + } + return list_entry(entry, cas_page_t, list); +} + + +static void cas_mif_poll(struct cas *cp, const int enable) +{ + u32 cfg; + + cfg = readl(cp->regs + REG_MIF_CFG); + cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1); + + if (cp->phy_type & CAS_PHY_MII_MDIO1) + cfg |= MIF_CFG_PHY_SELECT; + + /* poll and interrupt on link status change. */ + if (enable) { + cfg |= MIF_CFG_POLL_EN; + cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR); + cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr); + } + writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF, + cp->regs + REG_MIF_MASK); + writel(cfg, cp->regs + REG_MIF_CFG); +} + +/* Must be invoked under cp->lock */ +static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep) +{ + u16 ctl; +#if 1 + int lcntl; + int changed = 0; + int oldstate = cp->lstate; + int link_was_not_down = !(oldstate == link_down); +#endif + /* Setup link parameters */ + if (!ep) + goto start_aneg; + lcntl = cp->link_cntl; + if (ep->autoneg == AUTONEG_ENABLE) + cp->link_cntl = BMCR_ANENABLE; + else { + cp->link_cntl = 0; + if (ep->speed == SPEED_100) + cp->link_cntl |= BMCR_SPEED100; + else if (ep->speed == SPEED_1000) + cp->link_cntl |= CAS_BMCR_SPEED1000; + if (ep->duplex == DUPLEX_FULL) + cp->link_cntl |= BMCR_FULLDPLX; + } +#if 1 + changed = (lcntl != cp->link_cntl); +#endif +start_aneg: + if (cp->lstate == link_up) { + printk(KERN_INFO "%s: PCS link down.\n", + cp->dev->name); + } else { + if (changed) { + printk(KERN_INFO "%s: link configuration changed\n", + cp->dev->name); + } + } + cp->lstate = link_down; + cp->link_transition = LINK_TRANSITION_LINK_DOWN; + if (!cp->hw_running) + return; +#if 1 + /* + * WTZ: If the old state was link_up, we turn off the carrier + * to replicate everything we do elsewhere on a link-down + * event when we were already in a link-up state.. + */ + if (oldstate == link_up) + netif_carrier_off(cp->dev); + if (changed && link_was_not_down) { + /* + * WTZ: This branch will simply schedule a full reset after + * we explicitly changed link modes in an ioctl. See if this + * fixes the link-problems we were having for forced mode. 
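/*
 * Editor's illustration -- not part of the patch.  cas_begin_auto_negotiation()
 * above folds the requested ethtool settings into a BMCR-style control word:
 * autoneg wins outright, otherwise speed and duplex bits are ORed together.
 * BMCR_* below are the standard MII values; CAS_BMCR_SPEED1000 is the
 * driver-private gigabit bit and its value here is an assumption.
 */
#include <stdio.h>
#include <stdint.h>

#define BMCR_ANENABLE		0x1000
#define BMCR_SPEED100		0x2000
#define BMCR_FULLDPLX		0x0100
#define CAS_BMCR_SPEED1000	0x0040	/* assumed value for illustration */

static uint16_t link_cntl(int autoneg, int speed, int full_duplex)
{
	uint16_t ctl = 0;

	if (autoneg)
		return BMCR_ANENABLE;
	if (speed == 100)
		ctl |= BMCR_SPEED100;
	else if (speed == 1000)
		ctl |= CAS_BMCR_SPEED1000;
	if (full_duplex)
		ctl |= BMCR_FULLDPLX;
	return ctl;
}

int main(void)
{
	printf("autoneg   -> 0x%04x\n", link_cntl(1, 0, 0));
	printf("100 full  -> 0x%04x\n", link_cntl(0, 100, 1));
	printf("1000 full -> 0x%04x\n", link_cntl(0, 1000, 1));
	return 0;
}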
+ */ + atomic_inc(&cp->reset_task_pending); + atomic_inc(&cp->reset_task_pending_all); + schedule_work(&cp->reset_task); + cp->timer_ticks = 0; + mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); + return; + } +#endif + if (cp->phy_type & CAS_PHY_SERDES) { + u32 val = readl(cp->regs + REG_PCS_MII_CTRL); + + if (cp->link_cntl & BMCR_ANENABLE) { + val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN); + cp->lstate = link_aneg; + } else { + if (cp->link_cntl & BMCR_FULLDPLX) + val |= PCS_MII_CTRL_DUPLEX; + val &= ~PCS_MII_AUTONEG_EN; + cp->lstate = link_force_ok; + } + cp->link_transition = LINK_TRANSITION_LINK_CONFIG; + writel(val, cp->regs + REG_PCS_MII_CTRL); + + } else { + cas_mif_poll(cp, 0); + ctl = cas_phy_read(cp, MII_BMCR); + ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | + CAS_BMCR_SPEED1000 | BMCR_ANENABLE); + ctl |= cp->link_cntl; + if (ctl & BMCR_ANENABLE) { + ctl |= BMCR_ANRESTART; + cp->lstate = link_aneg; + } else { + cp->lstate = link_force_ok; + } + cp->link_transition = LINK_TRANSITION_LINK_CONFIG; + cas_phy_write(cp, MII_BMCR, ctl); + cas_mif_poll(cp, 1); + } + + cp->timer_ticks = 0; + mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); +} + +/* Must be invoked under cp->lock. */ +static int cas_reset_mii_phy(struct cas *cp) +{ + int limit = STOP_TRIES_PHY; + u16 val; + + cas_phy_write(cp, MII_BMCR, BMCR_RESET); + udelay(100); + while (limit--) { + val = cas_phy_read(cp, MII_BMCR); + if ((val & BMCR_RESET) == 0) + break; + udelay(10); + } + return (limit <= 0); +} + +static void cas_saturn_firmware_load(struct cas *cp) +{ + cas_saturn_patch_t *patch = cas_saturn_patch; + + cas_phy_powerdown(cp); + + /* expanded memory access mode */ + cas_phy_write(cp, DP83065_MII_MEM, 0x0); + + /* pointer configuration for new firmware */ + cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9); + cas_phy_write(cp, DP83065_MII_REGD, 0xbd); + cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa); + cas_phy_write(cp, DP83065_MII_REGD, 0x82); + cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb); + cas_phy_write(cp, DP83065_MII_REGD, 0x0); + cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc); + cas_phy_write(cp, DP83065_MII_REGD, 0x39); + + /* download new firmware */ + cas_phy_write(cp, DP83065_MII_MEM, 0x1); + cas_phy_write(cp, DP83065_MII_REGE, patch->addr); + while (patch->addr) { + cas_phy_write(cp, DP83065_MII_REGD, patch->val); + patch++; + } + + /* enable firmware */ + cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8); + cas_phy_write(cp, DP83065_MII_REGD, 0x1); +} + + +/* phy initialization */ +static void cas_phy_init(struct cas *cp) +{ + u16 val; + + /* if we're in MII/GMII mode, set up phy */ + if (CAS_PHY_MII(cp->phy_type)) { + writel(PCS_DATAPATH_MODE_MII, + cp->regs + REG_PCS_DATAPATH_MODE); + + cas_mif_poll(cp, 0); + cas_reset_mii_phy(cp); /* take out of isolate mode */ + + if (PHY_LUCENT_B0 == cp->phy_id) { + /* workaround link up/down issue with lucent */ + cas_phy_write(cp, LUCENT_MII_REG, 0x8000); + cas_phy_write(cp, MII_BMCR, 0x00f1); + cas_phy_write(cp, LUCENT_MII_REG, 0x0); + + } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) { + /* workarounds for broadcom phy */ + cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20); + cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012); + cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804); + cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013); + cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204); + cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006); + cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132); + cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006); + cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232); + 
cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F); + cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20); + + } else if (PHY_BROADCOM_5411 == cp->phy_id) { + val = cas_phy_read(cp, BROADCOM_MII_REG4); + val = cas_phy_read(cp, BROADCOM_MII_REG4); + if (val & 0x0080) { + /* link workaround */ + cas_phy_write(cp, BROADCOM_MII_REG4, + val & ~0x0080); + } + + } else if (cp->cas_flags & CAS_FLAG_SATURN) { + writel((cp->phy_type & CAS_PHY_MII_MDIO0) ? + SATURN_PCFG_FSI : 0x0, + cp->regs + REG_SATURN_PCFG); + + /* load firmware to address 10Mbps auto-negotiation + * issue. NOTE: this will need to be changed if the + * default firmware gets fixed. + */ + if (PHY_NS_DP83065 == cp->phy_id) { + cas_saturn_firmware_load(cp); + } + cas_phy_powerup(cp); + } + + /* advertise capabilities */ + val = cas_phy_read(cp, MII_BMCR); + val &= ~BMCR_ANENABLE; + cas_phy_write(cp, MII_BMCR, val); + udelay(10); + + cas_phy_write(cp, MII_ADVERTISE, + cas_phy_read(cp, MII_ADVERTISE) | + (ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL | + CAS_ADVERTISE_PAUSE | + CAS_ADVERTISE_ASYM_PAUSE)); + + if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { + /* make sure that we don't advertise half + * duplex to avoid a chip issue + */ + val = cas_phy_read(cp, CAS_MII_1000_CTRL); + val &= ~CAS_ADVERTISE_1000HALF; + val |= CAS_ADVERTISE_1000FULL; + cas_phy_write(cp, CAS_MII_1000_CTRL, val); + } + + } else { + /* reset pcs for serdes */ + u32 val; + int limit; + + writel(PCS_DATAPATH_MODE_SERDES, + cp->regs + REG_PCS_DATAPATH_MODE); + + /* enable serdes pins on saturn */ + if (cp->cas_flags & CAS_FLAG_SATURN) + writel(0, cp->regs + REG_SATURN_PCFG); + + /* Reset PCS unit. */ + val = readl(cp->regs + REG_PCS_MII_CTRL); + val |= PCS_MII_RESET; + writel(val, cp->regs + REG_PCS_MII_CTRL); + + limit = STOP_TRIES; + while (limit-- > 0) { + udelay(10); + if ((readl(cp->regs + REG_PCS_MII_CTRL) & + PCS_MII_RESET) == 0) + break; + } + if (limit <= 0) + printk(KERN_WARNING "%s: PCS reset bit would not " + "clear [%08x].\n", cp->dev->name, + readl(cp->regs + REG_PCS_STATE_MACHINE)); + + /* Make sure PCS is disabled while changing advertisement + * configuration. + */ + writel(0x0, cp->regs + REG_PCS_CFG); + + /* Advertise all capabilities except half-duplex. */ + val = readl(cp->regs + REG_PCS_MII_ADVERT); + val &= ~PCS_MII_ADVERT_HD; + val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE | + PCS_MII_ADVERT_ASYM_PAUSE); + writel(val, cp->regs + REG_PCS_MII_ADVERT); + + /* enable PCS */ + writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG); + + /* pcs workaround: enable sync detect */ + writel(PCS_SERDES_CTRL_SYNCD_EN, + cp->regs + REG_PCS_SERDES_CTRL); + } +} + + +static int cas_pcs_link_check(struct cas *cp) +{ + u32 stat, state_machine; + int retval = 0; + + /* The link status bit latches on zero, so you must + * read it twice in such a case to see a transition + * to the link being up. + */ + stat = readl(cp->regs + REG_PCS_MII_STATUS); + if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0) + stat = readl(cp->regs + REG_PCS_MII_STATUS); + + /* The remote-fault indication is only valid + * when autoneg has completed. + */ + if ((stat & (PCS_MII_STATUS_AUTONEG_COMP | + PCS_MII_STATUS_REMOTE_FAULT)) == + (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) { + if (netif_msg_link(cp)) + printk(KERN_INFO "%s: PCS RemoteFault\n", + cp->dev->name); + } + + /* work around link detection issue by querying the PCS state + * machine directly. 
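/*
 * Editor's illustration -- not part of the patch.  The PCS status check
 * above relies on the MII convention that the link-status bit latches low:
 * the first read clears a latched "link went down" event, so a second read
 * is needed to see whether the link is actually up now.  status_read_stub()
 * and the bit value are made up.
 */
#include <stdio.h>
#include <stdint.h>

#define LINK_STATUS 0x0004u

/* stub: first read reports the latched down event, later reads report up */
static uint32_t status_read_stub(void)
{
	static int reads;

	return (reads++ == 0) ? 0 : LINK_STATUS;
}

static int link_is_up(void)
{
	uint32_t stat = status_read_stub();

	if (!(stat & LINK_STATUS))	/* re-read, as in cas_pcs_link_check() */
		stat = status_read_stub();
	return !!(stat & LINK_STATUS);
}

int main(void)
{
	printf("link up? %d\n", link_is_up());
	return 0;
}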
+ */ + state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE); + if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) { + stat &= ~PCS_MII_STATUS_LINK_STATUS; + } else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) { + stat |= PCS_MII_STATUS_LINK_STATUS; + } + + if (stat & PCS_MII_STATUS_LINK_STATUS) { + if (cp->lstate != link_up) { + if (cp->opened) { + cp->lstate = link_up; + cp->link_transition = LINK_TRANSITION_LINK_UP; + + cas_set_link_modes(cp); + netif_carrier_on(cp->dev); + } + } + } else if (cp->lstate == link_up) { + cp->lstate = link_down; + if (link_transition_timeout != 0 && + cp->link_transition != LINK_TRANSITION_REQUESTED_RESET && + !cp->link_transition_jiffies_valid) { + /* + * force a reset, as a workaround for the + * link-failure problem. May want to move this to a + * point a bit earlier in the sequence. If we had + * generated a reset a short time ago, we'll wait for + * the link timer to check the status until a + * timer expires (link_transistion_jiffies_valid is + * true when the timer is running.) Instead of using + * a system timer, we just do a check whenever the + * link timer is running - this clears the flag after + * a suitable delay. + */ + retval = 1; + cp->link_transition = LINK_TRANSITION_REQUESTED_RESET; + cp->link_transition_jiffies = jiffies; + cp->link_transition_jiffies_valid = 1; + } else { + cp->link_transition = LINK_TRANSITION_ON_FAILURE; + } + netif_carrier_off(cp->dev); + if (cp->opened && netif_msg_link(cp)) { + printk(KERN_INFO "%s: PCS link down.\n", + cp->dev->name); + } + + /* Cassini only: if you force a mode, there can be + * sync problems on link down. to fix that, the following + * things need to be checked: + * 1) read serialink state register + * 2) read pcs status register to verify link down. + * 3) if link down and serial link == 0x03, then you need + * to global reset the chip. + */ + if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) { + /* should check to see if we're in a forced mode */ + stat = readl(cp->regs + REG_PCS_SERDES_STATE); + if (stat == 0x03) + return 1; + } + } else if (cp->lstate == link_down) { + if (link_transition_timeout != 0 && + cp->link_transition != LINK_TRANSITION_REQUESTED_RESET && + !cp->link_transition_jiffies_valid) { + /* force a reset, as a workaround for the + * link-failure problem. May want to move + * this to a point a bit earlier in the + * sequence. + */ + retval = 1; + cp->link_transition = LINK_TRANSITION_REQUESTED_RESET; + cp->link_transition_jiffies = jiffies; + cp->link_transition_jiffies_valid = 1; + } else { + cp->link_transition = LINK_TRANSITION_STILL_FAILED; + } + } + + return retval; +} + +static int cas_pcs_interrupt(struct net_device *dev, + struct cas *cp, u32 status) +{ + u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS); + + if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0) + return 0; + return cas_pcs_link_check(cp); +} + +static int cas_txmac_interrupt(struct net_device *dev, + struct cas *cp, u32 status) +{ + u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS); + + if (!txmac_stat) + return 0; + + if (netif_msg_intr(cp)) + printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n", + cp->dev->name, txmac_stat); + + /* Defer timer expiration is quite normal, + * don't even log the event. 
+ */ + if ((txmac_stat & MAC_TX_DEFER_TIMER) && + !(txmac_stat & ~MAC_TX_DEFER_TIMER)) + return 0; + + spin_lock(&cp->stat_lock[0]); + if (txmac_stat & MAC_TX_UNDERRUN) { + printk(KERN_ERR "%s: TX MAC xmit underrun.\n", + dev->name); + cp->net_stats[0].tx_fifo_errors++; + } + + if (txmac_stat & MAC_TX_MAX_PACKET_ERR) { + printk(KERN_ERR "%s: TX MAC max packet size error.\n", + dev->name); + cp->net_stats[0].tx_errors++; + } + + /* The rest are all cases of one of the 16-bit TX + * counters expiring. + */ + if (txmac_stat & MAC_TX_COLL_NORMAL) + cp->net_stats[0].collisions += 0x10000; + + if (txmac_stat & MAC_TX_COLL_EXCESS) { + cp->net_stats[0].tx_aborted_errors += 0x10000; + cp->net_stats[0].collisions += 0x10000; + } + + if (txmac_stat & MAC_TX_COLL_LATE) { + cp->net_stats[0].tx_aborted_errors += 0x10000; + cp->net_stats[0].collisions += 0x10000; + } + spin_unlock(&cp->stat_lock[0]); + + /* We do not keep track of MAC_TX_COLL_FIRST and + * MAC_TX_PEAK_ATTEMPTS events. + */ + return 0; +} + +static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware) +{ + cas_hp_inst_t *inst; + u32 val; + int i; + + i = 0; + while ((inst = firmware) && inst->note) { + writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR); + + val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val); + val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask); + writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI); + + val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10); + val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop); + val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext); + val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff); + val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext); + val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff); + val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op); + writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID); + + val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask); + val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift); + val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab); + val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg); + writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW); + ++firmware; + ++i; + } +} + +static void cas_init_rx_dma(struct cas *cp) +{ + u64 desc_dma = cp->block_dvma; + u32 val; + int i, size; + + /* rx free descriptors */ + val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL); + val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0)); + val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0)); + if ((N_RX_DESC_RINGS > 1) && + (cp->cas_flags & CAS_FLAG_REG_PLUS)) /* do desc 2 */ + val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1)); + writel(val, cp->regs + REG_RX_CFG); + + val = (unsigned long) cp->init_rxds[0] - + (unsigned long) cp->init_block; + writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI); + writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW); + writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK); + + if (cp->cas_flags & CAS_FLAG_REG_PLUS) { + /* rx desc 2 is for IPSEC packets. however, + * we don't it that for that purpose. 
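/*
 * Editor's illustration -- not part of the patch.  The TX/RX MAC interrupt
 * handlers above treat several status bits as "a 16-bit hardware counter
 * wrapped", so each such event adds 0x10000 to the software statistic
 * instead of incrementing by one.  The accumulation, reduced to arithmetic:
 */
#include <stdio.h>

int main(void)
{
	unsigned long collisions = 0;
	int wrap_events = 3;		/* e.g. MAC_TX_COLL_NORMAL raised 3 times */

	while (wrap_events--)
		collisions += 0x10000;	/* one full 16-bit counter per event */

	printf("collisions accumulated: %lu\n", collisions);
	return 0;
}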
+ */ + val = (unsigned long) cp->init_rxds[1] - + (unsigned long) cp->init_block; + writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI); + writel((desc_dma + val) & 0xffffffff, cp->regs + + REG_PLUS_RX_DB1_LOW); + writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs + + REG_PLUS_RX_KICK1); + } + + /* rx completion registers */ + val = (unsigned long) cp->init_rxcs[0] - + (unsigned long) cp->init_block; + writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI); + writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW); + + if (cp->cas_flags & CAS_FLAG_REG_PLUS) { + /* rx comp 2-4 */ + for (i = 1; i < MAX_RX_COMP_RINGS; i++) { + val = (unsigned long) cp->init_rxcs[i] - + (unsigned long) cp->init_block; + writel((desc_dma + val) >> 32, cp->regs + + REG_PLUS_RX_CBN_HI(i)); + writel((desc_dma + val) & 0xffffffff, cp->regs + + REG_PLUS_RX_CBN_LOW(i)); + } + } + + /* read selective clear regs to prevent spurious interrupts + * on reset because complete == kick. + * selective clear set up to prevent interrupts on resets + */ + readl(cp->regs + REG_INTR_STATUS_ALIAS); + writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR); + if (cp->cas_flags & CAS_FLAG_REG_PLUS) { + for (i = 1; i < N_RX_COMP_RINGS; i++) + readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i)); + + /* 2 is different from 3 and 4 */ + if (N_RX_COMP_RINGS > 1) + writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1, + cp->regs + REG_PLUS_ALIASN_CLEAR(1)); + + for (i = 2; i < N_RX_COMP_RINGS; i++) + writel(INTR_RX_DONE_ALT, + cp->regs + REG_PLUS_ALIASN_CLEAR(i)); + } + + /* set up pause thresholds */ + val = CAS_BASE(RX_PAUSE_THRESH_OFF, + cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM); + val |= CAS_BASE(RX_PAUSE_THRESH_ON, + cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM); + writel(val, cp->regs + REG_RX_PAUSE_THRESH); + + /* zero out dma reassembly buffers */ + for (i = 0; i < 64; i++) { + writel(i, cp->regs + REG_RX_TABLE_ADDR); + writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW); + writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID); + writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI); + } + + /* make sure address register is 0 for normal operation */ + writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR); + writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR); + + /* interrupt mitigation */ +#ifdef USE_RX_BLANK + val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL); + val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL); + writel(val, cp->regs + REG_RX_BLANK); +#else + writel(0x0, cp->regs + REG_RX_BLANK); +#endif + + /* interrupt generation as a function of low water marks for + * free desc and completion entries. these are used to trigger + * housekeeping for rx descs. we don't use the free interrupt + * as it's not very useful + */ + /* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */ + val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL); + writel(val, cp->regs + REG_RX_AE_THRESH); + if (cp->cas_flags & CAS_FLAG_REG_PLUS) { + val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1)); + writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH); + } + + /* Random early detect registers. useful for congestion avoidance. + * this should be tunable. + */ + writel(0x0, cp->regs + REG_RX_RED); + + /* receive page sizes. default == 2K (0x800) */ + val = 0; + if (cp->page_size == 0x1000) + val = 0x1; + else if (cp->page_size == 0x2000) + val = 0x2; + else if (cp->page_size == 0x4000) + val = 0x3; + + /* round mtu + offset. constrain to page size. 
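/*
 * Editor's illustration -- not part of the patch.  The page-size setup that
 * follows rounds "MTU + 64-byte offset" up to one of four strides (1K, 2K,
 * 4K or 8K), constrained to the RX page size; the stride determines how
 * many MTU-sized buffers the chip carves out of each page.  The bucketing
 * as a standalone function:
 */
#include <stdio.h>

static int mtu_stride(int mtu, int page_size)
{
	int size = mtu + 64;		/* mtu + offset, as in the code below */
	int i;

	if (size > page_size)
		size = page_size;	/* constrain to page size */

	if (size <= 0x400)
		i = 0;
	else if (size <= 0x800)
		i = 1;
	else if (size <= 0x1000)
		i = 2;
	else
		i = 3;
	return 1 << (i + 10);
}

int main(void)
{
	printf("mtu 1500, 8K page -> stride %d\n", mtu_stride(1500, 0x2000));
	printf("mtu 9000, 8K page -> stride %d\n", mtu_stride(9000, 0x2000));
	return 0;
}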
*/ + size = cp->dev->mtu + 64; + if (size > cp->page_size) + size = cp->page_size; + + if (size <= 0x400) + i = 0x0; + else if (size <= 0x800) + i = 0x1; + else if (size <= 0x1000) + i = 0x2; + else + i = 0x3; + + cp->mtu_stride = 1 << (i + 10); + val = CAS_BASE(RX_PAGE_SIZE, val); + val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i); + val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10)); + val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1); + writel(val, cp->regs + REG_RX_PAGE_SIZE); + + /* enable the header parser if desired */ + if (CAS_HP_FIRMWARE == cas_prog_null) + return; + + val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS); + val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK; + val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL); + writel(val, cp->regs + REG_HP_CFG); +} + +static inline void cas_rxc_init(struct cas_rx_comp *rxc) +{ + memset(rxc, 0, sizeof(*rxc)); + rxc->word4 = cpu_to_le64(RX_COMP4_ZERO); +} + +/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1] + * flipping is protected by the fact that the chip will not + * hand back the same page index while it's being processed. + */ +static inline cas_page_t *cas_page_spare(struct cas *cp, const int index) +{ + cas_page_t *page = cp->rx_pages[1][index]; + cas_page_t *new; + + if (page_count(page->buffer) == 1) + return page; + + new = cas_page_dequeue(cp); + if (new) { + spin_lock(&cp->rx_inuse_lock); + list_add(&page->list, &cp->rx_inuse_list); + spin_unlock(&cp->rx_inuse_lock); + } + return new; +} + +/* this needs to be changed if we actually use the ENC RX DESC ring */ +static cas_page_t *cas_page_swap(struct cas *cp, const int ring, + const int index) +{ + cas_page_t **page0 = cp->rx_pages[0]; + cas_page_t **page1 = cp->rx_pages[1]; + + /* swap if buffer is in use */ + if (page_count(page0[index]->buffer) > 1) { + cas_page_t *new = cas_page_spare(cp, index); + if (new) { + page1[index] = page0[index]; + page0[index] = new; + } + } + RX_USED_SET(page0[index], 0); + return page0[index]; +} + +static void cas_clean_rxds(struct cas *cp) +{ + /* only clean ring 0 as ring 1 is used for spare buffers */ + struct cas_rx_desc *rxd = cp->init_rxds[0]; + int i, size; + + /* release all rx flows */ + for (i = 0; i < N_RX_FLOWS; i++) { + struct sk_buff *skb; + while ((skb = __skb_dequeue(&cp->rx_flows[i]))) { + cas_skb_release(skb); + } + } + + /* initialize descriptors */ + size = RX_DESC_RINGN_SIZE(0); + for (i = 0; i < size; i++) { + cas_page_t *page = cas_page_swap(cp, 0, i); + rxd[i].buffer = cpu_to_le64(page->dma_addr); + rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) | + CAS_BASE(RX_INDEX_RING, 0)); + } + + cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4; + cp->rx_last[0] = 0; + cp->cas_flags &= ~CAS_FLAG_RXD_POST(0); +} + +static void cas_clean_rxcs(struct cas *cp) +{ + int i, j; + + /* take ownership of rx comp descriptors */ + memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS); + memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS); + for (i = 0; i < N_RX_COMP_RINGS; i++) { + struct cas_rx_comp *rxc = cp->init_rxcs[i]; + for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) { + cas_rxc_init(rxc + j); + } + } +} + +#if 0 +/* When we get a RX fifo overflow, the RX unit is probably hung + * so we do the following. + * + * If any part of the reset goes wrong, we return 1 and that causes the + * whole chip to be reset. + */ +static int cas_rxmac_reset(struct cas *cp) +{ + struct net_device *dev = cp->dev; + int limit; + u32 val; + + /* First, reset MAC RX. 
*/ + writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); + for (limit = 0; limit < STOP_TRIES; limit++) { + if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN)) + break; + udelay(10); + } + if (limit == STOP_TRIES) { + printk(KERN_ERR "%s: RX MAC will not disable, resetting whole " + "chip.\n", dev->name); + return 1; + } + + /* Second, disable RX DMA. */ + writel(0, cp->regs + REG_RX_CFG); + for (limit = 0; limit < STOP_TRIES; limit++) { + if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN)) + break; + udelay(10); + } + if (limit == STOP_TRIES) { + printk(KERN_ERR "%s: RX DMA will not disable, resetting whole " + "chip.\n", dev->name); + return 1; + } + + mdelay(5); + + /* Execute RX reset command. */ + writel(SW_RESET_RX, cp->regs + REG_SW_RESET); + for (limit = 0; limit < STOP_TRIES; limit++) { + if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX)) + break; + udelay(10); + } + if (limit == STOP_TRIES) { + printk(KERN_ERR "%s: RX reset command will not execute, " + "resetting whole chip.\n", dev->name); + return 1; + } + + /* reset driver rx state */ + cas_clean_rxds(cp); + cas_clean_rxcs(cp); + + /* Now, reprogram the rest of RX unit. */ + cas_init_rx_dma(cp); + + /* re-enable */ + val = readl(cp->regs + REG_RX_CFG); + writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG); + writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK); + val = readl(cp->regs + REG_MAC_RX_CFG); + writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); + return 0; +} +#endif + +static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp, + u32 status) +{ + u32 stat = readl(cp->regs + REG_MAC_RX_STATUS); + + if (!stat) + return 0; + + if (netif_msg_intr(cp)) + printk(KERN_DEBUG "%s: rxmac interrupt, stat: 0x%x\n", + cp->dev->name, stat); + + /* these are all rollovers */ + spin_lock(&cp->stat_lock[0]); + if (stat & MAC_RX_ALIGN_ERR) + cp->net_stats[0].rx_frame_errors += 0x10000; + + if (stat & MAC_RX_CRC_ERR) + cp->net_stats[0].rx_crc_errors += 0x10000; + + if (stat & MAC_RX_LEN_ERR) + cp->net_stats[0].rx_length_errors += 0x10000; + + if (stat & MAC_RX_OVERFLOW) { + cp->net_stats[0].rx_over_errors++; + cp->net_stats[0].rx_fifo_errors++; + } + + /* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR + * events. + */ + spin_unlock(&cp->stat_lock[0]); + return 0; +} + +static int cas_mac_interrupt(struct net_device *dev, struct cas *cp, + u32 status) +{ + u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS); + + if (!stat) + return 0; + + if (netif_msg_intr(cp)) + printk(KERN_DEBUG "%s: mac interrupt, stat: 0x%x\n", + cp->dev->name, stat); + + /* This interrupt is just for pause frame and pause + * tracking. It is useful for diagnostics and debug + * but probably by default we will mask these events. + */ + if (stat & MAC_CTRL_PAUSE_STATE) + cp->pause_entered++; + + if (stat & MAC_CTRL_PAUSE_RECEIVED) + cp->pause_last_time_recvd = (stat >> 16); + + return 0; +} + + +/* Must be invoked under cp->lock. */ +static inline int cas_mdio_link_not_up(struct cas *cp) +{ + u16 val; + + switch (cp->lstate) { + case link_force_ret: + if (netif_msg_link(cp)) + printk(KERN_INFO "%s: Autoneg failed again, keeping" + " forced mode\n", cp->dev->name); + cas_phy_write(cp, MII_BMCR, cp->link_fcntl); + cp->timer_ticks = 5; + cp->lstate = link_force_ok; + cp->link_transition = LINK_TRANSITION_LINK_CONFIG; + break; + + case link_aneg: + val = cas_phy_read(cp, MII_BMCR); + + /* Try forced modes. 
we try things in the following order: + * 1000 full -> 100 full/half -> 10 half + */ + val &= ~(BMCR_ANRESTART | BMCR_ANENABLE); + val |= BMCR_FULLDPLX; + val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? + CAS_BMCR_SPEED1000 : BMCR_SPEED100; + cas_phy_write(cp, MII_BMCR, val); + cp->timer_ticks = 5; + cp->lstate = link_force_try; + cp->link_transition = LINK_TRANSITION_LINK_CONFIG; + break; + + case link_force_try: + /* Downgrade from 1000 to 100 to 10 Mbps if necessary. */ + val = cas_phy_read(cp, MII_BMCR); + cp->timer_ticks = 5; + if (val & CAS_BMCR_SPEED1000) { /* gigabit */ + val &= ~CAS_BMCR_SPEED1000; + val |= (BMCR_SPEED100 | BMCR_FULLDPLX); + cas_phy_write(cp, MII_BMCR, val); + break; + } + + if (val & BMCR_SPEED100) { + if (val & BMCR_FULLDPLX) /* fd failed */ + val &= ~BMCR_FULLDPLX; + else { /* 100Mbps failed */ + val &= ~BMCR_SPEED100; + } + cas_phy_write(cp, MII_BMCR, val); + break; + } + default: + break; + } + return 0; +} + + +/* must be invoked with cp->lock held */ +static int cas_mii_link_check(struct cas *cp, const u16 bmsr) +{ + int restart; + + if (bmsr & BMSR_LSTATUS) { + /* Ok, here we got a link. If we had it due to a forced + * fallback, and we were configured for autoneg, we + * retry a short autoneg pass. If you know your hub is + * broken, use ethtool ;) + */ + if ((cp->lstate == link_force_try) && + (cp->link_cntl & BMCR_ANENABLE)) { + cp->lstate = link_force_ret; + cp->link_transition = LINK_TRANSITION_LINK_CONFIG; + cas_mif_poll(cp, 0); + cp->link_fcntl = cas_phy_read(cp, MII_BMCR); + cp->timer_ticks = 5; + if (cp->opened && netif_msg_link(cp)) + printk(KERN_INFO "%s: Got link after fallback, retrying" + " autoneg once...\n", cp->dev->name); + cas_phy_write(cp, MII_BMCR, + cp->link_fcntl | BMCR_ANENABLE | + BMCR_ANRESTART); + cas_mif_poll(cp, 1); + + } else if (cp->lstate != link_up) { + cp->lstate = link_up; + cp->link_transition = LINK_TRANSITION_LINK_UP; + + if (cp->opened) { + cas_set_link_modes(cp); + netif_carrier_on(cp->dev); + } + } + return 0; + } + + /* link not up. if the link was previously up, we restart the + * whole process + */ + restart = 0; + if (cp->lstate == link_up) { + cp->lstate = link_down; + cp->link_transition = LINK_TRANSITION_LINK_DOWN; + + netif_carrier_off(cp->dev); + if (cp->opened && netif_msg_link(cp)) + printk(KERN_INFO "%s: Link down\n", + cp->dev->name); + restart = 1; + + } else if (++cp->timer_ticks > 10) + cas_mdio_link_not_up(cp); + + return restart; +} + +static int cas_mif_interrupt(struct net_device *dev, struct cas *cp, + u32 status) +{ + u32 stat = readl(cp->regs + REG_MIF_STATUS); + u16 bmsr; + + /* check for a link change */ + if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0) + return 0; + + bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat); + return cas_mii_link_check(cp, bmsr); +} + +static int cas_pci_interrupt(struct net_device *dev, struct cas *cp, + u32 status) +{ + u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS); + + if (!stat) + return 0; + + printk(KERN_ERR "%s: PCI error [%04x:%04x] ", dev->name, stat, + readl(cp->regs + REG_BIM_DIAG)); + + /* cassini+ has this reserved */ + if ((stat & PCI_ERR_BADACK) && + ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0)) + printk(" "); + + if (stat & PCI_ERR_DTRTO) + printk(" "); + if (stat & PCI_ERR_OTHER) + printk(" "); + if (stat & PCI_ERR_BIM_DMA_WRITE) + printk(" "); + if (stat & PCI_ERR_BIM_DMA_READ) + printk(" "); + printk("\n"); + + if (stat & PCI_ERR_OTHER) { + u16 cfg; + + /* Interrogate PCI config space for the + * true cause. 
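/*
 * Editor's illustration -- not part of the patch.  The link_force_try case
 * in cas_mdio_link_not_up() above walks a fixed downgrade ladder when a
 * forced mode fails to bring the link up: 1000-full -> 100-full ->
 * 100-half -> 10-half.  Expressed as a pure function on BMCR-style bits
 * (standard MII values; CAS_BMCR_SPEED1000 is assumed for illustration):
 */
#include <stdio.h>
#include <stdint.h>

#define BMCR_SPEED100		0x2000
#define BMCR_FULLDPLX		0x0100
#define CAS_BMCR_SPEED1000	0x0040	/* assumed driver-private gigabit bit */

static uint16_t downgrade(uint16_t val)
{
	if (val & CAS_BMCR_SPEED1000)			/* gigabit -> 100 full */
		return (uint16_t)((val & ~CAS_BMCR_SPEED1000) |
				  BMCR_SPEED100 | BMCR_FULLDPLX);
	if (val & BMCR_SPEED100) {
		if (val & BMCR_FULLDPLX)		/* 100 full -> 100 half */
			return val & ~BMCR_FULLDPLX;
		return val & ~BMCR_SPEED100;		/* 100 half -> 10 half */
	}
	return val;					/* already at 10 half */
}

int main(void)
{
	uint16_t v = CAS_BMCR_SPEED1000 | BMCR_FULLDPLX;

	while (v) {
		printf("bmcr 0x%04x\n", v);
		v = downgrade(v);
	}
	printf("bmcr 0x%04x (10 half)\n", v);
	return 0;
}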
+ */ + pci_read_config_word(cp->pdev, PCI_STATUS, &cfg); + printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n", + dev->name, cfg); + if (cfg & PCI_STATUS_PARITY) + printk(KERN_ERR "%s: PCI parity error detected.\n", + dev->name); + if (cfg & PCI_STATUS_SIG_TARGET_ABORT) + printk(KERN_ERR "%s: PCI target abort.\n", + dev->name); + if (cfg & PCI_STATUS_REC_TARGET_ABORT) + printk(KERN_ERR "%s: PCI master acks target abort.\n", + dev->name); + if (cfg & PCI_STATUS_REC_MASTER_ABORT) + printk(KERN_ERR "%s: PCI master abort.\n", dev->name); + if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR) + printk(KERN_ERR "%s: PCI system error SERR#.\n", + dev->name); + if (cfg & PCI_STATUS_DETECTED_PARITY) + printk(KERN_ERR "%s: PCI parity error.\n", + dev->name); + + /* Write the error bits back to clear them. */ + cfg &= (PCI_STATUS_PARITY | + PCI_STATUS_SIG_TARGET_ABORT | + PCI_STATUS_REC_TARGET_ABORT | + PCI_STATUS_REC_MASTER_ABORT | + PCI_STATUS_SIG_SYSTEM_ERROR | + PCI_STATUS_DETECTED_PARITY); + pci_write_config_word(cp->pdev, PCI_STATUS, cfg); + } + + /* For all PCI errors, we should reset the chip. */ + return 1; +} + +/* All non-normal interrupt conditions get serviced here. + * Returns non-zero if we should just exit the interrupt + * handler right now (ie. if we reset the card which invalidates + * all of the other original irq status bits). + */ +static int cas_abnormal_irq(struct net_device *dev, struct cas *cp, + u32 status) +{ + if (status & INTR_RX_TAG_ERROR) { + /* corrupt RX tag framing */ + if (netif_msg_rx_err(cp)) + printk(KERN_DEBUG "%s: corrupt rx tag framing\n", + cp->dev->name); + spin_lock(&cp->stat_lock[0]); + cp->net_stats[0].rx_errors++; + spin_unlock(&cp->stat_lock[0]); + goto do_reset; + } + + if (status & INTR_RX_LEN_MISMATCH) { + /* length mismatch. */ + if (netif_msg_rx_err(cp)) + printk(KERN_DEBUG "%s: length mismatch for rx frame\n", + cp->dev->name); + spin_lock(&cp->stat_lock[0]); + cp->net_stats[0].rx_errors++; + spin_unlock(&cp->stat_lock[0]); + goto do_reset; + } + + if (status & INTR_PCS_STATUS) { + if (cas_pcs_interrupt(dev, cp, status)) + goto do_reset; + } + + if (status & INTR_TX_MAC_STATUS) { + if (cas_txmac_interrupt(dev, cp, status)) + goto do_reset; + } + + if (status & INTR_RX_MAC_STATUS) { + if (cas_rxmac_interrupt(dev, cp, status)) + goto do_reset; + } + + if (status & INTR_MAC_CTRL_STATUS) { + if (cas_mac_interrupt(dev, cp, status)) + goto do_reset; + } + + if (status & INTR_MIF_STATUS) { + if (cas_mif_interrupt(dev, cp, status)) + goto do_reset; + } + + if (status & INTR_PCI_ERROR_STATUS) { + if (cas_pci_interrupt(dev, cp, status)) + goto do_reset; + } + return 0; + +do_reset: +#if 1 + atomic_inc(&cp->reset_task_pending); + atomic_inc(&cp->reset_task_pending_all); + printk(KERN_ERR "%s:reset called in cas_abnormal_irq [0x%x]\n", + dev->name, status); + schedule_work(&cp->reset_task); +#else + atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); + printk(KERN_ERR "reset called in cas_abnormal_irq\n"); + schedule_work(&cp->reset_task); +#endif + return 1; +} + +/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when + * determining whether to do a netif_stop/wakeup + */ +#define CAS_TABORT(x) (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 
2 : 1) +#define CAS_ROUND_PAGE(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK) +static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr, + const int len) +{ + unsigned long off = addr + len; + + if (CAS_TABORT(cp) == 1) + return 0; + if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN) + return 0; + return TX_TARGET_ABORT_LEN; +} + +static inline void cas_tx_ringN(struct cas *cp, int ring, int limit) +{ + struct cas_tx_desc *txds; + struct sk_buff **skbs; + struct net_device *dev = cp->dev; + int entry, count; + + spin_lock(&cp->tx_lock[ring]); + txds = cp->init_txds[ring]; + skbs = cp->tx_skbs[ring]; + entry = cp->tx_old[ring]; + + count = TX_BUFF_COUNT(ring, entry, limit); + while (entry != limit) { + struct sk_buff *skb = skbs[entry]; + dma_addr_t daddr; + u32 dlen; + int frag; + + if (!skb) { + /* this should never occur */ + entry = TX_DESC_NEXT(ring, entry); + continue; + } + + /* however, we might get only a partial skb release. */ + count -= skb_shinfo(skb)->nr_frags + + + cp->tx_tiny_use[ring][entry].nbufs + 1; + if (count < 0) + break; + + if (netif_msg_tx_done(cp)) + printk(KERN_DEBUG "%s: tx[%d] done, slot %d\n", + cp->dev->name, ring, entry); + + skbs[entry] = NULL; + cp->tx_tiny_use[ring][entry].nbufs = 0; + + for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { + struct cas_tx_desc *txd = txds + entry; + + daddr = le64_to_cpu(txd->buffer); + dlen = CAS_VAL(TX_DESC_BUFLEN, + le64_to_cpu(txd->control)); + pci_unmap_page(cp->pdev, daddr, dlen, + PCI_DMA_TODEVICE); + entry = TX_DESC_NEXT(ring, entry); + + /* tiny buffer may follow */ + if (cp->tx_tiny_use[ring][entry].used) { + cp->tx_tiny_use[ring][entry].used = 0; + entry = TX_DESC_NEXT(ring, entry); + } + } + + spin_lock(&cp->stat_lock[ring]); + cp->net_stats[ring].tx_packets++; + cp->net_stats[ring].tx_bytes += skb->len; + spin_unlock(&cp->stat_lock[ring]); + dev_kfree_skb_irq(skb); + } + cp->tx_old[ring] = entry; + + /* this is wrong for multiple tx rings. the net device needs + * multiple queues for this to do the right thing. 
we wait + * for 2*packets to be available when using tiny buffers + */ + if (netif_queue_stopped(dev) && + (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))) + netif_wake_queue(dev); + spin_unlock(&cp->tx_lock[ring]); +} + +static void cas_tx(struct net_device *dev, struct cas *cp, + u32 status) +{ + int limit, ring; +#ifdef USE_TX_COMPWB + u64 compwb = le64_to_cpu(cp->init_block->tx_compwb); +#endif + if (netif_msg_intr(cp)) + printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %lx\n", + cp->dev->name, status, compwb); + /* process all the rings */ + for (ring = 0; ring < N_TX_RINGS; ring++) { +#ifdef USE_TX_COMPWB + /* use the completion writeback registers */ + limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) | + CAS_VAL(TX_COMPWB_LSB, compwb); + compwb = TX_COMPWB_NEXT(compwb); +#else + limit = readl(cp->regs + REG_TX_COMPN(ring)); +#endif + if (cp->tx_old[ring] != limit) + cas_tx_ringN(cp, ring, limit); + } +} + + +static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, + int entry, const u64 *words, + struct sk_buff **skbref) +{ + int dlen, hlen, len, i, alloclen; + int off, swivel = RX_SWIVEL_OFF_VAL; + struct cas_page *page; + struct sk_buff *skb; + void *addr, *crcaddr; + char *p; + + hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]); + dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]); + len = hlen + dlen; + + if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT)) + alloclen = len; + else + alloclen = max(hlen, RX_COPY_MIN); + + skb = dev_alloc_skb(alloclen + swivel + cp->crc_size); + if (skb == NULL) + return -1; + + *skbref = skb; + skb->dev = cp->dev; + skb_reserve(skb, swivel); + + p = skb->data; + addr = crcaddr = NULL; + if (hlen) { /* always copy header pages */ + i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]); + page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; + off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 + + swivel; + + i = hlen; + if (!dlen) /* attach FCS */ + i += cp->crc_size; + pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, + PCI_DMA_FROMDEVICE); + addr = cas_page_map(page->buffer); + memcpy(p, addr + off, i); + pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, + PCI_DMA_FROMDEVICE); + cas_page_unmap(addr); + RX_USED_ADD(page, 0x100); + p += hlen; + swivel = 0; + } + + + if (alloclen < (hlen + dlen)) { + skb_frag_t *frag = skb_shinfo(skb)->frags; + + /* normal or jumbo packets. 
we use frags */ + i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); + page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; + off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel; + + hlen = min(cp->page_size - off, dlen); + if (hlen < 0) { + if (netif_msg_rx_err(cp)) { + printk(KERN_DEBUG "%s: rx page overflow: " + "%d\n", cp->dev->name, hlen); + } + dev_kfree_skb_irq(skb); + return -1; + } + i = hlen; + if (i == dlen) /* attach FCS */ + i += cp->crc_size; + pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, + PCI_DMA_FROMDEVICE); + + /* make sure we always copy a header */ + swivel = 0; + if (p == (char *) skb->data) { /* not split */ + addr = cas_page_map(page->buffer); + memcpy(p, addr + off, RX_COPY_MIN); + pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, + PCI_DMA_FROMDEVICE); + cas_page_unmap(addr); + off += RX_COPY_MIN; + swivel = RX_COPY_MIN; + RX_USED_ADD(page, cp->mtu_stride); + } else { + RX_USED_ADD(page, hlen); + } + skb_put(skb, alloclen); + + skb_shinfo(skb)->nr_frags++; + skb->data_len += hlen - swivel; + skb->len += hlen - swivel; + + get_page(page->buffer); + frag->page = page->buffer; + frag->page_offset = off; + frag->size = hlen - swivel; + + /* any more data? */ + if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) { + hlen = dlen; + off = 0; + + i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); + page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; + pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, + hlen + cp->crc_size, + PCI_DMA_FROMDEVICE); + pci_dma_sync_single_for_device(cp->pdev, page->dma_addr, + hlen + cp->crc_size, + PCI_DMA_FROMDEVICE); + + skb_shinfo(skb)->nr_frags++; + skb->data_len += hlen; + skb->len += hlen; + frag++; + + get_page(page->buffer); + frag->page = page->buffer; + frag->page_offset = 0; + frag->size = hlen; + RX_USED_ADD(page, hlen + cp->crc_size); + } + + if (cp->crc_size) { + addr = cas_page_map(page->buffer); + crcaddr = addr + off + hlen; + } + + } else { + /* copying packet */ + if (!dlen) + goto end_copy_pkt; + + i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); + page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; + off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel; + hlen = min(cp->page_size - off, dlen); + if (hlen < 0) { + if (netif_msg_rx_err(cp)) { + printk(KERN_DEBUG "%s: rx page overflow: " + "%d\n", cp->dev->name, hlen); + } + dev_kfree_skb_irq(skb); + return -1; + } + i = hlen; + if (i == dlen) /* attach FCS */ + i += cp->crc_size; + pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, + PCI_DMA_FROMDEVICE); + addr = cas_page_map(page->buffer); + memcpy(p, addr + off, i); + pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, + PCI_DMA_FROMDEVICE); + cas_page_unmap(addr); + if (p == (char *) skb->data) /* not split */ + RX_USED_ADD(page, cp->mtu_stride); + else + RX_USED_ADD(page, i); + + /* any more data? 
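/*
 * Editor's illustration -- not part of the patch.  The receive path above
 * copies at most a small header area into the skb and attaches the rest of
 * the packet as page fragments: the skb allocation covers the full length
 * only for "small" packets, otherwise max(header_len, 64), and fragments
 * are used whenever that allocation is smaller than the whole packet.
 * The decision, reduced to arithmetic:
 */
#include <stdio.h>

#define RX_COPY_MIN 64

static int alloc_len(int hlen, int dlen, int small_pkt)
{
	if (small_pkt)
		return hlen + dlen;	/* copy the whole packet into the skb */
	return hlen > RX_COPY_MIN ? hlen : RX_COPY_MIN;
}

int main(void)
{
	int hlen = 14, dlen = 1486;
	int alloclen = alloc_len(hlen, dlen, 0);

	printf("alloc %d bytes, use frags: %s\n",
	       alloclen, alloclen < hlen + dlen ? "yes" : "no");
	return 0;
}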
*/ + if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) { + p += hlen; + i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); + page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; + pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, + dlen + cp->crc_size, + PCI_DMA_FROMDEVICE); + addr = cas_page_map(page->buffer); + memcpy(p, addr, dlen + cp->crc_size); + pci_dma_sync_single_for_device(cp->pdev, page->dma_addr, + dlen + cp->crc_size, + PCI_DMA_FROMDEVICE); + cas_page_unmap(addr); + RX_USED_ADD(page, dlen + cp->crc_size); + } +end_copy_pkt: + if (cp->crc_size) { + addr = NULL; + crcaddr = skb->data + alloclen; + } + skb_put(skb, alloclen); + } + + i = CAS_VAL(RX_COMP4_TCP_CSUM, words[3]); + if (cp->crc_size) { + /* checksum includes FCS. strip it out. */ + i = csum_fold(csum_partial(crcaddr, cp->crc_size, i)); + if (addr) + cas_page_unmap(addr); + } + skb->csum = ntohs(i ^ 0xffff); + skb->ip_summed = CHECKSUM_HW; + skb->protocol = eth_type_trans(skb, cp->dev); + return len; +} + + +/* we can handle up to 64 rx flows at a time. we do the same thing + * as nonreassm except that we batch up the buffers. + * NOTE: we currently just treat each flow as a bunch of packets that + * we pass up. a better way would be to coalesce the packets + * into a jumbo packet. to do that, we need to do the following: + * 1) the first packet will have a clean split between header and + * data. save both. + * 2) each time the next flow packet comes in, extend the + * data length and merge the checksums. + * 3) on flow release, fix up the header. + * 4) make sure the higher layer doesn't care. + * because packets get coalesced, we shouldn't run into fragment count + * issues. + */ +static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words, + struct sk_buff *skb) +{ + int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1); + struct sk_buff_head *flow = &cp->rx_flows[flowid]; + + /* this is protected at a higher layer, so no need to + * do any additional locking here. stick the buffer + * at the end. + */ + __skb_insert(skb, flow->prev, (struct sk_buff *) flow, flow); + if (words[0] & RX_COMP1_RELEASE_FLOW) { + while ((skb = __skb_dequeue(flow))) { + cas_skb_release(skb); + } + } +} + +/* put rx descriptor back on ring. if a buffer is in use by a higher + * layer, this will need to put in a replacement. + */ +static void cas_post_page(struct cas *cp, const int ring, const int index) +{ + cas_page_t *new; + int entry; + + entry = cp->rx_old[ring]; + + new = cas_page_swap(cp, ring, index); + cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr); + cp->init_rxds[ring][entry].index = + cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) | + CAS_BASE(RX_INDEX_RING, ring)); + + entry = RX_DESC_ENTRY(ring, entry + 1); + cp->rx_old[ring] = entry; + + if (entry % 4) + return; + + if (ring == 0) + writel(entry, cp->regs + REG_RX_KICK); + else if ((N_RX_DESC_RINGS > 1) && + (cp->cas_flags & CAS_FLAG_REG_PLUS)) + writel(entry, cp->regs + REG_PLUS_RX_KICK1); +} + + +/* only when things are bad */ +static int cas_post_rxds_ringN(struct cas *cp, int ring, int num) +{ + unsigned int entry, last, count, released; + int cluster; + cas_page_t **page = cp->rx_pages[ring]; + + entry = cp->rx_old[ring]; + + if (netif_msg_intr(cp)) + printk(KERN_DEBUG "%s: rxd[%d] interrupt, done: %d\n", + cp->dev->name, ring, entry); + + cluster = -1; + count = entry & 0x3; + last = RX_DESC_ENTRY(ring, num ? 
entry + num - 4: entry - 4); + released = 0; + while (entry != last) { + /* make a new buffer if it's still in use */ + if (page_count(page[entry]->buffer) > 1) { + cas_page_t *new = cas_page_dequeue(cp); + if (!new) { + /* let the timer know that we need to + * do this again + */ + cp->cas_flags |= CAS_FLAG_RXD_POST(ring); + if (!timer_pending(&cp->link_timer)) + mod_timer(&cp->link_timer, jiffies + + CAS_LINK_FAST_TIMEOUT); + cp->rx_old[ring] = entry; + cp->rx_last[ring] = num ? num - released : 0; + return -ENOMEM; + } + spin_lock(&cp->rx_inuse_lock); + list_add(&page[entry]->list, &cp->rx_inuse_list); + spin_unlock(&cp->rx_inuse_lock); + cp->init_rxds[ring][entry].buffer = + cpu_to_le64(new->dma_addr); + page[entry] = new; + + } + + if (++count == 4) { + cluster = entry; + count = 0; + } + released++; + entry = RX_DESC_ENTRY(ring, entry + 1); + } + cp->rx_old[ring] = entry; + + if (cluster < 0) + return 0; + + if (ring == 0) + writel(cluster, cp->regs + REG_RX_KICK); + else if ((N_RX_DESC_RINGS > 1) && + (cp->cas_flags & CAS_FLAG_REG_PLUS)) + writel(cluster, cp->regs + REG_PLUS_RX_KICK1); + return 0; +} + + +/* process a completion ring. packets are set up in three basic ways: + * small packets: should be copied header + data in single buffer. + * large packets: header and data in a single buffer. + * split packets: header in a separate buffer from data. + * data may be in multiple pages. data may be > 256 + * bytes but in a single page. + * + * NOTE: RX page posting is done in this routine as well. while there's + * the capability of using multiple RX completion rings, it isn't + * really worthwhile due to the fact that the page posting will + * force serialization on the single descriptor ring. + */ +static int cas_rx_ringN(struct cas *cp, int ring, int budget) +{ + struct cas_rx_comp *rxcs = cp->init_rxcs[ring]; + int entry, drops; + int npackets = 0; + + if (netif_msg_intr(cp)) + printk(KERN_DEBUG "%s: rx[%d] interrupt, done: %d/%d\n", + cp->dev->name, ring, + readl(cp->regs + REG_RX_COMP_HEAD), + cp->rx_new[ring]); + + entry = cp->rx_new[ring]; + drops = 0; + while (1) { + struct cas_rx_comp *rxc = rxcs + entry; + struct sk_buff *skb; + int type, len; + u64 words[4]; + int i, dring; + + words[0] = le64_to_cpu(rxc->word1); + words[1] = le64_to_cpu(rxc->word2); + words[2] = le64_to_cpu(rxc->word3); + words[3] = le64_to_cpu(rxc->word4); + + /* don't touch if still owned by hw */ + type = CAS_VAL(RX_COMP1_TYPE, words[0]); + if (type == 0) + break; + + /* hw hasn't cleared the zero bit yet */ + if (words[3] & RX_COMP4_ZERO) { + break; + } + + /* get info on the packet */ + if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) { + spin_lock(&cp->stat_lock[ring]); + cp->net_stats[ring].rx_errors++; + if (words[3] & RX_COMP4_LEN_MISMATCH) + cp->net_stats[ring].rx_length_errors++; + if (words[3] & RX_COMP4_BAD) + cp->net_stats[ring].rx_crc_errors++; + spin_unlock(&cp->stat_lock[ring]); + + /* We'll just return it to Cassini. */ + drop_it: + spin_lock(&cp->stat_lock[ring]); + ++cp->net_stats[ring].rx_dropped; + spin_unlock(&cp->stat_lock[ring]); + goto next; + } + + len = cas_rx_process_pkt(cp, rxc, entry, words, &skb); + if (len < 0) { + ++drops; + goto drop_it; + } + + /* see if it's a flow re-assembly or not. the driver + * itself handles release back up. 
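+		 * flow packets are queued on cp->rx_flows[flowid] and only handed to the stack once the RELEASE_FLOW completion arrives (see cas_rx_flow_pkt)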
+ */ + if (RX_DONT_BATCH || (type == 0x2)) { + /* non-reassm: these always get released */ + cas_skb_release(skb); + } else { + cas_rx_flow_pkt(cp, words, skb); + } + + spin_lock(&cp->stat_lock[ring]); + cp->net_stats[ring].rx_packets++; + cp->net_stats[ring].rx_bytes += len; + spin_unlock(&cp->stat_lock[ring]); + cp->dev->last_rx = jiffies; + + next: + npackets++; + + /* should it be released? */ + if (words[0] & RX_COMP1_RELEASE_HDR) { + i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]); + dring = CAS_VAL(RX_INDEX_RING, i); + i = CAS_VAL(RX_INDEX_NUM, i); + cas_post_page(cp, dring, i); + } + + if (words[0] & RX_COMP1_RELEASE_DATA) { + i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]); + dring = CAS_VAL(RX_INDEX_RING, i); + i = CAS_VAL(RX_INDEX_NUM, i); + cas_post_page(cp, dring, i); + } + + if (words[0] & RX_COMP1_RELEASE_NEXT) { + i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); + dring = CAS_VAL(RX_INDEX_RING, i); + i = CAS_VAL(RX_INDEX_NUM, i); + cas_post_page(cp, dring, i); + } + + /* skip to the next entry */ + entry = RX_COMP_ENTRY(ring, entry + 1 + + CAS_VAL(RX_COMP1_SKIP, words[0])); +#ifdef USE_NAPI + if (budget && (npackets >= budget)) + break; +#endif + } + cp->rx_new[ring] = entry; + + if (drops) + printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", + cp->dev->name); + return npackets; +} + + +/* put completion entries back on the ring */ +static void cas_post_rxcs_ringN(struct net_device *dev, + struct cas *cp, int ring) +{ + struct cas_rx_comp *rxc = cp->init_rxcs[ring]; + int last, entry; + + last = cp->rx_cur[ring]; + entry = cp->rx_new[ring]; + if (netif_msg_intr(cp)) + printk(KERN_DEBUG "%s: rxc[%d] interrupt, done: %d/%d\n", + dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD), + entry); + + /* zero and re-mark descriptors */ + while (last != entry) { + cas_rxc_init(rxc + last); + last = RX_COMP_ENTRY(ring, last + 1); + } + cp->rx_cur[ring] = last; + + if (ring == 0) + writel(last, cp->regs + REG_RX_COMP_TAIL); + else if (cp->cas_flags & CAS_FLAG_REG_PLUS) + writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring)); +} + + + +/* cassini can use all four PCI interrupts for the completion ring. + * rings 3 and 4 are identical + */ +#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD) +static inline void cas_handle_irqN(struct net_device *dev, + struct cas *cp, const u32 status, + const int ring) +{ + if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT)) + cas_post_rxcs_ringN(dev, cp, ring); +} + +static irqreturn_t cas_interruptN(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = dev_id; + struct cas *cp = netdev_priv(dev); + unsigned long flags; + int ring; + u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring)); + + /* check for shared irq */ + if (status == 0) + return IRQ_NONE; + + ring = (irq == cp->pci_irq_INTC) ? 2 : 3; + spin_lock_irqsave(&cp->lock, flags); + if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ +#ifdef USE_NAPI + cas_mask_intr(cp); + netif_rx_schedule(dev); +#else + cas_rx_ringN(cp, ring, 0); +#endif + status &= ~INTR_RX_DONE_ALT; + } + + if (status) + cas_handle_irqN(dev, cp, status, ring); + spin_unlock_irqrestore(&cp->lock, flags); + return IRQ_HANDLED; +} +#endif + +#ifdef USE_PCI_INTB +/* everything but rx packets */ +static inline void cas_handle_irq1(struct cas *cp, const u32 status) +{ + if (status & INTR_RX_BUF_UNAVAIL_1) { + /* Frame arrived, no free RX buffers available. + * NOTE: we can get this on a link transition. 
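+		 * (a count of 0 makes cas_post_rxds_ringN walk nearly the whole ring)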
*/ + cas_post_rxds_ringN(cp, 1, 0); + spin_lock(&cp->stat_lock[1]); + cp->net_stats[1].rx_dropped++; + spin_unlock(&cp->stat_lock[1]); + } + + if (status & INTR_RX_BUF_AE_1) + cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) - + RX_AE_FREEN_VAL(1)); + + if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL)) + cas_post_rxcs_ringN(cp, 1); +} + +/* ring 2 handles a few more events than 3 and 4 */ +static irqreturn_t cas_interrupt1(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = dev_id; + struct cas *cp = netdev_priv(dev); + unsigned long flags; + u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); + + /* check for shared interrupt */ + if (status == 0) + return IRQ_NONE; + + spin_lock_irqsave(&cp->lock, flags); + if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ +#ifdef USE_NAPI + cas_mask_intr(cp); + netif_rx_schedule(dev); +#else + cas_rx_ringN(cp, 1, 0); +#endif + status &= ~INTR_RX_DONE_ALT; + } + if (status) + cas_handle_irq1(cp, status); + spin_unlock_irqrestore(&cp->lock, flags); + return IRQ_HANDLED; +} +#endif + +static inline void cas_handle_irq(struct net_device *dev, + struct cas *cp, const u32 status) +{ + /* housekeeping interrupts */ + if (status & INTR_ERROR_MASK) + cas_abnormal_irq(dev, cp, status); + + if (status & INTR_RX_BUF_UNAVAIL) { + /* Frame arrived, no free RX buffers available. + * NOTE: we can get this on a link transition. + */ + cas_post_rxds_ringN(cp, 0, 0); + spin_lock(&cp->stat_lock[0]); + cp->net_stats[0].rx_dropped++; + spin_unlock(&cp->stat_lock[0]); + } else if (status & INTR_RX_BUF_AE) { + cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) - + RX_AE_FREEN_VAL(0)); + } + + if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL)) + cas_post_rxcs_ringN(dev, cp, 0); +} + +static irqreturn_t cas_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = dev_id; + struct cas *cp = netdev_priv(dev); + unsigned long flags; + u32 status = readl(cp->regs + REG_INTR_STATUS); + + if (status == 0) + return IRQ_NONE; + + spin_lock_irqsave(&cp->lock, flags); + if (status & (INTR_TX_ALL | INTR_TX_INTME)) { + cas_tx(dev, cp, status); + status &= ~(INTR_TX_ALL | INTR_TX_INTME); + } + + if (status & INTR_RX_DONE) { +#ifdef USE_NAPI + cas_mask_intr(cp); + netif_rx_schedule(dev); +#else + cas_rx_ringN(cp, 0, 0); +#endif + status &= ~INTR_RX_DONE; + } + + if (status) + cas_handle_irq(dev, cp, status); + spin_unlock_irqrestore(&cp->lock, flags); + return IRQ_HANDLED; +} + + +#ifdef USE_NAPI +static int cas_poll(struct net_device *dev, int *budget) +{ + struct cas *cp = netdev_priv(dev); + int i, enable_intr, todo, credits; + u32 status = readl(cp->regs + REG_INTR_STATUS); + unsigned long flags; + + spin_lock_irqsave(&cp->lock, flags); + cas_tx(dev, cp, status); + spin_unlock_irqrestore(&cp->lock, flags); + + /* NAPI rx packets. 
we spread the credits across all of the + * rxc rings + */ + todo = min(*budget, dev->quota); + + /* to make sure we're fair with the work we loop through each + * ring N_RX_COMP_RING times with a request of + * todo / N_RX_COMP_RINGS + */ + enable_intr = 1; + credits = 0; + for (i = 0; i < N_RX_COMP_RINGS; i++) { + int j; + for (j = 0; j < N_RX_COMP_RINGS; j++) { + credits += cas_rx_ringN(cp, j, todo / N_RX_COMP_RINGS); + if (credits >= todo) { + enable_intr = 0; + goto rx_comp; + } + } + } + +rx_comp: + *budget -= credits; + dev->quota -= credits; + + /* final rx completion */ + spin_lock_irqsave(&cp->lock, flags); + if (status) + cas_handle_irq(dev, cp, status); + +#ifdef USE_PCI_INTB + if (N_RX_COMP_RINGS > 1) { + status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); + if (status) + cas_handle_irq1(dev, cp, status); + } +#endif + +#ifdef USE_PCI_INTC + if (N_RX_COMP_RINGS > 2) { + status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2)); + if (status) + cas_handle_irqN(dev, cp, status, 2); + } +#endif + +#ifdef USE_PCI_INTD + if (N_RX_COMP_RINGS > 3) { + status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3)); + if (status) + cas_handle_irqN(dev, cp, status, 3); + } +#endif + spin_unlock_irqrestore(&cp->lock, flags); + if (enable_intr) { + netif_rx_complete(dev); + cas_unmask_intr(cp); + return 0; + } + return 1; +} +#endif + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void cas_netpoll(struct net_device *dev) +{ + struct cas *cp = netdev_priv(dev); + + cas_disable_irq(cp, 0); + cas_interrupt(cp->pdev->irq, dev, NULL); + cas_enable_irq(cp, 0); + +#ifdef USE_PCI_INTB + if (N_RX_COMP_RINGS > 1) { + /* cas_interrupt1(); */ + } +#endif +#ifdef USE_PCI_INTC + if (N_RX_COMP_RINGS > 2) { + /* cas_interruptN(); */ + } +#endif +#ifdef USE_PCI_INTD + if (N_RX_COMP_RINGS > 3) { + /* cas_interruptN(); */ + } +#endif +} +#endif + +static void cas_tx_timeout(struct net_device *dev) +{ + struct cas *cp = netdev_priv(dev); + + printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name); + if (!cp->hw_running) { + printk("%s: hrm.. hw not running!\n", dev->name); + return; + } + + printk(KERN_ERR "%s: MIF_STATE[%08x]\n", + dev->name, readl(cp->regs + REG_MIF_STATE_MACHINE)); + + printk(KERN_ERR "%s: MAC_STATE[%08x]\n", + dev->name, readl(cp->regs + REG_MAC_STATE_MACHINE)); + + printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x] " + "FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n", + dev->name, + readl(cp->regs + REG_TX_CFG), + readl(cp->regs + REG_MAC_TX_STATUS), + readl(cp->regs + REG_MAC_TX_CFG), + readl(cp->regs + REG_TX_FIFO_PKT_CNT), + readl(cp->regs + REG_TX_FIFO_WRITE_PTR), + readl(cp->regs + REG_TX_FIFO_READ_PTR), + readl(cp->regs + REG_TX_SM_1), + readl(cp->regs + REG_TX_SM_2)); + + printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n", + dev->name, + readl(cp->regs + REG_RX_CFG), + readl(cp->regs + REG_MAC_RX_STATUS), + readl(cp->regs + REG_MAC_RX_CFG)); + + printk(KERN_ERR "%s: HP_STATE[%08x:%08x:%08x:%08x]\n", + dev->name, + readl(cp->regs + REG_HP_STATE_MACHINE), + readl(cp->regs + REG_HP_STATUS0), + readl(cp->regs + REG_HP_STATUS1), + readl(cp->regs + REG_HP_STATUS2)); + +#if 1 + atomic_inc(&cp->reset_task_pending); + atomic_inc(&cp->reset_task_pending_all); + schedule_work(&cp->reset_task); +#else + atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); + schedule_work(&cp->reset_task); +#endif +} + +static inline int cas_intme(int ring, int entry) +{ + /* Algorithm: IRQ every 1/2 of descriptors. 
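+	 * The test below fires whenever entry is a multiple of half the (power-of-two) ring size.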
*/ + if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1))) + return 1; + return 0; +} + + +static void cas_write_txd(struct cas *cp, int ring, int entry, + dma_addr_t mapping, int len, u64 ctrl, int last) +{ + struct cas_tx_desc *txd = cp->init_txds[ring] + entry; + + ctrl |= CAS_BASE(TX_DESC_BUFLEN, len); + if (cas_intme(ring, entry)) + ctrl |= TX_DESC_INTME; + if (last) + ctrl |= TX_DESC_EOF; + txd->control = cpu_to_le64(ctrl); + txd->buffer = cpu_to_le64(mapping); +} + +static inline void *tx_tiny_buf(struct cas *cp, const int ring, + const int entry) +{ + return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry; +} + +static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring, + const int entry, const int tentry) +{ + cp->tx_tiny_use[ring][tentry].nbufs++; + cp->tx_tiny_use[ring][entry].used = 1; + return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry; +} + +static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, + struct sk_buff *skb) +{ + struct net_device *dev = cp->dev; + int entry, nr_frags, frag, tabort, tentry; + dma_addr_t mapping; + unsigned long flags; + u64 ctrl; + u32 len; + + spin_lock_irqsave(&cp->tx_lock[ring], flags); + + /* This is a hard error, log it. */ + if (TX_BUFFS_AVAIL(cp, ring) <= + CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) { + netif_stop_queue(dev); + spin_unlock_irqrestore(&cp->tx_lock[ring], flags); + printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " + "queue awake!\n", dev->name); + return 1; + } + + ctrl = 0; + if (skb->ip_summed == CHECKSUM_HW) { + u64 csum_start_off, csum_stuff_off; + + csum_start_off = (u64) (skb->h.raw - skb->data); + csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data); + + ctrl = TX_DESC_CSUM_EN | + CAS_BASE(TX_DESC_CSUM_START, csum_start_off) | + CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off); + } + + entry = cp->tx_new[ring]; + cp->tx_skbs[ring][entry] = skb; + + nr_frags = skb_shinfo(skb)->nr_frags; + len = skb_headlen(skb); + mapping = pci_map_page(cp->pdev, virt_to_page(skb->data), + offset_in_page(skb->data), len, + PCI_DMA_TODEVICE); + + tentry = entry; + tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len); + if (unlikely(tabort)) { + /* NOTE: len is always > tabort */ + cas_write_txd(cp, ring, entry, mapping, len - tabort, + ctrl | TX_DESC_SOF, 0); + entry = TX_DESC_NEXT(ring, entry); + + memcpy(tx_tiny_buf(cp, ring, entry), skb->data + + len - tabort, tabort); + mapping = tx_tiny_map(cp, ring, entry, tentry); + cas_write_txd(cp, ring, entry, mapping, tabort, ctrl, + (nr_frags == 0)); + } else { + cas_write_txd(cp, ring, entry, mapping, len, ctrl | + TX_DESC_SOF, (nr_frags == 0)); + } + entry = TX_DESC_NEXT(ring, entry); + + for (frag = 0; frag < nr_frags; frag++) { + skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; + + len = fragp->size; + mapping = pci_map_page(cp->pdev, fragp->page, + fragp->page_offset, len, + PCI_DMA_TODEVICE); + + tabort = cas_calc_tabort(cp, fragp->page_offset, len); + if (unlikely(tabort)) { + void *addr; + + /* NOTE: len is always > tabort */ + cas_write_txd(cp, ring, entry, mapping, len - tabort, + ctrl, 0); + entry = TX_DESC_NEXT(ring, entry); + + addr = cas_page_map(fragp->page); + memcpy(tx_tiny_buf(cp, ring, entry), + addr + fragp->page_offset + len - tabort, + tabort); + cas_page_unmap(addr); + mapping = tx_tiny_map(cp, ring, entry, tentry); + len = tabort; + } + + cas_write_txd(cp, ring, entry, mapping, len, ctrl, + (frag + 1 == nr_frags)); + entry = TX_DESC_NEXT(ring, entry); + } + + cp->tx_new[ring] = entry; + if (TX_BUFFS_AVAIL(cp, ring) <= 
CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)) + netif_stop_queue(dev); + + if (netif_msg_tx_queued(cp)) + printk(KERN_DEBUG "%s: tx[%d] queued, slot %d, skblen %d, " + "avail %d\n", + dev->name, ring, entry, skb->len, + TX_BUFFS_AVAIL(cp, ring)); + writel(entry, cp->regs + REG_TX_KICKN(ring)); + spin_unlock_irqrestore(&cp->tx_lock[ring], flags); + return 0; +} + +static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct cas *cp = netdev_priv(dev); + + /* this is only used as a load-balancing hint, so it doesn't + * need to be SMP safe + */ + static int ring; + + skb = skb_padto(skb, cp->min_frame_size); + if (!skb) + return 0; + + /* XXX: we need some higher-level QoS hooks to steer packets to + * individual queues. + */ + if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb)) + return 1; + dev->trans_start = jiffies; + return 0; +} + +static void cas_init_tx_dma(struct cas *cp) +{ + u64 desc_dma = cp->block_dvma; + unsigned long off; + u32 val; + int i; + + /* set up tx completion writeback registers. must be 8-byte aligned */ +#ifdef USE_TX_COMPWB + off = offsetof(struct cas_init_block, tx_compwb); + writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI); + writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW); +#endif + + /* enable completion writebacks, enable paced mode, + * disable read pipe, and disable pre-interrupt compwbs + */ + val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 | + TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 | + TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE | + TX_CFG_INTR_COMPWB_DIS; + + /* write out tx ring info and tx desc bases */ + for (i = 0; i < MAX_TX_RINGS; i++) { + off = (unsigned long) cp->init_txds[i] - + (unsigned long) cp->init_block; + + val |= CAS_TX_RINGN_BASE(i); + writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i)); + writel((desc_dma + off) & 0xffffffff, cp->regs + + REG_TX_DBN_LOW(i)); + /* don't zero out the kick register here as the system + * will wedge + */ + } + writel(val, cp->regs + REG_TX_CFG); + + /* program max burst sizes. these numbers should be different + * if doing QoS. + */ +#ifdef USE_QOS + writel(0x800, cp->regs + REG_TX_MAXBURST_0); + writel(0x1600, cp->regs + REG_TX_MAXBURST_1); + writel(0x2400, cp->regs + REG_TX_MAXBURST_2); + writel(0x4800, cp->regs + REG_TX_MAXBURST_3); +#else + writel(0x800, cp->regs + REG_TX_MAXBURST_0); + writel(0x800, cp->regs + REG_TX_MAXBURST_1); + writel(0x800, cp->regs + REG_TX_MAXBURST_2); + writel(0x800, cp->regs + REG_TX_MAXBURST_3); +#endif +} + +/* Must be invoked under cp->lock. */ +static inline void cas_init_dma(struct cas *cp) +{ + cas_init_tx_dma(cp); + cas_init_rx_dma(cp); +} + +/* Must be invoked under cp->lock. 
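+ * Returns the MAC_RX_CFG filter bits (promiscuous or hash-filter enable) for the caller to merge into the RX MAC configuration.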
*/ +static u32 cas_setup_multicast(struct cas *cp) +{ + u32 rxcfg = 0; + int i; + + if (cp->dev->flags & IFF_PROMISC) { + rxcfg |= MAC_RX_CFG_PROMISC_EN; + + } else if (cp->dev->flags & IFF_ALLMULTI) { + for (i=0; i < 16; i++) + writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i)); + rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; + + } else { + u16 hash_table[16]; + u32 crc; + struct dev_mc_list *dmi = cp->dev->mc_list; + int i; + + /* use the alternate mac address registers for the + * first 15 multicast addresses + */ + for (i = 1; i <= CAS_MC_EXACT_MATCH_SIZE; i++) { + if (!dmi) { + writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 0)); + writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 1)); + writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 2)); + continue; + } + writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5], + cp->regs + REG_MAC_ADDRN(i*3 + 0)); + writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3], + cp->regs + REG_MAC_ADDRN(i*3 + 1)); + writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1], + cp->regs + REG_MAC_ADDRN(i*3 + 2)); + dmi = dmi->next; + } + + /* use hw hash table for the next series of + * multicast addresses + */ + memset(hash_table, 0, sizeof(hash_table)); + while (dmi) { + crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr); + crc >>= 24; + hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); + dmi = dmi->next; + } + for (i=0; i < 16; i++) + writel(hash_table[i], cp->regs + + REG_MAC_HASH_TABLEN(i)); + rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; + } + + return rxcfg; +} + +/* must be invoked under cp->stat_lock[N_TX_RINGS] */ +static void cas_clear_mac_err(struct cas *cp) +{ + writel(0, cp->regs + REG_MAC_COLL_NORMAL); + writel(0, cp->regs + REG_MAC_COLL_FIRST); + writel(0, cp->regs + REG_MAC_COLL_EXCESS); + writel(0, cp->regs + REG_MAC_COLL_LATE); + writel(0, cp->regs + REG_MAC_TIMER_DEFER); + writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK); + writel(0, cp->regs + REG_MAC_RECV_FRAME); + writel(0, cp->regs + REG_MAC_LEN_ERR); + writel(0, cp->regs + REG_MAC_ALIGN_ERR); + writel(0, cp->regs + REG_MAC_FCS_ERR); + writel(0, cp->regs + REG_MAC_RX_CODE_ERR); +} + + +static void cas_mac_reset(struct cas *cp) +{ + int i; + + /* do both TX and RX reset */ + writel(0x1, cp->regs + REG_MAC_TX_RESET); + writel(0x1, cp->regs + REG_MAC_RX_RESET); + + /* wait for TX */ + i = STOP_TRIES; + while (i-- > 0) { + if (readl(cp->regs + REG_MAC_TX_RESET) == 0) + break; + udelay(10); + } + + /* wait for RX */ + i = STOP_TRIES; + while (i-- > 0) { + if (readl(cp->regs + REG_MAC_RX_RESET) == 0) + break; + udelay(10); + } + + if (readl(cp->regs + REG_MAC_TX_RESET) | + readl(cp->regs + REG_MAC_RX_RESET)) + printk(KERN_ERR "%s: mac tx[%d]/rx[%d] reset failed [%08x]\n", + cp->dev->name, readl(cp->regs + REG_MAC_TX_RESET), + readl(cp->regs + REG_MAC_RX_RESET), + readl(cp->regs + REG_MAC_STATE_MACHINE)); +} + + +/* Must be invoked under cp->lock. */ +static void cas_init_mac(struct cas *cp) +{ + unsigned char *e = &cp->dev->dev_addr[0]; + int i; +#ifdef CONFIG_CASSINI_MULTICAST_REG_WRITE + u32 rxcfg; +#endif + cas_mac_reset(cp); + + /* setup core arbitration weight register */ + writel(CAWR_RR_DIS, cp->regs + REG_CAWR); + + /* XXX Use pci_dma_burst_advice() */ +#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) + /* set the infinite burst register for chips that don't have + * pci issues. 
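+	 * (skipped below when CAS_FLAG_TARGET_ABORT is set on the chip)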
+ */ + if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0) + writel(INF_BURST_EN, cp->regs + REG_INF_BURST); +#endif + + writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE); + + writel(0x00, cp->regs + REG_MAC_IPG0); + writel(0x08, cp->regs + REG_MAC_IPG1); + writel(0x04, cp->regs + REG_MAC_IPG2); + + /* change later for 802.3z */ + writel(0x40, cp->regs + REG_MAC_SLOT_TIME); + + /* min frame + FCS */ + writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN); + + /* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we + * specify the maximum frame size to prevent RX tag errors on + * oversized frames. + */ + writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) | + CAS_BASE(MAC_FRAMESIZE_MAX_FRAME, + (CAS_MAX_MTU + ETH_HLEN + 4 + 4)), + cp->regs + REG_MAC_FRAMESIZE_MAX); + + /* NOTE: crc_size is used as a surrogate for half-duplex. + * workaround saturn half-duplex issue by increasing preamble + * size to 65 bytes. + */ + if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size) + writel(0x41, cp->regs + REG_MAC_PA_SIZE); + else + writel(0x07, cp->regs + REG_MAC_PA_SIZE); + writel(0x04, cp->regs + REG_MAC_JAM_SIZE); + writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT); + writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE); + + writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED); + + writel(0, cp->regs + REG_MAC_ADDR_FILTER0); + writel(0, cp->regs + REG_MAC_ADDR_FILTER1); + writel(0, cp->regs + REG_MAC_ADDR_FILTER2); + writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK); + writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK); + + /* setup mac address in perfect filter array */ + for (i = 0; i < 45; i++) + writel(0x0, cp->regs + REG_MAC_ADDRN(i)); + + writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0)); + writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1)); + writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2)); + + writel(0x0001, cp->regs + REG_MAC_ADDRN(42)); + writel(0xc200, cp->regs + REG_MAC_ADDRN(43)); + writel(0x0180, cp->regs + REG_MAC_ADDRN(44)); + +#ifndef CONFIG_CASSINI_MULTICAST_REG_WRITE + cp->mac_rx_cfg = cas_setup_multicast(cp); +#else + /* WTZ: Do what Adrian did in cas_set_multicast. Doing + * a writel does not seem to be necessary because Cassini + * seems to preserve the configuration when we do the reset. + * If the chip is in trouble, though, it is not clear if we + * can really count on this behavior. cas_set_multicast uses + * spin_lock_irqsave, but we are called only in cas_init_hw and + * cas_init_hw is protected by cas_lock_all, which calls + * spin_lock_irq (so it doesn't need to save the flags, and + * we should be OK for the writel, as that is the only + * difference). + */ + cp->mac_rx_cfg = rxcfg = cas_setup_multicast(cp); + writel(rxcfg, cp->regs + REG_MAC_RX_CFG); +#endif + spin_lock(&cp->stat_lock[N_TX_RINGS]); + cas_clear_mac_err(cp); + spin_unlock(&cp->stat_lock[N_TX_RINGS]); + + /* Setup MAC interrupts. We want to get all of the interesting + * counter expiration events, but we do not want to hear about + * normal rx/tx as the DMA engine tells us that. + */ + writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK); + writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK); + + /* Don't enable even the PAUSE interrupts for now, we + * make no use of those events other than to record them. + */ + writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK); +} + +/* Must be invoked under cp->lock. */ +static void cas_init_pause_thresholds(struct cas *cp) +{ + /* Calculate pause thresholds. 
Setting the OFF threshold to the + * full RX fifo size effectively disables PAUSE generation + */ + if (cp->rx_fifo_size <= (2 * 1024)) { + cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size; + } else { + int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63; + if (max_frame * 3 > cp->rx_fifo_size) { + cp->rx_pause_off = 7104; + cp->rx_pause_on = 960; + } else { + int off = (cp->rx_fifo_size - (max_frame * 2)); + int on = off - max_frame; + cp->rx_pause_off = off; + cp->rx_pause_on = on; + } + } +} + +static int cas_vpd_match(const void __iomem *p, const char *str) +{ + int len = strlen(str) + 1; + int i; + + for (i = 0; i < len; i++) { + if (readb(p + i) != str[i]) + return 0; + } + return 1; +} + + +/* get the mac address by reading the vpd information in the rom. + * also get the phy type and determine if there's an entropy generator. + * NOTE: this is a bit convoluted for the following reasons: + * 1) vpd info has order-dependent mac addresses for multinic cards + * 2) the only way to determine the nic order is to use the slot + * number. + * 3) fiber cards don't have bridges, so their slot numbers don't + * mean anything. + * 4) we don't actually know we have a fiber card until after + * the mac addresses are parsed. + */ +static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr, + const int offset) +{ + void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START; + void __iomem *base, *kstart; + int i, len; + int found = 0; +#define VPD_FOUND_MAC 0x01 +#define VPD_FOUND_PHY 0x02 + + int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */ + int mac_off = 0; + + /* give us access to the PROM */ + writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD, + cp->regs + REG_BIM_LOCAL_DEV_EN); + + /* check for an expansion rom */ + if (readb(p) != 0x55 || readb(p + 1) != 0xaa) + goto use_random_mac_addr; + + /* search for beginning of vpd */ + base = NULL; + for (i = 2; i < EXPANSION_ROM_SIZE; i++) { + /* check for PCIR */ + if ((readb(p + i + 0) == 0x50) && + (readb(p + i + 1) == 0x43) && + (readb(p + i + 2) == 0x49) && + (readb(p + i + 3) == 0x52)) { + base = p + (readb(p + i + 8) | + (readb(p + i + 9) << 8)); + break; + } + } + + if (!base || (readb(base) != 0x82)) + goto use_random_mac_addr; + + i = (readb(base + 1) | (readb(base + 2) << 8)) + 3; + while (i < EXPANSION_ROM_SIZE) { + if (readb(base + i) != 0x90) /* no vpd found */ + goto use_random_mac_addr; + + /* found a vpd field */ + len = readb(base + i + 1) | (readb(base + i + 2) << 8); + + /* extract keywords */ + kstart = base + i + 3; + p = kstart; + while ((p - kstart) < len) { + int klen = readb(p + 2); + int j; + char type; + + p += 3; + + /* look for the following things: + * -- correct length == 29 + * 3 (type) + 2 (size) + + * 18 (strlen("local-mac-address") + 1) + + * 6 (mac addr) + * -- VPD Instance 'I' + * -- VPD Type Bytes 'B' + * -- VPD data length == 6 + * -- property string == local-mac-address + * + * -- correct length == 24 + * 3 (type) + 2 (size) + + * 12 (strlen("entropy-dev") + 1) + + * 7 (strlen("vms110") + 1) + * -- VPD Instance 'I' + * -- VPD Type String 'B' + * -- VPD data length == 7 + * -- property string == entropy-dev + * + * -- correct length == 18 + * 3 (type) + 2 (size) + + * 9 (strlen("phy-type") + 1) + + * 4 (strlen("pcs") + 1) + * -- VPD Instance 'I' + * -- VPD Type String 'S' + * -- VPD data length == 4 + * -- property string == phy-type + * + * -- correct length == 23 + * 3 (type) + 2 (size) + + * 14 (strlen("phy-interface") + 1) + + * 4 (strlen("pcs") + 1) + * -- VPD Instance 'I' + * -- 
VPD Type String 'S' + * -- VPD data length == 4 + * -- property string == phy-interface + */ + if (readb(p) != 'I') + goto next; + + /* finally, check string and length */ + type = readb(p + 3); + if (type == 'B') { + if ((klen == 29) && readb(p + 4) == 6 && + cas_vpd_match(p + 5, + "local-mac-address")) { + if (mac_off++ > offset) + goto next; + + /* set mac address */ + for (j = 0; j < 6; j++) + dev_addr[j] = + readb(p + 23 + j); + goto found_mac; + } + } + + if (type != 'S') + goto next; + +#ifdef USE_ENTROPY_DEV + if ((klen == 24) && + cas_vpd_match(p + 5, "entropy-dev") && + cas_vpd_match(p + 17, "vms110")) { + cp->cas_flags |= CAS_FLAG_ENTROPY_DEV; + goto next; + } +#endif + + if (found & VPD_FOUND_PHY) + goto next; + + if ((klen == 18) && readb(p + 4) == 4 && + cas_vpd_match(p + 5, "phy-type")) { + if (cas_vpd_match(p + 14, "pcs")) { + phy_type = CAS_PHY_SERDES; + goto found_phy; + } + } + + if ((klen == 23) && readb(p + 4) == 4 && + cas_vpd_match(p + 5, "phy-interface")) { + if (cas_vpd_match(p + 19, "pcs")) { + phy_type = CAS_PHY_SERDES; + goto found_phy; + } + } +found_mac: + found |= VPD_FOUND_MAC; + goto next; + +found_phy: + found |= VPD_FOUND_PHY; + +next: + p += klen; + } + i += len + 3; + } + +use_random_mac_addr: + if (found & VPD_FOUND_MAC) + goto done; + + /* Sun MAC prefix then 3 random bytes. */ + printk(PFX "MAC address not found in ROM VPD\n"); + dev_addr[0] = 0x08; + dev_addr[1] = 0x00; + dev_addr[2] = 0x20; + get_random_bytes(dev_addr + 3, 3); + +done: + writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN); + return phy_type; +} + +/* check pci invariants */ +static void cas_check_pci_invariants(struct cas *cp) +{ + struct pci_dev *pdev = cp->pdev; + u8 rev; + + cp->cas_flags = 0; + pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); + if ((pdev->vendor == PCI_VENDOR_ID_SUN) && + (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) { + if (rev >= CAS_ID_REVPLUS) + cp->cas_flags |= CAS_FLAG_REG_PLUS; + if (rev < CAS_ID_REVPLUS02u) + cp->cas_flags |= CAS_FLAG_TARGET_ABORT; + + /* Original Cassini supports HW CSUM, but it's not + * enabled by default as it can trigger TX hangs. + */ + if (rev < CAS_ID_REV2) + cp->cas_flags |= CAS_FLAG_NO_HW_CSUM; + } else { + /* Only sun has original cassini chips. */ + cp->cas_flags |= CAS_FLAG_REG_PLUS; + + /* We use a flag because the same phy might be externally + * connected. + */ + if ((pdev->vendor == PCI_VENDOR_ID_NS) && + (pdev->device == PCI_DEVICE_ID_NS_SATURN)) + cp->cas_flags |= CAS_FLAG_SATURN; + } +} + + +static int cas_check_invariants(struct cas *cp) +{ + struct pci_dev *pdev = cp->pdev; + u32 cfg; + int i; + + /* get page size for rx buffers. */ + cp->page_order = 0; +#ifdef USE_PAGE_ORDER + if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) { + /* see if we can allocate larger pages */ + struct page *page = alloc_pages(GFP_ATOMIC, + CAS_JUMBO_PAGE_SHIFT - + PAGE_SHIFT); + if (page) { + __free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT); + cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT; + } else { + printk(PFX "MTU limited to %d bytes\n", CAS_MAX_MTU); + } + } +#endif + cp->page_size = (PAGE_SIZE << cp->page_order); + + /* Fetch the FIFO configurations. */ + cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64; + cp->rx_fifo_size = RX_FIFO_SIZE; + + /* finish phy determination. MDIO1 takes precedence over MDIO0 if + * they're both connected. 
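+	 * Hence the MIF_CFG_MDIO_1 bit is tested before MDIO_0 further down.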
+ */ + cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr, + PCI_SLOT(pdev->devfn)); + if (cp->phy_type & CAS_PHY_SERDES) { + cp->cas_flags |= CAS_FLAG_1000MB_CAP; + return 0; /* no more checking needed */ + } + + /* MII */ + cfg = readl(cp->regs + REG_MIF_CFG); + if (cfg & MIF_CFG_MDIO_1) { + cp->phy_type = CAS_PHY_MII_MDIO1; + } else if (cfg & MIF_CFG_MDIO_0) { + cp->phy_type = CAS_PHY_MII_MDIO0; + } + + cas_mif_poll(cp, 0); + writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); + + for (i = 0; i < 32; i++) { + u32 phy_id; + int j; + + for (j = 0; j < 3; j++) { + cp->phy_addr = i; + phy_id = cas_phy_read(cp, MII_PHYSID1) << 16; + phy_id |= cas_phy_read(cp, MII_PHYSID2); + if (phy_id && (phy_id != 0xFFFFFFFF)) { + cp->phy_id = phy_id; + goto done; + } + } + } + printk(KERN_ERR PFX "MII phy did not respond [%08x]\n", + readl(cp->regs + REG_MIF_STATE_MACHINE)); + return -1; + +done: + /* see if we can do gigabit */ + cfg = cas_phy_read(cp, MII_BMSR); + if ((cfg & CAS_BMSR_1000_EXTEND) && + cas_phy_read(cp, CAS_MII_1000_EXTEND)) + cp->cas_flags |= CAS_FLAG_1000MB_CAP; + return 0; +} + +/* Must be invoked under cp->lock. */ +static inline void cas_start_dma(struct cas *cp) +{ + int i; + u32 val; + int txfailed = 0; + + /* enable dma */ + val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN; + writel(val, cp->regs + REG_TX_CFG); + val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN; + writel(val, cp->regs + REG_RX_CFG); + + /* enable the mac */ + val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN; + writel(val, cp->regs + REG_MAC_TX_CFG); + val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN; + writel(val, cp->regs + REG_MAC_RX_CFG); + + i = STOP_TRIES; + while (i-- > 0) { + val = readl(cp->regs + REG_MAC_TX_CFG); + if ((val & MAC_TX_CFG_EN)) + break; + udelay(10); + } + if (i < 0) txfailed = 1; + i = STOP_TRIES; + while (i-- > 0) { + val = readl(cp->regs + REG_MAC_RX_CFG); + if ((val & MAC_RX_CFG_EN)) { + if (txfailed) { + printk(KERN_ERR + "%s: enabling mac failed [tx:%08x:%08x].\n", + cp->dev->name, + readl(cp->regs + REG_MIF_STATE_MACHINE), + readl(cp->regs + REG_MAC_STATE_MACHINE)); + } + goto enable_rx_done; + } + udelay(10); + } + printk(KERN_ERR "%s: enabling mac failed [%s:%08x:%08x].\n", + cp->dev->name, + (txfailed? "tx,rx":"rx"), + readl(cp->regs + REG_MIF_STATE_MACHINE), + readl(cp->regs + REG_MAC_STATE_MACHINE)); + +enable_rx_done: + cas_unmask_intr(cp); /* enable interrupts */ + writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK); + writel(0, cp->regs + REG_RX_COMP_TAIL); + + if (cp->cas_flags & CAS_FLAG_REG_PLUS) { + if (N_RX_DESC_RINGS > 1) + writel(RX_DESC_RINGN_SIZE(1) - 4, + cp->regs + REG_PLUS_RX_KICK1); + + for (i = 1; i < N_RX_COMP_RINGS; i++) + writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i)); + } +} + +/* Must be invoked under cp->lock. */ +static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd, + int *pause) +{ + u32 val = readl(cp->regs + REG_PCS_MII_LPA); + *fd = (val & PCS_MII_LPA_FD) ? 1 : 0; + *pause = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00; + if (val & PCS_MII_LPA_ASYM_PAUSE) + *pause |= 0x10; + *spd = 1000; +} + +/* Must be invoked under cp->lock. 
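+ * Decodes the MII link partner ability registers into duplex, speed, and pause settings.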
*/ +static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd, + int *pause) +{ + u32 val; + + *fd = 0; + *spd = 10; + *pause = 0; + + /* use GMII registers */ + val = cas_phy_read(cp, MII_LPA); + if (val & CAS_LPA_PAUSE) + *pause = 0x01; + + if (val & CAS_LPA_ASYM_PAUSE) + *pause |= 0x10; + + if (val & LPA_DUPLEX) + *fd = 1; + if (val & LPA_100) + *spd = 100; + + if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { + val = cas_phy_read(cp, CAS_MII_1000_STATUS); + if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF)) + *spd = 1000; + if (val & CAS_LPA_1000FULL) + *fd = 1; + } +} + +/* A link-up condition has occurred, initialize and enable the + * rest of the chip. + * + * Must be invoked under cp->lock. + */ +static void cas_set_link_modes(struct cas *cp) +{ + u32 val; + int full_duplex, speed, pause; + + full_duplex = 0; + speed = 10; + pause = 0; + + if (CAS_PHY_MII(cp->phy_type)) { + cas_mif_poll(cp, 0); + val = cas_phy_read(cp, MII_BMCR); + if (val & BMCR_ANENABLE) { + cas_read_mii_link_mode(cp, &full_duplex, &speed, + &pause); + } else { + if (val & BMCR_FULLDPLX) + full_duplex = 1; + + if (val & BMCR_SPEED100) + speed = 100; + else if (val & CAS_BMCR_SPEED1000) + speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? + 1000 : 100; + } + cas_mif_poll(cp, 1); + + } else { + val = readl(cp->regs + REG_PCS_MII_CTRL); + cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause); + if ((val & PCS_MII_AUTONEG_EN) == 0) { + if (val & PCS_MII_CTRL_DUPLEX) + full_duplex = 1; + } + } + + if (netif_msg_link(cp)) + printk(KERN_INFO "%s: Link up at %d Mbps, %s-duplex.\n", + cp->dev->name, speed, (full_duplex ? "full" : "half")); + + val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED; + if (CAS_PHY_MII(cp->phy_type)) { + val |= MAC_XIF_MII_BUFFER_OUTPUT_EN; + if (!full_duplex) + val |= MAC_XIF_DISABLE_ECHO; + } + if (full_duplex) + val |= MAC_XIF_FDPLX_LED; + if (speed == 1000) + val |= MAC_XIF_GMII_MODE; + writel(val, cp->regs + REG_MAC_XIF_CFG); + + /* deal with carrier and collision detect. */ + val = MAC_TX_CFG_IPG_EN; + if (full_duplex) { + val |= MAC_TX_CFG_IGNORE_CARRIER; + val |= MAC_TX_CFG_IGNORE_COLL; + } else { +#ifndef USE_CSMA_CD_PROTO + val |= MAC_TX_CFG_NEVER_GIVE_UP_EN; + val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM; +#endif + } + /* val now set up for REG_MAC_TX_CFG */ + + /* If gigabit and half-duplex, enable carrier extension + * mode. increase slot time to 512 bytes as well. + * else, disable it and make sure slot time is 64 bytes. + * also activate checksum bug workaround + */ + if ((speed == 1000) && !full_duplex) { + writel(val | MAC_TX_CFG_CARRIER_EXTEND, + cp->regs + REG_MAC_TX_CFG); + + val = readl(cp->regs + REG_MAC_RX_CFG); + val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */ + writel(val | MAC_RX_CFG_CARRIER_EXTEND, + cp->regs + REG_MAC_RX_CFG); + + writel(0x200, cp->regs + REG_MAC_SLOT_TIME); + + cp->crc_size = 4; + /* minimum size gigabit frame at half duplex */ + cp->min_frame_size = CAS_1000MB_MIN_FRAME; + + } else { + writel(val, cp->regs + REG_MAC_TX_CFG); + + /* checksum bug workaround. 
don't strip FCS when in + * half-duplex mode + */ + val = readl(cp->regs + REG_MAC_RX_CFG); + if (full_duplex) { + val |= MAC_RX_CFG_STRIP_FCS; + cp->crc_size = 0; + cp->min_frame_size = CAS_MIN_MTU; + } else { + val &= ~MAC_RX_CFG_STRIP_FCS; + cp->crc_size = 4; + cp->min_frame_size = CAS_MIN_FRAME; + } + writel(val & ~MAC_RX_CFG_CARRIER_EXTEND, + cp->regs + REG_MAC_RX_CFG); + writel(0x40, cp->regs + REG_MAC_SLOT_TIME); + } + + if (netif_msg_link(cp)) { + if (pause & 0x01) { + printk(KERN_INFO "%s: Pause is enabled " + "(rxfifo: %d off: %d on: %d)\n", + cp->dev->name, + cp->rx_fifo_size, + cp->rx_pause_off, + cp->rx_pause_on); + } else if (pause & 0x10) { + printk(KERN_INFO "%s: TX pause enabled\n", + cp->dev->name); + } else { + printk(KERN_INFO "%s: Pause is disabled\n", + cp->dev->name); + } + } + + val = readl(cp->regs + REG_MAC_CTRL_CFG); + val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN); + if (pause) { /* symmetric or asymmetric pause */ + val |= MAC_CTRL_CFG_SEND_PAUSE_EN; + if (pause & 0x01) { /* symmetric pause */ + val |= MAC_CTRL_CFG_RECV_PAUSE_EN; + } + } + writel(val, cp->regs + REG_MAC_CTRL_CFG); + cas_start_dma(cp); +} + +/* Must be invoked under cp->lock. */ +static void cas_init_hw(struct cas *cp, int restart_link) +{ + if (restart_link) + cas_phy_init(cp); + + cas_init_pause_thresholds(cp); + cas_init_mac(cp); + cas_init_dma(cp); + + if (restart_link) { + /* Default aneg parameters */ + cp->timer_ticks = 0; + cas_begin_auto_negotiation(cp, NULL); + } else if (cp->lstate == link_up) { + cas_set_link_modes(cp); + netif_carrier_on(cp->dev); + } +} + +/* Must be invoked under cp->lock. on earlier cassini boards, + * SOFT_0 is tied to PCI reset. we use this to force a pci reset, + * let it settle out, and then restore pci state. + */ +static void cas_hard_reset(struct cas *cp) +{ + writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN); + udelay(20); + pci_restore_state(cp->pdev); +} + + +static void cas_global_reset(struct cas *cp, int blkflag) +{ + int limit; + + /* issue a global reset. don't use RSTOUT. */ + if (blkflag && !CAS_PHY_MII(cp->phy_type)) { + /* For PCS, when the blkflag is set, we should set the + * SW_REST_BLOCK_PCS_SLINK bit to prevent the results of + * the last autonegotiation from being cleared. We'll + * need some special handling if the chip is set into a + * loopback mode. + */ + writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK), + cp->regs + REG_SW_RESET); + } else { + writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET); + } + + /* need to wait at least 3ms before polling register */ + mdelay(3); + + limit = STOP_TRIES; + while (limit-- > 0) { + u32 val = readl(cp->regs + REG_SW_RESET); + if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0) + goto done; + udelay(10); + } + printk(KERN_ERR "%s: sw reset failed.\n", cp->dev->name); + +done: + /* enable various BIM interrupts */ + writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE | + BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG); + + /* clear out pci error status mask for handled errors. + * we don't deal with DMA counter overflows as they happen + * all the time. 
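+	 * (the write below clears the mask bits only for those handled errors)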
+ */ + writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO | + PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE | + PCI_ERR_BIM_DMA_READ), cp->regs + + REG_PCI_ERR_STATUS_MASK); + + /* set up for MII by default to address mac rx reset timeout + * issue + */ + writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE); +} + +static void cas_reset(struct cas *cp, int blkflag) +{ + u32 val; + + cas_mask_intr(cp); + cas_global_reset(cp, blkflag); + cas_mac_reset(cp); + cas_entropy_reset(cp); + + /* disable dma engines. */ + val = readl(cp->regs + REG_TX_CFG); + val &= ~TX_CFG_DMA_EN; + writel(val, cp->regs + REG_TX_CFG); + + val = readl(cp->regs + REG_RX_CFG); + val &= ~RX_CFG_DMA_EN; + writel(val, cp->regs + REG_RX_CFG); + + /* program header parser */ + if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) || + (CAS_HP_ALT_FIRMWARE == cas_prog_null)) { + cas_load_firmware(cp, CAS_HP_FIRMWARE); + } else { + cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE); + } + + /* clear out error registers */ + spin_lock(&cp->stat_lock[N_TX_RINGS]); + cas_clear_mac_err(cp); + spin_unlock(&cp->stat_lock[N_TX_RINGS]); +} + +/* Shut down the chip, must be called with pm_sem held. */ +static void cas_shutdown(struct cas *cp) +{ + unsigned long flags; + + /* Make us not-running to avoid timers respawning */ + cp->hw_running = 0; + + del_timer_sync(&cp->link_timer); + + /* Stop the reset task */ +#if 0 + while (atomic_read(&cp->reset_task_pending_mtu) || + atomic_read(&cp->reset_task_pending_spare) || + atomic_read(&cp->reset_task_pending_all)) + schedule(); + +#else + while (atomic_read(&cp->reset_task_pending)) + schedule(); +#endif + /* Actually stop the chip */ + cas_lock_all_save(cp, flags); + cas_reset(cp, 0); + if (cp->cas_flags & CAS_FLAG_SATURN) + cas_phy_powerdown(cp); + cas_unlock_all_restore(cp, flags); +} + +static int cas_change_mtu(struct net_device *dev, int new_mtu) +{ + struct cas *cp = netdev_priv(dev); + + if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU) + return -EINVAL; + + dev->mtu = new_mtu; + if (!netif_running(dev) || !netif_device_present(dev)) + return 0; + + /* let the reset task handle it */ +#if 1 + atomic_inc(&cp->reset_task_pending); + if ((cp->phy_type & CAS_PHY_SERDES)) { + atomic_inc(&cp->reset_task_pending_all); + } else { + atomic_inc(&cp->reset_task_pending_mtu); + } + schedule_work(&cp->reset_task); +#else + atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ? + CAS_RESET_ALL : CAS_RESET_MTU); + printk(KERN_ERR "reset called in cas_change_mtu\n"); + schedule_work(&cp->reset_task); +#endif + + flush_scheduled_work(); + return 0; +} + +static void cas_clean_txd(struct cas *cp, int ring) +{ + struct cas_tx_desc *txd = cp->init_txds[ring]; + struct sk_buff *skb, **skbs = cp->tx_skbs[ring]; + u64 daddr, dlen; + int i, size; + + size = TX_DESC_RINGN_SIZE(ring); + for (i = 0; i < size; i++) { + int frag; + + if (skbs[i] == NULL) + continue; + + skb = skbs[i]; + skbs[i] = NULL; + + for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { + int ent = i & (size - 1); + + /* first buffer is never a tiny buffer and so + * needs to be unmapped. + */ + daddr = le64_to_cpu(txd[ent].buffer); + dlen = CAS_VAL(TX_DESC_BUFLEN, + le64_to_cpu(txd[ent].control)); + pci_unmap_page(cp->pdev, daddr, dlen, + PCI_DMA_TODEVICE); + + if (frag != skb_shinfo(skb)->nr_frags) { + i++; + + /* next buffer might by a tiny buffer. + * skip past it. 
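+			 * tiny buffers live in the coherent tx_tiny_bufs block, so there is nothing to unmap for them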
+ */ + ent = i & (size - 1); + if (cp->tx_tiny_use[ring][ent].used) + i++; + } + } + dev_kfree_skb_any(skb); + } + + /* zero out tiny buf usage */ + memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring])); +} + +/* freed on close */ +static inline void cas_free_rx_desc(struct cas *cp, int ring) +{ + cas_page_t **page = cp->rx_pages[ring]; + int i, size; + + size = RX_DESC_RINGN_SIZE(ring); + for (i = 0; i < size; i++) { + if (page[i]) { + cas_page_free(cp, page[i]); + page[i] = NULL; + } + } +} + +static void cas_free_rxds(struct cas *cp) +{ + int i; + + for (i = 0; i < N_RX_DESC_RINGS; i++) + cas_free_rx_desc(cp, i); +} + +/* Must be invoked under cp->lock. */ +static void cas_clean_rings(struct cas *cp) +{ + int i; + + /* need to clean all tx rings */ + memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS); + memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS); + for (i = 0; i < N_TX_RINGS; i++) + cas_clean_txd(cp, i); + + /* zero out init block */ + memset(cp->init_block, 0, sizeof(struct cas_init_block)); + cas_clean_rxds(cp); + cas_clean_rxcs(cp); +} + +/* allocated on open */ +static inline int cas_alloc_rx_desc(struct cas *cp, int ring) +{ + cas_page_t **page = cp->rx_pages[ring]; + int size, i = 0; + + size = RX_DESC_RINGN_SIZE(ring); + for (i = 0; i < size; i++) { + if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL) + return -1; + } + return 0; +} + +static int cas_alloc_rxds(struct cas *cp) +{ + int i; + + for (i = 0; i < N_RX_DESC_RINGS; i++) { + if (cas_alloc_rx_desc(cp, i) < 0) { + cas_free_rxds(cp); + return -1; + } + } + return 0; +} + +static void cas_reset_task(void *data) +{ + struct cas *cp = (struct cas *) data; +#if 0 + int pending = atomic_read(&cp->reset_task_pending); +#else + int pending_all = atomic_read(&cp->reset_task_pending_all); + int pending_spare = atomic_read(&cp->reset_task_pending_spare); + int pending_mtu = atomic_read(&cp->reset_task_pending_mtu); + + if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) { + /* We can have more tasks scheduled than actually + * needed. + */ + atomic_dec(&cp->reset_task_pending); + return; + } +#endif + /* The link went down, we reset the ring, but keep + * DMA stopped. Use this function for reset + * on error as well. + */ + if (cp->hw_running) { + unsigned long flags; + + /* Make sure we don't get interrupts or tx packets */ + netif_device_detach(cp->dev); + cas_lock_all_save(cp, flags); + + if (cp->opened) { + /* We call cas_spare_recover when we call cas_open. + * but we do not initialize the lists cas_spare_recover + * uses until cas_open is called. + */ + cas_spare_recover(cp, GFP_ATOMIC); + } +#if 1 + /* test => only pending_spare set */ + if (!pending_all && !pending_mtu) + goto done; +#else + if (pending == CAS_RESET_SPARE) + goto done; +#endif + /* when pending == CAS_RESET_ALL, the following + * call to cas_init_hw will restart auto negotiation. + * Setting the second argument of cas_reset to + * !(pending == CAS_RESET_ALL) will set this argument + * to 1 (avoiding reinitializing the PHY for the normal + * PCS case) when auto negotiation is not restarted. 
+ */ +#if 1 + cas_reset(cp, !(pending_all > 0)); + if (cp->opened) + cas_clean_rings(cp); + cas_init_hw(cp, (pending_all > 0)); +#else + cas_reset(cp, !(pending == CAS_RESET_ALL)); + if (cp->opened) + cas_clean_rings(cp); + cas_init_hw(cp, pending == CAS_RESET_ALL); +#endif + +done: + cas_unlock_all_restore(cp, flags); + netif_device_attach(cp->dev); + } +#if 1 + atomic_sub(pending_all, &cp->reset_task_pending_all); + atomic_sub(pending_spare, &cp->reset_task_pending_spare); + atomic_sub(pending_mtu, &cp->reset_task_pending_mtu); + atomic_dec(&cp->reset_task_pending); +#else + atomic_set(&cp->reset_task_pending, 0); +#endif +} + +static void cas_link_timer(unsigned long data) +{ + struct cas *cp = (struct cas *) data; + int mask, pending = 0, reset = 0; + unsigned long flags; + + if (link_transition_timeout != 0 && + cp->link_transition_jiffies_valid && + ((jiffies - cp->link_transition_jiffies) > + (link_transition_timeout))) { + /* One-second counter so link-down workaround doesn't + * cause resets to occur so fast as to fool the switch + * into thinking the link is down. + */ + cp->link_transition_jiffies_valid = 0; + } + + if (!cp->hw_running) + return; + + spin_lock_irqsave(&cp->lock, flags); + cas_lock_tx(cp); + cas_entropy_gather(cp); + + /* If the link task is still pending, we just + * reschedule the link timer + */ +#if 1 + if (atomic_read(&cp->reset_task_pending_all) || + atomic_read(&cp->reset_task_pending_spare) || + atomic_read(&cp->reset_task_pending_mtu)) + goto done; +#else + if (atomic_read(&cp->reset_task_pending)) + goto done; +#endif + + /* check for rx cleaning */ + if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) { + int i, rmask; + + for (i = 0; i < MAX_RX_DESC_RINGS; i++) { + rmask = CAS_FLAG_RXD_POST(i); + if ((mask & rmask) == 0) + continue; + + /* post_rxds will do a mod_timer */ + if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) { + pending = 1; + continue; + } + cp->cas_flags &= ~rmask; + } + } + + if (CAS_PHY_MII(cp->phy_type)) { + u16 bmsr; + cas_mif_poll(cp, 0); + bmsr = cas_phy_read(cp, MII_BMSR); + /* WTZ: Solaris driver reads this twice, but that + * may be due to the PCS case and the use of a + * common implementation. Read it twice here to be + * safe. 
+ */ + bmsr = cas_phy_read(cp, MII_BMSR); + cas_mif_poll(cp, 1); + readl(cp->regs + REG_MIF_STATUS); /* avoid dups */ + reset = cas_mii_link_check(cp, bmsr); + } else { + reset = cas_pcs_link_check(cp); + } + + if (reset) + goto done; + + /* check for tx state machine confusion */ + if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) { + u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE); + u32 wptr, rptr; + int tlm = CAS_VAL(MAC_SM_TLM, val); + + if (((tlm == 0x5) || (tlm == 0x3)) && + (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) { + if (netif_msg_tx_err(cp)) + printk(KERN_DEBUG "%s: tx err: " + "MAC_STATE[%08x]\n", + cp->dev->name, val); + reset = 1; + goto done; + } + + val = readl(cp->regs + REG_TX_FIFO_PKT_CNT); + wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR); + rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR); + if ((val == 0) && (wptr != rptr)) { + if (netif_msg_tx_err(cp)) + printk(KERN_DEBUG "%s: tx err: " + "TX_FIFO[%08x:%08x:%08x]\n", + cp->dev->name, val, wptr, rptr); + reset = 1; + } + + if (reset) + cas_hard_reset(cp); + } + +done: + if (reset) { +#if 1 + atomic_inc(&cp->reset_task_pending); + atomic_inc(&cp->reset_task_pending_all); + schedule_work(&cp->reset_task); +#else + atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); + printk(KERN_ERR "reset called in cas_link_timer\n"); + schedule_work(&cp->reset_task); +#endif + } + + if (!pending) + mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT); + cas_unlock_tx(cp); + spin_unlock_irqrestore(&cp->lock, flags); +} + +/* tiny buffers are used to avoid target abort issues with + * older cassini's + */ +static void cas_tx_tiny_free(struct cas *cp) +{ + struct pci_dev *pdev = cp->pdev; + int i; + + for (i = 0; i < N_TX_RINGS; i++) { + if (!cp->tx_tiny_bufs[i]) + continue; + + pci_free_consistent(pdev, TX_TINY_BUF_BLOCK, + cp->tx_tiny_bufs[i], + cp->tx_tiny_dvma[i]); + cp->tx_tiny_bufs[i] = NULL; + } +} + +static int cas_tx_tiny_alloc(struct cas *cp) +{ + struct pci_dev *pdev = cp->pdev; + int i; + + for (i = 0; i < N_TX_RINGS; i++) { + cp->tx_tiny_bufs[i] = + pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK, + &cp->tx_tiny_dvma[i]); + if (!cp->tx_tiny_bufs[i]) { + cas_tx_tiny_free(cp); + return -1; + } + } + return 0; +} + + +static int cas_open(struct net_device *dev) +{ + struct cas *cp = netdev_priv(dev); + int hw_was_up, err; + unsigned long flags; + + down(&cp->pm_sem); + + hw_was_up = cp->hw_running; + + /* The power-management semaphore protects the hw_running + * etc. state so it is safe to do this bit without cp->lock + */ + if (!cp->hw_running) { + /* Reset the chip */ + cas_lock_all_save(cp, flags); + /* We set the second arg to cas_reset to zero + * because cas_init_hw below will have its second + * argument set to non-zero, which will force + * autonegotiation to start. + */ + cas_reset(cp, 0); + cp->hw_running = 1; + cas_unlock_all_restore(cp, flags); + } + + if (cas_tx_tiny_alloc(cp) < 0) + return -ENOMEM; + + /* alloc rx descriptors */ + err = -ENOMEM; + if (cas_alloc_rxds(cp) < 0) + goto err_tx_tiny; + + /* allocate spares */ + cas_spare_init(cp); + cas_spare_recover(cp, GFP_KERNEL); + + /* We can now request the interrupt as we know it's masked + * on the controller. 
cassini+ has up to 4 interrupts + * that can be used, but you need to do explicit pci interrupt + * mapping to expose them + */ + if (request_irq(cp->pdev->irq, cas_interrupt, + SA_SHIRQ, dev->name, (void *) dev)) { + printk(KERN_ERR "%s: failed to request irq !\n", + cp->dev->name); + err = -EAGAIN; + goto err_spare; + } + + /* init hw */ + cas_lock_all_save(cp, flags); + cas_clean_rings(cp); + cas_init_hw(cp, !hw_was_up); + cp->opened = 1; + cas_unlock_all_restore(cp, flags); + + netif_start_queue(dev); + up(&cp->pm_sem); + return 0; + +err_spare: + cas_spare_free(cp); + cas_free_rxds(cp); +err_tx_tiny: + cas_tx_tiny_free(cp); + up(&cp->pm_sem); + return err; +} + +static int cas_close(struct net_device *dev) +{ + unsigned long flags; + struct cas *cp = netdev_priv(dev); + + /* Make sure we don't get distracted by suspend/resume */ + down(&cp->pm_sem); + + netif_stop_queue(dev); + + /* Stop traffic, mark us closed */ + cas_lock_all_save(cp, flags); + cp->opened = 0; + cas_reset(cp, 0); + cas_phy_init(cp); + cas_begin_auto_negotiation(cp, NULL); + cas_clean_rings(cp); + cas_unlock_all_restore(cp, flags); + + free_irq(cp->pdev->irq, (void *) dev); + cas_spare_free(cp); + cas_free_rxds(cp); + cas_tx_tiny_free(cp); + up(&cp->pm_sem); + return 0; +} + +static struct { + const char name[ETH_GSTRING_LEN]; +} ethtool_cassini_statnames[] = { + {"collisions"}, + {"rx_bytes"}, + {"rx_crc_errors"}, + {"rx_dropped"}, + {"rx_errors"}, + {"rx_fifo_errors"}, + {"rx_frame_errors"}, + {"rx_length_errors"}, + {"rx_over_errors"}, + {"rx_packets"}, + {"tx_aborted_errors"}, + {"tx_bytes"}, + {"tx_dropped"}, + {"tx_errors"}, + {"tx_fifo_errors"}, + {"tx_packets"} +}; +#define CAS_NUM_STAT_KEYS (sizeof(ethtool_cassini_statnames)/ETH_GSTRING_LEN) + +static struct { + const int offsets; /* neg. values for 2nd arg to cas_read_phy */ +} ethtool_register_table[] = { + {-MII_BMSR}, + {-MII_BMCR}, + {REG_CAWR}, + {REG_INF_BURST}, + {REG_BIM_CFG}, + {REG_RX_CFG}, + {REG_HP_CFG}, + {REG_MAC_TX_CFG}, + {REG_MAC_RX_CFG}, + {REG_MAC_CTRL_CFG}, + {REG_MAC_XIF_CFG}, + {REG_MIF_CFG}, + {REG_PCS_CFG}, + {REG_SATURN_PCFG}, + {REG_PCS_MII_STATUS}, + {REG_PCS_STATE_MACHINE}, + {REG_MAC_COLL_EXCESS}, + {REG_MAC_COLL_LATE} +}; +#define CAS_REG_LEN (sizeof(ethtool_register_table)/sizeof(int)) +#define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN) + +static void cas_read_regs(struct cas *cp, u8 *ptr, int len) +{ + u8 *p; + int i; + unsigned long flags; + + spin_lock_irqsave(&cp->lock, flags); + for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) { + u16 hval; + u32 val; + if (ethtool_register_table[i].offsets < 0) { + hval = cas_phy_read(cp, + -ethtool_register_table[i].offsets); + val = hval; + } else { + val= readl(cp->regs+ethtool_register_table[i].offsets); + } + memcpy(p, (u8 *)&val, sizeof(u32)); + } + spin_unlock_irqrestore(&cp->lock, flags); +} + +static struct net_device_stats *cas_get_stats(struct net_device *dev) +{ + struct cas *cp = netdev_priv(dev); + struct net_device_stats *stats = cp->net_stats; + unsigned long flags; + int i; + unsigned long tmp; + + /* we collate all of the stats into net_stats[N_TX_RING] */ + if (!cp->hw_running) + return stats + N_TX_RINGS; + + /* collect outstanding stats */ + /* WTZ: the Cassini spec gives these as 16 bit counters but + * stored in 32-bit words. Added a mask of 0xffff to be safe, + * in case the chip somehow puts any garbage in the other bits. + * Also, counter usage didn't seem to mach what Adrian did + * in the parts of the code that set these quantities. 
Made + * that consistent. + */ + spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags); + stats[N_TX_RINGS].rx_crc_errors += + readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff; + stats[N_TX_RINGS].rx_frame_errors += + readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff; + stats[N_TX_RINGS].rx_length_errors += + readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff; +#if 1 + tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) + + (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff); + stats[N_TX_RINGS].tx_aborted_errors += tmp; + stats[N_TX_RINGS].collisions += + tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff); +#else + stats[N_TX_RINGS].tx_aborted_errors += + readl(cp->regs + REG_MAC_COLL_EXCESS); + stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) + + readl(cp->regs + REG_MAC_COLL_LATE); +#endif + cas_clear_mac_err(cp); + + /* saved bits that are unique to ring 0 */ + spin_lock(&cp->stat_lock[0]); + stats[N_TX_RINGS].collisions += stats[0].collisions; + stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors; + stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors; + stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors; + stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors; + stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors; + spin_unlock(&cp->stat_lock[0]); + + for (i = 0; i < N_TX_RINGS; i++) { + spin_lock(&cp->stat_lock[i]); + stats[N_TX_RINGS].rx_length_errors += + stats[i].rx_length_errors; + stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors; + stats[N_TX_RINGS].rx_packets += stats[i].rx_packets; + stats[N_TX_RINGS].tx_packets += stats[i].tx_packets; + stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes; + stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes; + stats[N_TX_RINGS].rx_errors += stats[i].rx_errors; + stats[N_TX_RINGS].tx_errors += stats[i].tx_errors; + stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped; + stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped; + memset(stats + i, 0, sizeof(struct net_device_stats)); + spin_unlock(&cp->stat_lock[i]); + } + spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags); + return stats + N_TX_RINGS; +} + + +static void cas_set_multicast(struct net_device *dev) +{ + struct cas *cp = netdev_priv(dev); + u32 rxcfg, rxcfg_new; + unsigned long flags; + int limit = STOP_TRIES; + + if (!cp->hw_running) + return; + + spin_lock_irqsave(&cp->lock, flags); + rxcfg = readl(cp->regs + REG_MAC_RX_CFG); + + /* disable RX MAC and wait for completion */ + writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); + while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) { + if (!limit--) + break; + udelay(10); + } + + /* disable hash filter and wait for completion */ + limit = STOP_TRIES; + rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN); + writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); + while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) { + if (!limit--) + break; + udelay(10); + } + + /* program hash filters */ + cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp); + rxcfg |= rxcfg_new; + writel(rxcfg, cp->regs + REG_MAC_RX_CFG); + spin_unlock_irqrestore(&cp->lock, flags); +} + +static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct cas *cp = netdev_priv(dev); + strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN); + strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN); + info->fw_version[0] = '\0'; + strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN); + info->regdump_len = cp->casreg_len < 
CAS_MAX_REGS ? + cp->casreg_len : CAS_MAX_REGS; + info->n_stats = CAS_NUM_STAT_KEYS; +} + +static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct cas *cp = netdev_priv(dev); + u16 bmcr; + int full_duplex, speed, pause; + unsigned long flags; + enum link_state linkstate = link_up; + + cmd->advertising = 0; + cmd->supported = SUPPORTED_Autoneg; + if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { + cmd->supported |= SUPPORTED_1000baseT_Full; + cmd->advertising |= ADVERTISED_1000baseT_Full; + } + + /* Record PHY settings if HW is on. */ + spin_lock_irqsave(&cp->lock, flags); + bmcr = 0; + linkstate = cp->lstate; + if (CAS_PHY_MII(cp->phy_type)) { + cmd->port = PORT_MII; + cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ? + XCVR_INTERNAL : XCVR_EXTERNAL; + cmd->phy_address = cp->phy_addr; + cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full; + + cmd->supported |= + (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_TP | SUPPORTED_MII); + + if (cp->hw_running) { + cas_mif_poll(cp, 0); + bmcr = cas_phy_read(cp, MII_BMCR); + cas_read_mii_link_mode(cp, &full_duplex, + &speed, &pause); + cas_mif_poll(cp, 1); + } + + } else { + cmd->port = PORT_FIBRE; + cmd->transceiver = XCVR_INTERNAL; + cmd->phy_address = 0; + cmd->supported |= SUPPORTED_FIBRE; + cmd->advertising |= ADVERTISED_FIBRE; + + if (cp->hw_running) { + /* pcs uses the same bits as mii */ + bmcr = readl(cp->regs + REG_PCS_MII_CTRL); + cas_read_pcs_link_mode(cp, &full_duplex, + &speed, &pause); + } + } + spin_unlock_irqrestore(&cp->lock, flags); + + if (bmcr & BMCR_ANENABLE) { + cmd->advertising |= ADVERTISED_Autoneg; + cmd->autoneg = AUTONEG_ENABLE; + cmd->speed = ((speed == 10) ? + SPEED_10 : + ((speed == 1000) ? + SPEED_1000 : SPEED_100)); + cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF; + } else { + cmd->autoneg = AUTONEG_DISABLE; + cmd->speed = + (bmcr & CAS_BMCR_SPEED1000) ? + SPEED_1000 : + ((bmcr & BMCR_SPEED100) ? SPEED_100: + SPEED_10); + cmd->duplex = + (bmcr & BMCR_FULLDPLX) ? + DUPLEX_FULL : DUPLEX_HALF; + } + if (linkstate != link_up) { + /* Force these to "unknown" if the link is not up and + * autonogotiation in enabled. We can set the link + * speed to 0, but not cmd->duplex, + * because its legal values are 0 and 1. Ethtool will + * print the value reported in parentheses after the + * word "Unknown" for unrecognized values. + * + * If in forced mode, we report the speed and duplex + * settings that we configured. + */ + if (cp->link_cntl & BMCR_ANENABLE) { + cmd->speed = 0; + cmd->duplex = 0xff; + } else { + cmd->speed = SPEED_10; + if (cp->link_cntl & BMCR_SPEED100) { + cmd->speed = SPEED_100; + } else if (cp->link_cntl & CAS_BMCR_SPEED1000) { + cmd->speed = SPEED_1000; + } + cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)? + DUPLEX_FULL : DUPLEX_HALF; + } + } + return 0; +} + +static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct cas *cp = netdev_priv(dev); + unsigned long flags; + + /* Verify the settings we care about. 
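+	 * Autoneg must be explicitly enabled or disabled, and in
+	 * forced mode only 10/100/1000 speeds with half or full
+	 * duplex are accepted; anything else is rejected with
+	 * -EINVAL below.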
*/ + if (cmd->autoneg != AUTONEG_ENABLE && + cmd->autoneg != AUTONEG_DISABLE) + return -EINVAL; + + if (cmd->autoneg == AUTONEG_DISABLE && + ((cmd->speed != SPEED_1000 && + cmd->speed != SPEED_100 && + cmd->speed != SPEED_10) || + (cmd->duplex != DUPLEX_HALF && + cmd->duplex != DUPLEX_FULL))) + return -EINVAL; + + /* Apply settings and restart link process. */ + spin_lock_irqsave(&cp->lock, flags); + cas_begin_auto_negotiation(cp, cmd); + spin_unlock_irqrestore(&cp->lock, flags); + return 0; +} + +static int cas_nway_reset(struct net_device *dev) +{ + struct cas *cp = netdev_priv(dev); + unsigned long flags; + + if ((cp->link_cntl & BMCR_ANENABLE) == 0) + return -EINVAL; + + /* Restart link process. */ + spin_lock_irqsave(&cp->lock, flags); + cas_begin_auto_negotiation(cp, NULL); + spin_unlock_irqrestore(&cp->lock, flags); + + return 0; +} + +static u32 cas_get_link(struct net_device *dev) +{ + struct cas *cp = netdev_priv(dev); + return cp->lstate == link_up; +} + +static u32 cas_get_msglevel(struct net_device *dev) +{ + struct cas *cp = netdev_priv(dev); + return cp->msg_enable; +} + +static void cas_set_msglevel(struct net_device *dev, u32 value) +{ + struct cas *cp = netdev_priv(dev); + cp->msg_enable = value; +} + +static int cas_get_regs_len(struct net_device *dev) +{ + struct cas *cp = netdev_priv(dev); + return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS; +} + +static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + struct cas *cp = netdev_priv(dev); + regs->version = 0; + /* cas_read_regs handles locks (cp->lock). */ + cas_read_regs(cp, p, regs->len / sizeof(u32)); +} + +static int cas_get_stats_count(struct net_device *dev) +{ + return CAS_NUM_STAT_KEYS; +} + +static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + memcpy(data, ðtool_cassini_statnames, + CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN); +} + +static void cas_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *estats, u64 *data) +{ + struct cas *cp = netdev_priv(dev); + struct net_device_stats *stats = cas_get_stats(cp->dev); + int i = 0; + data[i++] = stats->collisions; + data[i++] = stats->rx_bytes; + data[i++] = stats->rx_crc_errors; + data[i++] = stats->rx_dropped; + data[i++] = stats->rx_errors; + data[i++] = stats->rx_fifo_errors; + data[i++] = stats->rx_frame_errors; + data[i++] = stats->rx_length_errors; + data[i++] = stats->rx_over_errors; + data[i++] = stats->rx_packets; + data[i++] = stats->tx_aborted_errors; + data[i++] = stats->tx_bytes; + data[i++] = stats->tx_dropped; + data[i++] = stats->tx_errors; + data[i++] = stats->tx_fifo_errors; + data[i++] = stats->tx_packets; + BUG_ON(i != CAS_NUM_STAT_KEYS); +} + +static struct ethtool_ops cas_ethtool_ops = { + .get_drvinfo = cas_get_drvinfo, + .get_settings = cas_get_settings, + .set_settings = cas_set_settings, + .nway_reset = cas_nway_reset, + .get_link = cas_get_link, + .get_msglevel = cas_get_msglevel, + .set_msglevel = cas_set_msglevel, + .get_regs_len = cas_get_regs_len, + .get_regs = cas_get_regs, + .get_stats_count = cas_get_stats_count, + .get_strings = cas_get_strings, + .get_ethtool_stats = cas_get_ethtool_stats, +}; + +static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct cas *cp = netdev_priv(dev); + struct mii_ioctl_data *data = if_mii(ifr); + unsigned long flags; + int rc = -EOPNOTSUPP; + + /* Hold the PM semaphore while doing ioctl's or we may collide + * with open/close and power management and oops. 
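+	 * The MII ioctls below take cp->lock and temporarily disable
+	 * MIF polling around each PHY register access.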
+ */ + down(&cp->pm_sem); + switch (cmd) { + case SIOCGMIIPHY: /* Get address of MII PHY in use. */ + data->phy_id = cp->phy_addr; + /* Fallthrough... */ + + case SIOCGMIIREG: /* Read MII PHY register. */ + spin_lock_irqsave(&cp->lock, flags); + cas_mif_poll(cp, 0); + data->val_out = cas_phy_read(cp, data->reg_num & 0x1f); + cas_mif_poll(cp, 1); + spin_unlock_irqrestore(&cp->lock, flags); + rc = 0; + break; + + case SIOCSMIIREG: /* Write MII PHY register. */ + if (!capable(CAP_NET_ADMIN)) { + rc = -EPERM; + break; + } + spin_lock_irqsave(&cp->lock, flags); + cas_mif_poll(cp, 0); + rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in); + cas_mif_poll(cp, 1); + spin_unlock_irqrestore(&cp->lock, flags); + break; + default: + break; + }; + + up(&cp->pm_sem); + return rc; +} + +static int __devinit cas_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + static int cas_version_printed = 0; + unsigned long casreg_base, casreg_len; + struct net_device *dev; + struct cas *cp; + int i, err, pci_using_dac; + u16 pci_cmd; + u8 orig_cacheline_size = 0, cas_cacheline_size = 0; + + if (cas_version_printed++ == 0) + printk(KERN_INFO "%s", version); + + err = pci_enable_device(pdev); + if (err) { + printk(KERN_ERR PFX "Cannot enable PCI device, " + "aborting.\n"); + return err; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + printk(KERN_ERR PFX "Cannot find proper PCI device " + "base address, aborting.\n"); + err = -ENODEV; + goto err_out_disable_pdev; + } + + dev = alloc_etherdev(sizeof(*cp)); + if (!dev) { + printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); + err = -ENOMEM; + goto err_out_disable_pdev; + } + SET_MODULE_OWNER(dev); + SET_NETDEV_DEV(dev, &pdev->dev); + + err = pci_request_regions(pdev, dev->name); + if (err) { + printk(KERN_ERR PFX "Cannot obtain PCI resources, " + "aborting.\n"); + goto err_out_free_netdev; + } + pci_set_master(pdev); + + /* we must always turn on parity response or else parity + * doesn't get generated properly. disable SERR/PERR as well. + * in addition, we want to turn MWI on. + */ + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + pci_cmd &= ~PCI_COMMAND_SERR; + pci_cmd |= PCI_COMMAND_PARITY; + pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); + pci_set_mwi(pdev); + /* + * On some architectures, the default cache line size set + * by pci_set_mwi reduces perforamnce. We have to increase + * it for this case. To start, we'll print some configuration + * data. + */ +#if 1 + pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, + &orig_cacheline_size); + if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) { + cas_cacheline_size = + (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ? + CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES; + if (pci_write_config_byte(pdev, + PCI_CACHE_LINE_SIZE, + cas_cacheline_size)) { + printk(KERN_ERR PFX "Could not set PCI cache " + "line size\n"); + goto err_write_cacheline; + } + } +#endif + + + /* Configure DMA attributes. 
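+	 * Prefer a 64-bit DMA mask and fall back to 32-bit
+	 * addressing if the platform can't provide it.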
*/ + if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { + pci_using_dac = 1; + err = pci_set_consistent_dma_mask(pdev, + DMA_64BIT_MASK); + if (err < 0) { + printk(KERN_ERR PFX "Unable to obtain 64-bit DMA " + "for consistent allocations\n"); + goto err_out_free_res; + } + + } else { + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + printk(KERN_ERR PFX "No usable DMA configuration, " + "aborting.\n"); + goto err_out_free_res; + } + pci_using_dac = 0; + } + + casreg_base = pci_resource_start(pdev, 0); + casreg_len = pci_resource_len(pdev, 0); + + cp = netdev_priv(dev); + cp->pdev = pdev; +#if 1 + /* A value of 0 indicates we never explicitly set it */ + cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0; +#endif + cp->dev = dev; + cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE : + cassini_debug; + + cp->link_transition = LINK_TRANSITION_UNKNOWN; + cp->link_transition_jiffies_valid = 0; + + spin_lock_init(&cp->lock); + spin_lock_init(&cp->rx_inuse_lock); + spin_lock_init(&cp->rx_spare_lock); + for (i = 0; i < N_TX_RINGS; i++) { + spin_lock_init(&cp->stat_lock[i]); + spin_lock_init(&cp->tx_lock[i]); + } + spin_lock_init(&cp->stat_lock[N_TX_RINGS]); + init_MUTEX(&cp->pm_sem); + + init_timer(&cp->link_timer); + cp->link_timer.function = cas_link_timer; + cp->link_timer.data = (unsigned long) cp; + +#if 1 + /* Just in case the implementation of atomic operations + * change so that an explicit initialization is necessary. + */ + atomic_set(&cp->reset_task_pending, 0); + atomic_set(&cp->reset_task_pending_all, 0); + atomic_set(&cp->reset_task_pending_spare, 0); + atomic_set(&cp->reset_task_pending_mtu, 0); +#endif + INIT_WORK(&cp->reset_task, cas_reset_task, cp); + + /* Default link parameters */ + if (link_mode >= 0 && link_mode <= 6) + cp->link_cntl = link_modes[link_mode]; + else + cp->link_cntl = BMCR_ANENABLE; + cp->lstate = link_down; + cp->link_transition = LINK_TRANSITION_LINK_DOWN; + netif_carrier_off(cp->dev); + cp->timer_ticks = 0; + + /* give us access to cassini registers */ + cp->regs = ioremap(casreg_base, casreg_len); + if (cp->regs == 0UL) { + printk(KERN_ERR PFX "Cannot map device registers, " + "aborting.\n"); + goto err_out_free_res; + } + cp->casreg_len = casreg_len; + + pci_save_state(pdev); + cas_check_pci_invariants(cp); + cas_hard_reset(cp); + cas_reset(cp, 0); + if (cas_check_invariants(cp)) + goto err_out_iounmap; + + cp->init_block = (struct cas_init_block *) + pci_alloc_consistent(pdev, sizeof(struct cas_init_block), + &cp->block_dvma); + if (!cp->init_block) { + printk(KERN_ERR PFX "Cannot allocate init block, " + "aborting.\n"); + goto err_out_iounmap; + } + + for (i = 0; i < N_TX_RINGS; i++) + cp->init_txds[i] = cp->init_block->txds[i]; + + for (i = 0; i < N_RX_DESC_RINGS; i++) + cp->init_rxds[i] = cp->init_block->rxds[i]; + + for (i = 0; i < N_RX_COMP_RINGS; i++) + cp->init_rxcs[i] = cp->init_block->rxcs[i]; + + for (i = 0; i < N_RX_FLOWS; i++) + skb_queue_head_init(&cp->rx_flows[i]); + + dev->open = cas_open; + dev->stop = cas_close; + dev->hard_start_xmit = cas_start_xmit; + dev->get_stats = cas_get_stats; + dev->set_multicast_list = cas_set_multicast; + dev->do_ioctl = cas_ioctl; + dev->ethtool_ops = &cas_ethtool_ops; + dev->tx_timeout = cas_tx_timeout; + dev->watchdog_timeo = CAS_TX_TIMEOUT; + dev->change_mtu = cas_change_mtu; +#ifdef USE_NAPI + dev->poll = cas_poll; + dev->weight = 64; +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER + dev->poll_controller = cas_netpoll; +#endif + dev->irq = pdev->irq; + dev->dma = 0; + + /* 
Cassini features. */ + if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0) + dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; + + if (pci_using_dac) + dev->features |= NETIF_F_HIGHDMA; + + if (register_netdev(dev)) { + printk(KERN_ERR PFX "Cannot register net device, " + "aborting.\n"); + goto err_out_free_consistent; + } + + i = readl(cp->regs + REG_BIM_CFG); + printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) " + "Ethernet[%d] ", dev->name, + (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "", + (i & BIM_CFG_32BIT) ? "32" : "64", + (i & BIM_CFG_66MHZ) ? "66" : "33", + (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq); + + for (i = 0; i < 6; i++) + printk("%2.2x%c", dev->dev_addr[i], + i == 5 ? ' ' : ':'); + printk("\n"); + + pci_set_drvdata(pdev, dev); + cp->hw_running = 1; + cas_entropy_reset(cp); + cas_phy_init(cp); + cas_begin_auto_negotiation(cp, NULL); + return 0; + +err_out_free_consistent: + pci_free_consistent(pdev, sizeof(struct cas_init_block), + cp->init_block, cp->block_dvma); + +err_out_iounmap: + down(&cp->pm_sem); + if (cp->hw_running) + cas_shutdown(cp); + up(&cp->pm_sem); + + iounmap(cp->regs); + + +err_out_free_res: + pci_release_regions(pdev); + +err_write_cacheline: + /* Try to restore it in case the error occured after we + * set it. + */ + pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size); + +err_out_free_netdev: + free_netdev(dev); + +err_out_disable_pdev: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + return -ENODEV; +} + +static void __devexit cas_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct cas *cp; + if (!dev) + return; + + cp = netdev_priv(dev); + unregister_netdev(dev); + + down(&cp->pm_sem); + flush_scheduled_work(); + if (cp->hw_running) + cas_shutdown(cp); + up(&cp->pm_sem); + +#if 1 + if (cp->orig_cacheline_size) { + /* Restore the cache line size if we had modified + * it. + */ + pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, + cp->orig_cacheline_size); + } +#endif + pci_free_consistent(pdev, sizeof(struct cas_init_block), + cp->init_block, cp->block_dvma); + iounmap(cp->regs); + free_netdev(dev); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +#ifdef CONFIG_PM +static int cas_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct cas *cp = netdev_priv(dev); + unsigned long flags; + + /* We hold the PM semaphore during entire driver + * sleep time + */ + down(&cp->pm_sem); + + /* If the driver is opened, we stop the DMA */ + if (cp->opened) { + netif_device_detach(dev); + + cas_lock_all_save(cp, flags); + + /* We can set the second arg of cas_reset to 0 + * because on resume, we'll call cas_init_hw with + * its second arg set so that autonegotiation is + * restarted. 
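+		 * (cas_resume() passes 1 as the second argument to
+		 * cas_init_hw() for exactly this reason.)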
+ */ + cas_reset(cp, 0); + cas_clean_rings(cp); + cas_unlock_all_restore(cp, flags); + } + + if (cp->hw_running) + cas_shutdown(cp); + + return 0; +} + +static int cas_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct cas *cp = netdev_priv(dev); + + printk(KERN_INFO "%s: resuming\n", dev->name); + + cas_hard_reset(cp); + if (cp->opened) { + unsigned long flags; + cas_lock_all_save(cp, flags); + cas_reset(cp, 0); + cp->hw_running = 1; + cas_clean_rings(cp); + cas_init_hw(cp, 1); + cas_unlock_all_restore(cp, flags); + + netif_device_attach(dev); + } + up(&cp->pm_sem); + return 0; +} +#endif /* CONFIG_PM */ + +static struct pci_driver cas_driver = { + .name = DRV_MODULE_NAME, + .id_table = cas_pci_tbl, + .probe = cas_init_one, + .remove = __devexit_p(cas_remove_one), +#ifdef CONFIG_PM + .suspend = cas_suspend, + .resume = cas_resume +#endif +}; + +static int __init cas_init(void) +{ + if (linkdown_timeout > 0) + link_transition_timeout = linkdown_timeout * HZ; + else + link_transition_timeout = 0; + + return pci_module_init(&cas_driver); +} + +static void __exit cas_cleanup(void) +{ + pci_unregister_driver(&cas_driver); +} + +module_init(cas_init); +module_exit(cas_cleanup); diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h new file mode 100644 index 000000000000..88063ef16cf6 --- /dev/null +++ b/drivers/net/cassini.h @@ -0,0 +1,4425 @@ +/* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $ + * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver. + * + * Copyright (C) 2004 Sun Microsystems Inc. + * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + * + * vendor id: 0x108E (Sun Microsystems, Inc.) + * device id: 0xabba (Cassini) + * revision ids: 0x01 = Cassini + * 0x02 = Cassini rev 2 + * 0x10 = Cassini+ + * 0x11 = Cassini+ 0.2u + * + * vendor id: 0x100b (National Semiconductor) + * device id: 0x0035 (DP83065/Saturn) + * revision ids: 0x30 = Saturn B2 + * + * rings are all offset from 0. + * + * there are two clock domains: + * PCI: 33/66MHz clock + * chip: 125MHz clock + */ + +#ifndef _CASSINI_H +#define _CASSINI_H + +/* cassini register map: 2M memory mapped in 32-bit memory space accessible as + * 32-bit words. there is no i/o port access. REG_ addresses are + * shared between cassini and cassini+. REG_PLUS_ addresses only + * appear in cassini+. REG_MINUS_ addresses only appear in cassini. + */ +#define CAS_ID_REV2 0x02 +#define CAS_ID_REVPLUS 0x10 +#define CAS_ID_REVPLUS02u 0x11 +#define CAS_ID_REVSATURNB2 0x30 + +/** global resources **/ + +/* this register sets the weights for the weighted round robin arbiter. e.g., + * if rx weight == 1 and tx weight == 0, rx == 2x tx transfer credit + * for its next turn to access the pci bus. 
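+ * i.e. each increment of a weight doubles that engine's transfer
+ * credit, as the encoding map below shows.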
+ * map: 0x0 = x1, 0x1 = x2, 0x2 = x4, 0x3 = x8 + * DEFAULT: 0x0, SIZE: 5 bits + */ +#define REG_CAWR 0x0004 /* core arbitration weight */ +#define CAWR_RX_DMA_WEIGHT_SHIFT 0 +#define CAWR_RX_DMA_WEIGHT_MASK 0x03 /* [0:1] */ +#define CAWR_TX_DMA_WEIGHT_SHIFT 2 +#define CAWR_TX_DMA_WEIGHT_MASK 0x0C /* [3:2] */ +#define CAWR_RR_DIS 0x10 /* [4] */ + +/* if enabled, BIM can send bursts across PCI bus > cacheline size. burst + * sizes determined by length of packet or descriptor transfer and the + * max length allowed by the target. + * DEFAULT: 0x0, SIZE: 1 bit + */ +#define REG_INF_BURST 0x0008 /* infinite burst enable reg */ +#define INF_BURST_EN 0x1 /* enable */ + +/* top level interrupts [0-9] are auto-cleared to 0 when the status + * register is read. second level interrupts [13 - 18] are cleared at + * the source. tx completion register 3 is replicated in [19 - 31] + * DEFAULT: 0x00000000, SIZE: 29 bits + */ +#define REG_INTR_STATUS 0x000C /* interrupt status register */ +#define INTR_TX_INTME 0x00000001 /* frame w/ INT ME desc bit set + xferred from host queue to + TX FIFO */ +#define INTR_TX_ALL 0x00000002 /* all xmit frames xferred into + TX FIFO. i.e., + TX Kick == TX complete. if + PACED_MODE set, then TX FIFO + also empty */ +#define INTR_TX_DONE 0x00000004 /* any frame xferred into tx + FIFO */ +#define INTR_TX_TAG_ERROR 0x00000008 /* TX FIFO tag framing + corrupted. FATAL ERROR */ +#define INTR_RX_DONE 0x00000010 /* at least 1 frame xferred + from RX FIFO to host mem. + RX completion reg updated. + may be delayed by recv + intr blanking. */ +#define INTR_RX_BUF_UNAVAIL 0x00000020 /* no more receive buffers. + RX Kick == RX complete */ +#define INTR_RX_TAG_ERROR 0x00000040 /* RX FIFO tag framing + corrupted. FATAL ERROR */ +#define INTR_RX_COMP_FULL 0x00000080 /* no more room in completion + ring to post descriptors. + RX complete head incr to + almost reach RX complete + tail */ +#define INTR_RX_BUF_AE 0x00000100 /* less than the + programmable threshold # + of free descr avail for + hw use */ +#define INTR_RX_COMP_AF 0x00000200 /* less than the + programmable threshold # + of descr spaces for hw + use in completion descr + ring */ +#define INTR_RX_LEN_MISMATCH 0x00000400 /* len field from MAC != + len of non-reassembly pkt + from fifo during DMA or + header parser provides TCP + header and payload size > + MAC packet size. + FATAL ERROR */ +#define INTR_SUMMARY 0x00001000 /* summary interrupt bit. this + bit will be set if an interrupt + generated on the pci bus. 
useful + when driver is polling for + interrupts */ +#define INTR_PCS_STATUS 0x00002000 /* PCS interrupt status register */ +#define INTR_TX_MAC_STATUS 0x00004000 /* TX MAC status register has at + least 1 unmasked interrupt set */ +#define INTR_RX_MAC_STATUS 0x00008000 /* RX MAC status register has at + least 1 unmasked interrupt set */ +#define INTR_MAC_CTRL_STATUS 0x00010000 /* MAC control status register has + at least 1 unmasked interrupt + set */ +#define INTR_MIF_STATUS 0x00020000 /* MIF status register has at least + 1 unmasked interrupt set */ +#define INTR_PCI_ERROR_STATUS 0x00040000 /* PCI error status register in the + BIF has at least 1 unmasked + interrupt set */ +#define INTR_TX_COMP_3_MASK 0xFFF80000 /* mask for TX completion + 3 reg data */ +#define INTR_TX_COMP_3_SHIFT 19 +#define INTR_ERROR_MASK (INTR_MIF_STATUS | INTR_PCI_ERROR_STATUS | \ + INTR_PCS_STATUS | INTR_RX_LEN_MISMATCH | \ + INTR_TX_MAC_STATUS | INTR_RX_MAC_STATUS | \ + INTR_TX_TAG_ERROR | INTR_RX_TAG_ERROR | \ + INTR_MAC_CTRL_STATUS) + +/* determines which status events will cause an interrupt. layout same + * as REG_INTR_STATUS. + * DEFAULT: 0xFFFFFFFF, SIZE: 16 bits + */ +#define REG_INTR_MASK 0x0010 /* Interrupt mask */ + +/* top level interrupt bits that are cleared during read of REG_INTR_STATUS_ALIAS. + * useful when driver is polling for interrupts. layout same as REG_INTR_MASK. + * DEFAULT: 0x00000000, SIZE: 12 bits + */ +#define REG_ALIAS_CLEAR 0x0014 /* alias clear mask + (used w/ status alias) */ +/* same as REG_INTR_STATUS except that only bits cleared are those selected by + * REG_ALIAS_CLEAR + * DEFAULT: 0x00000000, SIZE: 29 bits + */ +#define REG_INTR_STATUS_ALIAS 0x001C /* interrupt status alias + (selective clear) */ + +/* DEFAULT: 0x0, SIZE: 3 bits */ +#define REG_PCI_ERR_STATUS 0x1000 /* PCI error status */ +#define PCI_ERR_BADACK 0x01 /* reserved in Cassini+. + set if no ACK64# during ABS64 cycle + in Cassini. */ +#define PCI_ERR_DTRTO 0x02 /* delayed xaction timeout. set if + no read retry after 2^15 clocks */ +#define PCI_ERR_OTHER 0x04 /* other PCI errors */ +#define PCI_ERR_BIM_DMA_WRITE 0x08 /* BIM received 0 count DMA write req. + unused in Cassini. */ +#define PCI_ERR_BIM_DMA_READ 0x10 /* BIM received 0 count DMA read req. + unused in Cassini. */ +#define PCI_ERR_BIM_DMA_TIMEOUT 0x20 /* BIM received 255 retries during + DMA. unused in cassini. */ + +/* mask for PCI status events that will set PCI_ERR_STATUS. if cleared, event + * causes an interrupt to be generated. + * DEFAULT: 0x7, SIZE: 3 bits + */ +#define REG_PCI_ERR_STATUS_MASK 0x1004 /* PCI Error status mask */ + +/* used to configure PCI related parameters that are not in PCI config space. + * DEFAULT: 0bxx000, SIZE: 5 bits + */ +#define REG_BIM_CFG 0x1008 /* BIM Configuration */ +#define BIM_CFG_RESERVED0 0x001 /* reserved */ +#define BIM_CFG_RESERVED1 0x002 /* reserved */ +#define BIM_CFG_64BIT_DISABLE 0x004 /* disable 64-bit mode */ +#define BIM_CFG_66MHZ 0x008 /* (ro) 1 = 66MHz, 0 = < 66MHz */ +#define BIM_CFG_32BIT 0x010 /* (ro) 1 = 32-bit slot, 0 = 64-bit */ +#define BIM_CFG_DPAR_INTR_ENABLE 0x020 /* detected parity err enable */ +#define BIM_CFG_RMA_INTR_ENABLE 0x040 /* master abort intr enable */ +#define BIM_CFG_RTA_INTR_ENABLE 0x080 /* target abort intr enable */ +#define BIM_CFG_RESERVED2 0x100 /* reserved */ +#define BIM_CFG_BIM_DISABLE 0x200 /* stop BIM DMA. use before global + reset. reserved in Cassini. */ +#define BIM_CFG_BIM_STATUS 0x400 /* (ro) 1 = BIM DMA suspended. + reserved in Cassini. 
*/ +#define BIM_CFG_PERROR_BLOCK 0x800 /* block PERR# to pci bus. def: 0. + reserved in Cassini. */ + +/* DEFAULT: 0x00000000, SIZE: 32 bits */ +#define REG_BIM_DIAG 0x100C /* BIM Diagnostic */ +#define BIM_DIAG_MSTR_SM_MASK 0x3FFFFF00 /* PCI master controller state + machine bits [21:0] */ +#define BIM_DIAG_BRST_SM_MASK 0x7F /* PCI burst controller state + machine bits [6:0] */ + +/* writing to SW_RESET_TX and SW_RESET_RX will issue a global + * reset. poll until TX and RX read back as 0's for completion. + */ +#define REG_SW_RESET 0x1010 /* Software reset */ +#define SW_RESET_TX 0x00000001 /* reset TX DMA engine. poll until + cleared to 0. */ +#define SW_RESET_RX 0x00000002 /* reset RX DMA engine. poll until + cleared to 0. */ +#define SW_RESET_RSTOUT 0x00000004 /* force RSTOUT# pin active (low). + resets PHY and anything else + connected to RSTOUT#. RSTOUT# + is also activated by local PCI + reset when hot-swap is being + done. */ +#define SW_RESET_BLOCK_PCS_SLINK 0x00000008 /* if a global reset is done with + this bit set, PCS and SLINK + modules won't be reset. + i.e., link won't drop. */ +#define SW_RESET_BREQ_SM_MASK 0x00007F00 /* breq state machine [6:0] */ +#define SW_RESET_PCIARB_SM_MASK 0x00070000 /* pci arbitration state bits: + 0b000: ARB_IDLE1 + 0b001: ARB_IDLE2 + 0b010: ARB_WB_ACK + 0b011: ARB_WB_WAT + 0b100: ARB_RB_ACK + 0b101: ARB_RB_WAT + 0b110: ARB_RB_END + 0b111: ARB_WB_END */ +#define SW_RESET_RDPCI_SM_MASK 0x00300000 /* read pci state bits: + 0b00: RD_PCI_WAT + 0b01: RD_PCI_RDY + 0b11: RD_PCI_ACK */ +#define SW_RESET_RDARB_SM_MASK 0x00C00000 /* read arbitration state bits: + 0b00: AD_IDL_RX + 0b01: AD_ACK_RX + 0b10: AD_ACK_TX + 0b11: AD_IDL_TX */ +#define SW_RESET_WRPCI_SM_MASK 0x06000000 /* write pci state bits + 0b00: WR_PCI_WAT + 0b01: WR_PCI_RDY + 0b11: WR_PCI_ACK */ +#define SW_RESET_WRARB_SM_MASK 0x38000000 /* write arbitration state bits: + 0b000: ARB_IDLE1 + 0b001: ARB_IDLE2 + 0b010: ARB_TX_ACK + 0b011: ARB_TX_WAT + 0b100: ARB_RX_ACK + 0b110: ARB_RX_WAT */ + +/* Cassini only. 64-bit register used to check PCI datapath. when read, + * value written has both lower and upper 32-bit halves rotated to the right + * one bit position. e.g., FFFFFFFF FFFFFFFF -> 7FFFFFFF 7FFFFFFF + */ +#define REG_MINUS_BIM_DATAPATH_TEST 0x1018 /* Cassini: BIM datapath test + Cassini+: reserved */ + +/* output enables are provided for each device's chip select and for the rest + * of the outputs from cassini to its local bus devices. two sw programmable + * bits are connected to general purpus control/status bits. + * DEFAULT: 0x7 + */ +#define REG_BIM_LOCAL_DEV_EN 0x1020 /* BIM local device + output EN. default: 0x7 */ +#define BIM_LOCAL_DEV_PAD 0x01 /* address bus, RW signal, and + OE signal output enable on the + local bus interface. these + are shared between both local + bus devices. tristate when 0. */ +#define BIM_LOCAL_DEV_PROM 0x02 /* PROM chip select */ +#define BIM_LOCAL_DEV_EXT 0x04 /* secondary local bus device chip + select output enable */ +#define BIM_LOCAL_DEV_SOFT_0 0x08 /* sw programmable ctrl bit 0 */ +#define BIM_LOCAL_DEV_SOFT_1 0x10 /* sw programmable ctrl bit 1 */ +#define BIM_LOCAL_DEV_HW_RESET 0x20 /* internal hw reset. Cassini+ only. */ + +/* access 24 entry BIM read and write buffers. put address in REG_BIM_BUFFER_ADDR + * and read/write from/to it REG_BIM_BUFFER_DATA_LOW and _DATA_HI. + * _DATA_HI should be the last access of the sequence. + * DEFAULT: undefined + */ +#define REG_BIM_BUFFER_ADDR 0x1024 /* BIM buffer address. for + purposes. 
*/ +#define BIM_BUFFER_ADDR_MASK 0x3F /* index (0 - 23) of buffer */ +#define BIM_BUFFER_WR_SELECT 0x40 /* write buffer access = 1 + read buffer access = 0 */ +/* DEFAULT: undefined */ +#define REG_BIM_BUFFER_DATA_LOW 0x1028 /* BIM buffer data low */ +#define REG_BIM_BUFFER_DATA_HI 0x102C /* BIM buffer data high */ + +/* set BIM_RAM_BIST_START to start built-in self test for BIM read buffer. + * bit auto-clears when done with status read from _SUMMARY and _PASS bits. + */ +#define REG_BIM_RAM_BIST 0x102C /* BIM RAM (read buffer) BIST + control/status */ +#define BIM_RAM_BIST_RD_START 0x01 /* start BIST for BIM read buffer */ +#define BIM_RAM_BIST_WR_START 0x02 /* start BIST for BIM write buffer. + Cassini only. reserved in + Cassini+. */ +#define BIM_RAM_BIST_RD_PASS 0x04 /* summary BIST pass status for read + buffer. */ +#define BIM_RAM_BIST_WR_PASS 0x08 /* summary BIST pass status for write + buffer. Cassini only. reserved + in Cassini+. */ +#define BIM_RAM_BIST_RD_LOW_PASS 0x10 /* read low bank passes BIST */ +#define BIM_RAM_BIST_RD_HI_PASS 0x20 /* read high bank passes BIST */ +#define BIM_RAM_BIST_WR_LOW_PASS 0x40 /* write low bank passes BIST. + Cassini only. reserved in + Cassini+. */ +#define BIM_RAM_BIST_WR_HI_PASS 0x80 /* write high bank passes BIST. + Cassini only. reserved in + Cassini+. */ + +/* ASUN: i'm not sure what this does as it's not in the spec. + * DEFAULT: 0xFC + */ +#define REG_BIM_DIAG_MUX 0x1030 /* BIM diagnostic probe mux + select register */ + +/* enable probe monitoring mode and select data appearing on the P_A* bus. bit + * values for _SEL_HI_MASK and _SEL_LOW_MASK: + * 0x0: internal probe[7:0] (pci arb state, wtc empty w, wtc full w, wtc empty w, + * wtc empty r, post pci) + * 0x1: internal probe[15:8] (pci wbuf comp, pci wpkt comp, pci rbuf comp, + * pci rpkt comp, txdma wr req, txdma wr ack, + * txdma wr rdy, txdma wr xfr done) + * 0x2: internal probe[23:16] (txdma rd req, txdma rd ack, txdma rd rdy, rxdma rd, + * rd arb state, rd pci state) + * 0x3: internal probe[31:24] (rxdma req, rxdma ack, rxdma rdy, wrarb state, + * wrpci state) + * 0x4: pci io probe[7:0] 0x5: pci io probe[15:8] + * 0x6: pci io probe[23:16] 0x7: pci io probe[31:24] + * 0x8: pci io probe[39:32] 0x9: pci io probe[47:40] + * 0xa: pci io probe[55:48] 0xb: pci io probe[63:56] + * the following are not available in Cassini: + * 0xc: rx probe[7:0] 0xd: tx probe[7:0] + * 0xe: hp probe[7:0] 0xf: mac probe[7:0] + */ +#define REG_PLUS_PROBE_MUX_SELECT 0x1034 /* Cassini+: PROBE MUX SELECT */ +#define PROBE_MUX_EN 0x80000000 /* allow probe signals to be + driven on local bus P_A[15:0] + for debugging */ +#define PROBE_MUX_SUB_MUX_MASK 0x0000FF00 /* select sub module probe signals: + 0x03 = mac[1:0] + 0x0C = rx[1:0] + 0x30 = tx[1:0] + 0xC0 = hp[1:0] */ +#define PROBE_MUX_SEL_HI_MASK 0x000000F0 /* select which module to appear + on P_A[15:8]. see above for + values. */ +#define PROBE_MUX_SEL_LOW_MASK 0x0000000F /* select which module to appear + on P_A[7:0]. see above for + values. */ + +/* values mean the same thing as REG_INTR_MASK excep that it's for INTB. + DEFAULT: 0x1F */ +#define REG_PLUS_INTR_MASK_1 0x1038 /* Cassini+: interrupt mask + register 2 for INTB */ +#define REG_PLUS_INTRN_MASK(x) (REG_PLUS_INTR_MASK_1 + ((x) - 1)*16) +/* bits correspond to both _MASK and _STATUS registers. _ALT corresponds to + * all of the alternate (2-4) INTR registers while _1 corresponds to only + * _MASK_1 and _STATUS_1 registers. 
+ * DEFAULT: 0x7 for MASK registers, 0x0 for ALIAS_CLEAR registers + */ +#define INTR_RX_DONE_ALT 0x01 +#define INTR_RX_COMP_FULL_ALT 0x02 +#define INTR_RX_COMP_AF_ALT 0x04 +#define INTR_RX_BUF_UNAVAIL_1 0x08 +#define INTR_RX_BUF_AE_1 0x10 /* almost empty */ +#define INTRN_MASK_RX_EN 0x80 +#define INTRN_MASK_CLEAR_ALL (INTR_RX_DONE_ALT | \ + INTR_RX_COMP_FULL_ALT | \ + INTR_RX_COMP_AF_ALT | \ + INTR_RX_BUF_UNAVAIL_1 | \ + INTR_RX_BUF_AE_1) +#define REG_PLUS_INTR_STATUS_1 0x103C /* Cassini+: interrupt status + register 2 for INTB. default: 0x1F */ +#define REG_PLUS_INTRN_STATUS(x) (REG_PLUS_INTR_STATUS_1 + ((x) - 1)*16) +#define INTR_STATUS_ALT_INTX_EN 0x80 /* generate INTX when one of the + flags are set. enables desc ring. */ + +#define REG_PLUS_ALIAS_CLEAR_1 0x1040 /* Cassini+: alias clear mask + register 2 for INTB */ +#define REG_PLUS_ALIASN_CLEAR(x) (REG_PLUS_ALIAS_CLEAR_1 + ((x) - 1)*16) + +#define REG_PLUS_INTR_STATUS_ALIAS_1 0x1044 /* Cassini+: interrupt status + register alias 2 for INTB */ +#define REG_PLUS_INTRN_STATUS_ALIAS(x) (REG_PLUS_INTR_STATUS_ALIAS_1 + ((x) - 1)*16) + +#define REG_SATURN_PCFG 0x106c /* pin configuration register for + integrated macphy */ + +#define SATURN_PCFG_TLA 0x00000001 /* 1 = phy actled */ +#define SATURN_PCFG_FLA 0x00000002 /* 1 = phy link10led */ +#define SATURN_PCFG_CLA 0x00000004 /* 1 = phy link100led */ +#define SATURN_PCFG_LLA 0x00000008 /* 1 = phy link1000led */ +#define SATURN_PCFG_RLA 0x00000010 /* 1 = phy duplexled */ +#define SATURN_PCFG_PDS 0x00000020 /* phy debug mode. + 0 = normal */ +#define SATURN_PCFG_MTP 0x00000080 /* test point select */ +#define SATURN_PCFG_GMO 0x00000100 /* GMII observe. 1 = + GMII on SERDES pins for + monitoring. */ +#define SATURN_PCFG_FSI 0x00000200 /* 1 = freeze serdes/gmii. all + pins configed as outputs. + for power saving when using + internal phy. */ +#define SATURN_PCFG_LAD 0x00000800 /* 0 = mac core led ctrl + polarity from strapping + value. + 1 = mac core led ctrl + polarity active low. */ + + +/** transmit dma registers **/ +#define MAX_TX_RINGS_SHIFT 2 +#define MAX_TX_RINGS (1 << MAX_TX_RINGS_SHIFT) +#define MAX_TX_RINGS_MASK (MAX_TX_RINGS - 1) + +/* TX configuration. + * descr ring sizes size = 32 * (1 << n), n < 9. e.g., 0x8 = 8k. default: 0x8 + * DEFAULT: 0x3F000001 + */ +#define REG_TX_CFG 0x2004 /* TX config */ +#define TX_CFG_DMA_EN 0x00000001 /* enable TX DMA. if cleared, DMA + will stop after xfer of current + buffer has been completed. */ +#define TX_CFG_FIFO_PIO_SEL 0x00000002 /* TX DMA FIFO can be + accessed w/ FIFO addr + and data registers. + TX DMA should be + disabled. */ +#define TX_CFG_DESC_RING0_MASK 0x0000003C /* # desc entries in + ring 1. */ +#define TX_CFG_DESC_RING0_SHIFT 2 +#define TX_CFG_DESC_RINGN_MASK(a) (TX_CFG_DESC_RING0_MASK << (a)*4) +#define TX_CFG_DESC_RINGN_SHIFT(a) (TX_CFG_DESC_RING0_SHIFT + (a)*4) +#define TX_CFG_PACED_MODE 0x00100000 /* TX_ALL only set after + TX FIFO becomes empty. + if 0, TX_ALL set + if descr queue empty. */ +#define TX_CFG_DMA_RDPIPE_DIS 0x01000000 /* always set to 1 */ +#define TX_CFG_COMPWB_Q1 0x02000000 /* completion writeback happens at + the end of every packet kicked + through Q1. */ +#define TX_CFG_COMPWB_Q2 0x04000000 /* completion writeback happens at + the end of every packet kicked + through Q2. 
*/ +#define TX_CFG_COMPWB_Q3 0x08000000 /* completion writeback happens at + the end of every packet kicked + through Q3 */ +#define TX_CFG_COMPWB_Q4 0x10000000 /* completion writeback happens at + the end of every packet kicked + through Q4 */ +#define TX_CFG_INTR_COMPWB_DIS 0x20000000 /* disable pre-interrupt completion + writeback */ +#define TX_CFG_CTX_SEL_MASK 0xC0000000 /* selects tx test port + connection + 0b00: tx mac req, + tx mac retry req, + tx ack and tx tag. + 0b01: txdma rd req, + txdma rd ack, + txdma rd rdy, + txdma rd type0 + 0b11: txdma wr req, + txdma wr ack, + txdma wr rdy, + txdma wr xfr done. */ +#define TX_CFG_CTX_SEL_SHIFT 30 + +/* 11-bit counters that point to next location in FIFO to be loaded/retrieved. + * used for diagnostics only. + */ +#define REG_TX_FIFO_WRITE_PTR 0x2014 /* TX FIFO write pointer */ +#define REG_TX_FIFO_SHADOW_WRITE_PTR 0x2018 /* TX FIFO shadow write + pointer. temp hold reg. + diagnostics only. */ +#define REG_TX_FIFO_READ_PTR 0x201C /* TX FIFO read pointer */ +#define REG_TX_FIFO_SHADOW_READ_PTR 0x2020 /* TX FIFO shadow read + pointer */ + +/* (ro) 11-bit up/down counter w/ # of frames currently in TX FIFO */ +#define REG_TX_FIFO_PKT_CNT 0x2024 /* TX FIFO packet counter */ + +/* current state of all state machines in TX */ +#define REG_TX_SM_1 0x2028 /* TX state machine reg #1 */ +#define TX_SM_1_CHAIN_MASK 0x000003FF /* chaining state machine */ +#define TX_SM_1_CSUM_MASK 0x00000C00 /* checksum state machine */ +#define TX_SM_1_FIFO_LOAD_MASK 0x0003F000 /* FIFO load state machine. + = 0x01 when TX disabled. */ +#define TX_SM_1_FIFO_UNLOAD_MASK 0x003C0000 /* FIFO unload state machine */ +#define TX_SM_1_CACHE_MASK 0x03C00000 /* desc. prefetch cache controller + state machine */ +#define TX_SM_1_CBQ_ARB_MASK 0xF8000000 /* CBQ arbiter state machine */ + +#define REG_TX_SM_2 0x202C /* TX state machine reg #2 */ +#define TX_SM_2_COMP_WB_MASK 0x07 /* completion writeback sm */ +#define TX_SM_2_SUB_LOAD_MASK 0x38 /* sub load state machine */ +#define TX_SM_2_KICK_MASK 0xC0 /* kick state machine */ + +/* 64-bit pointer to the transmit data buffer. only the 50 LSB are incremented + * while the upper 23 bits are taken from the TX descriptor + */ +#define REG_TX_DATA_PTR_LOW 0x2030 /* TX data pointer low */ +#define REG_TX_DATA_PTR_HI 0x2034 /* TX data pointer high */ + +/* 13 bit registers written by driver w/ descriptor value that follows + * last valid xmit descriptor. kick # and complete # values are used by + * the xmit dma engine to control tx descr fetching. if > 1 valid + * tx descr is available within the cache line being read, cassini will + * internally cache up to 4 of them. 0 on reset. _KICK = rw, _COMP = ro. + */ +#define REG_TX_KICK0 0x2038 /* TX kick reg #1 */ +#define REG_TX_KICKN(x) (REG_TX_KICK0 + (x)*4) +#define REG_TX_COMP0 0x2048 /* TX completion reg #1 */ +#define REG_TX_COMPN(x) (REG_TX_COMP0 + (x)*4) + +/* values of TX_COMPLETE_1-4 are written. each completion register + * is 2bytes in size and contiguous. 8B allocation w/ 8B alignment. + * NOTE: completion reg values are only written back prior to TX_INTME and + * TX_ALL interrupts. at all other times, the most up-to-date index values + * should be obtained from the REG_TX_COMPLETE_# registers. 
+ * here's the layout: + * offset from base addr completion # byte + * 0 TX_COMPLETE_1_MSB + * 1 TX_COMPLETE_1_LSB + * 2 TX_COMPLETE_2_MSB + * 3 TX_COMPLETE_2_LSB + * 4 TX_COMPLETE_3_MSB + * 5 TX_COMPLETE_3_LSB + * 6 TX_COMPLETE_4_MSB + * 7 TX_COMPLETE_4_LSB + */ +#define TX_COMPWB_SIZE 8 +#define REG_TX_COMPWB_DB_LOW 0x2058 /* TX completion write back + base low */ +#define REG_TX_COMPWB_DB_HI 0x205C /* TX completion write back + base high */ +#define TX_COMPWB_MSB_MASK 0x00000000000000FFULL +#define TX_COMPWB_MSB_SHIFT 0 +#define TX_COMPWB_LSB_MASK 0x000000000000FF00ULL +#define TX_COMPWB_LSB_SHIFT 8 +#define TX_COMPWB_NEXT(x) ((x) >> 16) + +/* 53 MSB used as base address. 11 LSB assumed to be 0. TX desc pointer must + * be 2KB-aligned. */ +#define REG_TX_DB0_LOW 0x2060 /* TX descriptor base low #1 */ +#define REG_TX_DB0_HI 0x2064 /* TX descriptor base hi #1 */ +#define REG_TX_DBN_LOW(x) (REG_TX_DB0_LOW + (x)*8) +#define REG_TX_DBN_HI(x) (REG_TX_DB0_HI + (x)*8) + +/* 16-bit registers hold weights for the weighted round-robin of the + * four CBQ TX descr rings. weights correspond to # bytes xferred from + * host to TXFIFO in a round of WRR arbitration. can be set + * dynamically with new weights set upon completion of the current + * packet transfer from host memory to TXFIFO. a dummy write to any of + * these registers causes a queue1 pre-emption with all historical bw + * deficit data reset to 0 (useful when congestion requires a + * pre-emption/re-allocation of network bandwidth + */ +#define REG_TX_MAXBURST_0 0x2080 /* TX MaxBurst #1 */ +#define REG_TX_MAXBURST_1 0x2084 /* TX MaxBurst #2 */ +#define REG_TX_MAXBURST_2 0x2088 /* TX MaxBurst #3 */ +#define REG_TX_MAXBURST_3 0x208C /* TX MaxBurst #4 */ + +/* diagnostics access to any TX FIFO location. every access is 65 + * bits. _DATA_LOW = 32 LSB, _DATA_HI_T1/T0 = 32 MSB. _TAG = tag bit. + * writing _DATA_HI_T0 sets tag bit low, writing _DATA_HI_T1 sets tag + * bit high. TX_FIFO_PIO_SEL must be set for TX FIFO PIO access. if + * TX FIFO data integrity is desired, TX DMA should be + * disabled. _DATA_HI_Tx should be the last access of the sequence. + */ +#define REG_TX_FIFO_ADDR 0x2104 /* TX FIFO address */ +#define REG_TX_FIFO_TAG 0x2108 /* TX FIFO tag */ +#define REG_TX_FIFO_DATA_LOW 0x210C /* TX FIFO data low */ +#define REG_TX_FIFO_DATA_HI_T1 0x2110 /* TX FIFO data high t1 */ +#define REG_TX_FIFO_DATA_HI_T0 0x2114 /* TX FIFO data high t0 */ +#define REG_TX_FIFO_SIZE 0x2118 /* (ro) TX FIFO size = 0x090 = 9KB */ + +/* 9-bit register controls BIST of TX FIFO. bit set indicates that the BIST + * passed for the specified memory + */ +#define REG_TX_RAMBIST 0x211C /* TX RAMBIST control/status */ +#define TX_RAMBIST_STATE 0x01C0 /* progress state of RAMBIST + controller state machine */ +#define TX_RAMBIST_RAM33A_PASS 0x0020 /* RAM33A passed */ +#define TX_RAMBIST_RAM32A_PASS 0x0010 /* RAM32A passed */ +#define TX_RAMBIST_RAM33B_PASS 0x0008 /* RAM33B passed */ +#define TX_RAMBIST_RAM32B_PASS 0x0004 /* RAM32B passed */ +#define TX_RAMBIST_SUMMARY 0x0002 /* all RAM passed */ +#define TX_RAMBIST_START 0x0001 /* write 1 to start BIST. self + clears on completion. */ + +/** receive dma registers **/ +#define MAX_RX_DESC_RINGS 2 +#define MAX_RX_COMP_RINGS 4 + +/* receive DMA channel configuration. default: 0x80910 + * free ring size = (1 << n)*32 -> [32 - 8k] + * completion ring size = (1 << n)*128 -> [128 - 32k], n < 9 + * DEFAULT: 0x80910 + */ +#define REG_RX_CFG 0x4000 /* RX config */ +#define RX_CFG_DMA_EN 0x00000001 /* enable RX DMA. 
0 stops + channel as soon as current + frame xfer has completed. + driver should disable MAC + for 200ms before disabling + RX */ +#define RX_CFG_DESC_RING_MASK 0x0000001E /* # desc entries in RX + free desc ring. + def: 0x8 = 8k */ +#define RX_CFG_DESC_RING_SHIFT 1 +#define RX_CFG_COMP_RING_MASK 0x000001E0 /* # desc entries in RX complete + ring. def: 0x8 = 32k */ +#define RX_CFG_COMP_RING_SHIFT 5 +#define RX_CFG_BATCH_DIS 0x00000200 /* disable receive desc + batching. def: 0x0 = + enabled */ +#define RX_CFG_SWIVEL_MASK 0x00001C00 /* byte offset of the 1st + data byte of the packet + w/in 8 byte boundares. + this swivels the data + DMA'ed to header + buffers, jumbo buffers + when header split is not + requested and MTU sized + buffers. def: 0x2 */ +#define RX_CFG_SWIVEL_SHIFT 10 + +/* cassini+ only */ +#define RX_CFG_DESC_RING1_MASK 0x000F0000 /* # of desc entries in + RX free desc ring 2. + def: 0x8 = 8k */ +#define RX_CFG_DESC_RING1_SHIFT 16 + + +/* the page size register allows cassini chips to do the following with + * received data: + * [--------------------------------------------------------------] page + * [off][buf1][pad][off][buf2][pad][off][buf3][pad][off][buf4][pad] + * |--------------| = PAGE_SIZE_BUFFER_STRIDE + * page = PAGE_SIZE + * offset = PAGE_SIZE_MTU_OFF + * for the above example, MTU_BUFFER_COUNT = 4. + * NOTE: as is apparent, you need to ensure that the following holds: + * MTU_BUFFER_COUNT <= PAGE_SIZE/PAGE_SIZE_BUFFER_STRIDE + * DEFAULT: 0x48002002 (8k pages) + */ +#define REG_RX_PAGE_SIZE 0x4004 /* RX page size */ +#define RX_PAGE_SIZE_MASK 0x00000003 /* size of pages pointed to + by receive descriptors. + if jumbo buffers are + supported the page size + should not be < 8k. + 0b00 = 2k, 0b01 = 4k + 0b10 = 8k, 0b11 = 16k + DEFAULT: 8k */ +#define RX_PAGE_SIZE_SHIFT 0 +#define RX_PAGE_SIZE_MTU_COUNT_MASK 0x00007800 /* # of MTU buffers the hw + packs into a page. + DEFAULT: 4 */ +#define RX_PAGE_SIZE_MTU_COUNT_SHIFT 11 +#define RX_PAGE_SIZE_MTU_STRIDE_MASK 0x18000000 /* # of bytes that separate + each MTU buffer + + offset from each + other. + 0b00 = 1k, 0b01 = 2k + 0b10 = 4k, 0b11 = 8k + DEFAULT: 0x1 */ +#define RX_PAGE_SIZE_MTU_STRIDE_SHIFT 27 +#define RX_PAGE_SIZE_MTU_OFF_MASK 0xC0000000 /* offset in each page that + hw writes the MTU buffer + into. + 0b00 = 0, + 0b01 = 64 bytes + 0b10 = 96, 0b11 = 128 + DEFAULT: 0x1 */ +#define RX_PAGE_SIZE_MTU_OFF_SHIFT 30 + +/* 11-bit counter points to next location in RX FIFO to be loaded/read. + * shadow write pointers enable retries in case of early receive aborts. + * DEFAULT: 0x0. generated on 64-bit boundaries. + */ +#define REG_RX_FIFO_WRITE_PTR 0x4008 /* RX FIFO write pointer */ +#define REG_RX_FIFO_READ_PTR 0x400C /* RX FIFO read pointer */ +#define REG_RX_IPP_FIFO_SHADOW_WRITE_PTR 0x4010 /* RX IPP FIFO shadow write + pointer */ +#define REG_RX_IPP_FIFO_SHADOW_READ_PTR 0x4014 /* RX IPP FIFO shadow read + pointer */ +#define REG_RX_IPP_FIFO_READ_PTR 0x400C /* RX IPP FIFO read + pointer. 
(8-bit counter) */ + +/* current state of RX DMA state engines + other info + * DEFAULT: 0x0 + */ +#define REG_RX_DEBUG 0x401C /* RX debug */ +#define RX_DEBUG_LOAD_STATE_MASK 0x0000000F /* load state machine w/ MAC: + 0x0 = idle, 0x1 = load_bop + 0x2 = load 1, 0x3 = load 2 + 0x4 = load 3, 0x5 = load 4 + 0x6 = last detect + 0x7 = wait req + 0x8 = wait req statuss 1st + 0x9 = load st + 0xa = bubble mac + 0xb = error */ +#define RX_DEBUG_LM_STATE_MASK 0x00000070 /* load state machine w/ HP and + RX FIFO: + 0x0 = idle, 0x1 = hp xfr + 0x2 = wait hp ready + 0x3 = wait flow code + 0x4 = fifo xfer + 0x5 = make status + 0x6 = csum ready + 0x7 = error */ +#define RX_DEBUG_FC_STATE_MASK 0x000000180 /* flow control state machine + w/ MAC: + 0x0 = idle + 0x1 = wait xoff ack + 0x2 = wait xon + 0x3 = wait xon ack */ +#define RX_DEBUG_DATA_STATE_MASK 0x000001E00 /* unload data state machine + states: + 0x0 = idle data + 0x1 = header begin + 0x2 = xfer header + 0x3 = xfer header ld + 0x4 = mtu begin + 0x5 = xfer mtu + 0x6 = xfer mtu ld + 0x7 = jumbo begin + 0x8 = xfer jumbo + 0x9 = xfer jumbo ld + 0xa = reas begin + 0xb = xfer reas + 0xc = flush tag + 0xd = xfer reas ld + 0xe = error + 0xf = bubble idle */ +#define RX_DEBUG_DESC_STATE_MASK 0x0001E000 /* unload desc state machine + states: + 0x0 = idle desc + 0x1 = wait ack + 0x9 = wait ack 2 + 0x2 = fetch desc 1 + 0xa = fetch desc 2 + 0x3 = load ptrs + 0x4 = wait dma + 0x5 = wait ack batch + 0x6 = post batch + 0x7 = xfr done */ +#define RX_DEBUG_INTR_READ_PTR_MASK 0x30000000 /* interrupt read ptr of the + interrupt queue */ +#define RX_DEBUG_INTR_WRITE_PTR_MASK 0xC0000000 /* interrupt write pointer + of the interrupt queue */ + +/* flow control frames are emmitted using two PAUSE thresholds: + * XOFF PAUSE uses pause time value pre-programmed in the Send PAUSE MAC reg + * XON PAUSE uses a pause time of 0. granularity of threshold is 64bytes. + * PAUSE thresholds defined in terms of FIFO occupancy and may be translated + * into FIFO vacancy using RX_FIFO_SIZE. setting ON will trigger XON frames + * when FIFO reaches 0. OFF threshold should not be > size of RX FIFO. max + * value is is 0x6F. + * DEFAULT: 0x00078 + */ +#define REG_RX_PAUSE_THRESH 0x4020 /* RX pause thresholds */ +#define RX_PAUSE_THRESH_QUANTUM 64 +#define RX_PAUSE_THRESH_OFF_MASK 0x000001FF /* XOFF PAUSE emitted when + RX FIFO occupancy > + value*64B */ +#define RX_PAUSE_THRESH_OFF_SHIFT 0 +#define RX_PAUSE_THRESH_ON_MASK 0x001FF000 /* XON PAUSE emitted after + emitting XOFF PAUSE when RX + FIFO occupancy falls below + this value*64B. must be + < XOFF threshold. if = + RX_FIFO_SIZE< XON frames are + never emitted. */ +#define RX_PAUSE_THRESH_ON_SHIFT 12 + +/* 13-bit register used to control RX desc fetching and intr generation. if 4+ + * valid RX descriptors are available, Cassini will read 4 at a time. + * writing N means that all desc up to *but* excluding N are available. N must + * be a multiple of 4 (N % 4 = 0). first desc should be cache-line aligned. + * DEFAULT: 0 on reset + */ +#define REG_RX_KICK 0x4024 /* RX kick reg */ + +/* 8KB aligned 64-bit pointer to the base of the RX free/completion rings. + * lower 13 bits of the low register are hard-wired to 0. 
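+ * a ring base would typically be programmed by splitting the ring's
+ * DMA handle across the register pair, e.g. (sketch only; ring_dvma
+ * is just an illustrative name for that handle):
+ *	writel(((u64) ring_dvma) >> 32, cp->regs + REG_RX_DB_HI);
+ *	writel(((u64) ring_dvma) & 0xffffffff, cp->regs + REG_RX_DB_LOW);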
+ */ +#define REG_RX_DB_LOW 0x4028 /* RX descriptor ring + base low */ +#define REG_RX_DB_HI 0x402C /* RX descriptor ring + base hi */ +#define REG_RX_CB_LOW 0x4030 /* RX completion ring + base low */ +#define REG_RX_CB_HI 0x4034 /* RX completion ring + base hi */ +/* 13-bit register indicate desc used by cassini for receive frames. used + * for diagnostic purposes. + * DEFAULT: 0 on reset + */ +#define REG_RX_COMP 0x4038 /* (ro) RX completion */ + +/* HEAD and TAIL are used to control RX desc posting and interrupt + * generation. hw moves the head register to pass ownership to sw. sw + * moves the tail register to pass ownership back to hw. to give all + * entries to hw, set TAIL = HEAD. if HEAD and TAIL indicate that no + * more entries are available, DMA will pause and an interrupt will be + * generated to indicate no more entries are available. sw can use + * this interrupt to reduce the # of times it must update the + * completion tail register. + * DEFAULT: 0 on reset + */ +#define REG_RX_COMP_HEAD 0x403C /* RX completion head */ +#define REG_RX_COMP_TAIL 0x4040 /* RX completion tail */ + +/* values used for receive interrupt blanking. loaded each time the ISR is read + * DEFAULT: 0x00000000 + */ +#define REG_RX_BLANK 0x4044 /* RX blanking register + for ISR read */ +#define RX_BLANK_INTR_PKT_MASK 0x000001FF /* RX_DONE intr asserted if + this many sets of completion + writebacks (up to 2 packets) + occur since the last time + the ISR was read. 0 = no + packet blanking */ +#define RX_BLANK_INTR_PKT_SHIFT 0 +#define RX_BLANK_INTR_TIME_MASK 0x3FFFF000 /* RX_DONE interrupt asserted + if that many clocks were + counted since last time the + ISR was read. + each count is 512 core + clocks (125MHz). 0 = no + time blanking */ +#define RX_BLANK_INTR_TIME_SHIFT 12 + +/* values used for interrupt generation based on threshold values of how + * many free desc and completion entries are available for hw use. + * DEFAULT: 0x00000000 + */ +#define REG_RX_AE_THRESH 0x4048 /* RX almost empty + thresholds */ +#define RX_AE_THRESH_FREE_MASK 0x00001FFF /* RX_BUF_AE will be + generated if # desc + avail for hw use <= + # */ +#define RX_AE_THRESH_FREE_SHIFT 0 +#define RX_AE_THRESH_COMP_MASK 0x0FFFE000 /* RX_COMP_AE will be + generated if # of + completion entries + avail for hw use <= + # */ +#define RX_AE_THRESH_COMP_SHIFT 13 + +/* probabilities for random early drop (RED) thresholds on a FIFO threshold + * basis. probability should increase when the FIFO level increases. control + * packets are never dropped and not counted in stats. probability programmed + * on a 12.5% granularity. e.g., 0x1 = 1/8 packets dropped. + * DEFAULT: 0x00000000 + */ +#define REG_RX_RED 0x404C /* RX random early detect enable */ +#define RX_RED_4K_6K_FIFO_MASK 0x000000FF /* 4KB < FIFO thresh < 6KB */ +#define RX_RED_6K_8K_FIFO_MASK 0x0000FF00 /* 6KB < FIFO thresh < 8KB */ +#define RX_RED_8K_10K_FIFO_MASK 0x00FF0000 /* 8KB < FIFO thresh < 10KB */ +#define RX_RED_10K_12K_FIFO_MASK 0xFF000000 /* 10KB < FIFO thresh < 12KB */ + +/* FIFO fullness levels for RX FIFO, RX control FIFO, and RX IPP FIFO. + * RX control FIFO = # of packets in RX FIFO. 
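+ * e.g. readl(cp->regs + REG_RX_FIFO_FULLNESS) & RX_FIFO_FULLNESS_RX_PKT_MASK
+ * yields the number of frames currently sitting in the RX FIFO
+ * (field masks below).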
+ * DEFAULT: 0x0 + */ +#define REG_RX_FIFO_FULLNESS 0x4050 /* (ro) RX FIFO fullness */ +#define RX_FIFO_FULLNESS_RX_FIFO_MASK 0x3FF80000 /* level w/ 8B granularity */ +#define RX_FIFO_FULLNESS_IPP_FIFO_MASK 0x0007FF00 /* level w/ 8B granularity */ +#define RX_FIFO_FULLNESS_RX_PKT_MASK 0x000000FF /* # packets in RX FIFO */ +#define REG_RX_IPP_PACKET_COUNT 0x4054 /* RX IPP packet counter */ +#define REG_RX_WORK_DMA_PTR_LOW 0x4058 /* RX working DMA ptr low */ +#define REG_RX_WORK_DMA_PTR_HI 0x405C /* RX working DMA ptr + high */ + +/* BIST testing ro RX FIFO, RX control FIFO, and RX IPP FIFO. only RX BIST + * START/COMPLETE is writeable. START will clear when the BIST has completed + * checking all 17 RAMS. + * DEFAULT: 0bxxxx xxxxx xxxx xxxx xxxx x000 0000 0000 00x0 + */ +#define REG_RX_BIST 0x4060 /* (ro) RX BIST */ +#define RX_BIST_32A_PASS 0x80000000 /* RX FIFO 32A passed */ +#define RX_BIST_33A_PASS 0x40000000 /* RX FIFO 33A passed */ +#define RX_BIST_32B_PASS 0x20000000 /* RX FIFO 32B passed */ +#define RX_BIST_33B_PASS 0x10000000 /* RX FIFO 33B passed */ +#define RX_BIST_32C_PASS 0x08000000 /* RX FIFO 32C passed */ +#define RX_BIST_33C_PASS 0x04000000 /* RX FIFO 33C passed */ +#define RX_BIST_IPP_32A_PASS 0x02000000 /* RX IPP FIFO 33B passed */ +#define RX_BIST_IPP_33A_PASS 0x01000000 /* RX IPP FIFO 33A passed */ +#define RX_BIST_IPP_32B_PASS 0x00800000 /* RX IPP FIFO 32B passed */ +#define RX_BIST_IPP_33B_PASS 0x00400000 /* RX IPP FIFO 33B passed */ +#define RX_BIST_IPP_32C_PASS 0x00200000 /* RX IPP FIFO 32C passed */ +#define RX_BIST_IPP_33C_PASS 0x00100000 /* RX IPP FIFO 33C passed */ +#define RX_BIST_CTRL_32_PASS 0x00800000 /* RX CTRL FIFO 32 passed */ +#define RX_BIST_CTRL_33_PASS 0x00400000 /* RX CTRL FIFO 33 passed */ +#define RX_BIST_REAS_26A_PASS 0x00200000 /* RX Reas 26A passed */ +#define RX_BIST_REAS_26B_PASS 0x00100000 /* RX Reas 26B passed */ +#define RX_BIST_REAS_27_PASS 0x00080000 /* RX Reas 27 passed */ +#define RX_BIST_STATE_MASK 0x00078000 /* BIST state machine */ +#define RX_BIST_SUMMARY 0x00000002 /* when BIST complete, + summary pass bit + contains AND of BIST + results of all 16 + RAMS */ +#define RX_BIST_START 0x00000001 /* write 1 to start + BIST. self clears + on completion. */ + +/* next location in RX CTRL FIFO that will be loaded w/ data from RX IPP/read + * from to retrieve packet control info. + * DEFAULT: 0 + */ +#define REG_RX_CTRL_FIFO_WRITE_PTR 0x4064 /* (ro) RX control FIFO + write ptr */ +#define REG_RX_CTRL_FIFO_READ_PTR 0x4068 /* (ro) RX control FIFO read + ptr */ + +/* receive interrupt blanking. loaded each time interrupt alias register is + * read. + * DEFAULT: 0x0 + */ +#define REG_RX_BLANK_ALIAS_READ 0x406C /* RX blanking register for + alias read */ +#define RX_BAR_INTR_PACKET_MASK 0x000001FF /* assert RX_DONE if # + completion writebacks + > # since last ISR + read. 0 = no + blanking. up to 2 + packets per + completion wb. */ +#define RX_BAR_INTR_TIME_MASK 0x3FFFF000 /* assert RX_DONE if # + clocks > # since last + ISR read. each count + is 512 core clocks + (125MHz). 0 = no + blanking. */ + +/* diagnostic access to RX FIFO. 32 LSB accessed via DATA_LOW. 32 MSB accessed + * via DATA_HI_T0 or DATA_HI_T1. TAG reads the tag bit. writing HI_T0 + * will unset the tag bit while writing HI_T1 will set the tag bit. to reset + * to normal operation after diagnostics, write to address location 0x0. + * RX_DMA_EN bit must be set to 0x0 for RX FIFO PIO access. DATA_HI should + * be the last write access of a write sequence. 
+ * DEFAULT: undefined + */ +#define REG_RX_FIFO_ADDR 0x4080 /* RX FIFO address */ +#define REG_RX_FIFO_TAG 0x4084 /* RX FIFO tag */ +#define REG_RX_FIFO_DATA_LOW 0x4088 /* RX FIFO data low */ +#define REG_RX_FIFO_DATA_HI_T0 0x408C /* RX FIFO data high T0 */ +#define REG_RX_FIFO_DATA_HI_T1 0x4090 /* RX FIFO data high T1 */ + +/* diagnostic assess to RX CTRL FIFO. 8-bit FIFO_ADDR holds address of + * 81 bit control entry and 6 bit flow id. LOW and MID are both 32-bit + * accesses. HI is 7-bits with 6-bit flow id and 1 bit control + * word. RX_DMA_EN must be 0 for RX CTRL FIFO PIO access. DATA_HI + * should be last write access of the write sequence. + * DEFAULT: undefined + */ +#define REG_RX_CTRL_FIFO_ADDR 0x4094 /* RX Control FIFO and + Batching FIFO addr */ +#define REG_RX_CTRL_FIFO_DATA_LOW 0x4098 /* RX Control FIFO data + low */ +#define REG_RX_CTRL_FIFO_DATA_MID 0x409C /* RX Control FIFO data + mid */ +#define REG_RX_CTRL_FIFO_DATA_HI 0x4100 /* RX Control FIFO data + hi and flow id */ +#define RX_CTRL_FIFO_DATA_HI_CTRL 0x0001 /* upper bit of ctrl word */ +#define RX_CTRL_FIFO_DATA_HI_FLOW_MASK 0x007E /* flow id */ + +/* diagnostic access to RX IPP FIFO. same semantics as RX_FIFO. + * DEFAULT: undefined + */ +#define REG_RX_IPP_FIFO_ADDR 0x4104 /* RX IPP FIFO address */ +#define REG_RX_IPP_FIFO_TAG 0x4108 /* RX IPP FIFO tag */ +#define REG_RX_IPP_FIFO_DATA_LOW 0x410C /* RX IPP FIFO data low */ +#define REG_RX_IPP_FIFO_DATA_HI_T0 0x4110 /* RX IPP FIFO data high + T0 */ +#define REG_RX_IPP_FIFO_DATA_HI_T1 0x4114 /* RX IPP FIFO data high + T1 */ + +/* 64-bit pointer to receive data buffer in host memory used for headers and + * small packets. MSB in high register. loaded by DMA state machine and + * increments as DMA writes receive data. only 50 LSB are incremented. top + * 13 bits taken from RX descriptor. + * DEFAULT: undefined + */ +#define REG_RX_HEADER_PAGE_PTR_LOW 0x4118 /* (ro) RX header page ptr + low */ +#define REG_RX_HEADER_PAGE_PTR_HI 0x411C /* (ro) RX header page ptr + high */ +#define REG_RX_MTU_PAGE_PTR_LOW 0x4120 /* (ro) RX MTU page pointer + low */ +#define REG_RX_MTU_PAGE_PTR_HI 0x4124 /* (ro) RX MTU page pointer + high */ + +/* PIO diagnostic access to RX reassembly DMA Table RAM. 6-bit register holds + * one of 64 79-bit locations in the RX Reassembly DMA table and the addr of + * one of the 64 byte locations in the Batching table. LOW holds 32 LSB. + * MID holds the next 32 LSB. HIGH holds the 15 MSB. RX_DMA_EN must be set + * to 0 for PIO access. DATA_HIGH should be last write of write sequence. + * layout: + * reassmbl ptr [78:15] | reassmbl index [14:1] | reassmbl entry valid [0] + * DEFAULT: undefined + */ +#define REG_RX_TABLE_ADDR 0x4128 /* RX reassembly DMA table + address */ +#define RX_TABLE_ADDR_MASK 0x0000003F /* address mask */ + +#define REG_RX_TABLE_DATA_LOW 0x412C /* RX reassembly DMA table + data low */ +#define REG_RX_TABLE_DATA_MID 0x4130 /* RX reassembly DMA table + data mid */ +#define REG_RX_TABLE_DATA_HI 0x4134 /* RX reassembly DMA table + data high */ + +/* cassini+ only */ +/* 8KB aligned 64-bit pointer to base of RX rings. lower 13 bits hardwired to + * 0. same semantics as primary desc/complete rings. + */ +#define REG_PLUS_RX_DB1_LOW 0x4200 /* RX descriptor ring + 2 base low */ +#define REG_PLUS_RX_DB1_HI 0x4204 /* RX descriptor ring + 2 base high */ +#define REG_PLUS_RX_CB1_LOW 0x4208 /* RX completion ring + 2 base low. 4 total */ +#define REG_PLUS_RX_CB1_HI 0x420C /* RX completion ring + 2 base high. 
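/* Editor's sketch (not part of this patch): a single diagnostic PIO write
 * into the RX FIFO following the rules spelled out above: RX DMA already
 * disabled, the address written first, and the DATA_HI access last, with
 * HI_T1 vs HI_T0 selecting the tag bit.  "regs" is an assumed register base.
 */
#include <linux/io.h>

static void cas_rx_fifo_diag_write_sketch(void __iomem *regs, u32 addr,
					  u32 lo, u32 hi, int tag)
{
	writel(addr, regs + REG_RX_FIFO_ADDR);
	writel(lo, regs + REG_RX_FIFO_DATA_LOW);
	if (tag)
		writel(hi, regs + REG_RX_FIFO_DATA_HI_T1); /* sets tag bit */
	else
		writel(hi, regs + REG_RX_FIFO_DATA_HI_T0); /* clears tag bit */
	/* DATA_HI must be the last write of the sequence */
}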
4 total */ +#define REG_PLUS_RX_CBN_LOW(x) (REG_PLUS_RX_CB1_LOW + 8*((x) - 1)) +#define REG_PLUS_RX_CBN_HI(x) (REG_PLUS_RX_CB1_HI + 8*((x) - 1)) +#define REG_PLUS_RX_KICK1 0x4220 /* RX Kick 2 register */ +#define REG_PLUS_RX_COMP1 0x4224 /* (ro) RX completion 2 + reg */ +#define REG_PLUS_RX_COMP1_HEAD 0x4228 /* (ro) RX completion 2 + head reg. 4 total. */ +#define REG_PLUS_RX_COMP1_TAIL 0x422C /* RX completion 2 + tail reg. 4 total. */ +#define REG_PLUS_RX_COMPN_HEAD(x) (REG_PLUS_RX_COMP1_HEAD + 8*((x) - 1)) +#define REG_PLUS_RX_COMPN_TAIL(x) (REG_PLUS_RX_COMP1_TAIL + 8*((x) - 1)) +#define REG_PLUS_RX_AE1_THRESH 0x4240 /* RX almost empty 2 + thresholds */ +#define RX_AE1_THRESH_FREE_MASK RX_AE_THRESH_FREE_MASK +#define RX_AE1_THRESH_FREE_SHIFT RX_AE_THRESH_FREE_SHIFT + +/** header parser registers **/ + +/* RX parser configuration register. + * DEFAULT: 0x1651004 + */ +#define REG_HP_CFG 0x4140 /* header parser + configuration reg */ +#define HP_CFG_PARSE_EN 0x00000001 /* enab header parsing */ +#define HP_CFG_NUM_CPU_MASK 0x000000FC /* # processors + 0 = 64. 0x3f = 63 */ +#define HP_CFG_NUM_CPU_SHIFT 2 +#define HP_CFG_SYN_INC_MASK 0x00000100 /* SYN bit won't increment + TCP seq # by one when + stored in FDBM */ +#define HP_CFG_TCP_THRESH_MASK 0x000FFE00 /* # bytes of TCP data + needed to be considered + for reassembly */ +#define HP_CFG_TCP_THRESH_SHIFT 9 + +/* access to RX Instruction RAM. 5-bit register/counter holds addr + * of 39 bit entry to be read/written. 32 LSB in _DATA_LOW. 7 MSB in _DATA_HI. + * RX_DMA_EN must be 0 for RX instr PIO access. DATA_HI should be last access + * of sequence. + * DEFAULT: undefined + */ +#define REG_HP_INSTR_RAM_ADDR 0x4144 /* HP instruction RAM + address */ +#define HP_INSTR_RAM_ADDR_MASK 0x01F /* 5-bit mask */ +#define REG_HP_INSTR_RAM_DATA_LOW 0x4148 /* HP instruction RAM + data low */ +#define HP_INSTR_RAM_LOW_OUTMASK_MASK 0x0000FFFF +#define HP_INSTR_RAM_LOW_OUTMASK_SHIFT 0 +#define HP_INSTR_RAM_LOW_OUTSHIFT_MASK 0x000F0000 +#define HP_INSTR_RAM_LOW_OUTSHIFT_SHIFT 16 +#define HP_INSTR_RAM_LOW_OUTEN_MASK 0x00300000 +#define HP_INSTR_RAM_LOW_OUTEN_SHIFT 20 +#define HP_INSTR_RAM_LOW_OUTARG_MASK 0xFFC00000 +#define HP_INSTR_RAM_LOW_OUTARG_SHIFT 22 +#define REG_HP_INSTR_RAM_DATA_MID 0x414C /* HP instruction RAM + data mid */ +#define HP_INSTR_RAM_MID_OUTARG_MASK 0x00000003 +#define HP_INSTR_RAM_MID_OUTARG_SHIFT 0 +#define HP_INSTR_RAM_MID_OUTOP_MASK 0x0000003C +#define HP_INSTR_RAM_MID_OUTOP_SHIFT 2 +#define HP_INSTR_RAM_MID_FNEXT_MASK 0x000007C0 +#define HP_INSTR_RAM_MID_FNEXT_SHIFT 6 +#define HP_INSTR_RAM_MID_FOFF_MASK 0x0003F800 +#define HP_INSTR_RAM_MID_FOFF_SHIFT 11 +#define HP_INSTR_RAM_MID_SNEXT_MASK 0x007C0000 +#define HP_INSTR_RAM_MID_SNEXT_SHIFT 18 +#define HP_INSTR_RAM_MID_SOFF_MASK 0x3F800000 +#define HP_INSTR_RAM_MID_SOFF_SHIFT 23 +#define HP_INSTR_RAM_MID_OP_MASK 0xC0000000 +#define HP_INSTR_RAM_MID_OP_SHIFT 30 +#define REG_HP_INSTR_RAM_DATA_HI 0x4150 /* HP instruction RAM + data high */ +#define HP_INSTR_RAM_HI_VAL_MASK 0x0000FFFF +#define HP_INSTR_RAM_HI_VAL_SHIFT 0 +#define HP_INSTR_RAM_HI_MASK_MASK 0xFFFF0000 +#define HP_INSTR_RAM_HI_MASK_SHIFT 16 + +/* PIO access into RX Header parser data RAM and flow database. + * 11-bit register. Data fills the LSB portion of bus if less than 32 bits. + * DATA_RAM: write RAM_FDB_DATA with index to access DATA_RAM. + * RAM bytes = 4*(x - 1) + [3:0]. e.g., 0 -> [3:0], 31 -> [123:120] + * FLOWDB: write DATA_RAM_FDB register and then read/write FDB1-12 to access + * flow database. 
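/* Editor's sketch (not part of this patch): loading one 39-bit header-parser
 * instruction.  Per the comment above, the 5-bit address is written first and
 * DATA_HI must be the last access of the sequence; packing the three data
 * words from the HP_INSTR_RAM_*_MASK/_SHIFT fields is left to the caller.
 * RX DMA is assumed to be disabled already and "regs" is an assumed base.
 */
#include <linux/io.h>

static void cas_hp_instr_write_sketch(void __iomem *regs, u32 index,
				      u32 low, u32 mid, u32 hi)
{
	writel(index & HP_INSTR_RAM_ADDR_MASK, regs + REG_HP_INSTR_RAM_ADDR);
	writel(low, regs + REG_HP_INSTR_RAM_DATA_LOW);
	writel(mid, regs + REG_HP_INSTR_RAM_DATA_MID);
	writel(hi, regs + REG_HP_INSTR_RAM_DATA_HI);	/* must be last */
}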
+ * RX_DMA_EN must be 0 for RX parser RAM PIO access. RX Parser RAM data reg + * should be the last write access of the write sequence. + * DEFAULT: undefined + */ +#define REG_HP_DATA_RAM_FDB_ADDR 0x4154 /* HP data and FDB + RAM address */ +#define HP_DATA_RAM_FDB_DATA_MASK 0x001F /* select 1 of 86 byte + locations in header + parser data ram to + read/write */ +#define HP_DATA_RAM_FDB_FDB_MASK 0x3F00 /* 1 of 64 353-bit locations + in the flow database */ +#define REG_HP_DATA_RAM_DATA 0x4158 /* HP data RAM data */ + +/* HP flow database registers: 1 - 12, 0x415C - 0x4188, 4 8-bit bytes + * FLOW_DB(1) = IP_SA[127:96], FLOW_DB(2) = IP_SA[95:64] + * FLOW_DB(3) = IP_SA[63:32], FLOW_DB(4) = IP_SA[31:0] + * FLOW_DB(5) = IP_DA[127:96], FLOW_DB(6) = IP_DA[95:64] + * FLOW_DB(7) = IP_DA[63:32], FLOW_DB(8) = IP_DA[31:0] + * FLOW_DB(9) = {TCP_SP[15:0],TCP_DP[15:0]} + * FLOW_DB(10) = bit 0 has value for flow valid + * FLOW_DB(11) = TCP_SEQ[63:32], FLOW_DB(12) = TCP_SEQ[31:0] + */ +#define REG_HP_FLOW_DB0 0x415C /* HP flow database 1 reg */ +#define REG_HP_FLOW_DBN(x) (REG_HP_FLOW_DB0 + (x)*4) + +/* diagnostics for RX Header Parser block. + * ASUN: the header parser state machine register is used for diagnostics + * purposes. however, the spec doesn't have any details on it. + */ +#define REG_HP_STATE_MACHINE 0x418C /* (ro) HP state machine */ +#define REG_HP_STATUS0 0x4190 /* (ro) HP status 1 */ +#define HP_STATUS0_SAP_MASK 0xFFFF0000 /* SAP */ +#define HP_STATUS0_L3_OFF_MASK 0x0000FE00 /* L3 offset */ +#define HP_STATUS0_LB_CPUNUM_MASK 0x000001F8 /* load balancing CPU + number */ +#define HP_STATUS0_HRP_OPCODE_MASK 0x00000007 /* HRP opcode */ + +#define REG_HP_STATUS1 0x4194 /* (ro) HP status 2 */ +#define HP_STATUS1_ACCUR2_MASK 0xE0000000 /* accu R2[6:4] */ +#define HP_STATUS1_FLOWID_MASK 0x1F800000 /* flow id */ +#define HP_STATUS1_TCP_OFF_MASK 0x007F0000 /* tcp payload offset */ +#define HP_STATUS1_TCP_SIZE_MASK 0x0000FFFF /* tcp payload size */ + +#define REG_HP_STATUS2 0x4198 /* (ro) HP status 3 */ +#define HP_STATUS2_ACCUR2_MASK 0xF0000000 /* accu R2[3:0] */ +#define HP_STATUS2_CSUM_OFF_MASK 0x07F00000 /* checksum start + start offset */ +#define HP_STATUS2_ACCUR1_MASK 0x000FE000 /* accu R1 */ +#define HP_STATUS2_FORCE_DROP 0x00001000 /* force drop */ +#define HP_STATUS2_BWO_REASSM 0x00000800 /* batching w/o + reassembly */ +#define HP_STATUS2_JH_SPLIT_EN 0x00000400 /* jumbo header split + enable */ +#define HP_STATUS2_FORCE_TCP_NOCHECK 0x00000200 /* force tcp no payload + check */ +#define HP_STATUS2_DATA_MASK_ZERO 0x00000100 /* mask of data length + equal to zero */ +#define HP_STATUS2_FORCE_TCP_CHECK 0x00000080 /* force tcp payload + chk */ +#define HP_STATUS2_MASK_TCP_THRESH 0x00000040 /* mask of payload + threshold */ +#define HP_STATUS2_NO_ASSIST 0x00000020 /* no assist */ +#define HP_STATUS2_CTRL_PACKET_FLAG 0x00000010 /* control packet flag */ +#define HP_STATUS2_TCP_FLAG_CHECK 0x00000008 /* tcp flag check */ +#define HP_STATUS2_SYN_FLAG 0x00000004 /* syn flag */ +#define HP_STATUS2_TCP_CHECK 0x00000002 /* tcp payload chk */ +#define HP_STATUS2_TCP_NOCHECK 0x00000001 /* tcp no payload chk */ + +/* BIST for header parser(HP) and flow database memories (FDBM). set _START + * to start BIST. controller clears _START on completion. _START can also + * be cleared to force termination of BIST. a bit set indicates that that + * memory passed its BIST. 
+ */ +#define REG_HP_RAM_BIST 0x419C /* HP RAM BIST reg */ +#define HP_RAM_BIST_HP_DATA_PASS 0x80000000 /* HP data ram */ +#define HP_RAM_BIST_HP_INSTR0_PASS 0x40000000 /* HP instr ram 0 */ +#define HP_RAM_BIST_HP_INSTR1_PASS 0x20000000 /* HP instr ram 1 */ +#define HP_RAM_BIST_HP_INSTR2_PASS 0x10000000 /* HP instr ram 2 */ +#define HP_RAM_BIST_FDBM_AGE0_PASS 0x08000000 /* FDBM aging RAM0 */ +#define HP_RAM_BIST_FDBM_AGE1_PASS 0x04000000 /* FDBM aging RAM1 */ +#define HP_RAM_BIST_FDBM_FLOWID00_PASS 0x02000000 /* FDBM flowid RAM0 + bank 0 */ +#define HP_RAM_BIST_FDBM_FLOWID10_PASS 0x01000000 /* FDBM flowid RAM1 + bank 0 */ +#define HP_RAM_BIST_FDBM_FLOWID20_PASS 0x00800000 /* FDBM flowid RAM2 + bank 0 */ +#define HP_RAM_BIST_FDBM_FLOWID30_PASS 0x00400000 /* FDBM flowid RAM3 + bank 0 */ +#define HP_RAM_BIST_FDBM_FLOWID01_PASS 0x00200000 /* FDBM flowid RAM0 + bank 1 */ +#define HP_RAM_BIST_FDBM_FLOWID11_PASS 0x00100000 /* FDBM flowid RAM1 + bank 2 */ +#define HP_RAM_BIST_FDBM_FLOWID21_PASS 0x00080000 /* FDBM flowid RAM2 + bank 1 */ +#define HP_RAM_BIST_FDBM_FLOWID31_PASS 0x00040000 /* FDBM flowid RAM3 + bank 1 */ +#define HP_RAM_BIST_FDBM_TCPSEQ_PASS 0x00020000 /* FDBM tcp sequence + RAM */ +#define HP_RAM_BIST_SUMMARY 0x00000002 /* all BIST tests */ +#define HP_RAM_BIST_START 0x00000001 /* start/stop BIST */ + + +/** MAC registers. **/ +/* reset bits are set using a PIO write and self-cleared after the command + * execution has completed. + */ +#define REG_MAC_TX_RESET 0x6000 /* TX MAC software reset + command (default: 0x0) */ +#define REG_MAC_RX_RESET 0x6004 /* RX MAC software reset + command (default: 0x0) */ +/* execute a pause flow control frame transmission + DEFAULT: 0x0XXXX */ +#define REG_MAC_SEND_PAUSE 0x6008 /* send pause command reg */ +#define MAC_SEND_PAUSE_TIME_MASK 0x0000FFFF /* value of pause time + to be sent on network + in units of slot + times */ +#define MAC_SEND_PAUSE_SEND 0x00010000 /* send pause flow ctrl + frame on network */ + +/* bit set indicates that event occurred. auto-cleared when status register + * is read and have corresponding mask bits in mask register. events will + * trigger an interrupt if the corresponding mask bit is 0. 
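/* Editor's sketch (not part of this patch): issuing a TX MAC software reset
 * and waiting for the self-clearing command bit to drop.  Bit 0 is assumed
 * to be the reset command bit, and the retry count/delay are illustrative.
 */
#include <linux/io.h>
#include <linux/delay.h>

static int cas_mac_tx_reset_sketch(void __iomem *regs)
{
	int limit = 100;

	writel(0x1, regs + REG_MAC_TX_RESET);
	while (limit-- > 0) {
		if (!(readl(regs + REG_MAC_TX_RESET) & 0x1))
			return 0;		/* reset has completed */
		udelay(10);
	}
	return -1;				/* timed out */
}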
+ * status register default: 0x00000000 + * mask register default = 0xFFFFFFFF on reset + */ +#define REG_MAC_TX_STATUS 0x6010 /* TX MAC status reg */ +#define MAC_TX_FRAME_XMIT 0x0001 /* successful frame + transmision */ +#define MAC_TX_UNDERRUN 0x0002 /* terminated frame + transmission due to + data starvation in the + xmit data path */ +#define MAC_TX_MAX_PACKET_ERR 0x0004 /* frame exceeds max allowed + length passed to TX MAC + by the DMA engine */ +#define MAC_TX_COLL_NORMAL 0x0008 /* rollover of the normal + collision counter */ +#define MAC_TX_COLL_EXCESS 0x0010 /* rollover of the excessive + collision counter */ +#define MAC_TX_COLL_LATE 0x0020 /* rollover of the late + collision counter */ +#define MAC_TX_COLL_FIRST 0x0040 /* rollover of the first + collision counter */ +#define MAC_TX_DEFER_TIMER 0x0080 /* rollover of the defer + timer */ +#define MAC_TX_PEAK_ATTEMPTS 0x0100 /* rollover of the peak + attempts counter */ + +#define REG_MAC_RX_STATUS 0x6014 /* RX MAC status reg */ +#define MAC_RX_FRAME_RECV 0x0001 /* successful receipt of + a frame */ +#define MAC_RX_OVERFLOW 0x0002 /* dropped frame due to + RX FIFO overflow */ +#define MAC_RX_FRAME_COUNT 0x0004 /* rollover of receive frame + counter */ +#define MAC_RX_ALIGN_ERR 0x0008 /* rollover of alignment + error counter */ +#define MAC_RX_CRC_ERR 0x0010 /* rollover of crc error + counter */ +#define MAC_RX_LEN_ERR 0x0020 /* rollover of length + error counter */ +#define MAC_RX_VIOL_ERR 0x0040 /* rollover of code + violation error */ + +/* DEFAULT: 0xXXXX0000 on reset */ +#define REG_MAC_CTRL_STATUS 0x6018 /* MAC control status reg */ +#define MAC_CTRL_PAUSE_RECEIVED 0x00000001 /* successful + reception of a + pause control + frame */ +#define MAC_CTRL_PAUSE_STATE 0x00000002 /* MAC has made a + transition from + "not paused" to + "paused" */ +#define MAC_CTRL_NOPAUSE_STATE 0x00000004 /* MAC has made a + transition from + "paused" to "not + paused" */ +#define MAC_CTRL_PAUSE_TIME_MASK 0xFFFF0000 /* value of pause time + operand that was + received in the last + pause flow control + frame */ + +/* layout identical to TX MAC[8:0] */ +#define REG_MAC_TX_MASK 0x6020 /* TX MAC mask reg */ +/* layout identical to RX MAC[6:0] */ +#define REG_MAC_RX_MASK 0x6024 /* RX MAC mask reg */ +/* layout identical to CTRL MAC[2:0] */ +#define REG_MAC_CTRL_MASK 0x6028 /* MAC control mask reg */ + +/* to ensure proper operation, CFG_EN must be cleared to 0 and a delay + * imposed before writes to other bits in the TX_MAC_CFG register or any of + * the MAC parameters is performed. delay dependent upon time required to + * transmit a maximum size frame (= MAC_FRAMESIZE_MAX*8/Mbps). e.g., + * the delay for a 1518-byte frame on a 100Mbps network is 125us. + * alternatively, just poll TX_CFG_EN until it reads back as 0. + * NOTE: on half-duplex 1Gbps, TX_CFG_CARRIER_EXTEND and + * RX_CFG_CARRIER_EXTEND should be set and the SLOT_TIME register should + * be 0x200 (slot time of 512 bytes) + */ +#define REG_MAC_TX_CFG 0x6030 /* TX MAC config reg */ +#define MAC_TX_CFG_EN 0x0001 /* enable TX MAC. 0 will + force TXMAC state + machine to remain in + idle state or to + transition to idle state + on completion of an + ongoing packet. */ +#define MAC_TX_CFG_IGNORE_CARRIER 0x0002 /* disable CSMA/CD deferral + process. set to 1 when + full duplex and 0 when + half duplex */ +#define MAC_TX_CFG_IGNORE_COLL 0x0004 /* disable CSMA/CD backoff + algorithm. 
set to 1 when + full duplex and 0 when + half duplex */ +#define MAC_TX_CFG_IPG_EN 0x0008 /* enable extension of the + Rx-to-TX IPG. after + receiving a frame, TX + MAC will reset its + deferral process to + carrier sense for the + amount of time = IPG0 + + IPG1 and commit to + transmission for time + specified in IPG2. when + 0 or when xmitting frames + back-to-pack (Tx-to-Tx + IPG), TX MAC ignores + IPG0 and will only use + IPG1 for deferral time. + IPG2 still used. */ +#define MAC_TX_CFG_NEVER_GIVE_UP_EN 0x0010 /* TX MAC will not easily + give up on frame + xmission. if backoff + algorithm reaches the + ATTEMPT_LIMIT, it will + clear attempts counter + and continue trying to + send the frame as + specified by + GIVE_UP_LIM. when 0, + TX MAC will execute + standard CSMA/CD prot. */ +#define MAC_TX_CFG_NEVER_GIVE_UP_LIM 0x0020 /* when set, TX MAC will + continue to try to xmit + until successful. when + 0, TX MAC will continue + to try xmitting until + successful or backoff + algorithm reaches + ATTEMPT_LIMIT*16 */ +#define MAC_TX_CFG_NO_BACKOFF 0x0040 /* modify CSMA/CD to disable + backoff algorithm. TX + MAC will not back off + after a xmission attempt + that resulted in a + collision. */ +#define MAC_TX_CFG_SLOW_DOWN 0x0080 /* modify CSMA/CD so that + deferral process is reset + in response to carrier + sense during the entire + duration of IPG. TX MAC + will only commit to frame + xmission after frame + xmission has actually + begun. */ +#define MAC_TX_CFG_NO_FCS 0x0100 /* TX MAC will not generate + CRC for all xmitted + packets. when clear, CRC + generation is dependent + upon NO_CRC bit in the + xmit control word from + TX DMA */ +#define MAC_TX_CFG_CARRIER_EXTEND 0x0200 /* enables xmit part of the + carrier extension + feature. this allows for + longer collision domains + by extending the carrier + and collision window + from the end of FCS until + the end of the slot time + if necessary. Required + for half-duplex at 1Gbps, + clear otherwise. */ + +/* when CRC is not stripped, reassembly packets will not contain the CRC. + * these will be stripped by HRP because it reassembles layer 4 data, and the + * CRC is layer 2. however, non-reassembly packets will still contain the CRC + * when passed to the host. to ensure proper operation, need to wait 3.2ms + * after clearing RX_CFG_EN before writing to any other RX MAC registers + * or other MAC parameters. alternatively, poll RX_CFG_EN until it clears + * to 0. similary, HASH_FILTER_EN and ADDR_FILTER_EN have the same + * restrictions as CFG_EN. + */ +#define REG_MAC_RX_CFG 0x6034 /* RX MAC config reg */ +#define MAC_RX_CFG_EN 0x0001 /* enable RX MAC */ +#define MAC_RX_CFG_STRIP_PAD 0x0002 /* always program to 0. + feature not supported */ +#define MAC_RX_CFG_STRIP_FCS 0x0004 /* RX MAC will strip the + last 4 bytes of a + received frame. */ +#define MAC_RX_CFG_PROMISC_EN 0x0008 /* promiscuous mode */ +#define MAC_RX_CFG_PROMISC_GROUP_EN 0x0010 /* accept all valid + multicast frames (group + bit in DA field set) */ +#define MAC_RX_CFG_HASH_FILTER_EN 0x0020 /* use hash table to filter + multicast addresses */ +#define MAC_RX_CFG_ADDR_FILTER_EN 0x0040 /* cause RX MAC to use + address filtering regs + to filter both unicast + and multicast + addresses */ +#define MAC_RX_CFG_DISABLE_DISCARD 0x0080 /* pass errored frames to + RX DMA by setting BAD + bit but not Abort bit + in the status. CRC, + framing, and length errs + will not increment + error counters. frames + which don't match dest + addr will be passed up + w/ BAD bit set. 
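/* Editor's sketch (not part of this patch): the "clear the enable bit and
 * poll it until it reads back 0" sequence that the TX_MAC_CFG and RX_MAC_CFG
 * comments call for before touching other MAC parameters.  The timeout and
 * register base are illustrative assumptions.
 */
#include <linux/io.h>
#include <linux/delay.h>

static int cas_mac_disable_sketch(void __iomem *regs, unsigned long cfg_reg,
				  u32 en_bit)
{
	int limit = 1000;

	writel(readl(regs + cfg_reg) & ~en_bit, regs + cfg_reg);
	while (limit-- > 0) {
		if (!(readl(regs + cfg_reg) & en_bit))
			return 0;
		udelay(10);
	}
	return -1;
}

/* e.g. cas_mac_disable_sketch(regs, REG_MAC_TX_CFG, MAC_TX_CFG_EN);
 *      cas_mac_disable_sketch(regs, REG_MAC_RX_CFG, MAC_RX_CFG_EN);
 */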
*/ +#define MAC_RX_CFG_CARRIER_EXTEND 0x0100 /* enable reception of + packet bursts generated + by carrier extension + with packet bursting + senders. only applies + to half-duplex 1Gbps */ + +/* DEFAULT: 0x0 */ +#define REG_MAC_CTRL_CFG 0x6038 /* MAC control config reg */ +#define MAC_CTRL_CFG_SEND_PAUSE_EN 0x0001 /* respond to requests for + sending pause flow ctrl + frames */ +#define MAC_CTRL_CFG_RECV_PAUSE_EN 0x0002 /* respond to received + pause flow ctrl frames */ +#define MAC_CTRL_CFG_PASS_CTRL 0x0004 /* pass valid MAC ctrl + packets to RX DMA */ + +/* to ensure proper operation, a global initialization sequence should be + * performed when a loopback config is entered or exited. if programmed after + * a hw or global sw reset, RX/TX MAC software reset and initialization + * should be done to ensure stable clocking. + * DEFAULT: 0x0 + */ +#define REG_MAC_XIF_CFG 0x603C /* XIF config reg */ +#define MAC_XIF_TX_MII_OUTPUT_EN 0x0001 /* enable output drivers + on MII xmit bus */ +#define MAC_XIF_MII_INT_LOOPBACK 0x0002 /* loopback GMII xmit data + path to GMII recv data + path. phy mode register + clock selection must be + set to GMII mode and + GMII_MODE should be set + to 1. in loopback mode, + REFCLK will drive the + entire mac core. 0 for + normal operation. */ +#define MAC_XIF_DISABLE_ECHO 0x0004 /* disables receive data + path during packet + xmission. clear to 0 + in any full duplex mode, + in any loopback mode, + or in half-duplex SERDES + or SLINK modes. set when + in half-duplex when + using external phy. */ +#define MAC_XIF_GMII_MODE 0x0008 /* MAC operates with GMII + clocks and datapath */ +#define MAC_XIF_MII_BUFFER_OUTPUT_EN 0x0010 /* MII_BUF_EN pin. enable + external tristate buffer + on the MII receive + bus. */ +#define MAC_XIF_LINK_LED 0x0020 /* LINKLED# active (low) */ +#define MAC_XIF_FDPLX_LED 0x0040 /* FDPLXLED# active (low) */ + +#define REG_MAC_IPG0 0x6040 /* inter-packet gap0 reg. + recommended: 0x00 */ +#define REG_MAC_IPG1 0x6044 /* inter-packet gap1 reg + recommended: 0x08 */ +#define REG_MAC_IPG2 0x6048 /* inter-packet gap2 reg + recommended: 0x04 */ +#define REG_MAC_SLOT_TIME 0x604C /* slot time reg + recommended: 0x40 */ +#define REG_MAC_FRAMESIZE_MIN 0x6050 /* min frame size reg + recommended: 0x40 */ + +/* FRAMESIZE_MAX holds both the max frame size as well as the max burst size. + * recommended value: 0x2000.05EE + */ +#define REG_MAC_FRAMESIZE_MAX 0x6054 /* max frame size reg */ +#define MAC_FRAMESIZE_MAX_BURST_MASK 0x3FFF0000 /* max burst size */ +#define MAC_FRAMESIZE_MAX_BURST_SHIFT 16 +#define MAC_FRAMESIZE_MAX_FRAME_MASK 0x00007FFF /* max frame size */ +#define MAC_FRAMESIZE_MAX_FRAME_SHIFT 0 +#define REG_MAC_PA_SIZE 0x6058 /* PA size reg. number of + preamble bytes that the + TX MAC will xmit at the + beginning of each frame + value should be 2 or + greater. recommended + value: 0x07 */ +#define REG_MAC_JAM_SIZE 0x605C /* jam size reg. duration + of jam in units of media + byte time. recommended + value: 0x04 */ +#define REG_MAC_ATTEMPT_LIMIT 0x6060 /* attempt limit reg. # + of attempts TX MAC will + make to xmit a frame + before it resets its + attempts counter. after + the limit has been + reached, TX MAC may or + may not drop the frame + dependent upon value + in TX_MAC_CFG. + recommended + value: 0x10 */ +#define REG_MAC_CTRL_TYPE 0x6064 /* MAC control type reg. + type field of a MAC + ctrl frame. recommended + value: 0x8808 */ + +/* mac address registers: 0 - 44, 0x6080 - 0x6130, 4 8-bit bytes. 
+ * register contains comparison + * 0 16 MSB of primary MAC addr [47:32] of DA field + * 1 16 middle bits "" [31:16] of DA field + * 2 16 LSB "" [15:0] of DA field + * 3*x 16MSB of alt MAC addr 1-15 [47:32] of DA field + * 4*x 16 middle bits "" [31:16] + * 5*x 16 LSB "" [15:0] + * 42 16 MSB of MAC CTRL addr [47:32] of DA. + * 43 16 middle bits "" [31:16] + * 44 16 LSB "" [15:0] + * MAC CTRL addr must be the reserved multicast addr for MAC CTRL frames. + * if there is a match, MAC will set the bit for alternative address + * filter pass [15] + + * here is the map of registers given MAC address notation: a:b:c:d:e:f + * ab cd ef + * primary addr reg 2 reg 1 reg 0 + * alt addr 1 reg 5 reg 4 reg 3 + * alt addr x reg 5*x reg 4*x reg 3*x + * ctrl addr reg 44 reg 43 reg 42 + */ +#define REG_MAC_ADDR0 0x6080 /* MAC address 0 reg */ +#define REG_MAC_ADDRN(x) (REG_MAC_ADDR0 + (x)*4) +#define REG_MAC_ADDR_FILTER0 0x614C /* address filter 0 reg + [47:32] */ +#define REG_MAC_ADDR_FILTER1 0x6150 /* address filter 1 reg + [31:16] */ +#define REG_MAC_ADDR_FILTER2 0x6154 /* address filter 2 reg + [15:0] */ +#define REG_MAC_ADDR_FILTER2_1_MASK 0x6158 /* address filter 2 and 1 + mask reg. 8-bit reg + contains nibble mask for + reg 2 and 1. */ +#define REG_MAC_ADDR_FILTER0_MASK 0x615C /* address filter 0 mask + reg */ + +/* hash table registers: 0 - 15, 0x6160 - 0x619C, 4 8-bit bytes + * 16-bit registers contain bits of the hash table. + * reg x -> [16*(15 - x) + 15 : 16*(15 - x)]. + * e.g., 15 -> [15:0], 0 -> [255:240] + */ +#define REG_MAC_HASH_TABLE0 0x6160 /* hash table 0 reg */ +#define REG_MAC_HASH_TABLEN(x) (REG_MAC_HASH_TABLE0 + (x)*4) + +/* statistics registers. these registers generate an interrupt on + * overflow. recommended initialization: 0x0000. most are 16-bits except + * for PEAK_ATTEMPTS register which is 8 bits. + */ +#define REG_MAC_COLL_NORMAL 0x61A0 /* normal collision + counter. */ +#define REG_MAC_COLL_FIRST 0x61A4 /* first attempt + successful collision + counter */ +#define REG_MAC_COLL_EXCESS 0x61A8 /* excessive collision + counter */ +#define REG_MAC_COLL_LATE 0x61AC /* late collision counter */ +#define REG_MAC_TIMER_DEFER 0x61B0 /* defer timer. time base + is the media byte + clock/256 */ +#define REG_MAC_ATTEMPTS_PEAK 0x61B4 /* peak attempts reg */ +#define REG_MAC_RECV_FRAME 0x61B8 /* receive frame counter */ +#define REG_MAC_LEN_ERR 0x61BC /* length error counter */ +#define REG_MAC_ALIGN_ERR 0x61C0 /* alignment error counter */ +#define REG_MAC_FCS_ERR 0x61C4 /* FCS error counter */ +#define REG_MAC_RX_CODE_ERR 0x61C8 /* RX code violation + error counter */ + +/* misc registers */ +#define REG_MAC_RANDOM_SEED 0x61CC /* random number seed reg. + 10-bit register used as a + seed for the random number + generator for the CSMA/CD + backoff algorithm. 
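/* Editor's sketch (not part of this patch): writing an Ethernet address
 * a:b:c:d:e:f into the primary MAC address registers following the map
 * above (a:b -> reg 2, c:d -> reg 1, e:f -> reg 0, 16 bits per register).
 * "regs" is an assumed register base.
 */
#include <linux/io.h>

static void cas_set_primary_mac_sketch(void __iomem *regs, const u8 *addr)
{
	writel((addr[4] << 8) | addr[5], regs + REG_MAC_ADDRN(0));
	writel((addr[2] << 8) | addr[3], regs + REG_MAC_ADDRN(1));
	writel((addr[0] << 8) | addr[1], regs + REG_MAC_ADDRN(2));
}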
only + programmed after power-on + reset and should be a + random value which has a + high likelihood of being + unique for each MAC + attached to a network + segment (e.g., 10 LSB of + MAC address) */ + +/* ASUN: there's a PAUSE_TIMER (ro) described, but it's not in the address + * map + */ + +/* 27-bit register has the current state for key state machines in the MAC */ +#define REG_MAC_STATE_MACHINE 0x61D0 /* (ro) state machine reg */ +#define MAC_SM_RLM_MASK 0x07800000 +#define MAC_SM_RLM_SHIFT 23 +#define MAC_SM_RX_FC_MASK 0x00700000 +#define MAC_SM_RX_FC_SHIFT 20 +#define MAC_SM_TLM_MASK 0x000F0000 +#define MAC_SM_TLM_SHIFT 16 +#define MAC_SM_ENCAP_SM_MASK 0x0000F000 +#define MAC_SM_ENCAP_SM_SHIFT 12 +#define MAC_SM_TX_REQ_MASK 0x00000C00 +#define MAC_SM_TX_REQ_SHIFT 10 +#define MAC_SM_TX_FC_MASK 0x000003C0 +#define MAC_SM_TX_FC_SHIFT 6 +#define MAC_SM_FIFO_WRITE_SEL_MASK 0x00000038 +#define MAC_SM_FIFO_WRITE_SEL_SHIFT 3 +#define MAC_SM_TX_FIFO_EMPTY_MASK 0x00000007 +#define MAC_SM_TX_FIFO_EMPTY_SHIFT 0 + +/** MIF registers. the MIF can be programmed in either bit-bang or + * frame mode. + **/ +#define REG_MIF_BIT_BANG_CLOCK 0x6200 /* MIF bit-bang clock. + 1 -> 0 will generate a + rising edge. 0 -> 1 will + generate a falling edge. */ +#define REG_MIF_BIT_BANG_DATA 0x6204 /* MIF bit-bang data. 1-bit + register generates data */ +#define REG_MIF_BIT_BANG_OUTPUT_EN 0x6208 /* MIF bit-bang output + enable. enable when + xmitting data from MIF to + transceiver. */ + +/* 32-bit register serves as an instruction register when the MIF is + * programmed in frame mode. load this register w/ a valid instruction + * (as per IEEE 802.3u MII spec). poll this register to check for instruction + * execution completion. during a read operation, this register will also + * contain the 16-bit data returned by the tranceiver. unless specified + * otherwise, fields are considered "don't care" when polling for + * completion. + */ +#define REG_MIF_FRAME 0x620C /* MIF frame/output reg */ +#define MIF_FRAME_START_MASK 0xC0000000 /* start of frame. + load w/ 01 when + issuing an instr */ +#define MIF_FRAME_ST 0x40000000 /* STart of frame */ +#define MIF_FRAME_OPCODE_MASK 0x30000000 /* opcode. 01 for a + write. 10 for a + read */ +#define MIF_FRAME_OP_READ 0x20000000 /* read OPcode */ +#define MIF_FRAME_OP_WRITE 0x10000000 /* write OPcode */ +#define MIF_FRAME_PHY_ADDR_MASK 0x0F800000 /* phy address. when + issuing an instr, + this field should be + loaded w/ the XCVR + addr */ +#define MIF_FRAME_PHY_ADDR_SHIFT 23 +#define MIF_FRAME_REG_ADDR_MASK 0x007C0000 /* register address. + when issuing an instr, + addr of register + to be read/written */ +#define MIF_FRAME_REG_ADDR_SHIFT 18 +#define MIF_FRAME_TURN_AROUND_MSB 0x00020000 /* turn around, MSB. + when issuing an instr, + set this bit to 1 */ +#define MIF_FRAME_TURN_AROUND_LSB 0x00010000 /* turn around, LSB. + when issuing an instr, + set this bit to 0. + when polling for + completion, 1 means + that instr execution + has been completed */ +#define MIF_FRAME_DATA_MASK 0x0000FFFF /* instruction payload + load with 16-bit data + to be written in + transceiver reg for a + write. doesn't matter + in a read. when + polling for + completion, field is + "don't care" for write + and 16-bit data + returned by the + transceiver for a + read (if valid bit + is set) */ +#define REG_MIF_CFG 0x6210 /* MIF config reg */ +#define MIF_CFG_PHY_SELECT 0x0001 /* 1 -> select MDIO_1 + 0 -> select MDIO_0 */ +#define MIF_CFG_POLL_EN 0x0002 /* enable polling + mechanism. 
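/* Editor's sketch (not part of this patch): a frame-mode PHY register read
 * built as the REG_MIF_FRAME description above prescribes (start bits, read
 * opcode, XCVR and register addresses, turn-around MSB set), then polled on
 * TURN_AROUND_LSB for completion.  The retry limit is an illustrative
 * assumption.
 */
#include <linux/io.h>
#include <linux/delay.h>

static int cas_mif_frame_read_sketch(void __iomem *regs, int phy, int reg)
{
	int limit = 1000;
	u32 cmd;

	cmd  = MIF_FRAME_ST | MIF_FRAME_OP_READ;
	cmd |= (phy << MIF_FRAME_PHY_ADDR_SHIFT) & MIF_FRAME_PHY_ADDR_MASK;
	cmd |= (reg << MIF_FRAME_REG_ADDR_SHIFT) & MIF_FRAME_REG_ADDR_MASK;
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	writel(cmd, regs + REG_MIF_FRAME);

	while (limit-- > 0) {
		cmd = readl(regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return cmd & MIF_FRAME_DATA_MASK;
		udelay(10);
	}
	return -1;				/* timed out */
}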
if set, + BB_MODE should be 0 */ +#define MIF_CFG_BB_MODE 0x0004 /* 1 -> bit-bang mode + 0 -> frame mode */ +#define MIF_CFG_POLL_REG_MASK 0x00F8 /* register address to be + used by polling mode. + only meaningful if POLL_EN + is set to 1 */ +#define MIF_CFG_POLL_REG_SHIFT 3 +#define MIF_CFG_MDIO_0 0x0100 /* (ro) dual purpose. + when MDIO_0 is idle, + 1 -> tranceiver is + connected to MDIO_0. + when MIF is communicating + w/ MDIO_0 in bit-bang + mode, this bit indicates + the incoming bit stream + during a read op */ +#define MIF_CFG_MDIO_1 0x0200 /* (ro) dual purpose. + when MDIO_1 is idle, + 1 -> transceiver is + connected to MDIO_1. + when MIF is communicating + w/ MDIO_1 in bit-bang + mode, this bit indicates + the incoming bit stream + during a read op */ +#define MIF_CFG_POLL_PHY_MASK 0x7C00 /* tranceiver address to + be polled */ +#define MIF_CFG_POLL_PHY_SHIFT 10 + +/* 16-bit register used to determine which bits in the POLL_STATUS portion of + * the MIF_STATUS register will cause an interrupt. if a mask bit is 0, + * corresponding bit of the POLL_STATUS will generate a MIF interrupt when + * set. DEFAULT: 0xFFFF + */ +#define REG_MIF_MASK 0x6214 /* MIF mask reg */ + +/* 32-bit register used when in poll mode. auto-cleared after being read */ +#define REG_MIF_STATUS 0x6218 /* MIF status reg */ +#define MIF_STATUS_POLL_DATA_MASK 0xFFFF0000 /* poll data contains + the "latest image" + update of the XCVR + reg being read */ +#define MIF_STATUS_POLL_DATA_SHIFT 16 +#define MIF_STATUS_POLL_STATUS_MASK 0x0000FFFF /* poll status indicates + which bits in the + POLL_DATA field have + changed since the + MIF_STATUS reg was + last read */ +#define MIF_STATUS_POLL_STATUS_SHIFT 0 + +/* 7-bit register has current state for all state machines in the MIF */ +#define REG_MIF_STATE_MACHINE 0x621C /* MIF state machine reg */ +#define MIF_SM_CONTROL_MASK 0x07 /* control state machine + state */ +#define MIF_SM_EXECUTION_MASK 0x60 /* execution state machine + state */ + +/** PCS/Serialink. the following registers are equivalent to the standard + * MII management registers except that they're directly mapped in + * Cassini's register space. + **/ + +/* the auto-negotiation enable bit should be programmed the same at + * the link partner as in the local device to enable auto-negotiation to + * complete. when that bit is reprogrammed, auto-neg/manual config is + * restarted automatically. + * DEFAULT: 0x1040 + */ +#define REG_PCS_MII_CTRL 0x9000 /* PCS MII control reg */ +#define PCS_MII_CTRL_1000_SEL 0x0040 /* reads 1. ignored on + writes */ +#define PCS_MII_CTRL_COLLISION_TEST 0x0080 /* COL signal at the PCS + to MAC interface is + activated regardless + of activity */ +#define PCS_MII_CTRL_DUPLEX 0x0100 /* forced 0x0. PCS + behaviour same for + half and full dplx */ +#define PCS_MII_RESTART_AUTONEG 0x0200 /* self clearing. + restart auto- + negotiation */ +#define PCS_MII_ISOLATE 0x0400 /* read as 0. ignored + on writes */ +#define PCS_MII_POWER_DOWN 0x0800 /* read as 0. ignored + on writes */ +#define PCS_MII_AUTONEG_EN 0x1000 /* default 1. PCS goes + through automatic + link config before it + can be used. when 0, + link can be used + w/out any link config + phase */ +#define PCS_MII_10_100_SEL 0x2000 /* read as 0. ignored on + writes */ +#define PCS_MII_RESET 0x8000 /* reset PCS. 
self-clears + when done */ + +/* DEFAULT: 0x0108 */ +#define REG_PCS_MII_STATUS 0x9004 /* PCS MII status reg */ +#define PCS_MII_STATUS_EXTEND_CAP 0x0001 /* reads 0 */ +#define PCS_MII_STATUS_JABBER_DETECT 0x0002 /* reads 0 */ +#define PCS_MII_STATUS_LINK_STATUS 0x0004 /* 1 -> link up. + 0 -> link down. 0 is + latched so that 0 is + kept until read. read + 2x to determine if the + link has gone up again */ +#define PCS_MII_STATUS_AUTONEG_ABLE 0x0008 /* reads 1 (able to perform + auto-neg) */ +#define PCS_MII_STATUS_REMOTE_FAULT 0x0010 /* 1 -> remote fault detected + from received link code + word. only valid after + auto-neg completed */ +#define PCS_MII_STATUS_AUTONEG_COMP 0x0020 /* 1 -> auto-negotiation + completed + 0 -> auto-negotiation not + completed */ +#define PCS_MII_STATUS_EXTEND_STATUS 0x0100 /* reads as 1. used as an + indication that this is + a 1000 Base-X PHY. writes + to it are ignored */ + +/* used during auto-negotiation. + * DEFAULT: 0x00E0 + */ +#define REG_PCS_MII_ADVERT 0x9008 /* PCS MII advertisement + reg */ +#define PCS_MII_ADVERT_FD 0x0020 /* advertise full duplex + 1000 Base-X */ +#define PCS_MII_ADVERT_HD 0x0040 /* advertise half-duplex + 1000 Base-X */ +#define PCS_MII_ADVERT_SYM_PAUSE 0x0080 /* advertise PAUSE + symmetric capability */ +#define PCS_MII_ADVERT_ASYM_PAUSE 0x0100 /* advertises PAUSE + asymmetric capability */ +#define PCS_MII_ADVERT_RF_MASK 0x3000 /* remote fault. write bit13 + to optionally indicate to + link partner that chip is + going off-line. bit12 will + get set when signal + detect == FAIL and will + remain set until + successful negotiation */ +#define PCS_MII_ADVERT_ACK 0x4000 /* (ro) */ +#define PCS_MII_ADVERT_NEXT_PAGE 0x8000 /* (ro) forced 0x0 */ + +/* contents updated as a result of autonegotiation. layout and definitions + * identical to PCS_MII_ADVERT + */ +#define REG_PCS_MII_LPA 0x900C /* PCS MII link partner + ability reg */ +#define PCS_MII_LPA_FD PCS_MII_ADVERT_FD +#define PCS_MII_LPA_HD PCS_MII_ADVERT_HD +#define PCS_MII_LPA_SYM_PAUSE PCS_MII_ADVERT_SYM_PAUSE +#define PCS_MII_LPA_ASYM_PAUSE PCS_MII_ADVERT_ASYM_PAUSE +#define PCS_MII_LPA_RF_MASK PCS_MII_ADVERT_RF_MASK +#define PCS_MII_LPA_ACK PCS_MII_ADVERT_ACK +#define PCS_MII_LPA_NEXT_PAGE PCS_MII_ADVERT_NEXT_PAGE + +/* DEFAULT: 0x0 */ +#define REG_PCS_CFG 0x9010 /* PCS config reg */ +#define PCS_CFG_EN 0x01 /* enable PCS. must be + 0 when modifying + PCS_MII_ADVERT */ +#define PCS_CFG_SD_OVERRIDE 0x02 /* sets signal detect to + OK. bit is + non-resettable */ +#define PCS_CFG_SD_ACTIVE_LOW 0x04 /* changes interpretation + of optical signal to make + signal detect okay when + signal is low */ +#define PCS_CFG_JITTER_STUDY_MASK 0x18 /* used to make jitter + measurements. a single + code group is xmitted + regularly. + 0x0 = normal operation + 0x1 = high freq test + pattern, D21.5 + 0x2 = low freq test + pattern, K28.7 + 0x3 = reserved */ +#define PCS_CFG_10MS_TIMER_OVERRIDE 0x20 /* shortens 10-20ms auto- + negotiation timer to + a few cycles for test + purposes */ + +/* used for diagnostic purposes. bits 20-22 autoclear on read */ +#define REG_PCS_STATE_MACHINE 0x9014 /* (ro) PCS state machine + and diagnostic reg */ +#define PCS_SM_TX_STATE_MASK 0x0000000F /* 0 and 1 indicate + xmission of idle. + otherwise, xmission of + a packet */ +#define PCS_SM_RX_STATE_MASK 0x000000F0 /* 0 indicates reception + of idle. 
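/* Editor's sketch (not part of this patch): because a link-down indication
 * is latched in the PCS MII status register, the register is read twice and
 * the second value used, as the LINK_STATUS description above suggests.
 */
#include <linux/io.h>

static int cas_pcs_link_up_sketch(void __iomem *regs)
{
	u32 stat = readl(regs + REG_PCS_MII_STATUS);

	if (!(stat & PCS_MII_STATUS_LINK_STATUS))
		stat = readl(regs + REG_PCS_MII_STATUS); /* clear latched 0 */

	return !!(stat & PCS_MII_STATUS_LINK_STATUS);
}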
otherwise, + reception of packet */ +#define PCS_SM_WORD_SYNC_STATE_MASK 0x00000700 /* 0 indicates loss of + sync */ +#define PCS_SM_SEQ_DETECT_STATE_MASK 0x00001800 /* cycling through 0-3 + indicates reception of + Config codes. cycling + through 0-1 indicates + reception of idles */ +#define PCS_SM_LINK_STATE_MASK 0x0001E000 +#define SM_LINK_STATE_UP 0x00016000 /* link state is up */ + +#define PCS_SM_LOSS_LINK_C 0x00100000 /* loss of link due to + recept of Config + codes */ +#define PCS_SM_LOSS_LINK_SYNC 0x00200000 /* loss of link due to + loss of sync */ +#define PCS_SM_LOSS_SIGNAL_DETECT 0x00400000 /* signal detect goes + from OK to FAIL. bit29 + will also be set if + this is set */ +#define PCS_SM_NO_LINK_BREAKLINK 0x01000000 /* link not up due to + receipt of breaklink + C codes from partner. + C codes w/ 0 content + received triggering + start/restart of + autonegotiation. + should be sent for + no longer than 20ms */ +#define PCS_SM_NO_LINK_SERDES 0x02000000 /* serdes being + initialized. see serdes + state reg */ +#define PCS_SM_NO_LINK_C 0x04000000 /* C codes not stable or + not received */ +#define PCS_SM_NO_LINK_SYNC 0x08000000 /* word sync not + achieved */ +#define PCS_SM_NO_LINK_WAIT_C 0x10000000 /* waiting for C codes + w/ ack bit set */ +#define PCS_SM_NO_LINK_NO_IDLE 0x20000000 /* link partner continues + to send C codes + instead of idle + symbols or pkt data */ + +/* this register indicates interrupt changes in specific PCS MII status bits. + * PCS_INT may be masked at the ISR level. only a single bit is implemented + * for link status change. + */ +#define REG_PCS_INTR_STATUS 0x9018 /* PCS interrupt status */ +#define PCS_INTR_STATUS_LINK_CHANGE 0x04 /* link status has changed + since last read */ + +/* control which network interface is used. no more than one bit should + * be set. + * DEFAULT: none + */ +#define REG_PCS_DATAPATH_MODE 0x9050 /* datapath mode reg */ +#define PCS_DATAPATH_MODE_MII 0x00 /* PCS is not used and + MII/GMII is selected. + selection between MII and + GMII is controlled by + XIF_CFG */ +#define PCS_DATAPATH_MODE_SERDES 0x02 /* PCS is used via the + 10-bit interface */ + +/* input to serdes chip or serialink block */ +#define REG_PCS_SERDES_CTRL 0x9054 /* serdes control reg */ +#define PCS_SERDES_CTRL_LOOPBACK 0x01 /* enable loopback on + serdes interface */ +#define PCS_SERDES_CTRL_SYNCD_EN 0x02 /* enable sync carrier + detection. should be + 0x0 for normal + operation */ +#define PCS_SERDES_CTRL_LOCKREF 0x04 /* frequency-lock RBC[0:1] + to REFCLK when set. + when clear, receiver + clock locks to incoming + serial data */ + +/* multiplex test outputs into the PROM address (PA_3 through PA_0) pins. + * should be 0x0 for normal operations. + * 0b000 normal operation, PROM address[3:0] selected + * 0b001 rxdma req, rxdma ack, rxdma ready, rxdma read + * 0b010 rxmac req, rx ack, rx tag, rx clk shared + * 0b011 txmac req, tx ack, tx tag, tx retry req + * 0b100 tx tp3, tx tp2, tx tp1, tx tp0 + * 0b101 R period RX, R period TX, R period HP, R period BIM + * DEFAULT: 0x0 + */ +#define REG_PCS_SHARED_OUTPUT_SEL 0x9058 /* shared output select */ +#define PCS_SOS_PROM_ADDR_MASK 0x0007 + +/* used for diagnostics. this register indicates progress of the SERDES + * boot up. 
+ * 0b00 undergoing reset + * 0b01 waiting 500us while lockrefn is asserted + * 0b10 waiting for comma detect + * 0b11 receive data is synchronized + * DEFAULT: 0x0 + */ +#define REG_PCS_SERDES_STATE 0x905C /* (ro) serdes state */ +#define PCS_SERDES_STATE_MASK 0x03 + +/* used for diagnostics. indicates number of packets transmitted or received. + * counters rollover w/out generating an interrupt. + * DEFAULT: 0x0 + */ +#define REG_PCS_PACKET_COUNT 0x9060 /* (ro) PCS packet counter */ +#define PCS_PACKET_COUNT_TX 0x000007FF /* pkts xmitted by PCS */ +#define PCS_PACKET_COUNT_RX 0x07FF0000 /* pkts recvd by PCS + whether they + encountered an error + or not */ + +/** LocalBus Devices. the following provides run-time access to the + * Cassini's PROM + ***/ +#define REG_EXPANSION_ROM_RUN_START 0x100000 /* expansion rom run time + access */ +#define REG_EXPANSION_ROM_RUN_END 0x17FFFF + +#define REG_SECOND_LOCALBUS_START 0x180000 /* secondary local bus + device */ +#define REG_SECOND_LOCALBUS_END 0x1FFFFF + +/* entropy device */ +#define REG_ENTROPY_START REG_SECOND_LOCALBUS_START +#define REG_ENTROPY_DATA (REG_ENTROPY_START + 0x00) +#define REG_ENTROPY_STATUS (REG_ENTROPY_START + 0x04) +#define ENTROPY_STATUS_DRDY 0x01 +#define ENTROPY_STATUS_BUSY 0x02 +#define ENTROPY_STATUS_CIPHER 0x04 +#define ENTROPY_STATUS_BYPASS_MASK 0x18 +#define REG_ENTROPY_MODE (REG_ENTROPY_START + 0x05) +#define ENTROPY_MODE_KEY_MASK 0x07 +#define ENTROPY_MODE_ENCRYPT 0x40 +#define REG_ENTROPY_RAND_REG (REG_ENTROPY_START + 0x06) +#define REG_ENTROPY_RESET (REG_ENTROPY_START + 0x07) +#define ENTROPY_RESET_DES_IO 0x01 +#define ENTROPY_RESET_STC_MODE 0x02 +#define ENTROPY_RESET_KEY_CACHE 0x04 +#define ENTROPY_RESET_IV 0x08 +#define REG_ENTROPY_IV (REG_ENTROPY_START + 0x08) +#define REG_ENTROPY_KEY0 (REG_ENTROPY_START + 0x10) +#define REG_ENTROPY_KEYN(x) (REG_ENTROPY_KEY0 + 4*(x)) + +/* phys of interest w/ their special mii registers */ +#define PHY_LUCENT_B0 0x00437421 +#define LUCENT_MII_REG 0x1F + +#define PHY_NS_DP83065 0x20005c78 +#define DP83065_MII_MEM 0x16 +#define DP83065_MII_REGD 0x1D +#define DP83065_MII_REGE 0x1E + +#define PHY_BROADCOM_5411 0x00206071 +#define PHY_BROADCOM_B0 0x00206050 +#define BROADCOM_MII_REG4 0x14 +#define BROADCOM_MII_REG5 0x15 +#define BROADCOM_MII_REG7 0x17 +#define BROADCOM_MII_REG8 0x18 + +#define CAS_MII_ANNPTR 0x07 +#define CAS_MII_ANNPRR 0x08 +#define CAS_MII_1000_CTRL 0x09 +#define CAS_MII_1000_STATUS 0x0A +#define CAS_MII_1000_EXTEND 0x0F + +#define CAS_BMSR_1000_EXTEND 0x0100 /* supports 1000Base-T extended status */ +/* + * if autoneg is disabled, here's the table: + * BMCR_SPEED100 = 100Mbps + * BMCR_SPEED1000 = 1000Mbps + * ~(BMCR_SPEED100 | BMCR_SPEED1000) = 10Mbps + */ +#define CAS_BMCR_SPEED1000 0x0040 /* Select 1000Mbps */ + +#define CAS_ADVERTISE_1000HALF 0x0100 +#define CAS_ADVERTISE_1000FULL 0x0200 +#define CAS_ADVERTISE_PAUSE 0x0400 +#define CAS_ADVERTISE_ASYM_PAUSE 0x0800 + +/* regular lpa register */ +#define CAS_LPA_PAUSE CAS_ADVERTISE_PAUSE +#define CAS_LPA_ASYM_PAUSE CAS_ADVERTISE_ASYM_PAUSE + +/* 1000_STATUS register */ +#define CAS_LPA_1000HALF 0x0400 +#define CAS_LPA_1000FULL 0x0800 + +#define CAS_EXTEND_1000XFULL 0x8000 +#define CAS_EXTEND_1000XHALF 0x4000 +#define CAS_EXTEND_1000TFULL 0x2000 +#define CAS_EXTEND_1000THALF 0x1000 + +/* cassini header parser firmware */ +typedef struct cas_hp_inst { + const char *note; + + u16 mask, val; + + u8 op; + u8 soff, snext; /* if match succeeds, new offset and match */ + u8 foff, fnext; /* if match fails, new offset 
and match */ + /* output info */ + u8 outop; /* output opcode */ + + u16 outarg; /* output argument */ + u8 outenab; /* output enable: 0 = not, 1 = if match + 2 = if !match, 3 = always */ + u8 outshift; /* barrel shift right, 4 bits */ + u16 outmask; +} cas_hp_inst_t; + +/* comparison */ +#define OP_EQ 0 /* packet == value */ +#define OP_LT 1 /* packet < value */ +#define OP_GT 2 /* packet > value */ +#define OP_NP 3 /* new packet */ + +/* output opcodes */ +#define CL_REG 0 +#define LD_FID 1 +#define LD_SEQ 2 +#define LD_CTL 3 +#define LD_SAP 4 +#define LD_R1 5 +#define LD_L3 6 +#define LD_SUM 7 +#define LD_HDR 8 +#define IM_FID 9 +#define IM_SEQ 10 +#define IM_SAP 11 +#define IM_R1 12 +#define IM_CTL 13 +#define LD_LEN 14 +#define ST_FLG 15 + +/* match setp #s for IP4TCP4 */ +#define S1_PCKT 0 +#define S1_VLAN 1 +#define S1_CFI 2 +#define S1_8023 3 +#define S1_LLC 4 +#define S1_LLCc 5 +#define S1_IPV4 6 +#define S1_IPV4c 7 +#define S1_IPV4F 8 +#define S1_TCP44 9 +#define S1_IPV6 10 +#define S1_IPV6L 11 +#define S1_IPV6c 12 +#define S1_TCP64 13 +#define S1_TCPSQ 14 +#define S1_TCPFG 15 +#define S1_TCPHL 16 +#define S1_TCPHc 17 +#define S1_CLNP 18 +#define S1_CLNP2 19 +#define S1_DROP 20 +#define S2_HTTP 21 +#define S1_ESP4 22 +#define S1_AH4 23 +#define S1_ESP6 24 +#define S1_AH6 25 + +#define CAS_PROG_IP46TCP4_PREAMBLE \ +{ "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, S1_PCKT, \ + CL_REG, 0x3ff, 1, 0x0, 0x0000}, \ +{ "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023, \ + IM_CTL, 0x00a, 3, 0x0, 0xffff}, \ +{ "CFI?", 0x1000, 0x1000, OP_EQ, 0, S1_DROP, 1, S1_8023, \ + CL_REG, 0x000, 0, 0x0, 0x0000}, \ +{ "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4, \ + CL_REG, 0x000, 0, 0x0, 0x0000}, \ +{ "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP, \ + CL_REG, 0x000, 0, 0x0, 0x0000}, \ +{ "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP, \ + CL_REG, 0x000, 0, 0x0, 0x0000}, \ +{ "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6, \ + LD_SAP, 0x100, 3, 0x0, 0xffff}, \ +{ "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP, \ + LD_SUM, 0x00a, 1, 0x0, 0x0000}, \ +{ "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP, \ + LD_LEN, 0x03e, 1, 0x0, 0xffff}, \ +{ "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_CLNP, \ + LD_FID, 0x182, 1, 0x0, 0xffff}, /* FID IP4&TCP src+dst */ \ +{ "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP, \ + LD_SUM, 0x015, 1, 0x0, 0x0000}, \ +{ "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP, \ + IM_R1, 0x128, 1, 0x0, 0xffff}, \ +{ "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP, \ + LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */ \ +{ "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_CLNP, \ + LD_LEN, 0x03f, 1, 0x0, 0xffff} + +#ifdef USE_HP_IP46TCP4 +static cas_hp_inst_t cas_prog_ip46tcp4tab[] = { + CAS_PROG_IP46TCP4_PREAMBLE, + { "TCP seq", /* DADDR should point to dest port */ + 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 4, S1_TCPFG, LD_SEQ, + 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */ + { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0, + S1_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */ + { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, + S1_TCPHc, LD_R1, 0x205, 3, 0xB, 0xf000}, + { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, + S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff}, + { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2, + IM_CTL, 0x001, 3, 0x0, 0x0001}, + { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 
0x000, 0, 0x0, 0x0000}, + { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x080, 3, 0x0, 0xffff}, + { NULL }, +}; +#ifdef HP_IP46TCP4_DEFAULT +#define CAS_HP_FIRMWARE cas_prog_ip46tcp4tab +#endif +#endif + +/* + * Alternate table load which excludes HTTP server traffic from reassembly. + * It is substantially similar to the basic table, with one extra state + * and a few extra compares. */ +#ifdef USE_HP_IP46TCP4NOHTTP +static cas_hp_inst_t cas_prog_ip46tcp4nohttptab[] = { + CAS_PROG_IP46TCP4_PREAMBLE, + { "TCP seq", /* DADDR should point to dest port */ + 0xFFFF, 0x0080, OP_EQ, 0, S2_HTTP, 0, S1_TCPFG, LD_SEQ, + 0x081, 3, 0x0, 0xffff} , /* Load TCP seq # */ + { "TCP control flags", 0xFFFF, 0x8080, OP_EQ, 0, S2_HTTP, 0, + S1_TCPHL, ST_FLG, 0x145, 2, 0x0, 0x002f, }, /* Load TCP flags */ + { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc, + LD_R1, 0x205, 3, 0xB, 0xf000}, + { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + LD_HDR, 0x0ff, 3, 0x0, 0xffff}, + { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2, + IM_CTL, 0x001, 3, 0x0, 0x0001}, + { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + CL_REG, 0x002, 3, 0x0, 0x0000}, + { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x080, 3, 0x0, 0xffff}, + { "No HTTP", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x044, 3, 0x0, 0xffff}, + { NULL }, +}; +#ifdef HP_IP46TCP4NOHTTP_DEFAULT +#define CAS_HP_FIRMWARE cas_prog_ip46tcp4nohttptab +#endif +#endif + +/* match step #s for IP4FRAG */ +#define S3_IPV6c 11 +#define S3_TCP64 12 +#define S3_TCPSQ 13 +#define S3_TCPFG 14 +#define S3_TCPHL 15 +#define S3_TCPHc 16 +#define S3_FRAG 17 +#define S3_FOFF 18 +#define S3_CLNP 19 + +#ifdef USE_HP_IP4FRAG +static cas_hp_inst_t cas_prog_ip4fragtab[] = { + { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, S1_PCKT, + CL_REG, 0x3ff, 1, 0x0, 0x0000}, + { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023, + IM_CTL, 0x00a, 3, 0x0, 0xffff}, + { "CFI?", 0x1000, 0x1000, OP_EQ, 0, S3_CLNP, 1, S1_8023, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S3_CLNP, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "LLCc?",0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S3_CLNP, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6, + LD_SAP, 0x100, 3, 0x0, 0xffff}, + { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S3_CLNP, + LD_SUM, 0x00a, 1, 0x0, 0x0000}, + { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S3_FRAG, + LD_LEN, 0x03e, 3, 0x0, 0xffff}, + { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S3_TCPSQ, 0, S3_CLNP, + LD_FID, 0x182, 3, 0x0, 0xffff}, /* FID IP4&TCP src+dst */ + { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S3_IPV6c, 0, S3_CLNP, + LD_SUM, 0x015, 1, 0x0, 0x0000}, + { "IPV6 cont?", 0xf000, 0x6000, OP_EQ, 3, S3_TCP64, 0, S3_CLNP, + LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */ + { "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S3_TCPSQ, 0, S3_CLNP, + LD_LEN, 0x03f, 1, 0x0, 0xffff}, + { "TCP seq", /* DADDR should point to dest port */ + 0x0000, 0x0000, OP_EQ, 0, S3_TCPFG, 4, S3_TCPFG, LD_SEQ, + 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */ + { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S3_TCPHL, 0, + S3_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */ + { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S3_TCPHc, 0, S3_TCPHc, + LD_R1, 0x205, 3, 0xB, 0xf000}, + { "TCP length cont", 0x0000, 
0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + LD_HDR, 0x0ff, 3, 0x0, 0xffff}, + { "IP4 Fragment", 0x0000, 0x0000, OP_EQ, 0, S3_FOFF, 0, S3_FOFF, + LD_FID, 0x103, 3, 0x0, 0xffff}, /* FID IP4 src+dst */ + { "IP4 frag offset", 0x0000, 0x0000, OP_EQ, 0, S3_FOFF, 0, S3_FOFF, + LD_SEQ, 0x040, 1, 0xD, 0xfff8}, + { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x001, 3, 0x0, 0x0001}, + { NULL }, +}; +#ifdef HP_IP4FRAG_DEFAULT +#define CAS_HP_FIRMWARE cas_prog_ip4fragtab +#endif +#endif + +/* + * Alternate table which does batching without reassembly + */ +#ifdef USE_HP_IP46TCP4BATCH +static cas_hp_inst_t cas_prog_ip46tcp4batchtab[] = { + CAS_PROG_IP46TCP4_PREAMBLE, + { "TCP seq", /* DADDR should point to dest port */ + 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 0, S1_TCPFG, LD_SEQ, + 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */ + { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0, + S1_TCPHL, ST_FLG, 0x000, 3, 0x0, 0x0000}, /* Load TCP flags */ + { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, + S1_TCPHc, LD_R1, 0x205, 3, 0xB, 0xf000}, + { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, + S1_PCKT, IM_CTL, 0x040, 3, 0x0, 0xffff}, /* set batch bit */ + { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x001, 3, 0x0, 0x0001}, + { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, + S1_PCKT, IM_CTL, 0x080, 3, 0x0, 0xffff}, + { NULL }, +}; +#ifdef HP_IP46TCP4BATCH_DEFAULT +#define CAS_HP_FIRMWARE cas_prog_ip46tcp4batchtab +#endif +#endif + +/* Workaround for Cassini rev2 descriptor corruption problem. + * Does batching without reassembly, and sets the SAP to a known + * data pattern for all packets. + */ +#ifdef USE_HP_WORKAROUND +static cas_hp_inst_t cas_prog_workaroundtab[] = { + { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, + S1_PCKT, CL_REG, 0x3ff, 1, 0x0, 0x0000} , + { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023, + IM_CTL, 0x04a, 3, 0x0, 0xffff}, + { "CFI?", 0x1000, 0x1000, OP_EQ, 0, S1_CLNP, 1, S1_8023, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6, + IM_SAP, 0x6AE, 3, 0x0, 0xffff}, + { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP, + LD_SUM, 0x00a, 1, 0x0, 0x0000}, + { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP, + LD_LEN, 0x03e, 1, 0x0, 0xffff}, + { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_CLNP, + LD_FID, 0x182, 3, 0x0, 0xffff}, /* FID IP4&TCP src+dst */ + { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP, + LD_SUM, 0x015, 1, 0x0, 0x0000}, + { "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP, + IM_R1, 0x128, 1, 0x0, 0xffff}, + { "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP, + LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */ + { "TCP64?", 0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_CLNP, + LD_LEN, 0x03f, 1, 0x0, 0xffff}, + { "TCP seq", /* DADDR should point to dest port */ + 0x0000, 0x0000, OP_EQ, 0, S1_TCPFG, 4, S1_TCPFG, LD_SEQ, + 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */ + { "TCP control flags", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHL, 0, + S1_TCPHL, ST_FLG, 0x045, 3, 0x0, 0x002f}, /* Load TCP flags */ + { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc, + LD_R1, 0x205, 3, 0xB, 0xf000}, + { "TCP 
length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, + S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff}, + { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2, + IM_SAP, 0x6AE, 3, 0x0, 0xffff} , + { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x001, 3, 0x0, 0x0001}, + { NULL }, +}; +#ifdef HP_WORKAROUND_DEFAULT +#define CAS_HP_FIRMWARE cas_prog_workaroundtab +#endif +#endif + +#ifdef USE_HP_ENCRYPT +static cas_hp_inst_t cas_prog_encryptiontab[] = { + { "packet arrival?", 0xffff, 0x0000, OP_NP, 6, S1_VLAN, 0, + S1_PCKT, CL_REG, 0x3ff, 1, 0x0, 0x0000}, + { "VLAN?", 0xffff, 0x8100, OP_EQ, 1, S1_CFI, 0, S1_8023, + IM_CTL, 0x00a, 3, 0x0, 0xffff}, +#if 0 +//"CFI?", /* 02 FIND CFI and If FIND go to S1_DROP */ +//0x1000, 0x1000, OP_EQ, 0, S1_DROP, 1, S1_8023, CL_REG, 0x000, 0, 0x0, 0x00 + 00, +#endif + { "CFI?", /* FIND CFI and If FIND go to CleanUP1 (ignore and send to host) */ + 0x1000, 0x1000, OP_EQ, 0, S1_CLNP, 1, S1_8023, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "8023?", 0xffff, 0x0600, OP_LT, 1, S1_LLC, 0, S1_IPV4, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "LLC?", 0xffff, 0xaaaa, OP_EQ, 1, S1_LLCc, 0, S1_CLNP, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "LLCc?", 0xff00, 0x0300, OP_EQ, 2, S1_IPV4, 0, S1_CLNP, + CL_REG, 0x000, 0, 0x0, 0x0000}, + { "IPV4?", 0xffff, 0x0800, OP_EQ, 1, S1_IPV4c, 0, S1_IPV6, + LD_SAP, 0x100, 3, 0x0, 0xffff}, + { "IPV4 cont?", 0xff00, 0x4500, OP_EQ, 3, S1_IPV4F, 0, S1_CLNP, + LD_SUM, 0x00a, 1, 0x0, 0x0000}, + { "IPV4 frag?", 0x3fff, 0x0000, OP_EQ, 1, S1_TCP44, 0, S1_CLNP, + LD_LEN, 0x03e, 1, 0x0, 0xffff}, + { "TCP44?", 0x00ff, 0x0006, OP_EQ, 7, S1_TCPSQ, 0, S1_ESP4, + LD_FID, 0x182, 1, 0x0, 0xffff}, /* FID IP4&TCP src+dst */ + { "IPV6?", 0xffff, 0x86dd, OP_EQ, 1, S1_IPV6L, 0, S1_CLNP, + LD_SUM, 0x015, 1, 0x0, 0x0000}, + { "IPV6 len", 0xf000, 0x6000, OP_EQ, 0, S1_IPV6c, 0, S1_CLNP, + IM_R1, 0x128, 1, 0x0, 0xffff}, + { "IPV6 cont?", 0x0000, 0x0000, OP_EQ, 3, S1_TCP64, 0, S1_CLNP, + LD_FID, 0x484, 1, 0x0, 0xffff}, /* FID IP6&TCP src+dst */ + { "TCP64?", +#if 0 +//@@@0xff00, 0x0600, OP_EQ, 18, S1_TCPSQ, 0, S1_ESP6, LD_LEN, 0x03f, 1, 0x0, 0xffff, +#endif + 0xff00, 0x0600, OP_EQ, 12, S1_TCPSQ, 0, S1_ESP6, LD_LEN, + 0x03f, 1, 0x0, 0xffff}, + { "TCP seq", /* 14:DADDR should point to dest port */ + 0xFFFF, 0x0080, OP_EQ, 0, S2_HTTP, 0, S1_TCPFG, LD_SEQ, + 0x081, 3, 0x0, 0xffff}, /* Load TCP seq # */ + { "TCP control flags", 0xFFFF, 0x8080, OP_EQ, 0, S2_HTTP, 0, + S1_TCPHL, ST_FLG, 0x145, 2, 0x0, 0x002f}, /* Load TCP flags */ + { "TCP length", 0x0000, 0x0000, OP_EQ, 0, S1_TCPHc, 0, S1_TCPHc, + LD_R1, 0x205, 3, 0xB, 0xf000} , + { "TCP length cont", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, + S1_PCKT, LD_HDR, 0x0ff, 3, 0x0, 0xffff}, + { "Cleanup", 0x0000, 0x0000, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP2, + IM_CTL, 0x001, 3, 0x0, 0x0001}, + { "Cleanup 2", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + CL_REG, 0x002, 3, 0x0, 0x0000}, + { "Drop packet", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x080, 3, 0x0, 0xffff}, + { "No HTTP", 0x0000, 0x0000, OP_EQ, 0, S1_PCKT, 0, S1_PCKT, + IM_CTL, 0x044, 3, 0x0, 0xffff}, + { "IPV4 ESP encrypted?", /* S1_ESP4 */ + 0x00ff, 0x0032, OP_EQ, 0, S1_CLNP2, 0, S1_AH4, IM_CTL, + 0x021, 1, 0x0, 0xffff}, + { "IPV4 AH encrypted?", /* S1_AH4 */ + 0x00ff, 0x0033, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL, + 0x021, 1, 0x0, 0xffff}, + { "IPV6 ESP encrypted?", /* S1_ESP6 */ +#if 0 +//@@@0x00ff, 0x0032, OP_EQ, 0, S1_CLNP2, 0, S1_AH6, IM_CTL, 0x021, 1, 0x0, 0xffff, +#endif + 0xff00, 0x3200, OP_EQ, 0, S1_CLNP2, 0, S1_AH6, IM_CTL, + 0x021, 1, 0x0, 
0xffff}, + { "IPV6 AH encrypted?", /* S1_AH6 */ +#if 0 +//@@@0x00ff, 0x0033, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL, 0x021, 1, 0x0, 0xffff, +#endif + 0xff00, 0x3300, OP_EQ, 0, S1_CLNP2, 0, S1_CLNP, IM_CTL, + 0x021, 1, 0x0, 0xffff}, + { NULL }, +}; +#ifdef HP_ENCRYPT_DEFAULT +#define CAS_HP_FIRMWARE cas_prog_encryptiontab +#endif +#endif + +static cas_hp_inst_t cas_prog_null[] = { {NULL} }; +#ifdef HP_NULL_DEFAULT +#define CAS_HP_FIRMWARE cas_prog_null +#endif + +/* firmware patch for NS_DP83065 */ +typedef struct cas_saturn_patch { + u16 addr; + u16 val; +} cas_saturn_patch_t; + +#if 1 +cas_saturn_patch_t cas_saturn_patch[] = { +{0x8200, 0x007e}, {0x8201, 0x0082}, {0x8202, 0x0009}, +{0x8203, 0x0000}, {0x8204, 0x0000}, {0x8205, 0x0000}, +{0x8206, 0x0000}, {0x8207, 0x0000}, {0x8208, 0x0000}, +{0x8209, 0x008e}, {0x820a, 0x008e}, {0x820b, 0x00ff}, +{0x820c, 0x00ce}, {0x820d, 0x0082}, {0x820e, 0x0025}, +{0x820f, 0x00ff}, {0x8210, 0x0001}, {0x8211, 0x000f}, +{0x8212, 0x00ce}, {0x8213, 0x0084}, {0x8214, 0x0026}, +{0x8215, 0x00ff}, {0x8216, 0x0001}, {0x8217, 0x0011}, +{0x8218, 0x00ce}, {0x8219, 0x0085}, {0x821a, 0x003d}, +{0x821b, 0x00df}, {0x821c, 0x00e5}, {0x821d, 0x0086}, +{0x821e, 0x0039}, {0x821f, 0x00b7}, {0x8220, 0x008f}, +{0x8221, 0x00f8}, {0x8222, 0x007e}, {0x8223, 0x00c3}, +{0x8224, 0x00c2}, {0x8225, 0x0096}, {0x8226, 0x0047}, +{0x8227, 0x0084}, {0x8228, 0x00f3}, {0x8229, 0x008a}, +{0x822a, 0x0000}, {0x822b, 0x0097}, {0x822c, 0x0047}, +{0x822d, 0x00ce}, {0x822e, 0x0082}, {0x822f, 0x0033}, +{0x8230, 0x00ff}, {0x8231, 0x0001}, {0x8232, 0x000f}, +{0x8233, 0x0096}, {0x8234, 0x0046}, {0x8235, 0x0084}, +{0x8236, 0x000c}, {0x8237, 0x0081}, {0x8238, 0x0004}, +{0x8239, 0x0027}, {0x823a, 0x000b}, {0x823b, 0x0096}, +{0x823c, 0x0046}, {0x823d, 0x0084}, {0x823e, 0x000c}, +{0x823f, 0x0081}, {0x8240, 0x0008}, {0x8241, 0x0027}, +{0x8242, 0x0057}, {0x8243, 0x007e}, {0x8244, 0x0084}, +{0x8245, 0x0025}, {0x8246, 0x0096}, {0x8247, 0x0047}, +{0x8248, 0x0084}, {0x8249, 0x00f3}, {0x824a, 0x008a}, +{0x824b, 0x0004}, {0x824c, 0x0097}, {0x824d, 0x0047}, +{0x824e, 0x00ce}, {0x824f, 0x0082}, {0x8250, 0x0054}, +{0x8251, 0x00ff}, {0x8252, 0x0001}, {0x8253, 0x000f}, +{0x8254, 0x0096}, {0x8255, 0x0046}, {0x8256, 0x0084}, +{0x8257, 0x000c}, {0x8258, 0x0081}, {0x8259, 0x0004}, +{0x825a, 0x0026}, {0x825b, 0x0038}, {0x825c, 0x00b6}, +{0x825d, 0x0012}, {0x825e, 0x0020}, {0x825f, 0x0084}, +{0x8260, 0x0020}, {0x8261, 0x0026}, {0x8262, 0x0003}, +{0x8263, 0x007e}, {0x8264, 0x0084}, {0x8265, 0x0025}, +{0x8266, 0x0096}, {0x8267, 0x007b}, {0x8268, 0x00d6}, +{0x8269, 0x007c}, {0x826a, 0x00fe}, {0x826b, 0x008f}, +{0x826c, 0x0056}, {0x826d, 0x00bd}, {0x826e, 0x00f7}, +{0x826f, 0x00b6}, {0x8270, 0x00fe}, {0x8271, 0x008f}, +{0x8272, 0x004e}, {0x8273, 0x00bd}, {0x8274, 0x00ec}, +{0x8275, 0x008e}, {0x8276, 0x00bd}, {0x8277, 0x00fa}, +{0x8278, 0x00f7}, {0x8279, 0x00bd}, {0x827a, 0x00f7}, +{0x827b, 0x0028}, {0x827c, 0x00ce}, {0x827d, 0x0082}, +{0x827e, 0x0082}, {0x827f, 0x00ff}, {0x8280, 0x0001}, +{0x8281, 0x000f}, {0x8282, 0x0096}, {0x8283, 0x0046}, +{0x8284, 0x0084}, {0x8285, 0x000c}, {0x8286, 0x0081}, +{0x8287, 0x0004}, {0x8288, 0x0026}, {0x8289, 0x000a}, +{0x828a, 0x00b6}, {0x828b, 0x0012}, {0x828c, 0x0020}, +{0x828d, 0x0084}, {0x828e, 0x0020}, {0x828f, 0x0027}, +{0x8290, 0x00b5}, {0x8291, 0x007e}, {0x8292, 0x0084}, +{0x8293, 0x0025}, {0x8294, 0x00bd}, {0x8295, 0x00f7}, +{0x8296, 0x001f}, {0x8297, 0x007e}, {0x8298, 0x0084}, +{0x8299, 0x001f}, {0x829a, 0x0096}, {0x829b, 0x0047}, +{0x829c, 0x0084}, {0x829d, 0x00f3}, {0x829e, 0x008a}, 
+{0x829f, 0x0008}, {0x82a0, 0x0097}, {0x82a1, 0x0047}, +{0x82a2, 0x00de}, {0x82a3, 0x00e1}, {0x82a4, 0x00ad}, +{0x82a5, 0x0000}, {0x82a6, 0x00ce}, {0x82a7, 0x0082}, +{0x82a8, 0x00af}, {0x82a9, 0x00ff}, {0x82aa, 0x0001}, +{0x82ab, 0x000f}, {0x82ac, 0x007e}, {0x82ad, 0x0084}, +{0x82ae, 0x0025}, {0x82af, 0x0096}, {0x82b0, 0x0041}, +{0x82b1, 0x0085}, {0x82b2, 0x0010}, {0x82b3, 0x0026}, +{0x82b4, 0x0006}, {0x82b5, 0x0096}, {0x82b6, 0x0023}, +{0x82b7, 0x0085}, {0x82b8, 0x0040}, {0x82b9, 0x0027}, +{0x82ba, 0x0006}, {0x82bb, 0x00bd}, {0x82bc, 0x00ed}, +{0x82bd, 0x0000}, {0x82be, 0x007e}, {0x82bf, 0x0083}, +{0x82c0, 0x00a2}, {0x82c1, 0x00de}, {0x82c2, 0x0042}, +{0x82c3, 0x00bd}, {0x82c4, 0x00eb}, {0x82c5, 0x008e}, +{0x82c6, 0x0096}, {0x82c7, 0x0024}, {0x82c8, 0x0084}, +{0x82c9, 0x0008}, {0x82ca, 0x0027}, {0x82cb, 0x0003}, +{0x82cc, 0x007e}, {0x82cd, 0x0083}, {0x82ce, 0x00df}, +{0x82cf, 0x0096}, {0x82d0, 0x007b}, {0x82d1, 0x00d6}, +{0x82d2, 0x007c}, {0x82d3, 0x00fe}, {0x82d4, 0x008f}, +{0x82d5, 0x0056}, {0x82d6, 0x00bd}, {0x82d7, 0x00f7}, +{0x82d8, 0x00b6}, {0x82d9, 0x00fe}, {0x82da, 0x008f}, +{0x82db, 0x0050}, {0x82dc, 0x00bd}, {0x82dd, 0x00ec}, +{0x82de, 0x008e}, {0x82df, 0x00bd}, {0x82e0, 0x00fa}, +{0x82e1, 0x00f7}, {0x82e2, 0x0086}, {0x82e3, 0x0011}, +{0x82e4, 0x00c6}, {0x82e5, 0x0049}, {0x82e6, 0x00bd}, +{0x82e7, 0x00e4}, {0x82e8, 0x0012}, {0x82e9, 0x00ce}, +{0x82ea, 0x0082}, {0x82eb, 0x00ef}, {0x82ec, 0x00ff}, +{0x82ed, 0x0001}, {0x82ee, 0x000f}, {0x82ef, 0x0096}, +{0x82f0, 0x0046}, {0x82f1, 0x0084}, {0x82f2, 0x000c}, +{0x82f3, 0x0081}, {0x82f4, 0x0000}, {0x82f5, 0x0027}, +{0x82f6, 0x0017}, {0x82f7, 0x00c6}, {0x82f8, 0x0049}, +{0x82f9, 0x00bd}, {0x82fa, 0x00e4}, {0x82fb, 0x0091}, +{0x82fc, 0x0024}, {0x82fd, 0x000d}, {0x82fe, 0x00b6}, +{0x82ff, 0x0012}, {0x8300, 0x0020}, {0x8301, 0x0085}, +{0x8302, 0x0020}, {0x8303, 0x0026}, {0x8304, 0x000c}, +{0x8305, 0x00ce}, {0x8306, 0x0082}, {0x8307, 0x00c1}, +{0x8308, 0x00ff}, {0x8309, 0x0001}, {0x830a, 0x000f}, +{0x830b, 0x007e}, {0x830c, 0x0084}, {0x830d, 0x0025}, +{0x830e, 0x007e}, {0x830f, 0x0084}, {0x8310, 0x0016}, +{0x8311, 0x00fe}, {0x8312, 0x008f}, {0x8313, 0x0052}, +{0x8314, 0x00bd}, {0x8315, 0x00ec}, {0x8316, 0x008e}, +{0x8317, 0x00bd}, {0x8318, 0x00fa}, {0x8319, 0x00f7}, +{0x831a, 0x0086}, {0x831b, 0x006a}, {0x831c, 0x00c6}, +{0x831d, 0x0049}, {0x831e, 0x00bd}, {0x831f, 0x00e4}, +{0x8320, 0x0012}, {0x8321, 0x00ce}, {0x8322, 0x0083}, +{0x8323, 0x0027}, {0x8324, 0x00ff}, {0x8325, 0x0001}, +{0x8326, 0x000f}, {0x8327, 0x0096}, {0x8328, 0x0046}, +{0x8329, 0x0084}, {0x832a, 0x000c}, {0x832b, 0x0081}, +{0x832c, 0x0000}, {0x832d, 0x0027}, {0x832e, 0x000a}, +{0x832f, 0x00c6}, {0x8330, 0x0049}, {0x8331, 0x00bd}, +{0x8332, 0x00e4}, {0x8333, 0x0091}, {0x8334, 0x0025}, +{0x8335, 0x0006}, {0x8336, 0x007e}, {0x8337, 0x0084}, +{0x8338, 0x0025}, {0x8339, 0x007e}, {0x833a, 0x0084}, +{0x833b, 0x0016}, {0x833c, 0x00b6}, {0x833d, 0x0018}, +{0x833e, 0x0070}, {0x833f, 0x00bb}, {0x8340, 0x0019}, +{0x8341, 0x0070}, {0x8342, 0x002a}, {0x8343, 0x0004}, +{0x8344, 0x0081}, {0x8345, 0x00af}, {0x8346, 0x002e}, +{0x8347, 0x0019}, {0x8348, 0x0096}, {0x8349, 0x007b}, +{0x834a, 0x00f6}, {0x834b, 0x0020}, {0x834c, 0x0007}, +{0x834d, 0x00fa}, {0x834e, 0x0020}, {0x834f, 0x0027}, +{0x8350, 0x00c4}, {0x8351, 0x0038}, {0x8352, 0x0081}, +{0x8353, 0x0038}, {0x8354, 0x0027}, {0x8355, 0x000b}, +{0x8356, 0x00f6}, {0x8357, 0x0020}, {0x8358, 0x0007}, +{0x8359, 0x00fa}, {0x835a, 0x0020}, {0x835b, 0x0027}, +{0x835c, 0x00cb}, {0x835d, 0x0008}, {0x835e, 0x007e}, +{0x835f, 0x0082}, {0x8360, 
0x00d3}, {0x8361, 0x00bd}, +{0x8362, 0x00f7}, {0x8363, 0x0066}, {0x8364, 0x0086}, +{0x8365, 0x0074}, {0x8366, 0x00c6}, {0x8367, 0x0049}, +{0x8368, 0x00bd}, {0x8369, 0x00e4}, {0x836a, 0x0012}, +{0x836b, 0x00ce}, {0x836c, 0x0083}, {0x836d, 0x0071}, +{0x836e, 0x00ff}, {0x836f, 0x0001}, {0x8370, 0x000f}, +{0x8371, 0x0096}, {0x8372, 0x0046}, {0x8373, 0x0084}, +{0x8374, 0x000c}, {0x8375, 0x0081}, {0x8376, 0x0008}, +{0x8377, 0x0026}, {0x8378, 0x000a}, {0x8379, 0x00c6}, +{0x837a, 0x0049}, {0x837b, 0x00bd}, {0x837c, 0x00e4}, +{0x837d, 0x0091}, {0x837e, 0x0025}, {0x837f, 0x0006}, +{0x8380, 0x007e}, {0x8381, 0x0084}, {0x8382, 0x0025}, +{0x8383, 0x007e}, {0x8384, 0x0084}, {0x8385, 0x0016}, +{0x8386, 0x00bd}, {0x8387, 0x00f7}, {0x8388, 0x003e}, +{0x8389, 0x0026}, {0x838a, 0x000e}, {0x838b, 0x00bd}, +{0x838c, 0x00e5}, {0x838d, 0x0009}, {0x838e, 0x0026}, +{0x838f, 0x0006}, {0x8390, 0x00ce}, {0x8391, 0x0082}, +{0x8392, 0x00c1}, {0x8393, 0x00ff}, {0x8394, 0x0001}, +{0x8395, 0x000f}, {0x8396, 0x007e}, {0x8397, 0x0084}, +{0x8398, 0x0025}, {0x8399, 0x00fe}, {0x839a, 0x008f}, +{0x839b, 0x0054}, {0x839c, 0x00bd}, {0x839d, 0x00ec}, +{0x839e, 0x008e}, {0x839f, 0x00bd}, {0x83a0, 0x00fa}, +{0x83a1, 0x00f7}, {0x83a2, 0x00bd}, {0x83a3, 0x00f7}, +{0x83a4, 0x0033}, {0x83a5, 0x0086}, {0x83a6, 0x000f}, +{0x83a7, 0x00c6}, {0x83a8, 0x0051}, {0x83a9, 0x00bd}, +{0x83aa, 0x00e4}, {0x83ab, 0x0012}, {0x83ac, 0x00ce}, +{0x83ad, 0x0083}, {0x83ae, 0x00b2}, {0x83af, 0x00ff}, +{0x83b0, 0x0001}, {0x83b1, 0x000f}, {0x83b2, 0x0096}, +{0x83b3, 0x0046}, {0x83b4, 0x0084}, {0x83b5, 0x000c}, +{0x83b6, 0x0081}, {0x83b7, 0x0008}, {0x83b8, 0x0026}, +{0x83b9, 0x005c}, {0x83ba, 0x00b6}, {0x83bb, 0x0012}, +{0x83bc, 0x0020}, {0x83bd, 0x0084}, {0x83be, 0x003f}, +{0x83bf, 0x0081}, {0x83c0, 0x003a}, {0x83c1, 0x0027}, +{0x83c2, 0x001c}, {0x83c3, 0x0096}, {0x83c4, 0x0023}, +{0x83c5, 0x0085}, {0x83c6, 0x0040}, {0x83c7, 0x0027}, +{0x83c8, 0x0003}, {0x83c9, 0x007e}, {0x83ca, 0x0084}, +{0x83cb, 0x0025}, {0x83cc, 0x00c6}, {0x83cd, 0x0051}, +{0x83ce, 0x00bd}, {0x83cf, 0x00e4}, {0x83d0, 0x0091}, +{0x83d1, 0x0025}, {0x83d2, 0x0003}, {0x83d3, 0x007e}, +{0x83d4, 0x0084}, {0x83d5, 0x0025}, {0x83d6, 0x00ce}, +{0x83d7, 0x0082}, {0x83d8, 0x00c1}, {0x83d9, 0x00ff}, +{0x83da, 0x0001}, {0x83db, 0x000f}, {0x83dc, 0x007e}, +{0x83dd, 0x0084}, {0x83de, 0x0025}, {0x83df, 0x00bd}, +{0x83e0, 0x00f8}, {0x83e1, 0x0037}, {0x83e2, 0x007c}, +{0x83e3, 0x0000}, {0x83e4, 0x007a}, {0x83e5, 0x00ce}, +{0x83e6, 0x0083}, {0x83e7, 0x00ee}, {0x83e8, 0x00ff}, +{0x83e9, 0x0001}, {0x83ea, 0x000f}, {0x83eb, 0x007e}, +{0x83ec, 0x0084}, {0x83ed, 0x0025}, {0x83ee, 0x0096}, +{0x83ef, 0x0046}, {0x83f0, 0x0084}, {0x83f1, 0x000c}, +{0x83f2, 0x0081}, {0x83f3, 0x0008}, {0x83f4, 0x0026}, +{0x83f5, 0x0020}, {0x83f6, 0x0096}, {0x83f7, 0x0024}, +{0x83f8, 0x0084}, {0x83f9, 0x0008}, {0x83fa, 0x0026}, +{0x83fb, 0x0029}, {0x83fc, 0x00b6}, {0x83fd, 0x0018}, +{0x83fe, 0x0082}, {0x83ff, 0x00bb}, {0x8400, 0x0019}, +{0x8401, 0x0082}, {0x8402, 0x00b1}, {0x8403, 0x0001}, +{0x8404, 0x003b}, {0x8405, 0x0022}, {0x8406, 0x0009}, +{0x8407, 0x00b6}, {0x8408, 0x0012}, {0x8409, 0x0020}, +{0x840a, 0x0084}, {0x840b, 0x0037}, {0x840c, 0x0081}, +{0x840d, 0x0032}, {0x840e, 0x0027}, {0x840f, 0x0015}, +{0x8410, 0x00bd}, {0x8411, 0x00f8}, {0x8412, 0x0044}, +{0x8413, 0x007e}, {0x8414, 0x0082}, {0x8415, 0x00c1}, +{0x8416, 0x00bd}, {0x8417, 0x00f7}, {0x8418, 0x001f}, +{0x8419, 0x00bd}, {0x841a, 0x00f8}, {0x841b, 0x0044}, +{0x841c, 0x00bd}, {0x841d, 0x00fc}, {0x841e, 0x0029}, +{0x841f, 0x00ce}, {0x8420, 0x0082}, {0x8421, 0x0025}, 
+{0x8422, 0x00ff}, {0x8423, 0x0001}, {0x8424, 0x000f}, +{0x8425, 0x0039}, {0x8426, 0x0096}, {0x8427, 0x0047}, +{0x8428, 0x0084}, {0x8429, 0x00fc}, {0x842a, 0x008a}, +{0x842b, 0x0000}, {0x842c, 0x0097}, {0x842d, 0x0047}, +{0x842e, 0x00ce}, {0x842f, 0x0084}, {0x8430, 0x0034}, +{0x8431, 0x00ff}, {0x8432, 0x0001}, {0x8433, 0x0011}, +{0x8434, 0x0096}, {0x8435, 0x0046}, {0x8436, 0x0084}, +{0x8437, 0x0003}, {0x8438, 0x0081}, {0x8439, 0x0002}, +{0x843a, 0x0027}, {0x843b, 0x0003}, {0x843c, 0x007e}, +{0x843d, 0x0085}, {0x843e, 0x001e}, {0x843f, 0x0096}, +{0x8440, 0x0047}, {0x8441, 0x0084}, {0x8442, 0x00fc}, +{0x8443, 0x008a}, {0x8444, 0x0002}, {0x8445, 0x0097}, +{0x8446, 0x0047}, {0x8447, 0x00de}, {0x8448, 0x00e1}, +{0x8449, 0x00ad}, {0x844a, 0x0000}, {0x844b, 0x0086}, +{0x844c, 0x0001}, {0x844d, 0x00b7}, {0x844e, 0x0012}, +{0x844f, 0x0051}, {0x8450, 0x00bd}, {0x8451, 0x00f7}, +{0x8452, 0x0014}, {0x8453, 0x00b6}, {0x8454, 0x0010}, +{0x8455, 0x0031}, {0x8456, 0x0084}, {0x8457, 0x00fd}, +{0x8458, 0x00b7}, {0x8459, 0x0010}, {0x845a, 0x0031}, +{0x845b, 0x00bd}, {0x845c, 0x00f8}, {0x845d, 0x001e}, +{0x845e, 0x0096}, {0x845f, 0x0081}, {0x8460, 0x00d6}, +{0x8461, 0x0082}, {0x8462, 0x00fe}, {0x8463, 0x008f}, +{0x8464, 0x005a}, {0x8465, 0x00bd}, {0x8466, 0x00f7}, +{0x8467, 0x00b6}, {0x8468, 0x00fe}, {0x8469, 0x008f}, +{0x846a, 0x005c}, {0x846b, 0x00bd}, {0x846c, 0x00ec}, +{0x846d, 0x008e}, {0x846e, 0x00bd}, {0x846f, 0x00fa}, +{0x8470, 0x00f7}, {0x8471, 0x0086}, {0x8472, 0x0008}, +{0x8473, 0x00d6}, {0x8474, 0x0000}, {0x8475, 0x00c5}, +{0x8476, 0x0010}, {0x8477, 0x0026}, {0x8478, 0x0002}, +{0x8479, 0x008b}, {0x847a, 0x0020}, {0x847b, 0x00c6}, +{0x847c, 0x0051}, {0x847d, 0x00bd}, {0x847e, 0x00e4}, +{0x847f, 0x0012}, {0x8480, 0x00ce}, {0x8481, 0x0084}, +{0x8482, 0x0086}, {0x8483, 0x00ff}, {0x8484, 0x0001}, +{0x8485, 0x0011}, {0x8486, 0x0096}, {0x8487, 0x0046}, +{0x8488, 0x0084}, {0x8489, 0x0003}, {0x848a, 0x0081}, +{0x848b, 0x0002}, {0x848c, 0x0027}, {0x848d, 0x0003}, +{0x848e, 0x007e}, {0x848f, 0x0085}, {0x8490, 0x000f}, +{0x8491, 0x00c6}, {0x8492, 0x0051}, {0x8493, 0x00bd}, +{0x8494, 0x00e4}, {0x8495, 0x0091}, {0x8496, 0x0025}, +{0x8497, 0x0003}, {0x8498, 0x007e}, {0x8499, 0x0085}, +{0x849a, 0x001e}, {0x849b, 0x0096}, {0x849c, 0x0044}, +{0x849d, 0x0085}, {0x849e, 0x0010}, {0x849f, 0x0026}, +{0x84a0, 0x000a}, {0x84a1, 0x00b6}, {0x84a2, 0x0012}, +{0x84a3, 0x0050}, {0x84a4, 0x00ba}, {0x84a5, 0x0001}, +{0x84a6, 0x003c}, {0x84a7, 0x0085}, {0x84a8, 0x0010}, +{0x84a9, 0x0027}, {0x84aa, 0x00a8}, {0x84ab, 0x00bd}, +{0x84ac, 0x00f7}, {0x84ad, 0x0066}, {0x84ae, 0x00ce}, +{0x84af, 0x0084}, {0x84b0, 0x00b7}, {0x84b1, 0x00ff}, +{0x84b2, 0x0001}, {0x84b3, 0x0011}, {0x84b4, 0x007e}, +{0x84b5, 0x0085}, {0x84b6, 0x001e}, {0x84b7, 0x0096}, +{0x84b8, 0x0046}, {0x84b9, 0x0084}, {0x84ba, 0x0003}, +{0x84bb, 0x0081}, {0x84bc, 0x0002}, {0x84bd, 0x0026}, +{0x84be, 0x0050}, {0x84bf, 0x00b6}, {0x84c0, 0x0012}, +{0x84c1, 0x0030}, {0x84c2, 0x0084}, {0x84c3, 0x0003}, +{0x84c4, 0x0081}, {0x84c5, 0x0001}, {0x84c6, 0x0027}, +{0x84c7, 0x0003}, {0x84c8, 0x007e}, {0x84c9, 0x0085}, +{0x84ca, 0x001e}, {0x84cb, 0x0096}, {0x84cc, 0x0044}, +{0x84cd, 0x0085}, {0x84ce, 0x0010}, {0x84cf, 0x0026}, +{0x84d0, 0x0013}, {0x84d1, 0x00b6}, {0x84d2, 0x0012}, +{0x84d3, 0x0050}, {0x84d4, 0x00ba}, {0x84d5, 0x0001}, +{0x84d6, 0x003c}, {0x84d7, 0x0085}, {0x84d8, 0x0010}, +{0x84d9, 0x0026}, {0x84da, 0x0009}, {0x84db, 0x00ce}, +{0x84dc, 0x0084}, {0x84dd, 0x0053}, {0x84de, 0x00ff}, +{0x84df, 0x0001}, {0x84e0, 0x0011}, {0x84e1, 0x007e}, +{0x84e2, 0x0085}, {0x84e3, 
0x001e}, {0x84e4, 0x00b6}, +{0x84e5, 0x0010}, {0x84e6, 0x0031}, {0x84e7, 0x008a}, +{0x84e8, 0x0002}, {0x84e9, 0x00b7}, {0x84ea, 0x0010}, +{0x84eb, 0x0031}, {0x84ec, 0x00bd}, {0x84ed, 0x0085}, +{0x84ee, 0x001f}, {0x84ef, 0x00bd}, {0x84f0, 0x00f8}, +{0x84f1, 0x0037}, {0x84f2, 0x007c}, {0x84f3, 0x0000}, +{0x84f4, 0x0080}, {0x84f5, 0x00ce}, {0x84f6, 0x0084}, +{0x84f7, 0x00fe}, {0x84f8, 0x00ff}, {0x84f9, 0x0001}, +{0x84fa, 0x0011}, {0x84fb, 0x007e}, {0x84fc, 0x0085}, +{0x84fd, 0x001e}, {0x84fe, 0x0096}, {0x84ff, 0x0046}, +{0x8500, 0x0084}, {0x8501, 0x0003}, {0x8502, 0x0081}, +{0x8503, 0x0002}, {0x8504, 0x0026}, {0x8505, 0x0009}, +{0x8506, 0x00b6}, {0x8507, 0x0012}, {0x8508, 0x0030}, +{0x8509, 0x0084}, {0x850a, 0x0003}, {0x850b, 0x0081}, +{0x850c, 0x0001}, {0x850d, 0x0027}, {0x850e, 0x000f}, +{0x850f, 0x00bd}, {0x8510, 0x00f8}, {0x8511, 0x0044}, +{0x8512, 0x00bd}, {0x8513, 0x00f7}, {0x8514, 0x000b}, +{0x8515, 0x00bd}, {0x8516, 0x00fc}, {0x8517, 0x0029}, +{0x8518, 0x00ce}, {0x8519, 0x0084}, {0x851a, 0x0026}, +{0x851b, 0x00ff}, {0x851c, 0x0001}, {0x851d, 0x0011}, +{0x851e, 0x0039}, {0x851f, 0x00d6}, {0x8520, 0x0022}, +{0x8521, 0x00c4}, {0x8522, 0x000f}, {0x8523, 0x00b6}, +{0x8524, 0x0012}, {0x8525, 0x0030}, {0x8526, 0x00ba}, +{0x8527, 0x0012}, {0x8528, 0x0032}, {0x8529, 0x0084}, +{0x852a, 0x0004}, {0x852b, 0x0027}, {0x852c, 0x000d}, +{0x852d, 0x0096}, {0x852e, 0x0022}, {0x852f, 0x0085}, +{0x8530, 0x0004}, {0x8531, 0x0027}, {0x8532, 0x0005}, +{0x8533, 0x00ca}, {0x8534, 0x0010}, {0x8535, 0x007e}, +{0x8536, 0x0085}, {0x8537, 0x003a}, {0x8538, 0x00ca}, +{0x8539, 0x0020}, {0x853a, 0x00d7}, {0x853b, 0x0022}, +{0x853c, 0x0039}, {0x853d, 0x0086}, {0x853e, 0x0000}, +{0x853f, 0x0097}, {0x8540, 0x0083}, {0x8541, 0x0018}, +{0x8542, 0x00ce}, {0x8543, 0x001c}, {0x8544, 0x0000}, +{0x8545, 0x00bd}, {0x8546, 0x00eb}, {0x8547, 0x0046}, +{0x8548, 0x0096}, {0x8549, 0x0057}, {0x854a, 0x0085}, +{0x854b, 0x0001}, {0x854c, 0x0027}, {0x854d, 0x0002}, +{0x854e, 0x004f}, {0x854f, 0x0039}, {0x8550, 0x0085}, +{0x8551, 0x0002}, {0x8552, 0x0027}, {0x8553, 0x0001}, +{0x8554, 0x0039}, {0x8555, 0x007f}, {0x8556, 0x008f}, +{0x8557, 0x007d}, {0x8558, 0x0086}, {0x8559, 0x0004}, +{0x855a, 0x00b7}, {0x855b, 0x0012}, {0x855c, 0x0004}, +{0x855d, 0x0086}, {0x855e, 0x0008}, {0x855f, 0x00b7}, +{0x8560, 0x0012}, {0x8561, 0x0007}, {0x8562, 0x0086}, +{0x8563, 0x0010}, {0x8564, 0x00b7}, {0x8565, 0x0012}, +{0x8566, 0x000c}, {0x8567, 0x0086}, {0x8568, 0x0007}, +{0x8569, 0x00b7}, {0x856a, 0x0012}, {0x856b, 0x0006}, +{0x856c, 0x00b6}, {0x856d, 0x008f}, {0x856e, 0x007d}, +{0x856f, 0x00b7}, {0x8570, 0x0012}, {0x8571, 0x0070}, +{0x8572, 0x0086}, {0x8573, 0x0001}, {0x8574, 0x00ba}, +{0x8575, 0x0012}, {0x8576, 0x0004}, {0x8577, 0x00b7}, +{0x8578, 0x0012}, {0x8579, 0x0004}, {0x857a, 0x0001}, +{0x857b, 0x0001}, {0x857c, 0x0001}, {0x857d, 0x0001}, +{0x857e, 0x0001}, {0x857f, 0x0001}, {0x8580, 0x00b6}, +{0x8581, 0x0012}, {0x8582, 0x0004}, {0x8583, 0x0084}, +{0x8584, 0x00fe}, {0x8585, 0x008a}, {0x8586, 0x0002}, +{0x8587, 0x00b7}, {0x8588, 0x0012}, {0x8589, 0x0004}, +{0x858a, 0x0001}, {0x858b, 0x0001}, {0x858c, 0x0001}, +{0x858d, 0x0001}, {0x858e, 0x0001}, {0x858f, 0x0001}, +{0x8590, 0x0086}, {0x8591, 0x00fd}, {0x8592, 0x00b4}, +{0x8593, 0x0012}, {0x8594, 0x0004}, {0x8595, 0x00b7}, +{0x8596, 0x0012}, {0x8597, 0x0004}, {0x8598, 0x00b6}, +{0x8599, 0x0012}, {0x859a, 0x0000}, {0x859b, 0x0084}, +{0x859c, 0x0008}, {0x859d, 0x0081}, {0x859e, 0x0008}, +{0x859f, 0x0027}, {0x85a0, 0x0016}, {0x85a1, 0x00b6}, +{0x85a2, 0x008f}, {0x85a3, 0x007d}, {0x85a4, 0x0081}, 
+{0x85a5, 0x000c}, {0x85a6, 0x0027}, {0x85a7, 0x0008}, +{0x85a8, 0x008b}, {0x85a9, 0x0004}, {0x85aa, 0x00b7}, +{0x85ab, 0x008f}, {0x85ac, 0x007d}, {0x85ad, 0x007e}, +{0x85ae, 0x0085}, {0x85af, 0x006c}, {0x85b0, 0x0086}, +{0x85b1, 0x0003}, {0x85b2, 0x0097}, {0x85b3, 0x0040}, +{0x85b4, 0x007e}, {0x85b5, 0x0089}, {0x85b6, 0x006e}, +{0x85b7, 0x0086}, {0x85b8, 0x0007}, {0x85b9, 0x00b7}, +{0x85ba, 0x0012}, {0x85bb, 0x0006}, {0x85bc, 0x005f}, +{0x85bd, 0x00f7}, {0x85be, 0x008f}, {0x85bf, 0x0082}, +{0x85c0, 0x005f}, {0x85c1, 0x00f7}, {0x85c2, 0x008f}, +{0x85c3, 0x007f}, {0x85c4, 0x00f7}, {0x85c5, 0x008f}, +{0x85c6, 0x0070}, {0x85c7, 0x00f7}, {0x85c8, 0x008f}, +{0x85c9, 0x0071}, {0x85ca, 0x00f7}, {0x85cb, 0x008f}, +{0x85cc, 0x0072}, {0x85cd, 0x00f7}, {0x85ce, 0x008f}, +{0x85cf, 0x0073}, {0x85d0, 0x00f7}, {0x85d1, 0x008f}, +{0x85d2, 0x0074}, {0x85d3, 0x00f7}, {0x85d4, 0x008f}, +{0x85d5, 0x0075}, {0x85d6, 0x00f7}, {0x85d7, 0x008f}, +{0x85d8, 0x0076}, {0x85d9, 0x00f7}, {0x85da, 0x008f}, +{0x85db, 0x0077}, {0x85dc, 0x00f7}, {0x85dd, 0x008f}, +{0x85de, 0x0078}, {0x85df, 0x00f7}, {0x85e0, 0x008f}, +{0x85e1, 0x0079}, {0x85e2, 0x00f7}, {0x85e3, 0x008f}, +{0x85e4, 0x007a}, {0x85e5, 0x00f7}, {0x85e6, 0x008f}, +{0x85e7, 0x007b}, {0x85e8, 0x00b6}, {0x85e9, 0x0012}, +{0x85ea, 0x0004}, {0x85eb, 0x008a}, {0x85ec, 0x0010}, +{0x85ed, 0x00b7}, {0x85ee, 0x0012}, {0x85ef, 0x0004}, +{0x85f0, 0x0086}, {0x85f1, 0x00e4}, {0x85f2, 0x00b7}, +{0x85f3, 0x0012}, {0x85f4, 0x0070}, {0x85f5, 0x00b7}, +{0x85f6, 0x0012}, {0x85f7, 0x0007}, {0x85f8, 0x00f7}, +{0x85f9, 0x0012}, {0x85fa, 0x0005}, {0x85fb, 0x00f7}, +{0x85fc, 0x0012}, {0x85fd, 0x0009}, {0x85fe, 0x0086}, +{0x85ff, 0x0008}, {0x8600, 0x00ba}, {0x8601, 0x0012}, +{0x8602, 0x0004}, {0x8603, 0x00b7}, {0x8604, 0x0012}, +{0x8605, 0x0004}, {0x8606, 0x0086}, {0x8607, 0x00f7}, +{0x8608, 0x00b4}, {0x8609, 0x0012}, {0x860a, 0x0004}, +{0x860b, 0x00b7}, {0x860c, 0x0012}, {0x860d, 0x0004}, +{0x860e, 0x0001}, {0x860f, 0x0001}, {0x8610, 0x0001}, +{0x8611, 0x0001}, {0x8612, 0x0001}, {0x8613, 0x0001}, +{0x8614, 0x00b6}, {0x8615, 0x0012}, {0x8616, 0x0008}, +{0x8617, 0x0027}, {0x8618, 0x007f}, {0x8619, 0x0081}, +{0x861a, 0x0080}, {0x861b, 0x0026}, {0x861c, 0x000b}, +{0x861d, 0x0086}, {0x861e, 0x0008}, {0x861f, 0x00ce}, +{0x8620, 0x008f}, {0x8621, 0x0079}, {0x8622, 0x00bd}, +{0x8623, 0x0089}, {0x8624, 0x007b}, {0x8625, 0x007e}, +{0x8626, 0x0086}, {0x8627, 0x008e}, {0x8628, 0x0081}, +{0x8629, 0x0040}, {0x862a, 0x0026}, {0x862b, 0x000b}, +{0x862c, 0x0086}, {0x862d, 0x0004}, {0x862e, 0x00ce}, +{0x862f, 0x008f}, {0x8630, 0x0076}, {0x8631, 0x00bd}, +{0x8632, 0x0089}, {0x8633, 0x007b}, {0x8634, 0x007e}, +{0x8635, 0x0086}, {0x8636, 0x008e}, {0x8637, 0x0081}, +{0x8638, 0x0020}, {0x8639, 0x0026}, {0x863a, 0x000b}, +{0x863b, 0x0086}, {0x863c, 0x0002}, {0x863d, 0x00ce}, +{0x863e, 0x008f}, {0x863f, 0x0073}, {0x8640, 0x00bd}, +{0x8641, 0x0089}, {0x8642, 0x007b}, {0x8643, 0x007e}, +{0x8644, 0x0086}, {0x8645, 0x008e}, {0x8646, 0x0081}, +{0x8647, 0x0010}, {0x8648, 0x0026}, {0x8649, 0x000b}, +{0x864a, 0x0086}, {0x864b, 0x0001}, {0x864c, 0x00ce}, +{0x864d, 0x008f}, {0x864e, 0x0070}, {0x864f, 0x00bd}, +{0x8650, 0x0089}, {0x8651, 0x007b}, {0x8652, 0x007e}, +{0x8653, 0x0086}, {0x8654, 0x008e}, {0x8655, 0x0081}, +{0x8656, 0x0008}, {0x8657, 0x0026}, {0x8658, 0x000b}, +{0x8659, 0x0086}, {0x865a, 0x0008}, {0x865b, 0x00ce}, +{0x865c, 0x008f}, {0x865d, 0x0079}, {0x865e, 0x00bd}, +{0x865f, 0x0089}, {0x8660, 0x007f}, {0x8661, 0x007e}, +{0x8662, 0x0086}, {0x8663, 0x008e}, {0x8664, 0x0081}, +{0x8665, 0x0004}, {0x8666, 
0x0026}, {0x8667, 0x000b}, +{0x8668, 0x0086}, {0x8669, 0x0004}, {0x866a, 0x00ce}, +{0x866b, 0x008f}, {0x866c, 0x0076}, {0x866d, 0x00bd}, +{0x866e, 0x0089}, {0x866f, 0x007f}, {0x8670, 0x007e}, +{0x8671, 0x0086}, {0x8672, 0x008e}, {0x8673, 0x0081}, +{0x8674, 0x0002}, {0x8675, 0x0026}, {0x8676, 0x000b}, +{0x8677, 0x008a}, {0x8678, 0x0002}, {0x8679, 0x00ce}, +{0x867a, 0x008f}, {0x867b, 0x0073}, {0x867c, 0x00bd}, +{0x867d, 0x0089}, {0x867e, 0x007f}, {0x867f, 0x007e}, +{0x8680, 0x0086}, {0x8681, 0x008e}, {0x8682, 0x0081}, +{0x8683, 0x0001}, {0x8684, 0x0026}, {0x8685, 0x0008}, +{0x8686, 0x0086}, {0x8687, 0x0001}, {0x8688, 0x00ce}, +{0x8689, 0x008f}, {0x868a, 0x0070}, {0x868b, 0x00bd}, +{0x868c, 0x0089}, {0x868d, 0x007f}, {0x868e, 0x00b6}, +{0x868f, 0x008f}, {0x8690, 0x007f}, {0x8691, 0x0081}, +{0x8692, 0x000f}, {0x8693, 0x0026}, {0x8694, 0x0003}, +{0x8695, 0x007e}, {0x8696, 0x0087}, {0x8697, 0x0047}, +{0x8698, 0x00b6}, {0x8699, 0x0012}, {0x869a, 0x0009}, +{0x869b, 0x0084}, {0x869c, 0x0003}, {0x869d, 0x0081}, +{0x869e, 0x0003}, {0x869f, 0x0027}, {0x86a0, 0x0006}, +{0x86a1, 0x007c}, {0x86a2, 0x0012}, {0x86a3, 0x0009}, +{0x86a4, 0x007e}, {0x86a5, 0x0085}, {0x86a6, 0x00fe}, +{0x86a7, 0x00b6}, {0x86a8, 0x0012}, {0x86a9, 0x0006}, +{0x86aa, 0x0084}, {0x86ab, 0x0007}, {0x86ac, 0x0081}, +{0x86ad, 0x0007}, {0x86ae, 0x0027}, {0x86af, 0x0008}, +{0x86b0, 0x008b}, {0x86b1, 0x0001}, {0x86b2, 0x00b7}, +{0x86b3, 0x0012}, {0x86b4, 0x0006}, {0x86b5, 0x007e}, +{0x86b6, 0x0086}, {0x86b7, 0x00d5}, {0x86b8, 0x00b6}, +{0x86b9, 0x008f}, {0x86ba, 0x0082}, {0x86bb, 0x0026}, +{0x86bc, 0x000a}, {0x86bd, 0x007c}, {0x86be, 0x008f}, +{0x86bf, 0x0082}, {0x86c0, 0x004f}, {0x86c1, 0x00b7}, +{0x86c2, 0x0012}, {0x86c3, 0x0006}, {0x86c4, 0x007e}, +{0x86c5, 0x0085}, {0x86c6, 0x00c0}, {0x86c7, 0x00b6}, +{0x86c8, 0x0012}, {0x86c9, 0x0006}, {0x86ca, 0x0084}, +{0x86cb, 0x003f}, {0x86cc, 0x0081}, {0x86cd, 0x003f}, +{0x86ce, 0x0027}, {0x86cf, 0x0010}, {0x86d0, 0x008b}, +{0x86d1, 0x0008}, {0x86d2, 0x00b7}, {0x86d3, 0x0012}, +{0x86d4, 0x0006}, {0x86d5, 0x00b6}, {0x86d6, 0x0012}, +{0x86d7, 0x0009}, {0x86d8, 0x0084}, {0x86d9, 0x00fc}, +{0x86da, 0x00b7}, {0x86db, 0x0012}, {0x86dc, 0x0009}, +{0x86dd, 0x007e}, {0x86de, 0x0085}, {0x86df, 0x00fe}, +{0x86e0, 0x00ce}, {0x86e1, 0x008f}, {0x86e2, 0x0070}, +{0x86e3, 0x0018}, {0x86e4, 0x00ce}, {0x86e5, 0x008f}, +{0x86e6, 0x0084}, {0x86e7, 0x00c6}, {0x86e8, 0x000c}, +{0x86e9, 0x00bd}, {0x86ea, 0x0089}, {0x86eb, 0x006f}, +{0x86ec, 0x00ce}, {0x86ed, 0x008f}, {0x86ee, 0x0084}, +{0x86ef, 0x0018}, {0x86f0, 0x00ce}, {0x86f1, 0x008f}, +{0x86f2, 0x0070}, {0x86f3, 0x00c6}, {0x86f4, 0x000c}, +{0x86f5, 0x00bd}, {0x86f6, 0x0089}, {0x86f7, 0x006f}, +{0x86f8, 0x00d6}, {0x86f9, 0x0083}, {0x86fa, 0x00c1}, +{0x86fb, 0x004f}, {0x86fc, 0x002d}, {0x86fd, 0x0003}, +{0x86fe, 0x007e}, {0x86ff, 0x0087}, {0x8700, 0x0040}, +{0x8701, 0x00b6}, {0x8702, 0x008f}, {0x8703, 0x007f}, +{0x8704, 0x0081}, {0x8705, 0x0007}, {0x8706, 0x0027}, +{0x8707, 0x000f}, {0x8708, 0x0081}, {0x8709, 0x000b}, +{0x870a, 0x0027}, {0x870b, 0x0015}, {0x870c, 0x0081}, +{0x870d, 0x000d}, {0x870e, 0x0027}, {0x870f, 0x001b}, +{0x8710, 0x0081}, {0x8711, 0x000e}, {0x8712, 0x0027}, +{0x8713, 0x0021}, {0x8714, 0x007e}, {0x8715, 0x0087}, +{0x8716, 0x0040}, {0x8717, 0x00f7}, {0x8718, 0x008f}, +{0x8719, 0x007b}, {0x871a, 0x0086}, {0x871b, 0x0002}, +{0x871c, 0x00b7}, {0x871d, 0x008f}, {0x871e, 0x007a}, +{0x871f, 0x0020}, {0x8720, 0x001c}, {0x8721, 0x00f7}, +{0x8722, 0x008f}, {0x8723, 0x0078}, {0x8724, 0x0086}, +{0x8725, 0x0002}, {0x8726, 0x00b7}, {0x8727, 0x008f}, 
+{0x8728, 0x0077}, {0x8729, 0x0020}, {0x872a, 0x0012}, +{0x872b, 0x00f7}, {0x872c, 0x008f}, {0x872d, 0x0075}, +{0x872e, 0x0086}, {0x872f, 0x0002}, {0x8730, 0x00b7}, +{0x8731, 0x008f}, {0x8732, 0x0074}, {0x8733, 0x0020}, +{0x8734, 0x0008}, {0x8735, 0x00f7}, {0x8736, 0x008f}, +{0x8737, 0x0072}, {0x8738, 0x0086}, {0x8739, 0x0002}, +{0x873a, 0x00b7}, {0x873b, 0x008f}, {0x873c, 0x0071}, +{0x873d, 0x007e}, {0x873e, 0x0087}, {0x873f, 0x0047}, +{0x8740, 0x0086}, {0x8741, 0x0004}, {0x8742, 0x0097}, +{0x8743, 0x0040}, {0x8744, 0x007e}, {0x8745, 0x0089}, +{0x8746, 0x006e}, {0x8747, 0x00ce}, {0x8748, 0x008f}, +{0x8749, 0x0072}, {0x874a, 0x00bd}, {0x874b, 0x0089}, +{0x874c, 0x00f7}, {0x874d, 0x00ce}, {0x874e, 0x008f}, +{0x874f, 0x0075}, {0x8750, 0x00bd}, {0x8751, 0x0089}, +{0x8752, 0x00f7}, {0x8753, 0x00ce}, {0x8754, 0x008f}, +{0x8755, 0x0078}, {0x8756, 0x00bd}, {0x8757, 0x0089}, +{0x8758, 0x00f7}, {0x8759, 0x00ce}, {0x875a, 0x008f}, +{0x875b, 0x007b}, {0x875c, 0x00bd}, {0x875d, 0x0089}, +{0x875e, 0x00f7}, {0x875f, 0x004f}, {0x8760, 0x00b7}, +{0x8761, 0x008f}, {0x8762, 0x007d}, {0x8763, 0x00b7}, +{0x8764, 0x008f}, {0x8765, 0x0081}, {0x8766, 0x00b6}, +{0x8767, 0x008f}, {0x8768, 0x0072}, {0x8769, 0x0027}, +{0x876a, 0x0047}, {0x876b, 0x007c}, {0x876c, 0x008f}, +{0x876d, 0x007d}, {0x876e, 0x00b6}, {0x876f, 0x008f}, +{0x8770, 0x0075}, {0x8771, 0x0027}, {0x8772, 0x003f}, +{0x8773, 0x007c}, {0x8774, 0x008f}, {0x8775, 0x007d}, +{0x8776, 0x00b6}, {0x8777, 0x008f}, {0x8778, 0x0078}, +{0x8779, 0x0027}, {0x877a, 0x0037}, {0x877b, 0x007c}, +{0x877c, 0x008f}, {0x877d, 0x007d}, {0x877e, 0x00b6}, +{0x877f, 0x008f}, {0x8780, 0x007b}, {0x8781, 0x0027}, +{0x8782, 0x002f}, {0x8783, 0x007f}, {0x8784, 0x008f}, +{0x8785, 0x007d}, {0x8786, 0x007c}, {0x8787, 0x008f}, +{0x8788, 0x0081}, {0x8789, 0x007a}, {0x878a, 0x008f}, +{0x878b, 0x0072}, {0x878c, 0x0027}, {0x878d, 0x001b}, +{0x878e, 0x007c}, {0x878f, 0x008f}, {0x8790, 0x007d}, +{0x8791, 0x007a}, {0x8792, 0x008f}, {0x8793, 0x0075}, +{0x8794, 0x0027}, {0x8795, 0x0016}, {0x8796, 0x007c}, +{0x8797, 0x008f}, {0x8798, 0x007d}, {0x8799, 0x007a}, +{0x879a, 0x008f}, {0x879b, 0x0078}, {0x879c, 0x0027}, +{0x879d, 0x0011}, {0x879e, 0x007c}, {0x879f, 0x008f}, +{0x87a0, 0x007d}, {0x87a1, 0x007a}, {0x87a2, 0x008f}, +{0x87a3, 0x007b}, {0x87a4, 0x0027}, {0x87a5, 0x000c}, +{0x87a6, 0x007e}, {0x87a7, 0x0087}, {0x87a8, 0x0083}, +{0x87a9, 0x007a}, {0x87aa, 0x008f}, {0x87ab, 0x0075}, +{0x87ac, 0x007a}, {0x87ad, 0x008f}, {0x87ae, 0x0078}, +{0x87af, 0x007a}, {0x87b0, 0x008f}, {0x87b1, 0x007b}, +{0x87b2, 0x00ce}, {0x87b3, 0x00c1}, {0x87b4, 0x00fc}, +{0x87b5, 0x00f6}, {0x87b6, 0x008f}, {0x87b7, 0x007d}, +{0x87b8, 0x003a}, {0x87b9, 0x00a6}, {0x87ba, 0x0000}, +{0x87bb, 0x00b7}, {0x87bc, 0x0012}, {0x87bd, 0x0070}, +{0x87be, 0x00b6}, {0x87bf, 0x008f}, {0x87c0, 0x0072}, +{0x87c1, 0x0026}, {0x87c2, 0x0003}, {0x87c3, 0x007e}, +{0x87c4, 0x0087}, {0x87c5, 0x00fa}, {0x87c6, 0x00b6}, +{0x87c7, 0x008f}, {0x87c8, 0x0075}, {0x87c9, 0x0026}, +{0x87ca, 0x000a}, {0x87cb, 0x0018}, {0x87cc, 0x00ce}, +{0x87cd, 0x008f}, {0x87ce, 0x0073}, {0x87cf, 0x00bd}, +{0x87d0, 0x0089}, {0x87d1, 0x00d5}, {0x87d2, 0x007e}, +{0x87d3, 0x0087}, {0x87d4, 0x00fa}, {0x87d5, 0x00b6}, +{0x87d6, 0x008f}, {0x87d7, 0x0078}, {0x87d8, 0x0026}, +{0x87d9, 0x000a}, {0x87da, 0x0018}, {0x87db, 0x00ce}, +{0x87dc, 0x008f}, {0x87dd, 0x0076}, {0x87de, 0x00bd}, +{0x87df, 0x0089}, {0x87e0, 0x00d5}, {0x87e1, 0x007e}, +{0x87e2, 0x0087}, {0x87e3, 0x00fa}, {0x87e4, 0x00b6}, +{0x87e5, 0x008f}, {0x87e6, 0x007b}, {0x87e7, 0x0026}, +{0x87e8, 0x000a}, {0x87e9, 
0x0018}, {0x87ea, 0x00ce}, +{0x87eb, 0x008f}, {0x87ec, 0x0079}, {0x87ed, 0x00bd}, +{0x87ee, 0x0089}, {0x87ef, 0x00d5}, {0x87f0, 0x007e}, +{0x87f1, 0x0087}, {0x87f2, 0x00fa}, {0x87f3, 0x0086}, +{0x87f4, 0x0005}, {0x87f5, 0x0097}, {0x87f6, 0x0040}, +{0x87f7, 0x007e}, {0x87f8, 0x0089}, {0x87f9, 0x0000}, +{0x87fa, 0x00b6}, {0x87fb, 0x008f}, {0x87fc, 0x0075}, +{0x87fd, 0x0081}, {0x87fe, 0x0007}, {0x87ff, 0x002e}, +{0x8800, 0x00f2}, {0x8801, 0x00f6}, {0x8802, 0x0012}, +{0x8803, 0x0006}, {0x8804, 0x00c4}, {0x8805, 0x00f8}, +{0x8806, 0x001b}, {0x8807, 0x00b7}, {0x8808, 0x0012}, +{0x8809, 0x0006}, {0x880a, 0x00b6}, {0x880b, 0x008f}, +{0x880c, 0x0078}, {0x880d, 0x0081}, {0x880e, 0x0007}, +{0x880f, 0x002e}, {0x8810, 0x00e2}, {0x8811, 0x0048}, +{0x8812, 0x0048}, {0x8813, 0x0048}, {0x8814, 0x00f6}, +{0x8815, 0x0012}, {0x8816, 0x0006}, {0x8817, 0x00c4}, +{0x8818, 0x00c7}, {0x8819, 0x001b}, {0x881a, 0x00b7}, +{0x881b, 0x0012}, {0x881c, 0x0006}, {0x881d, 0x00b6}, +{0x881e, 0x008f}, {0x881f, 0x007b}, {0x8820, 0x0081}, +{0x8821, 0x0007}, {0x8822, 0x002e}, {0x8823, 0x00cf}, +{0x8824, 0x00f6}, {0x8825, 0x0012}, {0x8826, 0x0005}, +{0x8827, 0x00c4}, {0x8828, 0x00f8}, {0x8829, 0x001b}, +{0x882a, 0x00b7}, {0x882b, 0x0012}, {0x882c, 0x0005}, +{0x882d, 0x0086}, {0x882e, 0x0000}, {0x882f, 0x00f6}, +{0x8830, 0x008f}, {0x8831, 0x0071}, {0x8832, 0x00bd}, +{0x8833, 0x0089}, {0x8834, 0x0094}, {0x8835, 0x0086}, +{0x8836, 0x0001}, {0x8837, 0x00f6}, {0x8838, 0x008f}, +{0x8839, 0x0074}, {0x883a, 0x00bd}, {0x883b, 0x0089}, +{0x883c, 0x0094}, {0x883d, 0x0086}, {0x883e, 0x0002}, +{0x883f, 0x00f6}, {0x8840, 0x008f}, {0x8841, 0x0077}, +{0x8842, 0x00bd}, {0x8843, 0x0089}, {0x8844, 0x0094}, +{0x8845, 0x0086}, {0x8846, 0x0003}, {0x8847, 0x00f6}, +{0x8848, 0x008f}, {0x8849, 0x007a}, {0x884a, 0x00bd}, +{0x884b, 0x0089}, {0x884c, 0x0094}, {0x884d, 0x00ce}, +{0x884e, 0x008f}, {0x884f, 0x0070}, {0x8850, 0x00a6}, +{0x8851, 0x0001}, {0x8852, 0x0081}, {0x8853, 0x0001}, +{0x8854, 0x0027}, {0x8855, 0x0007}, {0x8856, 0x0081}, +{0x8857, 0x0003}, {0x8858, 0x0027}, {0x8859, 0x0003}, +{0x885a, 0x007e}, {0x885b, 0x0088}, {0x885c, 0x0066}, +{0x885d, 0x00a6}, {0x885e, 0x0000}, {0x885f, 0x00b8}, +{0x8860, 0x008f}, {0x8861, 0x0081}, {0x8862, 0x0084}, +{0x8863, 0x0001}, {0x8864, 0x0026}, {0x8865, 0x000b}, +{0x8866, 0x008c}, {0x8867, 0x008f}, {0x8868, 0x0079}, +{0x8869, 0x002c}, {0x886a, 0x000e}, {0x886b, 0x0008}, +{0x886c, 0x0008}, {0x886d, 0x0008}, {0x886e, 0x007e}, +{0x886f, 0x0088}, {0x8870, 0x0050}, {0x8871, 0x00b6}, +{0x8872, 0x0012}, {0x8873, 0x0004}, {0x8874, 0x008a}, +{0x8875, 0x0040}, {0x8876, 0x00b7}, {0x8877, 0x0012}, +{0x8878, 0x0004}, {0x8879, 0x00b6}, {0x887a, 0x0012}, +{0x887b, 0x0004}, {0x887c, 0x0084}, {0x887d, 0x00fb}, +{0x887e, 0x0084}, {0x887f, 0x00ef}, {0x8880, 0x00b7}, +{0x8881, 0x0012}, {0x8882, 0x0004}, {0x8883, 0x00b6}, +{0x8884, 0x0012}, {0x8885, 0x0007}, {0x8886, 0x0036}, +{0x8887, 0x00b6}, {0x8888, 0x008f}, {0x8889, 0x007c}, +{0x888a, 0x0048}, {0x888b, 0x0048}, {0x888c, 0x00b7}, +{0x888d, 0x0012}, {0x888e, 0x0007}, {0x888f, 0x0086}, +{0x8890, 0x0001}, {0x8891, 0x00ba}, {0x8892, 0x0012}, +{0x8893, 0x0004}, {0x8894, 0x00b7}, {0x8895, 0x0012}, +{0x8896, 0x0004}, {0x8897, 0x0001}, {0x8898, 0x0001}, +{0x8899, 0x0001}, {0x889a, 0x0001}, {0x889b, 0x0001}, +{0x889c, 0x0001}, {0x889d, 0x0086}, {0x889e, 0x00fe}, +{0x889f, 0x00b4}, {0x88a0, 0x0012}, {0x88a1, 0x0004}, +{0x88a2, 0x00b7}, {0x88a3, 0x0012}, {0x88a4, 0x0004}, +{0x88a5, 0x0086}, {0x88a6, 0x0002}, {0x88a7, 0x00ba}, +{0x88a8, 0x0012}, {0x88a9, 0x0004}, {0x88aa, 0x00b7}, 
+{0x88ab, 0x0012}, {0x88ac, 0x0004}, {0x88ad, 0x0086}, +{0x88ae, 0x00fd}, {0x88af, 0x00b4}, {0x88b0, 0x0012}, +{0x88b1, 0x0004}, {0x88b2, 0x00b7}, {0x88b3, 0x0012}, +{0x88b4, 0x0004}, {0x88b5, 0x0032}, {0x88b6, 0x00b7}, +{0x88b7, 0x0012}, {0x88b8, 0x0007}, {0x88b9, 0x00b6}, +{0x88ba, 0x0012}, {0x88bb, 0x0000}, {0x88bc, 0x0084}, +{0x88bd, 0x0008}, {0x88be, 0x0081}, {0x88bf, 0x0008}, +{0x88c0, 0x0027}, {0x88c1, 0x000f}, {0x88c2, 0x007c}, +{0x88c3, 0x0082}, {0x88c4, 0x0008}, {0x88c5, 0x0026}, +{0x88c6, 0x0007}, {0x88c7, 0x0086}, {0x88c8, 0x0076}, +{0x88c9, 0x0097}, {0x88ca, 0x0040}, {0x88cb, 0x007e}, +{0x88cc, 0x0089}, {0x88cd, 0x006e}, {0x88ce, 0x007e}, +{0x88cf, 0x0086}, {0x88d0, 0x00ec}, {0x88d1, 0x00b6}, +{0x88d2, 0x008f}, {0x88d3, 0x007f}, {0x88d4, 0x0081}, +{0x88d5, 0x000f}, {0x88d6, 0x0027}, {0x88d7, 0x003c}, +{0x88d8, 0x00bd}, {0x88d9, 0x00e6}, {0x88da, 0x00c7}, +{0x88db, 0x00b7}, {0x88dc, 0x0012}, {0x88dd, 0x000d}, +{0x88de, 0x00bd}, {0x88df, 0x00e6}, {0x88e0, 0x00cb}, +{0x88e1, 0x00b6}, {0x88e2, 0x0012}, {0x88e3, 0x0004}, +{0x88e4, 0x008a}, {0x88e5, 0x0020}, {0x88e6, 0x00b7}, +{0x88e7, 0x0012}, {0x88e8, 0x0004}, {0x88e9, 0x00ce}, +{0x88ea, 0x00ff}, {0x88eb, 0x00ff}, {0x88ec, 0x00b6}, +{0x88ed, 0x0012}, {0x88ee, 0x0000}, {0x88ef, 0x0081}, +{0x88f0, 0x000c}, {0x88f1, 0x0026}, {0x88f2, 0x0005}, +{0x88f3, 0x0009}, {0x88f4, 0x0026}, {0x88f5, 0x00f6}, +{0x88f6, 0x0027}, {0x88f7, 0x001c}, {0x88f8, 0x00b6}, +{0x88f9, 0x0012}, {0x88fa, 0x0004}, {0x88fb, 0x0084}, +{0x88fc, 0x00df}, {0x88fd, 0x00b7}, {0x88fe, 0x0012}, +{0x88ff, 0x0004}, {0x8900, 0x0096}, {0x8901, 0x0083}, +{0x8902, 0x0081}, {0x8903, 0x0007}, {0x8904, 0x002c}, +{0x8905, 0x0005}, {0x8906, 0x007c}, {0x8907, 0x0000}, +{0x8908, 0x0083}, {0x8909, 0x0020}, {0x890a, 0x0006}, +{0x890b, 0x0096}, {0x890c, 0x0083}, {0x890d, 0x008b}, +{0x890e, 0x0008}, {0x890f, 0x0097}, {0x8910, 0x0083}, +{0x8911, 0x007e}, {0x8912, 0x0085}, {0x8913, 0x0041}, +{0x8914, 0x007f}, {0x8915, 0x008f}, {0x8916, 0x007e}, +{0x8917, 0x0086}, {0x8918, 0x0080}, {0x8919, 0x00b7}, +{0x891a, 0x0012}, {0x891b, 0x000c}, {0x891c, 0x0086}, +{0x891d, 0x0001}, {0x891e, 0x00b7}, {0x891f, 0x008f}, +{0x8920, 0x007d}, {0x8921, 0x00b6}, {0x8922, 0x0012}, +{0x8923, 0x000c}, {0x8924, 0x0084}, {0x8925, 0x007f}, +{0x8926, 0x00b7}, {0x8927, 0x0012}, {0x8928, 0x000c}, +{0x8929, 0x008a}, {0x892a, 0x0080}, {0x892b, 0x00b7}, +{0x892c, 0x0012}, {0x892d, 0x000c}, {0x892e, 0x0086}, +{0x892f, 0x000a}, {0x8930, 0x00bd}, {0x8931, 0x008a}, +{0x8932, 0x0006}, {0x8933, 0x00b6}, {0x8934, 0x0012}, +{0x8935, 0x000a}, {0x8936, 0x002a}, {0x8937, 0x0009}, +{0x8938, 0x00b6}, {0x8939, 0x0012}, {0x893a, 0x000c}, +{0x893b, 0x00ba}, {0x893c, 0x008f}, {0x893d, 0x007d}, +{0x893e, 0x00b7}, {0x893f, 0x0012}, {0x8940, 0x000c}, +{0x8941, 0x00b6}, {0x8942, 0x008f}, {0x8943, 0x007e}, +{0x8944, 0x0081}, {0x8945, 0x0060}, {0x8946, 0x0027}, +{0x8947, 0x001a}, {0x8948, 0x008b}, {0x8949, 0x0020}, +{0x894a, 0x00b7}, {0x894b, 0x008f}, {0x894c, 0x007e}, +{0x894d, 0x00b6}, {0x894e, 0x0012}, {0x894f, 0x000c}, +{0x8950, 0x0084}, {0x8951, 0x009f}, {0x8952, 0x00ba}, +{0x8953, 0x008f}, {0x8954, 0x007e}, {0x8955, 0x00b7}, +{0x8956, 0x0012}, {0x8957, 0x000c}, {0x8958, 0x00b6}, +{0x8959, 0x008f}, {0x895a, 0x007d}, {0x895b, 0x0048}, +{0x895c, 0x00b7}, {0x895d, 0x008f}, {0x895e, 0x007d}, +{0x895f, 0x007e}, {0x8960, 0x0089}, {0x8961, 0x0021}, +{0x8962, 0x00b6}, {0x8963, 0x0012}, {0x8964, 0x0004}, +{0x8965, 0x008a}, {0x8966, 0x0020}, {0x8967, 0x00b7}, +{0x8968, 0x0012}, {0x8969, 0x0004}, {0x896a, 0x00bd}, +{0x896b, 0x008a}, {0x896c, 
0x000a}, {0x896d, 0x004f}, +{0x896e, 0x0039}, {0x896f, 0x00a6}, {0x8970, 0x0000}, +{0x8971, 0x0018}, {0x8972, 0x00a7}, {0x8973, 0x0000}, +{0x8974, 0x0008}, {0x8975, 0x0018}, {0x8976, 0x0008}, +{0x8977, 0x005a}, {0x8978, 0x0026}, {0x8979, 0x00f5}, +{0x897a, 0x0039}, {0x897b, 0x0036}, {0x897c, 0x006c}, +{0x897d, 0x0000}, {0x897e, 0x0032}, {0x897f, 0x00ba}, +{0x8980, 0x008f}, {0x8981, 0x007f}, {0x8982, 0x00b7}, +{0x8983, 0x008f}, {0x8984, 0x007f}, {0x8985, 0x00b6}, +{0x8986, 0x0012}, {0x8987, 0x0009}, {0x8988, 0x0084}, +{0x8989, 0x0003}, {0x898a, 0x00a7}, {0x898b, 0x0001}, +{0x898c, 0x00b6}, {0x898d, 0x0012}, {0x898e, 0x0006}, +{0x898f, 0x0084}, {0x8990, 0x003f}, {0x8991, 0x00a7}, +{0x8992, 0x0002}, {0x8993, 0x0039}, {0x8994, 0x0036}, +{0x8995, 0x0086}, {0x8996, 0x0003}, {0x8997, 0x00b7}, +{0x8998, 0x008f}, {0x8999, 0x0080}, {0x899a, 0x0032}, +{0x899b, 0x00c1}, {0x899c, 0x0000}, {0x899d, 0x0026}, +{0x899e, 0x0006}, {0x899f, 0x00b7}, {0x89a0, 0x008f}, +{0x89a1, 0x007c}, {0x89a2, 0x007e}, {0x89a3, 0x0089}, +{0x89a4, 0x00c9}, {0x89a5, 0x00c1}, {0x89a6, 0x0001}, +{0x89a7, 0x0027}, {0x89a8, 0x0018}, {0x89a9, 0x00c1}, +{0x89aa, 0x0002}, {0x89ab, 0x0027}, {0x89ac, 0x000c}, +{0x89ad, 0x00c1}, {0x89ae, 0x0003}, {0x89af, 0x0027}, +{0x89b0, 0x0000}, {0x89b1, 0x00f6}, {0x89b2, 0x008f}, +{0x89b3, 0x0080}, {0x89b4, 0x0005}, {0x89b5, 0x0005}, +{0x89b6, 0x00f7}, {0x89b7, 0x008f}, {0x89b8, 0x0080}, +{0x89b9, 0x00f6}, {0x89ba, 0x008f}, {0x89bb, 0x0080}, +{0x89bc, 0x0005}, {0x89bd, 0x0005}, {0x89be, 0x00f7}, +{0x89bf, 0x008f}, {0x89c0, 0x0080}, {0x89c1, 0x00f6}, +{0x89c2, 0x008f}, {0x89c3, 0x0080}, {0x89c4, 0x0005}, +{0x89c5, 0x0005}, {0x89c6, 0x00f7}, {0x89c7, 0x008f}, +{0x89c8, 0x0080}, {0x89c9, 0x00f6}, {0x89ca, 0x008f}, +{0x89cb, 0x0080}, {0x89cc, 0x0053}, {0x89cd, 0x00f4}, +{0x89ce, 0x0012}, {0x89cf, 0x0007}, {0x89d0, 0x001b}, +{0x89d1, 0x00b7}, {0x89d2, 0x0012}, {0x89d3, 0x0007}, +{0x89d4, 0x0039}, {0x89d5, 0x00ce}, {0x89d6, 0x008f}, +{0x89d7, 0x0070}, {0x89d8, 0x00a6}, {0x89d9, 0x0000}, +{0x89da, 0x0018}, {0x89db, 0x00e6}, {0x89dc, 0x0000}, +{0x89dd, 0x0018}, {0x89de, 0x00a7}, {0x89df, 0x0000}, +{0x89e0, 0x00e7}, {0x89e1, 0x0000}, {0x89e2, 0x00a6}, +{0x89e3, 0x0001}, {0x89e4, 0x0018}, {0x89e5, 0x00e6}, +{0x89e6, 0x0001}, {0x89e7, 0x0018}, {0x89e8, 0x00a7}, +{0x89e9, 0x0001}, {0x89ea, 0x00e7}, {0x89eb, 0x0001}, +{0x89ec, 0x00a6}, {0x89ed, 0x0002}, {0x89ee, 0x0018}, +{0x89ef, 0x00e6}, {0x89f0, 0x0002}, {0x89f1, 0x0018}, +{0x89f2, 0x00a7}, {0x89f3, 0x0002}, {0x89f4, 0x00e7}, +{0x89f5, 0x0002}, {0x89f6, 0x0039}, {0x89f7, 0x00a6}, +{0x89f8, 0x0000}, {0x89f9, 0x0084}, {0x89fa, 0x0007}, +{0x89fb, 0x00e6}, {0x89fc, 0x0000}, {0x89fd, 0x00c4}, +{0x89fe, 0x0038}, {0x89ff, 0x0054}, {0x8a00, 0x0054}, +{0x8a01, 0x0054}, {0x8a02, 0x001b}, {0x8a03, 0x00a7}, +{0x8a04, 0x0000}, {0x8a05, 0x0039}, {0x8a06, 0x004a}, +{0x8a07, 0x0026}, {0x8a08, 0x00fd}, {0x8a09, 0x0039}, +{0x8a0a, 0x0096}, {0x8a0b, 0x0022}, {0x8a0c, 0x0084}, +{0x8a0d, 0x000f}, {0x8a0e, 0x0097}, {0x8a0f, 0x0022}, +{0x8a10, 0x0086}, {0x8a11, 0x0001}, {0x8a12, 0x00b7}, +{0x8a13, 0x008f}, {0x8a14, 0x0070}, {0x8a15, 0x00b6}, +{0x8a16, 0x0012}, {0x8a17, 0x0007}, {0x8a18, 0x00b7}, +{0x8a19, 0x008f}, {0x8a1a, 0x0071}, {0x8a1b, 0x00f6}, +{0x8a1c, 0x0012}, {0x8a1d, 0x000c}, {0x8a1e, 0x00c4}, +{0x8a1f, 0x000f}, {0x8a20, 0x00c8}, {0x8a21, 0x000f}, +{0x8a22, 0x00f7}, {0x8a23, 0x008f}, {0x8a24, 0x0072}, +{0x8a25, 0x00f6}, {0x8a26, 0x008f}, {0x8a27, 0x0072}, +{0x8a28, 0x00b6}, {0x8a29, 0x008f}, {0x8a2a, 0x0071}, +{0x8a2b, 0x0084}, {0x8a2c, 0x0003}, {0x8a2d, 0x0027}, 
+{0x8a2e, 0x0014}, {0x8a2f, 0x0081}, {0x8a30, 0x0001}, +{0x8a31, 0x0027}, {0x8a32, 0x001c}, {0x8a33, 0x0081}, +{0x8a34, 0x0002}, {0x8a35, 0x0027}, {0x8a36, 0x0024}, +{0x8a37, 0x00f4}, {0x8a38, 0x008f}, {0x8a39, 0x0070}, +{0x8a3a, 0x0027}, {0x8a3b, 0x002a}, {0x8a3c, 0x0096}, +{0x8a3d, 0x0022}, {0x8a3e, 0x008a}, {0x8a3f, 0x0080}, +{0x8a40, 0x007e}, {0x8a41, 0x008a}, {0x8a42, 0x0064}, +{0x8a43, 0x00f4}, {0x8a44, 0x008f}, {0x8a45, 0x0070}, +{0x8a46, 0x0027}, {0x8a47, 0x001e}, {0x8a48, 0x0096}, +{0x8a49, 0x0022}, {0x8a4a, 0x008a}, {0x8a4b, 0x0010}, +{0x8a4c, 0x007e}, {0x8a4d, 0x008a}, {0x8a4e, 0x0064}, +{0x8a4f, 0x00f4}, {0x8a50, 0x008f}, {0x8a51, 0x0070}, +{0x8a52, 0x0027}, {0x8a53, 0x0012}, {0x8a54, 0x0096}, +{0x8a55, 0x0022}, {0x8a56, 0x008a}, {0x8a57, 0x0020}, +{0x8a58, 0x007e}, {0x8a59, 0x008a}, {0x8a5a, 0x0064}, +{0x8a5b, 0x00f4}, {0x8a5c, 0x008f}, {0x8a5d, 0x0070}, +{0x8a5e, 0x0027}, {0x8a5f, 0x0006}, {0x8a60, 0x0096}, +{0x8a61, 0x0022}, {0x8a62, 0x008a}, {0x8a63, 0x0040}, +{0x8a64, 0x0097}, {0x8a65, 0x0022}, {0x8a66, 0x0074}, +{0x8a67, 0x008f}, {0x8a68, 0x0071}, {0x8a69, 0x0074}, +{0x8a6a, 0x008f}, {0x8a6b, 0x0071}, {0x8a6c, 0x0078}, +{0x8a6d, 0x008f}, {0x8a6e, 0x0070}, {0x8a6f, 0x00b6}, +{0x8a70, 0x008f}, {0x8a71, 0x0070}, {0x8a72, 0x0085}, +{0x8a73, 0x0010}, {0x8a74, 0x0027}, {0x8a75, 0x00af}, +{0x8a76, 0x00d6}, {0x8a77, 0x0022}, {0x8a78, 0x00c4}, +{0x8a79, 0x0010}, {0x8a7a, 0x0058}, {0x8a7b, 0x00b6}, +{0x8a7c, 0x0012}, {0x8a7d, 0x0070}, {0x8a7e, 0x0081}, +{0x8a7f, 0x00e4}, {0x8a80, 0x0027}, {0x8a81, 0x0036}, +{0x8a82, 0x0081}, {0x8a83, 0x00e1}, {0x8a84, 0x0026}, +{0x8a85, 0x000c}, {0x8a86, 0x0096}, {0x8a87, 0x0022}, +{0x8a88, 0x0084}, {0x8a89, 0x0020}, {0x8a8a, 0x0044}, +{0x8a8b, 0x001b}, {0x8a8c, 0x00d6}, {0x8a8d, 0x0022}, +{0x8a8e, 0x00c4}, {0x8a8f, 0x00cf}, {0x8a90, 0x0020}, +{0x8a91, 0x0023}, {0x8a92, 0x0058}, {0x8a93, 0x0081}, +{0x8a94, 0x00c6}, {0x8a95, 0x0026}, {0x8a96, 0x000d}, +{0x8a97, 0x0096}, {0x8a98, 0x0022}, {0x8a99, 0x0084}, +{0x8a9a, 0x0040}, {0x8a9b, 0x0044}, {0x8a9c, 0x0044}, +{0x8a9d, 0x001b}, {0x8a9e, 0x00d6}, {0x8a9f, 0x0022}, +{0x8aa0, 0x00c4}, {0x8aa1, 0x00af}, {0x8aa2, 0x0020}, +{0x8aa3, 0x0011}, {0x8aa4, 0x0058}, {0x8aa5, 0x0081}, +{0x8aa6, 0x0027}, {0x8aa7, 0x0026}, {0x8aa8, 0x000f}, +{0x8aa9, 0x0096}, {0x8aaa, 0x0022}, {0x8aab, 0x0084}, +{0x8aac, 0x0080}, {0x8aad, 0x0044}, {0x8aae, 0x0044}, +{0x8aaf, 0x0044}, {0x8ab0, 0x001b}, {0x8ab1, 0x00d6}, +{0x8ab2, 0x0022}, {0x8ab3, 0x00c4}, {0x8ab4, 0x006f}, +{0x8ab5, 0x001b}, {0x8ab6, 0x0097}, {0x8ab7, 0x0022}, +{0x8ab8, 0x0039}, {0x8ab9, 0x0027}, {0x8aba, 0x000c}, +{0x8abb, 0x007c}, {0x8abc, 0x0082}, {0x8abd, 0x0006}, +{0x8abe, 0x00bd}, {0x8abf, 0x00d9}, {0x8ac0, 0x00ed}, +{0x8ac1, 0x00b6}, {0x8ac2, 0x0082}, {0x8ac3, 0x0007}, +{0x8ac4, 0x007e}, {0x8ac5, 0x008a}, {0x8ac6, 0x00b9}, +{0x8ac7, 0x007f}, {0x8ac8, 0x0082}, {0x8ac9, 0x0006}, +{0x8aca, 0x0039}, { 0x0, 0x0 } +}; +#else +cas_saturn_patch_t cas_saturn_patch[] = { +{0x8200, 0x007e}, {0x8201, 0x0082}, {0x8202, 0x0009}, +{0x8203, 0x0000}, {0x8204, 0x0000}, {0x8205, 0x0000}, +{0x8206, 0x0000}, {0x8207, 0x0000}, {0x8208, 0x0000}, +{0x8209, 0x008e}, {0x820a, 0x008e}, {0x820b, 0x00ff}, +{0x820c, 0x00ce}, {0x820d, 0x0082}, {0x820e, 0x0025}, +{0x820f, 0x00ff}, {0x8210, 0x0001}, {0x8211, 0x000f}, +{0x8212, 0x00ce}, {0x8213, 0x0084}, {0x8214, 0x0026}, +{0x8215, 0x00ff}, {0x8216, 0x0001}, {0x8217, 0x0011}, +{0x8218, 0x00ce}, {0x8219, 0x0085}, {0x821a, 0x003d}, +{0x821b, 0x00df}, {0x821c, 0x00e5}, {0x821d, 0x0086}, +{0x821e, 0x0039}, {0x821f, 0x00b7}, {0x8220, 0x008f}, 
+{0x8221, 0x00f8}, {0x8222, 0x007e}, {0x8223, 0x00c3}, +{0x8224, 0x00c2}, {0x8225, 0x0096}, {0x8226, 0x0047}, +{0x8227, 0x0084}, {0x8228, 0x00f3}, {0x8229, 0x008a}, +{0x822a, 0x0000}, {0x822b, 0x0097}, {0x822c, 0x0047}, +{0x822d, 0x00ce}, {0x822e, 0x0082}, {0x822f, 0x0033}, +{0x8230, 0x00ff}, {0x8231, 0x0001}, {0x8232, 0x000f}, +{0x8233, 0x0096}, {0x8234, 0x0046}, {0x8235, 0x0084}, +{0x8236, 0x000c}, {0x8237, 0x0081}, {0x8238, 0x0004}, +{0x8239, 0x0027}, {0x823a, 0x000b}, {0x823b, 0x0096}, +{0x823c, 0x0046}, {0x823d, 0x0084}, {0x823e, 0x000c}, +{0x823f, 0x0081}, {0x8240, 0x0008}, {0x8241, 0x0027}, +{0x8242, 0x0057}, {0x8243, 0x007e}, {0x8244, 0x0084}, +{0x8245, 0x0025}, {0x8246, 0x0096}, {0x8247, 0x0047}, +{0x8248, 0x0084}, {0x8249, 0x00f3}, {0x824a, 0x008a}, +{0x824b, 0x0004}, {0x824c, 0x0097}, {0x824d, 0x0047}, +{0x824e, 0x00ce}, {0x824f, 0x0082}, {0x8250, 0x0054}, +{0x8251, 0x00ff}, {0x8252, 0x0001}, {0x8253, 0x000f}, +{0x8254, 0x0096}, {0x8255, 0x0046}, {0x8256, 0x0084}, +{0x8257, 0x000c}, {0x8258, 0x0081}, {0x8259, 0x0004}, +{0x825a, 0x0026}, {0x825b, 0x0038}, {0x825c, 0x00b6}, +{0x825d, 0x0012}, {0x825e, 0x0020}, {0x825f, 0x0084}, +{0x8260, 0x0020}, {0x8261, 0x0026}, {0x8262, 0x0003}, +{0x8263, 0x007e}, {0x8264, 0x0084}, {0x8265, 0x0025}, +{0x8266, 0x0096}, {0x8267, 0x007b}, {0x8268, 0x00d6}, +{0x8269, 0x007c}, {0x826a, 0x00fe}, {0x826b, 0x008f}, +{0x826c, 0x0056}, {0x826d, 0x00bd}, {0x826e, 0x00f7}, +{0x826f, 0x00b6}, {0x8270, 0x00fe}, {0x8271, 0x008f}, +{0x8272, 0x004e}, {0x8273, 0x00bd}, {0x8274, 0x00ec}, +{0x8275, 0x008e}, {0x8276, 0x00bd}, {0x8277, 0x00fa}, +{0x8278, 0x00f7}, {0x8279, 0x00bd}, {0x827a, 0x00f7}, +{0x827b, 0x0028}, {0x827c, 0x00ce}, {0x827d, 0x0082}, +{0x827e, 0x0082}, {0x827f, 0x00ff}, {0x8280, 0x0001}, +{0x8281, 0x000f}, {0x8282, 0x0096}, {0x8283, 0x0046}, +{0x8284, 0x0084}, {0x8285, 0x000c}, {0x8286, 0x0081}, +{0x8287, 0x0004}, {0x8288, 0x0026}, {0x8289, 0x000a}, +{0x828a, 0x00b6}, {0x828b, 0x0012}, {0x828c, 0x0020}, +{0x828d, 0x0084}, {0x828e, 0x0020}, {0x828f, 0x0027}, +{0x8290, 0x00b5}, {0x8291, 0x007e}, {0x8292, 0x0084}, +{0x8293, 0x0025}, {0x8294, 0x00bd}, {0x8295, 0x00f7}, +{0x8296, 0x001f}, {0x8297, 0x007e}, {0x8298, 0x0084}, +{0x8299, 0x001f}, {0x829a, 0x0096}, {0x829b, 0x0047}, +{0x829c, 0x0084}, {0x829d, 0x00f3}, {0x829e, 0x008a}, +{0x829f, 0x0008}, {0x82a0, 0x0097}, {0x82a1, 0x0047}, +{0x82a2, 0x00de}, {0x82a3, 0x00e1}, {0x82a4, 0x00ad}, +{0x82a5, 0x0000}, {0x82a6, 0x00ce}, {0x82a7, 0x0082}, +{0x82a8, 0x00af}, {0x82a9, 0x00ff}, {0x82aa, 0x0001}, +{0x82ab, 0x000f}, {0x82ac, 0x007e}, {0x82ad, 0x0084}, +{0x82ae, 0x0025}, {0x82af, 0x0096}, {0x82b0, 0x0041}, +{0x82b1, 0x0085}, {0x82b2, 0x0010}, {0x82b3, 0x0026}, +{0x82b4, 0x0006}, {0x82b5, 0x0096}, {0x82b6, 0x0023}, +{0x82b7, 0x0085}, {0x82b8, 0x0040}, {0x82b9, 0x0027}, +{0x82ba, 0x0006}, {0x82bb, 0x00bd}, {0x82bc, 0x00ed}, +{0x82bd, 0x0000}, {0x82be, 0x007e}, {0x82bf, 0x0083}, +{0x82c0, 0x00a2}, {0x82c1, 0x00de}, {0x82c2, 0x0042}, +{0x82c3, 0x00bd}, {0x82c4, 0x00eb}, {0x82c5, 0x008e}, +{0x82c6, 0x0096}, {0x82c7, 0x0024}, {0x82c8, 0x0084}, +{0x82c9, 0x0008}, {0x82ca, 0x0027}, {0x82cb, 0x0003}, +{0x82cc, 0x007e}, {0x82cd, 0x0083}, {0x82ce, 0x00df}, +{0x82cf, 0x0096}, {0x82d0, 0x007b}, {0x82d1, 0x00d6}, +{0x82d2, 0x007c}, {0x82d3, 0x00fe}, {0x82d4, 0x008f}, +{0x82d5, 0x0056}, {0x82d6, 0x00bd}, {0x82d7, 0x00f7}, +{0x82d8, 0x00b6}, {0x82d9, 0x00fe}, {0x82da, 0x008f}, +{0x82db, 0x0050}, {0x82dc, 0x00bd}, {0x82dd, 0x00ec}, +{0x82de, 0x008e}, {0x82df, 0x00bd}, {0x82e0, 0x00fa}, +{0x82e1, 0x00f7}, {0x82e2, 
0x0086}, {0x82e3, 0x0011}, +{0x82e4, 0x00c6}, {0x82e5, 0x0049}, {0x82e6, 0x00bd}, +{0x82e7, 0x00e4}, {0x82e8, 0x0012}, {0x82e9, 0x00ce}, +{0x82ea, 0x0082}, {0x82eb, 0x00ef}, {0x82ec, 0x00ff}, +{0x82ed, 0x0001}, {0x82ee, 0x000f}, {0x82ef, 0x0096}, +{0x82f0, 0x0046}, {0x82f1, 0x0084}, {0x82f2, 0x000c}, +{0x82f3, 0x0081}, {0x82f4, 0x0000}, {0x82f5, 0x0027}, +{0x82f6, 0x0017}, {0x82f7, 0x00c6}, {0x82f8, 0x0049}, +{0x82f9, 0x00bd}, {0x82fa, 0x00e4}, {0x82fb, 0x0091}, +{0x82fc, 0x0024}, {0x82fd, 0x000d}, {0x82fe, 0x00b6}, +{0x82ff, 0x0012}, {0x8300, 0x0020}, {0x8301, 0x0085}, +{0x8302, 0x0020}, {0x8303, 0x0026}, {0x8304, 0x000c}, +{0x8305, 0x00ce}, {0x8306, 0x0082}, {0x8307, 0x00c1}, +{0x8308, 0x00ff}, {0x8309, 0x0001}, {0x830a, 0x000f}, +{0x830b, 0x007e}, {0x830c, 0x0084}, {0x830d, 0x0025}, +{0x830e, 0x007e}, {0x830f, 0x0084}, {0x8310, 0x0016}, +{0x8311, 0x00fe}, {0x8312, 0x008f}, {0x8313, 0x0052}, +{0x8314, 0x00bd}, {0x8315, 0x00ec}, {0x8316, 0x008e}, +{0x8317, 0x00bd}, {0x8318, 0x00fa}, {0x8319, 0x00f7}, +{0x831a, 0x0086}, {0x831b, 0x006a}, {0x831c, 0x00c6}, +{0x831d, 0x0049}, {0x831e, 0x00bd}, {0x831f, 0x00e4}, +{0x8320, 0x0012}, {0x8321, 0x00ce}, {0x8322, 0x0083}, +{0x8323, 0x0027}, {0x8324, 0x00ff}, {0x8325, 0x0001}, +{0x8326, 0x000f}, {0x8327, 0x0096}, {0x8328, 0x0046}, +{0x8329, 0x0084}, {0x832a, 0x000c}, {0x832b, 0x0081}, +{0x832c, 0x0000}, {0x832d, 0x0027}, {0x832e, 0x000a}, +{0x832f, 0x00c6}, {0x8330, 0x0049}, {0x8331, 0x00bd}, +{0x8332, 0x00e4}, {0x8333, 0x0091}, {0x8334, 0x0025}, +{0x8335, 0x0006}, {0x8336, 0x007e}, {0x8337, 0x0084}, +{0x8338, 0x0025}, {0x8339, 0x007e}, {0x833a, 0x0084}, +{0x833b, 0x0016}, {0x833c, 0x00b6}, {0x833d, 0x0018}, +{0x833e, 0x0070}, {0x833f, 0x00bb}, {0x8340, 0x0019}, +{0x8341, 0x0070}, {0x8342, 0x002a}, {0x8343, 0x0004}, +{0x8344, 0x0081}, {0x8345, 0x00af}, {0x8346, 0x002e}, +{0x8347, 0x0019}, {0x8348, 0x0096}, {0x8349, 0x007b}, +{0x834a, 0x00f6}, {0x834b, 0x0020}, {0x834c, 0x0007}, +{0x834d, 0x00fa}, {0x834e, 0x0020}, {0x834f, 0x0027}, +{0x8350, 0x00c4}, {0x8351, 0x0038}, {0x8352, 0x0081}, +{0x8353, 0x0038}, {0x8354, 0x0027}, {0x8355, 0x000b}, +{0x8356, 0x00f6}, {0x8357, 0x0020}, {0x8358, 0x0007}, +{0x8359, 0x00fa}, {0x835a, 0x0020}, {0x835b, 0x0027}, +{0x835c, 0x00cb}, {0x835d, 0x0008}, {0x835e, 0x007e}, +{0x835f, 0x0082}, {0x8360, 0x00d3}, {0x8361, 0x00bd}, +{0x8362, 0x00f7}, {0x8363, 0x0066}, {0x8364, 0x0086}, +{0x8365, 0x0074}, {0x8366, 0x00c6}, {0x8367, 0x0049}, +{0x8368, 0x00bd}, {0x8369, 0x00e4}, {0x836a, 0x0012}, +{0x836b, 0x00ce}, {0x836c, 0x0083}, {0x836d, 0x0071}, +{0x836e, 0x00ff}, {0x836f, 0x0001}, {0x8370, 0x000f}, +{0x8371, 0x0096}, {0x8372, 0x0046}, {0x8373, 0x0084}, +{0x8374, 0x000c}, {0x8375, 0x0081}, {0x8376, 0x0008}, +{0x8377, 0x0026}, {0x8378, 0x000a}, {0x8379, 0x00c6}, +{0x837a, 0x0049}, {0x837b, 0x00bd}, {0x837c, 0x00e4}, +{0x837d, 0x0091}, {0x837e, 0x0025}, {0x837f, 0x0006}, +{0x8380, 0x007e}, {0x8381, 0x0084}, {0x8382, 0x0025}, +{0x8383, 0x007e}, {0x8384, 0x0084}, {0x8385, 0x0016}, +{0x8386, 0x00bd}, {0x8387, 0x00f7}, {0x8388, 0x003e}, +{0x8389, 0x0026}, {0x838a, 0x000e}, {0x838b, 0x00bd}, +{0x838c, 0x00e5}, {0x838d, 0x0009}, {0x838e, 0x0026}, +{0x838f, 0x0006}, {0x8390, 0x00ce}, {0x8391, 0x0082}, +{0x8392, 0x00c1}, {0x8393, 0x00ff}, {0x8394, 0x0001}, +{0x8395, 0x000f}, {0x8396, 0x007e}, {0x8397, 0x0084}, +{0x8398, 0x0025}, {0x8399, 0x00fe}, {0x839a, 0x008f}, +{0x839b, 0x0054}, {0x839c, 0x00bd}, {0x839d, 0x00ec}, +{0x839e, 0x008e}, {0x839f, 0x00bd}, {0x83a0, 0x00fa}, +{0x83a1, 0x00f7}, {0x83a2, 0x00bd}, {0x83a3, 0x00f7}, 
+{0x83a4, 0x0033}, {0x83a5, 0x0086}, {0x83a6, 0x000f}, +{0x83a7, 0x00c6}, {0x83a8, 0x0051}, {0x83a9, 0x00bd}, +{0x83aa, 0x00e4}, {0x83ab, 0x0012}, {0x83ac, 0x00ce}, +{0x83ad, 0x0083}, {0x83ae, 0x00b2}, {0x83af, 0x00ff}, +{0x83b0, 0x0001}, {0x83b1, 0x000f}, {0x83b2, 0x0096}, +{0x83b3, 0x0046}, {0x83b4, 0x0084}, {0x83b5, 0x000c}, +{0x83b6, 0x0081}, {0x83b7, 0x0008}, {0x83b8, 0x0026}, +{0x83b9, 0x005c}, {0x83ba, 0x00b6}, {0x83bb, 0x0012}, +{0x83bc, 0x0020}, {0x83bd, 0x0084}, {0x83be, 0x003f}, +{0x83bf, 0x0081}, {0x83c0, 0x003a}, {0x83c1, 0x0027}, +{0x83c2, 0x001c}, {0x83c3, 0x0096}, {0x83c4, 0x0023}, +{0x83c5, 0x0085}, {0x83c6, 0x0040}, {0x83c7, 0x0027}, +{0x83c8, 0x0003}, {0x83c9, 0x007e}, {0x83ca, 0x0084}, +{0x83cb, 0x0025}, {0x83cc, 0x00c6}, {0x83cd, 0x0051}, +{0x83ce, 0x00bd}, {0x83cf, 0x00e4}, {0x83d0, 0x0091}, +{0x83d1, 0x0025}, {0x83d2, 0x0003}, {0x83d3, 0x007e}, +{0x83d4, 0x0084}, {0x83d5, 0x0025}, {0x83d6, 0x00ce}, +{0x83d7, 0x0082}, {0x83d8, 0x00c1}, {0x83d9, 0x00ff}, +{0x83da, 0x0001}, {0x83db, 0x000f}, {0x83dc, 0x007e}, +{0x83dd, 0x0084}, {0x83de, 0x0025}, {0x83df, 0x00bd}, +{0x83e0, 0x00f8}, {0x83e1, 0x0037}, {0x83e2, 0x007c}, +{0x83e3, 0x0000}, {0x83e4, 0x007a}, {0x83e5, 0x00ce}, +{0x83e6, 0x0083}, {0x83e7, 0x00ee}, {0x83e8, 0x00ff}, +{0x83e9, 0x0001}, {0x83ea, 0x000f}, {0x83eb, 0x007e}, +{0x83ec, 0x0084}, {0x83ed, 0x0025}, {0x83ee, 0x0096}, +{0x83ef, 0x0046}, {0x83f0, 0x0084}, {0x83f1, 0x000c}, +{0x83f2, 0x0081}, {0x83f3, 0x0008}, {0x83f4, 0x0026}, +{0x83f5, 0x0020}, {0x83f6, 0x0096}, {0x83f7, 0x0024}, +{0x83f8, 0x0084}, {0x83f9, 0x0008}, {0x83fa, 0x0026}, +{0x83fb, 0x0029}, {0x83fc, 0x00b6}, {0x83fd, 0x0018}, +{0x83fe, 0x0082}, {0x83ff, 0x00bb}, {0x8400, 0x0019}, +{0x8401, 0x0082}, {0x8402, 0x00b1}, {0x8403, 0x0001}, +{0x8404, 0x003b}, {0x8405, 0x0022}, {0x8406, 0x0009}, +{0x8407, 0x00b6}, {0x8408, 0x0012}, {0x8409, 0x0020}, +{0x840a, 0x0084}, {0x840b, 0x0037}, {0x840c, 0x0081}, +{0x840d, 0x0032}, {0x840e, 0x0027}, {0x840f, 0x0015}, +{0x8410, 0x00bd}, {0x8411, 0x00f8}, {0x8412, 0x0044}, +{0x8413, 0x007e}, {0x8414, 0x0082}, {0x8415, 0x00c1}, +{0x8416, 0x00bd}, {0x8417, 0x00f7}, {0x8418, 0x001f}, +{0x8419, 0x00bd}, {0x841a, 0x00f8}, {0x841b, 0x0044}, +{0x841c, 0x00bd}, {0x841d, 0x00fc}, {0x841e, 0x0029}, +{0x841f, 0x00ce}, {0x8420, 0x0082}, {0x8421, 0x0025}, +{0x8422, 0x00ff}, {0x8423, 0x0001}, {0x8424, 0x000f}, +{0x8425, 0x0039}, {0x8426, 0x0096}, {0x8427, 0x0047}, +{0x8428, 0x0084}, {0x8429, 0x00fc}, {0x842a, 0x008a}, +{0x842b, 0x0000}, {0x842c, 0x0097}, {0x842d, 0x0047}, +{0x842e, 0x00ce}, {0x842f, 0x0084}, {0x8430, 0x0034}, +{0x8431, 0x00ff}, {0x8432, 0x0001}, {0x8433, 0x0011}, +{0x8434, 0x0096}, {0x8435, 0x0046}, {0x8436, 0x0084}, +{0x8437, 0x0003}, {0x8438, 0x0081}, {0x8439, 0x0002}, +{0x843a, 0x0027}, {0x843b, 0x0003}, {0x843c, 0x007e}, +{0x843d, 0x0085}, {0x843e, 0x001e}, {0x843f, 0x0096}, +{0x8440, 0x0047}, {0x8441, 0x0084}, {0x8442, 0x00fc}, +{0x8443, 0x008a}, {0x8444, 0x0002}, {0x8445, 0x0097}, +{0x8446, 0x0047}, {0x8447, 0x00de}, {0x8448, 0x00e1}, +{0x8449, 0x00ad}, {0x844a, 0x0000}, {0x844b, 0x0086}, +{0x844c, 0x0001}, {0x844d, 0x00b7}, {0x844e, 0x0012}, +{0x844f, 0x0051}, {0x8450, 0x00bd}, {0x8451, 0x00f7}, +{0x8452, 0x0014}, {0x8453, 0x00b6}, {0x8454, 0x0010}, +{0x8455, 0x0031}, {0x8456, 0x0084}, {0x8457, 0x00fd}, +{0x8458, 0x00b7}, {0x8459, 0x0010}, {0x845a, 0x0031}, +{0x845b, 0x00bd}, {0x845c, 0x00f8}, {0x845d, 0x001e}, +{0x845e, 0x0096}, {0x845f, 0x0081}, {0x8460, 0x00d6}, +{0x8461, 0x0082}, {0x8462, 0x00fe}, {0x8463, 0x008f}, +{0x8464, 0x005a}, {0x8465, 
0x00bd}, {0x8466, 0x00f7}, +{0x8467, 0x00b6}, {0x8468, 0x00fe}, {0x8469, 0x008f}, +{0x846a, 0x005c}, {0x846b, 0x00bd}, {0x846c, 0x00ec}, +{0x846d, 0x008e}, {0x846e, 0x00bd}, {0x846f, 0x00fa}, +{0x8470, 0x00f7}, {0x8471, 0x0086}, {0x8472, 0x0008}, +{0x8473, 0x00d6}, {0x8474, 0x0000}, {0x8475, 0x00c5}, +{0x8476, 0x0010}, {0x8477, 0x0026}, {0x8478, 0x0002}, +{0x8479, 0x008b}, {0x847a, 0x0020}, {0x847b, 0x00c6}, +{0x847c, 0x0051}, {0x847d, 0x00bd}, {0x847e, 0x00e4}, +{0x847f, 0x0012}, {0x8480, 0x00ce}, {0x8481, 0x0084}, +{0x8482, 0x0086}, {0x8483, 0x00ff}, {0x8484, 0x0001}, +{0x8485, 0x0011}, {0x8486, 0x0096}, {0x8487, 0x0046}, +{0x8488, 0x0084}, {0x8489, 0x0003}, {0x848a, 0x0081}, +{0x848b, 0x0002}, {0x848c, 0x0027}, {0x848d, 0x0003}, +{0x848e, 0x007e}, {0x848f, 0x0085}, {0x8490, 0x000f}, +{0x8491, 0x00c6}, {0x8492, 0x0051}, {0x8493, 0x00bd}, +{0x8494, 0x00e4}, {0x8495, 0x0091}, {0x8496, 0x0025}, +{0x8497, 0x0003}, {0x8498, 0x007e}, {0x8499, 0x0085}, +{0x849a, 0x001e}, {0x849b, 0x0096}, {0x849c, 0x0044}, +{0x849d, 0x0085}, {0x849e, 0x0010}, {0x849f, 0x0026}, +{0x84a0, 0x000a}, {0x84a1, 0x00b6}, {0x84a2, 0x0012}, +{0x84a3, 0x0050}, {0x84a4, 0x00ba}, {0x84a5, 0x0001}, +{0x84a6, 0x003c}, {0x84a7, 0x0085}, {0x84a8, 0x0010}, +{0x84a9, 0x0027}, {0x84aa, 0x00a8}, {0x84ab, 0x00bd}, +{0x84ac, 0x00f7}, {0x84ad, 0x0066}, {0x84ae, 0x00ce}, +{0x84af, 0x0084}, {0x84b0, 0x00b7}, {0x84b1, 0x00ff}, +{0x84b2, 0x0001}, {0x84b3, 0x0011}, {0x84b4, 0x007e}, +{0x84b5, 0x0085}, {0x84b6, 0x001e}, {0x84b7, 0x0096}, +{0x84b8, 0x0046}, {0x84b9, 0x0084}, {0x84ba, 0x0003}, +{0x84bb, 0x0081}, {0x84bc, 0x0002}, {0x84bd, 0x0026}, +{0x84be, 0x0050}, {0x84bf, 0x00b6}, {0x84c0, 0x0012}, +{0x84c1, 0x0030}, {0x84c2, 0x0084}, {0x84c3, 0x0003}, +{0x84c4, 0x0081}, {0x84c5, 0x0001}, {0x84c6, 0x0027}, +{0x84c7, 0x0003}, {0x84c8, 0x007e}, {0x84c9, 0x0085}, +{0x84ca, 0x001e}, {0x84cb, 0x0096}, {0x84cc, 0x0044}, +{0x84cd, 0x0085}, {0x84ce, 0x0010}, {0x84cf, 0x0026}, +{0x84d0, 0x0013}, {0x84d1, 0x00b6}, {0x84d2, 0x0012}, +{0x84d3, 0x0050}, {0x84d4, 0x00ba}, {0x84d5, 0x0001}, +{0x84d6, 0x003c}, {0x84d7, 0x0085}, {0x84d8, 0x0010}, +{0x84d9, 0x0026}, {0x84da, 0x0009}, {0x84db, 0x00ce}, +{0x84dc, 0x0084}, {0x84dd, 0x0053}, {0x84de, 0x00ff}, +{0x84df, 0x0001}, {0x84e0, 0x0011}, {0x84e1, 0x007e}, +{0x84e2, 0x0085}, {0x84e3, 0x001e}, {0x84e4, 0x00b6}, +{0x84e5, 0x0010}, {0x84e6, 0x0031}, {0x84e7, 0x008a}, +{0x84e8, 0x0002}, {0x84e9, 0x00b7}, {0x84ea, 0x0010}, +{0x84eb, 0x0031}, {0x84ec, 0x00bd}, {0x84ed, 0x0085}, +{0x84ee, 0x001f}, {0x84ef, 0x00bd}, {0x84f0, 0x00f8}, +{0x84f1, 0x0037}, {0x84f2, 0x007c}, {0x84f3, 0x0000}, +{0x84f4, 0x0080}, {0x84f5, 0x00ce}, {0x84f6, 0x0084}, +{0x84f7, 0x00fe}, {0x84f8, 0x00ff}, {0x84f9, 0x0001}, +{0x84fa, 0x0011}, {0x84fb, 0x007e}, {0x84fc, 0x0085}, +{0x84fd, 0x001e}, {0x84fe, 0x0096}, {0x84ff, 0x0046}, +{0x8500, 0x0084}, {0x8501, 0x0003}, {0x8502, 0x0081}, +{0x8503, 0x0002}, {0x8504, 0x0026}, {0x8505, 0x0009}, +{0x8506, 0x00b6}, {0x8507, 0x0012}, {0x8508, 0x0030}, +{0x8509, 0x0084}, {0x850a, 0x0003}, {0x850b, 0x0081}, +{0x850c, 0x0001}, {0x850d, 0x0027}, {0x850e, 0x000f}, +{0x850f, 0x00bd}, {0x8510, 0x00f8}, {0x8511, 0x0044}, +{0x8512, 0x00bd}, {0x8513, 0x00f7}, {0x8514, 0x000b}, +{0x8515, 0x00bd}, {0x8516, 0x00fc}, {0x8517, 0x0029}, +{0x8518, 0x00ce}, {0x8519, 0x0084}, {0x851a, 0x0026}, +{0x851b, 0x00ff}, {0x851c, 0x0001}, {0x851d, 0x0011}, +{0x851e, 0x0039}, {0x851f, 0x00d6}, {0x8520, 0x0022}, +{0x8521, 0x00c4}, {0x8522, 0x000f}, {0x8523, 0x00b6}, +{0x8524, 0x0012}, {0x8525, 0x0030}, {0x8526, 0x00ba}, 
+{0x8527, 0x0012}, {0x8528, 0x0032}, {0x8529, 0x0084}, +{0x852a, 0x0004}, {0x852b, 0x0027}, {0x852c, 0x000d}, +{0x852d, 0x0096}, {0x852e, 0x0022}, {0x852f, 0x0085}, +{0x8530, 0x0004}, {0x8531, 0x0027}, {0x8532, 0x0005}, +{0x8533, 0x00ca}, {0x8534, 0x0010}, {0x8535, 0x007e}, +{0x8536, 0x0085}, {0x8537, 0x003a}, {0x8538, 0x00ca}, +{0x8539, 0x0020}, {0x853a, 0x00d7}, {0x853b, 0x0022}, +{0x853c, 0x0039}, {0x853d, 0x0086}, {0x853e, 0x0000}, +{0x853f, 0x0097}, {0x8540, 0x0083}, {0x8541, 0x0018}, +{0x8542, 0x00ce}, {0x8543, 0x001c}, {0x8544, 0x0000}, +{0x8545, 0x00bd}, {0x8546, 0x00eb}, {0x8547, 0x0046}, +{0x8548, 0x0096}, {0x8549, 0x0057}, {0x854a, 0x0085}, +{0x854b, 0x0001}, {0x854c, 0x0027}, {0x854d, 0x0002}, +{0x854e, 0x004f}, {0x854f, 0x0039}, {0x8550, 0x0085}, +{0x8551, 0x0002}, {0x8552, 0x0027}, {0x8553, 0x0001}, +{0x8554, 0x0039}, {0x8555, 0x007f}, {0x8556, 0x008f}, +{0x8557, 0x007d}, {0x8558, 0x0086}, {0x8559, 0x0004}, +{0x855a, 0x00b7}, {0x855b, 0x0012}, {0x855c, 0x0004}, +{0x855d, 0x0086}, {0x855e, 0x0008}, {0x855f, 0x00b7}, +{0x8560, 0x0012}, {0x8561, 0x0007}, {0x8562, 0x0086}, +{0x8563, 0x0010}, {0x8564, 0x00b7}, {0x8565, 0x0012}, +{0x8566, 0x000c}, {0x8567, 0x0086}, {0x8568, 0x0007}, +{0x8569, 0x00b7}, {0x856a, 0x0012}, {0x856b, 0x0006}, +{0x856c, 0x00b6}, {0x856d, 0x008f}, {0x856e, 0x007d}, +{0x856f, 0x00b7}, {0x8570, 0x0012}, {0x8571, 0x0070}, +{0x8572, 0x0086}, {0x8573, 0x0001}, {0x8574, 0x00ba}, +{0x8575, 0x0012}, {0x8576, 0x0004}, {0x8577, 0x00b7}, +{0x8578, 0x0012}, {0x8579, 0x0004}, {0x857a, 0x0001}, +{0x857b, 0x0001}, {0x857c, 0x0001}, {0x857d, 0x0001}, +{0x857e, 0x0001}, {0x857f, 0x0001}, {0x8580, 0x00b6}, +{0x8581, 0x0012}, {0x8582, 0x0004}, {0x8583, 0x0084}, +{0x8584, 0x00fe}, {0x8585, 0x008a}, {0x8586, 0x0002}, +{0x8587, 0x00b7}, {0x8588, 0x0012}, {0x8589, 0x0004}, +{0x858a, 0x0001}, {0x858b, 0x0001}, {0x858c, 0x0001}, +{0x858d, 0x0001}, {0x858e, 0x0001}, {0x858f, 0x0001}, +{0x8590, 0x0086}, {0x8591, 0x00fd}, {0x8592, 0x00b4}, +{0x8593, 0x0012}, {0x8594, 0x0004}, {0x8595, 0x00b7}, +{0x8596, 0x0012}, {0x8597, 0x0004}, {0x8598, 0x00b6}, +{0x8599, 0x0012}, {0x859a, 0x0000}, {0x859b, 0x0084}, +{0x859c, 0x0008}, {0x859d, 0x0081}, {0x859e, 0x0008}, +{0x859f, 0x0027}, {0x85a0, 0x0016}, {0x85a1, 0x00b6}, +{0x85a2, 0x008f}, {0x85a3, 0x007d}, {0x85a4, 0x0081}, +{0x85a5, 0x000c}, {0x85a6, 0x0027}, {0x85a7, 0x0008}, +{0x85a8, 0x008b}, {0x85a9, 0x0004}, {0x85aa, 0x00b7}, +{0x85ab, 0x008f}, {0x85ac, 0x007d}, {0x85ad, 0x007e}, +{0x85ae, 0x0085}, {0x85af, 0x006c}, {0x85b0, 0x0086}, +{0x85b1, 0x0003}, {0x85b2, 0x0097}, {0x85b3, 0x0040}, +{0x85b4, 0x007e}, {0x85b5, 0x0089}, {0x85b6, 0x006e}, +{0x85b7, 0x0086}, {0x85b8, 0x0007}, {0x85b9, 0x00b7}, +{0x85ba, 0x0012}, {0x85bb, 0x0006}, {0x85bc, 0x005f}, +{0x85bd, 0x00f7}, {0x85be, 0x008f}, {0x85bf, 0x0082}, +{0x85c0, 0x005f}, {0x85c1, 0x00f7}, {0x85c2, 0x008f}, +{0x85c3, 0x007f}, {0x85c4, 0x00f7}, {0x85c5, 0x008f}, +{0x85c6, 0x0070}, {0x85c7, 0x00f7}, {0x85c8, 0x008f}, +{0x85c9, 0x0071}, {0x85ca, 0x00f7}, {0x85cb, 0x008f}, +{0x85cc, 0x0072}, {0x85cd, 0x00f7}, {0x85ce, 0x008f}, +{0x85cf, 0x0073}, {0x85d0, 0x00f7}, {0x85d1, 0x008f}, +{0x85d2, 0x0074}, {0x85d3, 0x00f7}, {0x85d4, 0x008f}, +{0x85d5, 0x0075}, {0x85d6, 0x00f7}, {0x85d7, 0x008f}, +{0x85d8, 0x0076}, {0x85d9, 0x00f7}, {0x85da, 0x008f}, +{0x85db, 0x0077}, {0x85dc, 0x00f7}, {0x85dd, 0x008f}, +{0x85de, 0x0078}, {0x85df, 0x00f7}, {0x85e0, 0x008f}, +{0x85e1, 0x0079}, {0x85e2, 0x00f7}, {0x85e3, 0x008f}, +{0x85e4, 0x007a}, {0x85e5, 0x00f7}, {0x85e6, 0x008f}, +{0x85e7, 0x007b}, {0x85e8, 
0x00b6}, {0x85e9, 0x0012}, +{0x85ea, 0x0004}, {0x85eb, 0x008a}, {0x85ec, 0x0010}, +{0x85ed, 0x00b7}, {0x85ee, 0x0012}, {0x85ef, 0x0004}, +{0x85f0, 0x0086}, {0x85f1, 0x00e4}, {0x85f2, 0x00b7}, +{0x85f3, 0x0012}, {0x85f4, 0x0070}, {0x85f5, 0x00b7}, +{0x85f6, 0x0012}, {0x85f7, 0x0007}, {0x85f8, 0x00f7}, +{0x85f9, 0x0012}, {0x85fa, 0x0005}, {0x85fb, 0x00f7}, +{0x85fc, 0x0012}, {0x85fd, 0x0009}, {0x85fe, 0x0086}, +{0x85ff, 0x0008}, {0x8600, 0x00ba}, {0x8601, 0x0012}, +{0x8602, 0x0004}, {0x8603, 0x00b7}, {0x8604, 0x0012}, +{0x8605, 0x0004}, {0x8606, 0x0086}, {0x8607, 0x00f7}, +{0x8608, 0x00b4}, {0x8609, 0x0012}, {0x860a, 0x0004}, +{0x860b, 0x00b7}, {0x860c, 0x0012}, {0x860d, 0x0004}, +{0x860e, 0x0001}, {0x860f, 0x0001}, {0x8610, 0x0001}, +{0x8611, 0x0001}, {0x8612, 0x0001}, {0x8613, 0x0001}, +{0x8614, 0x00b6}, {0x8615, 0x0012}, {0x8616, 0x0008}, +{0x8617, 0x0027}, {0x8618, 0x007f}, {0x8619, 0x0081}, +{0x861a, 0x0080}, {0x861b, 0x0026}, {0x861c, 0x000b}, +{0x861d, 0x0086}, {0x861e, 0x0008}, {0x861f, 0x00ce}, +{0x8620, 0x008f}, {0x8621, 0x0079}, {0x8622, 0x00bd}, +{0x8623, 0x0089}, {0x8624, 0x007b}, {0x8625, 0x007e}, +{0x8626, 0x0086}, {0x8627, 0x008e}, {0x8628, 0x0081}, +{0x8629, 0x0040}, {0x862a, 0x0026}, {0x862b, 0x000b}, +{0x862c, 0x0086}, {0x862d, 0x0004}, {0x862e, 0x00ce}, +{0x862f, 0x008f}, {0x8630, 0x0076}, {0x8631, 0x00bd}, +{0x8632, 0x0089}, {0x8633, 0x007b}, {0x8634, 0x007e}, +{0x8635, 0x0086}, {0x8636, 0x008e}, {0x8637, 0x0081}, +{0x8638, 0x0020}, {0x8639, 0x0026}, {0x863a, 0x000b}, +{0x863b, 0x0086}, {0x863c, 0x0002}, {0x863d, 0x00ce}, +{0x863e, 0x008f}, {0x863f, 0x0073}, {0x8640, 0x00bd}, +{0x8641, 0x0089}, {0x8642, 0x007b}, {0x8643, 0x007e}, +{0x8644, 0x0086}, {0x8645, 0x008e}, {0x8646, 0x0081}, +{0x8647, 0x0010}, {0x8648, 0x0026}, {0x8649, 0x000b}, +{0x864a, 0x0086}, {0x864b, 0x0001}, {0x864c, 0x00ce}, +{0x864d, 0x008f}, {0x864e, 0x0070}, {0x864f, 0x00bd}, +{0x8650, 0x0089}, {0x8651, 0x007b}, {0x8652, 0x007e}, +{0x8653, 0x0086}, {0x8654, 0x008e}, {0x8655, 0x0081}, +{0x8656, 0x0008}, {0x8657, 0x0026}, {0x8658, 0x000b}, +{0x8659, 0x0086}, {0x865a, 0x0008}, {0x865b, 0x00ce}, +{0x865c, 0x008f}, {0x865d, 0x0079}, {0x865e, 0x00bd}, +{0x865f, 0x0089}, {0x8660, 0x007f}, {0x8661, 0x007e}, +{0x8662, 0x0086}, {0x8663, 0x008e}, {0x8664, 0x0081}, +{0x8665, 0x0004}, {0x8666, 0x0026}, {0x8667, 0x000b}, +{0x8668, 0x0086}, {0x8669, 0x0004}, {0x866a, 0x00ce}, +{0x866b, 0x008f}, {0x866c, 0x0076}, {0x866d, 0x00bd}, +{0x866e, 0x0089}, {0x866f, 0x007f}, {0x8670, 0x007e}, +{0x8671, 0x0086}, {0x8672, 0x008e}, {0x8673, 0x0081}, +{0x8674, 0x0002}, {0x8675, 0x0026}, {0x8676, 0x000b}, +{0x8677, 0x008a}, {0x8678, 0x0002}, {0x8679, 0x00ce}, +{0x867a, 0x008f}, {0x867b, 0x0073}, {0x867c, 0x00bd}, +{0x867d, 0x0089}, {0x867e, 0x007f}, {0x867f, 0x007e}, +{0x8680, 0x0086}, {0x8681, 0x008e}, {0x8682, 0x0081}, +{0x8683, 0x0001}, {0x8684, 0x0026}, {0x8685, 0x0008}, +{0x8686, 0x0086}, {0x8687, 0x0001}, {0x8688, 0x00ce}, +{0x8689, 0x008f}, {0x868a, 0x0070}, {0x868b, 0x00bd}, +{0x868c, 0x0089}, {0x868d, 0x007f}, {0x868e, 0x00b6}, +{0x868f, 0x008f}, {0x8690, 0x007f}, {0x8691, 0x0081}, +{0x8692, 0x000f}, {0x8693, 0x0026}, {0x8694, 0x0003}, +{0x8695, 0x007e}, {0x8696, 0x0087}, {0x8697, 0x0047}, +{0x8698, 0x00b6}, {0x8699, 0x0012}, {0x869a, 0x0009}, +{0x869b, 0x0084}, {0x869c, 0x0003}, {0x869d, 0x0081}, +{0x869e, 0x0003}, {0x869f, 0x0027}, {0x86a0, 0x0006}, +{0x86a1, 0x007c}, {0x86a2, 0x0012}, {0x86a3, 0x0009}, +{0x86a4, 0x007e}, {0x86a5, 0x0085}, {0x86a6, 0x00fe}, +{0x86a7, 0x00b6}, {0x86a8, 0x0012}, {0x86a9, 0x0006}, 
+{0x86aa, 0x0084}, {0x86ab, 0x0007}, {0x86ac, 0x0081}, +{0x86ad, 0x0007}, {0x86ae, 0x0027}, {0x86af, 0x0008}, +{0x86b0, 0x008b}, {0x86b1, 0x0001}, {0x86b2, 0x00b7}, +{0x86b3, 0x0012}, {0x86b4, 0x0006}, {0x86b5, 0x007e}, +{0x86b6, 0x0086}, {0x86b7, 0x00d5}, {0x86b8, 0x00b6}, +{0x86b9, 0x008f}, {0x86ba, 0x0082}, {0x86bb, 0x0026}, +{0x86bc, 0x000a}, {0x86bd, 0x007c}, {0x86be, 0x008f}, +{0x86bf, 0x0082}, {0x86c0, 0x004f}, {0x86c1, 0x00b7}, +{0x86c2, 0x0012}, {0x86c3, 0x0006}, {0x86c4, 0x007e}, +{0x86c5, 0x0085}, {0x86c6, 0x00c0}, {0x86c7, 0x00b6}, +{0x86c8, 0x0012}, {0x86c9, 0x0006}, {0x86ca, 0x0084}, +{0x86cb, 0x003f}, {0x86cc, 0x0081}, {0x86cd, 0x003f}, +{0x86ce, 0x0027}, {0x86cf, 0x0010}, {0x86d0, 0x008b}, +{0x86d1, 0x0008}, {0x86d2, 0x00b7}, {0x86d3, 0x0012}, +{0x86d4, 0x0006}, {0x86d5, 0x00b6}, {0x86d6, 0x0012}, +{0x86d7, 0x0009}, {0x86d8, 0x0084}, {0x86d9, 0x00fc}, +{0x86da, 0x00b7}, {0x86db, 0x0012}, {0x86dc, 0x0009}, +{0x86dd, 0x007e}, {0x86de, 0x0085}, {0x86df, 0x00fe}, +{0x86e0, 0x00ce}, {0x86e1, 0x008f}, {0x86e2, 0x0070}, +{0x86e3, 0x0018}, {0x86e4, 0x00ce}, {0x86e5, 0x008f}, +{0x86e6, 0x0084}, {0x86e7, 0x00c6}, {0x86e8, 0x000c}, +{0x86e9, 0x00bd}, {0x86ea, 0x0089}, {0x86eb, 0x006f}, +{0x86ec, 0x00ce}, {0x86ed, 0x008f}, {0x86ee, 0x0084}, +{0x86ef, 0x0018}, {0x86f0, 0x00ce}, {0x86f1, 0x008f}, +{0x86f2, 0x0070}, {0x86f3, 0x00c6}, {0x86f4, 0x000c}, +{0x86f5, 0x00bd}, {0x86f6, 0x0089}, {0x86f7, 0x006f}, +{0x86f8, 0x00d6}, {0x86f9, 0x0083}, {0x86fa, 0x00c1}, +{0x86fb, 0x004f}, {0x86fc, 0x002d}, {0x86fd, 0x0003}, +{0x86fe, 0x007e}, {0x86ff, 0x0087}, {0x8700, 0x0040}, +{0x8701, 0x00b6}, {0x8702, 0x008f}, {0x8703, 0x007f}, +{0x8704, 0x0081}, {0x8705, 0x0007}, {0x8706, 0x0027}, +{0x8707, 0x000f}, {0x8708, 0x0081}, {0x8709, 0x000b}, +{0x870a, 0x0027}, {0x870b, 0x0015}, {0x870c, 0x0081}, +{0x870d, 0x000d}, {0x870e, 0x0027}, {0x870f, 0x001b}, +{0x8710, 0x0081}, {0x8711, 0x000e}, {0x8712, 0x0027}, +{0x8713, 0x0021}, {0x8714, 0x007e}, {0x8715, 0x0087}, +{0x8716, 0x0040}, {0x8717, 0x00f7}, {0x8718, 0x008f}, +{0x8719, 0x007b}, {0x871a, 0x0086}, {0x871b, 0x0002}, +{0x871c, 0x00b7}, {0x871d, 0x008f}, {0x871e, 0x007a}, +{0x871f, 0x0020}, {0x8720, 0x001c}, {0x8721, 0x00f7}, +{0x8722, 0x008f}, {0x8723, 0x0078}, {0x8724, 0x0086}, +{0x8725, 0x0002}, {0x8726, 0x00b7}, {0x8727, 0x008f}, +{0x8728, 0x0077}, {0x8729, 0x0020}, {0x872a, 0x0012}, +{0x872b, 0x00f7}, {0x872c, 0x008f}, {0x872d, 0x0075}, +{0x872e, 0x0086}, {0x872f, 0x0002}, {0x8730, 0x00b7}, +{0x8731, 0x008f}, {0x8732, 0x0074}, {0x8733, 0x0020}, +{0x8734, 0x0008}, {0x8735, 0x00f7}, {0x8736, 0x008f}, +{0x8737, 0x0072}, {0x8738, 0x0086}, {0x8739, 0x0002}, +{0x873a, 0x00b7}, {0x873b, 0x008f}, {0x873c, 0x0071}, +{0x873d, 0x007e}, {0x873e, 0x0087}, {0x873f, 0x0047}, +{0x8740, 0x0086}, {0x8741, 0x0004}, {0x8742, 0x0097}, +{0x8743, 0x0040}, {0x8744, 0x007e}, {0x8745, 0x0089}, +{0x8746, 0x006e}, {0x8747, 0x00ce}, {0x8748, 0x008f}, +{0x8749, 0x0072}, {0x874a, 0x00bd}, {0x874b, 0x0089}, +{0x874c, 0x00f7}, {0x874d, 0x00ce}, {0x874e, 0x008f}, +{0x874f, 0x0075}, {0x8750, 0x00bd}, {0x8751, 0x0089}, +{0x8752, 0x00f7}, {0x8753, 0x00ce}, {0x8754, 0x008f}, +{0x8755, 0x0078}, {0x8756, 0x00bd}, {0x8757, 0x0089}, +{0x8758, 0x00f7}, {0x8759, 0x00ce}, {0x875a, 0x008f}, +{0x875b, 0x007b}, {0x875c, 0x00bd}, {0x875d, 0x0089}, +{0x875e, 0x00f7}, {0x875f, 0x004f}, {0x8760, 0x00b7}, +{0x8761, 0x008f}, {0x8762, 0x007d}, {0x8763, 0x00b7}, +{0x8764, 0x008f}, {0x8765, 0x0081}, {0x8766, 0x00b6}, +{0x8767, 0x008f}, {0x8768, 0x0072}, {0x8769, 0x0027}, +{0x876a, 0x0047}, {0x876b, 
0x007c}, {0x876c, 0x008f}, +{0x876d, 0x007d}, {0x876e, 0x00b6}, {0x876f, 0x008f}, +{0x8770, 0x0075}, {0x8771, 0x0027}, {0x8772, 0x003f}, +{0x8773, 0x007c}, {0x8774, 0x008f}, {0x8775, 0x007d}, +{0x8776, 0x00b6}, {0x8777, 0x008f}, {0x8778, 0x0078}, +{0x8779, 0x0027}, {0x877a, 0x0037}, {0x877b, 0x007c}, +{0x877c, 0x008f}, {0x877d, 0x007d}, {0x877e, 0x00b6}, +{0x877f, 0x008f}, {0x8780, 0x007b}, {0x8781, 0x0027}, +{0x8782, 0x002f}, {0x8783, 0x007f}, {0x8784, 0x008f}, +{0x8785, 0x007d}, {0x8786, 0x007c}, {0x8787, 0x008f}, +{0x8788, 0x0081}, {0x8789, 0x007a}, {0x878a, 0x008f}, +{0x878b, 0x0072}, {0x878c, 0x0027}, {0x878d, 0x001b}, +{0x878e, 0x007c}, {0x878f, 0x008f}, {0x8790, 0x007d}, +{0x8791, 0x007a}, {0x8792, 0x008f}, {0x8793, 0x0075}, +{0x8794, 0x0027}, {0x8795, 0x0016}, {0x8796, 0x007c}, +{0x8797, 0x008f}, {0x8798, 0x007d}, {0x8799, 0x007a}, +{0x879a, 0x008f}, {0x879b, 0x0078}, {0x879c, 0x0027}, +{0x879d, 0x0011}, {0x879e, 0x007c}, {0x879f, 0x008f}, +{0x87a0, 0x007d}, {0x87a1, 0x007a}, {0x87a2, 0x008f}, +{0x87a3, 0x007b}, {0x87a4, 0x0027}, {0x87a5, 0x000c}, +{0x87a6, 0x007e}, {0x87a7, 0x0087}, {0x87a8, 0x0083}, +{0x87a9, 0x007a}, {0x87aa, 0x008f}, {0x87ab, 0x0075}, +{0x87ac, 0x007a}, {0x87ad, 0x008f}, {0x87ae, 0x0078}, +{0x87af, 0x007a}, {0x87b0, 0x008f}, {0x87b1, 0x007b}, +{0x87b2, 0x00ce}, {0x87b3, 0x00c1}, {0x87b4, 0x00fc}, +{0x87b5, 0x00f6}, {0x87b6, 0x008f}, {0x87b7, 0x007d}, +{0x87b8, 0x003a}, {0x87b9, 0x00a6}, {0x87ba, 0x0000}, +{0x87bb, 0x00b7}, {0x87bc, 0x0012}, {0x87bd, 0x0070}, +{0x87be, 0x00b6}, {0x87bf, 0x008f}, {0x87c0, 0x0072}, +{0x87c1, 0x0026}, {0x87c2, 0x0003}, {0x87c3, 0x007e}, +{0x87c4, 0x0087}, {0x87c5, 0x00fa}, {0x87c6, 0x00b6}, +{0x87c7, 0x008f}, {0x87c8, 0x0075}, {0x87c9, 0x0026}, +{0x87ca, 0x000a}, {0x87cb, 0x0018}, {0x87cc, 0x00ce}, +{0x87cd, 0x008f}, {0x87ce, 0x0073}, {0x87cf, 0x00bd}, +{0x87d0, 0x0089}, {0x87d1, 0x00d5}, {0x87d2, 0x007e}, +{0x87d3, 0x0087}, {0x87d4, 0x00fa}, {0x87d5, 0x00b6}, +{0x87d6, 0x008f}, {0x87d7, 0x0078}, {0x87d8, 0x0026}, +{0x87d9, 0x000a}, {0x87da, 0x0018}, {0x87db, 0x00ce}, +{0x87dc, 0x008f}, {0x87dd, 0x0076}, {0x87de, 0x00bd}, +{0x87df, 0x0089}, {0x87e0, 0x00d5}, {0x87e1, 0x007e}, +{0x87e2, 0x0087}, {0x87e3, 0x00fa}, {0x87e4, 0x00b6}, +{0x87e5, 0x008f}, {0x87e6, 0x007b}, {0x87e7, 0x0026}, +{0x87e8, 0x000a}, {0x87e9, 0x0018}, {0x87ea, 0x00ce}, +{0x87eb, 0x008f}, {0x87ec, 0x0079}, {0x87ed, 0x00bd}, +{0x87ee, 0x0089}, {0x87ef, 0x00d5}, {0x87f0, 0x007e}, +{0x87f1, 0x0087}, {0x87f2, 0x00fa}, {0x87f3, 0x0086}, +{0x87f4, 0x0005}, {0x87f5, 0x0097}, {0x87f6, 0x0040}, +{0x87f7, 0x007e}, {0x87f8, 0x0089}, {0x87f9, 0x006e}, +{0x87fa, 0x00b6}, {0x87fb, 0x008f}, {0x87fc, 0x0075}, +{0x87fd, 0x0081}, {0x87fe, 0x0007}, {0x87ff, 0x002e}, +{0x8800, 0x00f2}, {0x8801, 0x00f6}, {0x8802, 0x0012}, +{0x8803, 0x0006}, {0x8804, 0x00c4}, {0x8805, 0x00f8}, +{0x8806, 0x001b}, {0x8807, 0x00b7}, {0x8808, 0x0012}, +{0x8809, 0x0006}, {0x880a, 0x00b6}, {0x880b, 0x008f}, +{0x880c, 0x0078}, {0x880d, 0x0081}, {0x880e, 0x0007}, +{0x880f, 0x002e}, {0x8810, 0x00e2}, {0x8811, 0x0048}, +{0x8812, 0x0048}, {0x8813, 0x0048}, {0x8814, 0x00f6}, +{0x8815, 0x0012}, {0x8816, 0x0006}, {0x8817, 0x00c4}, +{0x8818, 0x00c7}, {0x8819, 0x001b}, {0x881a, 0x00b7}, +{0x881b, 0x0012}, {0x881c, 0x0006}, {0x881d, 0x00b6}, +{0x881e, 0x008f}, {0x881f, 0x007b}, {0x8820, 0x0081}, +{0x8821, 0x0007}, {0x8822, 0x002e}, {0x8823, 0x00cf}, +{0x8824, 0x00f6}, {0x8825, 0x0012}, {0x8826, 0x0005}, +{0x8827, 0x00c4}, {0x8828, 0x00f8}, {0x8829, 0x001b}, +{0x882a, 0x00b7}, {0x882b, 0x0012}, {0x882c, 0x0005}, 
+{0x882d, 0x0086}, {0x882e, 0x0000}, {0x882f, 0x00f6}, +{0x8830, 0x008f}, {0x8831, 0x0071}, {0x8832, 0x00bd}, +{0x8833, 0x0089}, {0x8834, 0x0094}, {0x8835, 0x0086}, +{0x8836, 0x0001}, {0x8837, 0x00f6}, {0x8838, 0x008f}, +{0x8839, 0x0074}, {0x883a, 0x00bd}, {0x883b, 0x0089}, +{0x883c, 0x0094}, {0x883d, 0x0086}, {0x883e, 0x0002}, +{0x883f, 0x00f6}, {0x8840, 0x008f}, {0x8841, 0x0077}, +{0x8842, 0x00bd}, {0x8843, 0x0089}, {0x8844, 0x0094}, +{0x8845, 0x0086}, {0x8846, 0x0003}, {0x8847, 0x00f6}, +{0x8848, 0x008f}, {0x8849, 0x007a}, {0x884a, 0x00bd}, +{0x884b, 0x0089}, {0x884c, 0x0094}, {0x884d, 0x00ce}, +{0x884e, 0x008f}, {0x884f, 0x0070}, {0x8850, 0x00a6}, +{0x8851, 0x0001}, {0x8852, 0x0081}, {0x8853, 0x0001}, +{0x8854, 0x0027}, {0x8855, 0x0007}, {0x8856, 0x0081}, +{0x8857, 0x0003}, {0x8858, 0x0027}, {0x8859, 0x0003}, +{0x885a, 0x007e}, {0x885b, 0x0088}, {0x885c, 0x0066}, +{0x885d, 0x00a6}, {0x885e, 0x0000}, {0x885f, 0x00b8}, +{0x8860, 0x008f}, {0x8861, 0x0081}, {0x8862, 0x0084}, +{0x8863, 0x0001}, {0x8864, 0x0026}, {0x8865, 0x000b}, +{0x8866, 0x008c}, {0x8867, 0x008f}, {0x8868, 0x0079}, +{0x8869, 0x002c}, {0x886a, 0x000e}, {0x886b, 0x0008}, +{0x886c, 0x0008}, {0x886d, 0x0008}, {0x886e, 0x007e}, +{0x886f, 0x0088}, {0x8870, 0x0050}, {0x8871, 0x00b6}, +{0x8872, 0x0012}, {0x8873, 0x0004}, {0x8874, 0x008a}, +{0x8875, 0x0040}, {0x8876, 0x00b7}, {0x8877, 0x0012}, +{0x8878, 0x0004}, {0x8879, 0x00b6}, {0x887a, 0x0012}, +{0x887b, 0x0004}, {0x887c, 0x0084}, {0x887d, 0x00fb}, +{0x887e, 0x0084}, {0x887f, 0x00ef}, {0x8880, 0x00b7}, +{0x8881, 0x0012}, {0x8882, 0x0004}, {0x8883, 0x00b6}, +{0x8884, 0x0012}, {0x8885, 0x0007}, {0x8886, 0x0036}, +{0x8887, 0x00b6}, {0x8888, 0x008f}, {0x8889, 0x007c}, +{0x888a, 0x0048}, {0x888b, 0x0048}, {0x888c, 0x00b7}, +{0x888d, 0x0012}, {0x888e, 0x0007}, {0x888f, 0x0086}, +{0x8890, 0x0001}, {0x8891, 0x00ba}, {0x8892, 0x0012}, +{0x8893, 0x0004}, {0x8894, 0x00b7}, {0x8895, 0x0012}, +{0x8896, 0x0004}, {0x8897, 0x0001}, {0x8898, 0x0001}, +{0x8899, 0x0001}, {0x889a, 0x0001}, {0x889b, 0x0001}, +{0x889c, 0x0001}, {0x889d, 0x0086}, {0x889e, 0x00fe}, +{0x889f, 0x00b4}, {0x88a0, 0x0012}, {0x88a1, 0x0004}, +{0x88a2, 0x00b7}, {0x88a3, 0x0012}, {0x88a4, 0x0004}, +{0x88a5, 0x0086}, {0x88a6, 0x0002}, {0x88a7, 0x00ba}, +{0x88a8, 0x0012}, {0x88a9, 0x0004}, {0x88aa, 0x00b7}, +{0x88ab, 0x0012}, {0x88ac, 0x0004}, {0x88ad, 0x0086}, +{0x88ae, 0x00fd}, {0x88af, 0x00b4}, {0x88b0, 0x0012}, +{0x88b1, 0x0004}, {0x88b2, 0x00b7}, {0x88b3, 0x0012}, +{0x88b4, 0x0004}, {0x88b5, 0x0032}, {0x88b6, 0x00b7}, +{0x88b7, 0x0012}, {0x88b8, 0x0007}, {0x88b9, 0x00b6}, +{0x88ba, 0x0012}, {0x88bb, 0x0000}, {0x88bc, 0x0084}, +{0x88bd, 0x0008}, {0x88be, 0x0081}, {0x88bf, 0x0008}, +{0x88c0, 0x0027}, {0x88c1, 0x000f}, {0x88c2, 0x007c}, +{0x88c3, 0x0082}, {0x88c4, 0x0008}, {0x88c5, 0x0026}, +{0x88c6, 0x0007}, {0x88c7, 0x0086}, {0x88c8, 0x0076}, +{0x88c9, 0x0097}, {0x88ca, 0x0040}, {0x88cb, 0x007e}, +{0x88cc, 0x0089}, {0x88cd, 0x006e}, {0x88ce, 0x007e}, +{0x88cf, 0x0086}, {0x88d0, 0x00ec}, {0x88d1, 0x00b6}, +{0x88d2, 0x008f}, {0x88d3, 0x007f}, {0x88d4, 0x0081}, +{0x88d5, 0x000f}, {0x88d6, 0x0027}, {0x88d7, 0x003c}, +{0x88d8, 0x00bd}, {0x88d9, 0x00e6}, {0x88da, 0x00c7}, +{0x88db, 0x00b7}, {0x88dc, 0x0012}, {0x88dd, 0x000d}, +{0x88de, 0x00bd}, {0x88df, 0x00e6}, {0x88e0, 0x00cb}, +{0x88e1, 0x00b6}, {0x88e2, 0x0012}, {0x88e3, 0x0004}, +{0x88e4, 0x008a}, {0x88e5, 0x0020}, {0x88e6, 0x00b7}, +{0x88e7, 0x0012}, {0x88e8, 0x0004}, {0x88e9, 0x00ce}, +{0x88ea, 0x00ff}, {0x88eb, 0x00ff}, {0x88ec, 0x00b6}, +{0x88ed, 0x0012}, {0x88ee, 
0x0000}, {0x88ef, 0x0081}, +{0x88f0, 0x000c}, {0x88f1, 0x0026}, {0x88f2, 0x0005}, +{0x88f3, 0x0009}, {0x88f4, 0x0026}, {0x88f5, 0x00f6}, +{0x88f6, 0x0027}, {0x88f7, 0x001c}, {0x88f8, 0x00b6}, +{0x88f9, 0x0012}, {0x88fa, 0x0004}, {0x88fb, 0x0084}, +{0x88fc, 0x00df}, {0x88fd, 0x00b7}, {0x88fe, 0x0012}, +{0x88ff, 0x0004}, {0x8900, 0x0096}, {0x8901, 0x0083}, +{0x8902, 0x0081}, {0x8903, 0x0007}, {0x8904, 0x002c}, +{0x8905, 0x0005}, {0x8906, 0x007c}, {0x8907, 0x0000}, +{0x8908, 0x0083}, {0x8909, 0x0020}, {0x890a, 0x0006}, +{0x890b, 0x0096}, {0x890c, 0x0083}, {0x890d, 0x008b}, +{0x890e, 0x0008}, {0x890f, 0x0097}, {0x8910, 0x0083}, +{0x8911, 0x007e}, {0x8912, 0x0085}, {0x8913, 0x0041}, +{0x8914, 0x007f}, {0x8915, 0x008f}, {0x8916, 0x007e}, +{0x8917, 0x0086}, {0x8918, 0x0080}, {0x8919, 0x00b7}, +{0x891a, 0x0012}, {0x891b, 0x000c}, {0x891c, 0x0086}, +{0x891d, 0x0001}, {0x891e, 0x00b7}, {0x891f, 0x008f}, +{0x8920, 0x007d}, {0x8921, 0x00b6}, {0x8922, 0x0012}, +{0x8923, 0x000c}, {0x8924, 0x0084}, {0x8925, 0x007f}, +{0x8926, 0x00b7}, {0x8927, 0x0012}, {0x8928, 0x000c}, +{0x8929, 0x008a}, {0x892a, 0x0080}, {0x892b, 0x00b7}, +{0x892c, 0x0012}, {0x892d, 0x000c}, {0x892e, 0x0086}, +{0x892f, 0x000a}, {0x8930, 0x00bd}, {0x8931, 0x008a}, +{0x8932, 0x0006}, {0x8933, 0x00b6}, {0x8934, 0x0012}, +{0x8935, 0x000a}, {0x8936, 0x002a}, {0x8937, 0x0009}, +{0x8938, 0x00b6}, {0x8939, 0x0012}, {0x893a, 0x000c}, +{0x893b, 0x00ba}, {0x893c, 0x008f}, {0x893d, 0x007d}, +{0x893e, 0x00b7}, {0x893f, 0x0012}, {0x8940, 0x000c}, +{0x8941, 0x00b6}, {0x8942, 0x008f}, {0x8943, 0x007e}, +{0x8944, 0x0081}, {0x8945, 0x0060}, {0x8946, 0x0027}, +{0x8947, 0x001a}, {0x8948, 0x008b}, {0x8949, 0x0020}, +{0x894a, 0x00b7}, {0x894b, 0x008f}, {0x894c, 0x007e}, +{0x894d, 0x00b6}, {0x894e, 0x0012}, {0x894f, 0x000c}, +{0x8950, 0x0084}, {0x8951, 0x009f}, {0x8952, 0x00ba}, +{0x8953, 0x008f}, {0x8954, 0x007e}, {0x8955, 0x00b7}, +{0x8956, 0x0012}, {0x8957, 0x000c}, {0x8958, 0x00b6}, +{0x8959, 0x008f}, {0x895a, 0x007d}, {0x895b, 0x0048}, +{0x895c, 0x00b7}, {0x895d, 0x008f}, {0x895e, 0x007d}, +{0x895f, 0x007e}, {0x8960, 0x0089}, {0x8961, 0x0021}, +{0x8962, 0x00b6}, {0x8963, 0x0012}, {0x8964, 0x0004}, +{0x8965, 0x008a}, {0x8966, 0x0020}, {0x8967, 0x00b7}, +{0x8968, 0x0012}, {0x8969, 0x0004}, {0x896a, 0x00bd}, +{0x896b, 0x008a}, {0x896c, 0x000a}, {0x896d, 0x004f}, +{0x896e, 0x0039}, {0x896f, 0x00a6}, {0x8970, 0x0000}, +{0x8971, 0x0018}, {0x8972, 0x00a7}, {0x8973, 0x0000}, +{0x8974, 0x0008}, {0x8975, 0x0018}, {0x8976, 0x0008}, +{0x8977, 0x005a}, {0x8978, 0x0026}, {0x8979, 0x00f5}, +{0x897a, 0x0039}, {0x897b, 0x0036}, {0x897c, 0x006c}, +{0x897d, 0x0000}, {0x897e, 0x0032}, {0x897f, 0x00ba}, +{0x8980, 0x008f}, {0x8981, 0x007f}, {0x8982, 0x00b7}, +{0x8983, 0x008f}, {0x8984, 0x007f}, {0x8985, 0x00b6}, +{0x8986, 0x0012}, {0x8987, 0x0009}, {0x8988, 0x0084}, +{0x8989, 0x0003}, {0x898a, 0x00a7}, {0x898b, 0x0001}, +{0x898c, 0x00b6}, {0x898d, 0x0012}, {0x898e, 0x0006}, +{0x898f, 0x0084}, {0x8990, 0x003f}, {0x8991, 0x00a7}, +{0x8992, 0x0002}, {0x8993, 0x0039}, {0x8994, 0x0036}, +{0x8995, 0x0086}, {0x8996, 0x0003}, {0x8997, 0x00b7}, +{0x8998, 0x008f}, {0x8999, 0x0080}, {0x899a, 0x0032}, +{0x899b, 0x00c1}, {0x899c, 0x0000}, {0x899d, 0x0026}, +{0x899e, 0x0006}, {0x899f, 0x00b7}, {0x89a0, 0x008f}, +{0x89a1, 0x007c}, {0x89a2, 0x007e}, {0x89a3, 0x0089}, +{0x89a4, 0x00c9}, {0x89a5, 0x00c1}, {0x89a6, 0x0001}, +{0x89a7, 0x0027}, {0x89a8, 0x0018}, {0x89a9, 0x00c1}, +{0x89aa, 0x0002}, {0x89ab, 0x0027}, {0x89ac, 0x000c}, +{0x89ad, 0x00c1}, {0x89ae, 0x0003}, {0x89af, 0x0027}, 
+{0x89b0, 0x0000}, {0x89b1, 0x00f6}, {0x89b2, 0x008f}, +{0x89b3, 0x0080}, {0x89b4, 0x0005}, {0x89b5, 0x0005}, +{0x89b6, 0x00f7}, {0x89b7, 0x008f}, {0x89b8, 0x0080}, +{0x89b9, 0x00f6}, {0x89ba, 0x008f}, {0x89bb, 0x0080}, +{0x89bc, 0x0005}, {0x89bd, 0x0005}, {0x89be, 0x00f7}, +{0x89bf, 0x008f}, {0x89c0, 0x0080}, {0x89c1, 0x00f6}, +{0x89c2, 0x008f}, {0x89c3, 0x0080}, {0x89c4, 0x0005}, +{0x89c5, 0x0005}, {0x89c6, 0x00f7}, {0x89c7, 0x008f}, +{0x89c8, 0x0080}, {0x89c9, 0x00f6}, {0x89ca, 0x008f}, +{0x89cb, 0x0080}, {0x89cc, 0x0053}, {0x89cd, 0x00f4}, +{0x89ce, 0x0012}, {0x89cf, 0x0007}, {0x89d0, 0x001b}, +{0x89d1, 0x00b7}, {0x89d2, 0x0012}, {0x89d3, 0x0007}, +{0x89d4, 0x0039}, {0x89d5, 0x00ce}, {0x89d6, 0x008f}, +{0x89d7, 0x0070}, {0x89d8, 0x00a6}, {0x89d9, 0x0000}, +{0x89da, 0x0018}, {0x89db, 0x00e6}, {0x89dc, 0x0000}, +{0x89dd, 0x0018}, {0x89de, 0x00a7}, {0x89df, 0x0000}, +{0x89e0, 0x00e7}, {0x89e1, 0x0000}, {0x89e2, 0x00a6}, +{0x89e3, 0x0001}, {0x89e4, 0x0018}, {0x89e5, 0x00e6}, +{0x89e6, 0x0001}, {0x89e7, 0x0018}, {0x89e8, 0x00a7}, +{0x89e9, 0x0001}, {0x89ea, 0x00e7}, {0x89eb, 0x0001}, +{0x89ec, 0x00a6}, {0x89ed, 0x0002}, {0x89ee, 0x0018}, +{0x89ef, 0x00e6}, {0x89f0, 0x0002}, {0x89f1, 0x0018}, +{0x89f2, 0x00a7}, {0x89f3, 0x0002}, {0x89f4, 0x00e7}, +{0x89f5, 0x0002}, {0x89f6, 0x0039}, {0x89f7, 0x00a6}, +{0x89f8, 0x0000}, {0x89f9, 0x0084}, {0x89fa, 0x0007}, +{0x89fb, 0x00e6}, {0x89fc, 0x0000}, {0x89fd, 0x00c4}, +{0x89fe, 0x0038}, {0x89ff, 0x0054}, {0x8a00, 0x0054}, +{0x8a01, 0x0054}, {0x8a02, 0x001b}, {0x8a03, 0x00a7}, +{0x8a04, 0x0000}, {0x8a05, 0x0039}, {0x8a06, 0x004a}, +{0x8a07, 0x0026}, {0x8a08, 0x00fd}, {0x8a09, 0x0039}, +{0x8a0a, 0x0096}, {0x8a0b, 0x0022}, {0x8a0c, 0x0084}, +{0x8a0d, 0x000f}, {0x8a0e, 0x0097}, {0x8a0f, 0x0022}, +{0x8a10, 0x0086}, {0x8a11, 0x0001}, {0x8a12, 0x00b7}, +{0x8a13, 0x008f}, {0x8a14, 0x0070}, {0x8a15, 0x00b6}, +{0x8a16, 0x0012}, {0x8a17, 0x0007}, {0x8a18, 0x00b7}, +{0x8a19, 0x008f}, {0x8a1a, 0x0071}, {0x8a1b, 0x00f6}, +{0x8a1c, 0x0012}, {0x8a1d, 0x000c}, {0x8a1e, 0x00c4}, +{0x8a1f, 0x000f}, {0x8a20, 0x00c8}, {0x8a21, 0x000f}, +{0x8a22, 0x00f7}, {0x8a23, 0x008f}, {0x8a24, 0x0072}, +{0x8a25, 0x00f6}, {0x8a26, 0x008f}, {0x8a27, 0x0072}, +{0x8a28, 0x00b6}, {0x8a29, 0x008f}, {0x8a2a, 0x0071}, +{0x8a2b, 0x0084}, {0x8a2c, 0x0003}, {0x8a2d, 0x0027}, +{0x8a2e, 0x0014}, {0x8a2f, 0x0081}, {0x8a30, 0x0001}, +{0x8a31, 0x0027}, {0x8a32, 0x001c}, {0x8a33, 0x0081}, +{0x8a34, 0x0002}, {0x8a35, 0x0027}, {0x8a36, 0x0024}, +{0x8a37, 0x00f4}, {0x8a38, 0x008f}, {0x8a39, 0x0070}, +{0x8a3a, 0x0027}, {0x8a3b, 0x002a}, {0x8a3c, 0x0096}, +{0x8a3d, 0x0022}, {0x8a3e, 0x008a}, {0x8a3f, 0x0080}, +{0x8a40, 0x007e}, {0x8a41, 0x008a}, {0x8a42, 0x0064}, +{0x8a43, 0x00f4}, {0x8a44, 0x008f}, {0x8a45, 0x0070}, +{0x8a46, 0x0027}, {0x8a47, 0x001e}, {0x8a48, 0x0096}, +{0x8a49, 0x0022}, {0x8a4a, 0x008a}, {0x8a4b, 0x0010}, +{0x8a4c, 0x007e}, {0x8a4d, 0x008a}, {0x8a4e, 0x0064}, +{0x8a4f, 0x00f4}, {0x8a50, 0x008f}, {0x8a51, 0x0070}, +{0x8a52, 0x0027}, {0x8a53, 0x0012}, {0x8a54, 0x0096}, +{0x8a55, 0x0022}, {0x8a56, 0x008a}, {0x8a57, 0x0020}, +{0x8a58, 0x007e}, {0x8a59, 0x008a}, {0x8a5a, 0x0064}, +{0x8a5b, 0x00f4}, {0x8a5c, 0x008f}, {0x8a5d, 0x0070}, +{0x8a5e, 0x0027}, {0x8a5f, 0x0006}, {0x8a60, 0x0096}, +{0x8a61, 0x0022}, {0x8a62, 0x008a}, {0x8a63, 0x0040}, +{0x8a64, 0x0097}, {0x8a65, 0x0022}, {0x8a66, 0x0074}, +{0x8a67, 0x008f}, {0x8a68, 0x0071}, {0x8a69, 0x0074}, +{0x8a6a, 0x008f}, {0x8a6b, 0x0071}, {0x8a6c, 0x0078}, +{0x8a6d, 0x008f}, {0x8a6e, 0x0070}, {0x8a6f, 0x00b6}, +{0x8a70, 0x008f}, {0x8a71, 
0x0070}, {0x8a72, 0x0085}, +{0x8a73, 0x0010}, {0x8a74, 0x0027}, {0x8a75, 0x00af}, +{0x8a76, 0x00d6}, {0x8a77, 0x0022}, {0x8a78, 0x00c4}, +{0x8a79, 0x0010}, {0x8a7a, 0x0058}, {0x8a7b, 0x00b6}, +{0x8a7c, 0x0012}, {0x8a7d, 0x0070}, {0x8a7e, 0x0081}, +{0x8a7f, 0x00e4}, {0x8a80, 0x0027}, {0x8a81, 0x0036}, +{0x8a82, 0x0081}, {0x8a83, 0x00e1}, {0x8a84, 0x0026}, +{0x8a85, 0x000c}, {0x8a86, 0x0096}, {0x8a87, 0x0022}, +{0x8a88, 0x0084}, {0x8a89, 0x0020}, {0x8a8a, 0x0044}, +{0x8a8b, 0x001b}, {0x8a8c, 0x00d6}, {0x8a8d, 0x0022}, +{0x8a8e, 0x00c4}, {0x8a8f, 0x00cf}, {0x8a90, 0x0020}, +{0x8a91, 0x0023}, {0x8a92, 0x0058}, {0x8a93, 0x0081}, +{0x8a94, 0x00c6}, {0x8a95, 0x0026}, {0x8a96, 0x000d}, +{0x8a97, 0x0096}, {0x8a98, 0x0022}, {0x8a99, 0x0084}, +{0x8a9a, 0x0040}, {0x8a9b, 0x0044}, {0x8a9c, 0x0044}, +{0x8a9d, 0x001b}, {0x8a9e, 0x00d6}, {0x8a9f, 0x0022}, +{0x8aa0, 0x00c4}, {0x8aa1, 0x00af}, {0x8aa2, 0x0020}, +{0x8aa3, 0x0011}, {0x8aa4, 0x0058}, {0x8aa5, 0x0081}, +{0x8aa6, 0x0027}, {0x8aa7, 0x0026}, {0x8aa8, 0x000f}, +{0x8aa9, 0x0096}, {0x8aaa, 0x0022}, {0x8aab, 0x0084}, +{0x8aac, 0x0080}, {0x8aad, 0x0044}, {0x8aae, 0x0044}, +{0x8aaf, 0x0044}, {0x8ab0, 0x001b}, {0x8ab1, 0x00d6}, +{0x8ab2, 0x0022}, {0x8ab3, 0x00c4}, {0x8ab4, 0x006f}, +{0x8ab5, 0x001b}, {0x8ab6, 0x0097}, {0x8ab7, 0x0022}, +{0x8ab8, 0x0039}, {0x8ab9, 0x0027}, {0x8aba, 0x000c}, +{0x8abb, 0x007c}, {0x8abc, 0x0082}, {0x8abd, 0x0006}, +{0x8abe, 0x00bd}, {0x8abf, 0x00d9}, {0x8ac0, 0x00ed}, +{0x8ac1, 0x00b6}, {0x8ac2, 0x0082}, {0x8ac3, 0x0007}, +{0x8ac4, 0x007e}, {0x8ac5, 0x008a}, {0x8ac6, 0x00b9}, +{0x8ac7, 0x007f}, {0x8ac8, 0x0082}, {0x8ac9, 0x0006}, +{0x8aca, 0x0039}, { 0x0, 0x0 } +}; +#endif + + +/* phy types */ +#define CAS_PHY_UNKNOWN 0x00 +#define CAS_PHY_SERDES 0x01 +#define CAS_PHY_MII_MDIO0 0x02 +#define CAS_PHY_MII_MDIO1 0x04 +#define CAS_PHY_MII(x) ((x) & (CAS_PHY_MII_MDIO0 | CAS_PHY_MII_MDIO1)) + +/* _RING_INDEX is the index for the ring sizes to be used. _RING_SIZE + * is the actual size. the default index for the various rings is + * 8. NOTE: there a bunch of alignment constraints for the rings. to + * deal with that, i just allocate rings to create the desired + * alignment. here are the constraints: + * RX DESC and COMP rings must be 8KB aligned + * TX DESC must be 2KB aligned. + * if you change the numbers, be cognizant of how the alignment will change + * in INIT_BLOCK as well. + */ + +#define DESC_RING_I_TO_S(x) (32*(1 << (x))) +#define COMP_RING_I_TO_S(x) (128*(1 << (x))) +#define TX_DESC_RING_INDEX 4 /* 512 = 8k */ +#define RX_DESC_RING_INDEX 4 /* 512 = 8k */ +#define RX_COMP_RING_INDEX 4 /* 2048 = 64k: should be 4x rx ring size */ + +#if (TX_DESC_RING_INDEX > 8) || (TX_DESC_RING_INDEX < 0) +#error TX_DESC_RING_INDEX must be between 0 and 8 +#endif + +#if (RX_DESC_RING_INDEX > 8) || (RX_DESC_RING_INDEX < 0) +#error RX_DESC_RING_INDEX must be between 0 and 8 +#endif + +#if (RX_COMP_RING_INDEX > 8) || (RX_COMP_RING_INDEX < 0) +#error RX_COMP_RING_INDEX must be between 0 and 8 +#endif + +#define N_TX_RINGS MAX_TX_RINGS /* for QoS */ +#define N_TX_RINGS_MASK MAX_TX_RINGS_MASK +#define N_RX_DESC_RINGS MAX_RX_DESC_RINGS /* 1 for ipsec */ +#define N_RX_COMP_RINGS 0x1 /* for mult. 
PCI interrupts */ + +/* number of flows that can go through re-assembly */ +#define N_RX_FLOWS 64 + +#define TX_DESC_RING_SIZE DESC_RING_I_TO_S(TX_DESC_RING_INDEX) +#define RX_DESC_RING_SIZE DESC_RING_I_TO_S(RX_DESC_RING_INDEX) +#define RX_COMP_RING_SIZE COMP_RING_I_TO_S(RX_COMP_RING_INDEX) +#define TX_DESC_RINGN_INDEX(x) TX_DESC_RING_INDEX +#define RX_DESC_RINGN_INDEX(x) RX_DESC_RING_INDEX +#define RX_COMP_RINGN_INDEX(x) RX_COMP_RING_INDEX +#define TX_DESC_RINGN_SIZE(x) TX_DESC_RING_SIZE +#define RX_DESC_RINGN_SIZE(x) RX_DESC_RING_SIZE +#define RX_COMP_RINGN_SIZE(x) RX_COMP_RING_SIZE + +/* convert values */ +#define CAS_BASE(x, y) (((y) << (x ## _SHIFT)) & (x ## _MASK)) +#define CAS_VAL(x, y) (((y) & (x ## _MASK)) >> (x ## _SHIFT)) +#define CAS_TX_RINGN_BASE(y) ((TX_DESC_RINGN_INDEX(y) << \ + TX_CFG_DESC_RINGN_SHIFT(y)) & \ + TX_CFG_DESC_RINGN_MASK(y)) + +/* min is 2k, but we can't do jumbo frames unless it's at least 8k */ +#define CAS_MIN_PAGE_SHIFT 11 /* 2048 */ +#define CAS_JUMBO_PAGE_SHIFT 13 /* 8192 */ +#define CAS_MAX_PAGE_SHIFT 14 /* 16384 */ + +#define TX_DESC_BUFLEN_MASK 0x0000000000003FFFULL /* buffer length in + bytes. 0 - 9256 */ +#define TX_DESC_BUFLEN_SHIFT 0 +#define TX_DESC_CSUM_START_MASK 0x00000000001F8000ULL /* checksum start. # + of bytes to be + skipped before + csum calc begins. + value must be + even */ +#define TX_DESC_CSUM_START_SHIFT 15 +#define TX_DESC_CSUM_STUFF_MASK 0x000000001FE00000ULL /* checksum stuff. + byte offset w/in + the pkt for the + 1st csum byte. + must be > 8 */ +#define TX_DESC_CSUM_STUFF_SHIFT 21 +#define TX_DESC_CSUM_EN 0x0000000020000000ULL /* enable checksum */ +#define TX_DESC_EOF 0x0000000040000000ULL /* end of frame */ +#define TX_DESC_SOF 0x0000000080000000ULL /* start of frame */ +#define TX_DESC_INTME 0x0000000100000000ULL /* interrupt me */ +#define TX_DESC_NO_CRC 0x0000000200000000ULL /* debugging only. + CRC will not be + inserted into + outgoing frame. */ +struct cas_tx_desc { + u64 control; + u64 buffer; +}; + +/* descriptor ring for free buffers contains page-sized buffers. the index + * value is not used by the hw in any way. it's just stored and returned in + * the completion ring. + */ +struct cas_rx_desc { + u64 index; + u64 buffer; +}; + +/* received packets are put on the completion ring. 
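+ *
+ * (Editor's sketch, based on the definitions that follow: each completion
+ * entry is four 64-bit words, struct cas_rx_comp below, and fields are
+ * pulled out with the CAS_VAL() macro defined above, e.g.
+ *
+ *	len = CAS_VAL(RX_COMP1_DATA_SIZE, word1);
+ *	idx = CAS_VAL(RX_COMP1_DATA_INDEX, word1);
+ *
+ * which recover the received data length and the free-list index that was
+ * stored in the matching cas_rx_desc.)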
*/ +/* word 1 */ +#define RX_COMP1_DATA_SIZE_MASK 0x0000000007FFE000ULL +#define RX_COMP1_DATA_SIZE_SHIFT 13 +#define RX_COMP1_DATA_OFF_MASK 0x000001FFF8000000ULL +#define RX_COMP1_DATA_OFF_SHIFT 27 +#define RX_COMP1_DATA_INDEX_MASK 0x007FFE0000000000ULL +#define RX_COMP1_DATA_INDEX_SHIFT 41 +#define RX_COMP1_SKIP_MASK 0x0180000000000000ULL +#define RX_COMP1_SKIP_SHIFT 55 +#define RX_COMP1_RELEASE_NEXT 0x0200000000000000ULL +#define RX_COMP1_SPLIT_PKT 0x0400000000000000ULL +#define RX_COMP1_RELEASE_FLOW 0x0800000000000000ULL +#define RX_COMP1_RELEASE_DATA 0x1000000000000000ULL +#define RX_COMP1_RELEASE_HDR 0x2000000000000000ULL +#define RX_COMP1_TYPE_MASK 0xC000000000000000ULL +#define RX_COMP1_TYPE_SHIFT 62 + +/* word 2 */ +#define RX_COMP2_NEXT_INDEX_MASK 0x00000007FFE00000ULL +#define RX_COMP2_NEXT_INDEX_SHIFT 21 +#define RX_COMP2_HDR_SIZE_MASK 0x00000FF800000000ULL +#define RX_COMP2_HDR_SIZE_SHIFT 35 +#define RX_COMP2_HDR_OFF_MASK 0x0003F00000000000ULL +#define RX_COMP2_HDR_OFF_SHIFT 44 +#define RX_COMP2_HDR_INDEX_MASK 0xFFFC000000000000ULL +#define RX_COMP2_HDR_INDEX_SHIFT 50 + +/* word 3 */ +#define RX_COMP3_SMALL_PKT 0x0000000000000001ULL +#define RX_COMP3_JUMBO_PKT 0x0000000000000002ULL +#define RX_COMP3_JUMBO_HDR_SPLIT_EN 0x0000000000000004ULL +#define RX_COMP3_CSUM_START_MASK 0x000000000007F000ULL +#define RX_COMP3_CSUM_START_SHIFT 12 +#define RX_COMP3_FLOWID_MASK 0x0000000001F80000ULL +#define RX_COMP3_FLOWID_SHIFT 19 +#define RX_COMP3_OPCODE_MASK 0x000000000E000000ULL +#define RX_COMP3_OPCODE_SHIFT 25 +#define RX_COMP3_FORCE_FLAG 0x0000000010000000ULL +#define RX_COMP3_NO_ASSIST 0x0000000020000000ULL +#define RX_COMP3_LOAD_BAL_MASK 0x000001F800000000ULL +#define RX_COMP3_LOAD_BAL_SHIFT 35 +#define RX_PLUS_COMP3_ENC_PKT 0x0000020000000000ULL /* cas+ */ +#define RX_COMP3_L3_HEAD_OFF_MASK 0x0000FE0000000000ULL /* cas */ +#define RX_COMP3_L3_HEAD_OFF_SHIFT 41 +#define RX_PLUS_COMP_L3_HEAD_OFF_MASK 0x0000FC0000000000ULL /* cas+ */ +#define RX_PLUS_COMP_L3_HEAD_OFF_SHIFT 42 +#define RX_COMP3_SAP_MASK 0xFFFF000000000000ULL +#define RX_COMP3_SAP_SHIFT 48 + +/* word 4 */ +#define RX_COMP4_TCP_CSUM_MASK 0x000000000000FFFFULL +#define RX_COMP4_TCP_CSUM_SHIFT 0 +#define RX_COMP4_PKT_LEN_MASK 0x000000003FFF0000ULL +#define RX_COMP4_PKT_LEN_SHIFT 16 +#define RX_COMP4_PERFECT_MATCH_MASK 0x00000003C0000000ULL +#define RX_COMP4_PERFECT_MATCH_SHIFT 30 +#define RX_COMP4_ZERO 0x0000080000000000ULL +#define RX_COMP4_HASH_VAL_MASK 0x0FFFF00000000000ULL +#define RX_COMP4_HASH_VAL_SHIFT 44 +#define RX_COMP4_HASH_PASS 0x1000000000000000ULL +#define RX_COMP4_BAD 0x4000000000000000ULL +#define RX_COMP4_LEN_MISMATCH 0x8000000000000000ULL + +/* we encode the following: ring/index/release. only 14 bits + * are usable. + * NOTE: the encoding is dependent upon RX_DESC_RING_SIZE and + * MAX_RX_DESC_RINGS. 
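+ *
+ * (Editor's sketch: with the default ring index of 4 the RX descriptor ring
+ * has 512 entries, so bits 0-11 (RX_INDEX_NUM) hold the descriptor index,
+ * bit 12 (RX_INDEX_RING) names the descriptor ring (a single bit, hence at
+ * most two rings), and bit 13 (RX_INDEX_RELEASE) marks the entry for
+ * release; e.g.
+ *
+ *	entry = (ring << RX_INDEX_RING_SHIFT) | (num & RX_INDEX_NUM_MASK);
+ *
+ * packs a buffer reference into the 14 usable bits.)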
*/ +#define RX_INDEX_NUM_MASK 0x0000000000000FFFULL +#define RX_INDEX_NUM_SHIFT 0 +#define RX_INDEX_RING_MASK 0x0000000000001000ULL +#define RX_INDEX_RING_SHIFT 12 +#define RX_INDEX_RELEASE 0x0000000000002000ULL + +struct cas_rx_comp { + u64 word1; + u64 word2; + u64 word3; + u64 word4; +}; + +enum link_state { + link_down = 0, /* No link, will retry */ + link_aneg, /* Autoneg in progress */ + link_force_try, /* Try Forced link speed */ + link_force_ret, /* Forced mode worked, retrying autoneg */ + link_force_ok, /* Stay in forced mode */ + link_up /* Link is up */ +}; + +typedef struct cas_page { + struct list_head list; + struct page *buffer; + dma_addr_t dma_addr; + int used; +} cas_page_t; + + +/* some alignment constraints: + * TX DESC, RX DESC, and RX COMP must each be 8K aligned. + * TX COMPWB must be 8-byte aligned. + * to accomplish this, here's what we do: + * + * INIT_BLOCK_RX_COMP = 64k (already aligned) + * INIT_BLOCK_RX_DESC = 8k + * INIT_BLOCK_TX = 8k + * INIT_BLOCK_RX1_DESC = 8k + * TX COMPWB + */ +#define INIT_BLOCK_TX (TX_DESC_RING_SIZE) +#define INIT_BLOCK_RX_DESC (RX_DESC_RING_SIZE) +#define INIT_BLOCK_RX_COMP (RX_COMP_RING_SIZE) + +struct cas_init_block { + struct cas_rx_comp rxcs[N_RX_COMP_RINGS][INIT_BLOCK_RX_COMP]; + struct cas_rx_desc rxds[N_RX_DESC_RINGS][INIT_BLOCK_RX_DESC]; + struct cas_tx_desc txds[N_TX_RINGS][INIT_BLOCK_TX]; + u64 tx_compwb; +}; + +/* tiny buffers to deal with target abort issue. we allocate a bit + * over so that we don't have target abort issues with these buffers + * as well. + */ +#define TX_TINY_BUF_LEN 0x100 +#define TX_TINY_BUF_BLOCK ((INIT_BLOCK_TX + 1)*TX_TINY_BUF_LEN) + +struct cas_tiny_count { + int nbufs; + int used; +}; + +struct cas { + spinlock_t lock; /* for most bits */ + spinlock_t tx_lock[N_TX_RINGS]; /* tx bits */ + spinlock_t stat_lock[N_TX_RINGS + 1]; /* for stat gathering */ + spinlock_t rx_inuse_lock; /* rx inuse list */ + spinlock_t rx_spare_lock; /* rx spare list */ + + void __iomem *regs; + int tx_new[N_TX_RINGS], tx_old[N_TX_RINGS]; + int rx_old[N_RX_DESC_RINGS]; + int rx_cur[N_RX_COMP_RINGS], rx_new[N_RX_COMP_RINGS]; + int rx_last[N_RX_DESC_RINGS]; + + /* Set when chip is actually in operational state + * (ie. not power managed) */ + int hw_running; + int opened; + struct semaphore pm_sem; /* open/close/suspend/resume */ + + struct cas_init_block *init_block; + struct cas_tx_desc *init_txds[MAX_TX_RINGS]; + struct cas_rx_desc *init_rxds[MAX_RX_DESC_RINGS]; + struct cas_rx_comp *init_rxcs[MAX_RX_COMP_RINGS]; + + /* we use sk_buffs for tx and pages for rx. the rx skbuffs + * are there for flow re-assembly. 
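+ * (Editor's note: tx_skbs[ring][entry] tracks the skb behind each TX
+ * descriptor, rx_pages[ring][entry] holds the page-sized RX buffers, and
+ * rx_flows is one sk_buff queue per re-assembly flow, N_RX_FLOWS in all.)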
*/ + struct sk_buff *tx_skbs[N_TX_RINGS][TX_DESC_RING_SIZE]; + struct sk_buff_head rx_flows[N_RX_FLOWS]; + cas_page_t *rx_pages[N_RX_DESC_RINGS][RX_DESC_RING_SIZE]; + struct list_head rx_spare_list, rx_inuse_list; + int rx_spares_needed; + + /* for small packets when copying would be quicker than + mapping */ + struct cas_tiny_count tx_tiny_use[N_TX_RINGS][TX_DESC_RING_SIZE]; + u8 *tx_tiny_bufs[N_TX_RINGS]; + + u32 msg_enable; + + /* N_TX_RINGS must be >= N_RX_DESC_RINGS */ + struct net_device_stats net_stats[N_TX_RINGS + 1]; + + u32 pci_cfg[64 >> 2]; + u8 pci_revision; + + int phy_type; + int phy_addr; + u32 phy_id; +#define CAS_FLAG_1000MB_CAP 0x00000001 +#define CAS_FLAG_REG_PLUS 0x00000002 +#define CAS_FLAG_TARGET_ABORT 0x00000004 +#define CAS_FLAG_SATURN 0x00000008 +#define CAS_FLAG_RXD_POST_MASK 0x000000F0 +#define CAS_FLAG_RXD_POST_SHIFT 4 +#define CAS_FLAG_RXD_POST(x) ((1 << (CAS_FLAG_RXD_POST_SHIFT + (x))) & \ + CAS_FLAG_RXD_POST_MASK) +#define CAS_FLAG_ENTROPY_DEV 0x00000100 +#define CAS_FLAG_NO_HW_CSUM 0x00000200 + u32 cas_flags; + int packet_min; /* minimum packet size */ + int tx_fifo_size; + int rx_fifo_size; + int rx_pause_off; + int rx_pause_on; + int crc_size; /* 4 if half-duplex */ + + int pci_irq_INTC; + int min_frame_size; /* for tx fifo workaround */ + + /* page size allocation */ + int page_size; + int page_order; + int mtu_stride; + + u32 mac_rx_cfg; + + /* Autoneg & PHY control */ + int link_cntl; + int link_fcntl; + enum link_state lstate; + struct timer_list link_timer; + int timer_ticks; + struct work_struct reset_task; +#if 0 + atomic_t reset_task_pending; +#else + atomic_t reset_task_pending; + atomic_t reset_task_pending_mtu; + atomic_t reset_task_pending_spare; + atomic_t reset_task_pending_all; +#endif + +#ifdef CONFIG_CASSINI_QGE_DEBUG + atomic_t interrupt_seen; /* 1 if any interrupts are getting through */ +#endif + + /* Link-down problem workaround */ +#define LINK_TRANSITION_UNKNOWN 0 +#define LINK_TRANSITION_ON_FAILURE 1 +#define LINK_TRANSITION_STILL_FAILED 2 +#define LINK_TRANSITION_LINK_UP 3 +#define LINK_TRANSITION_LINK_CONFIG 4 +#define LINK_TRANSITION_LINK_DOWN 5 +#define LINK_TRANSITION_REQUESTED_RESET 6 + int link_transition; + int link_transition_jiffies_valid; + unsigned long link_transition_jiffies; + + /* Tuning */ + u8 orig_cacheline_size; /* value when loaded */ +#define CAS_PREF_CACHELINE_SIZE 0x20 /* Minimum desired */ + + /* Diagnostic counters and state. */ + int casreg_len; /* reg-space size for dumping */ + u64 pause_entered; + u16 pause_last_time_recvd; + + dma_addr_t block_dvma, tx_tiny_dvma[N_TX_RINGS]; + struct pci_dev *pdev; + struct net_device *dev; +}; + +#define TX_DESC_NEXT(r, x) (((x) + 1) & (TX_DESC_RINGN_SIZE(r) - 1)) +#define RX_DESC_ENTRY(r, x) ((x) & (RX_DESC_RINGN_SIZE(r) - 1)) +#define RX_COMP_ENTRY(r, x) ((x) & (RX_COMP_RINGN_SIZE(r) - 1)) + +#define TX_BUFF_COUNT(r, x, y) ((x) <= (y) ? ((y) - (x)) : \ + (TX_DESC_RINGN_SIZE(r) - (x) + (y))) + +#define TX_BUFFS_AVAIL(cp, i) ((cp)->tx_old[(i)] <= (cp)->tx_new[(i)] ? 
\ + (cp)->tx_old[(i)] + (TX_DESC_RINGN_SIZE(i) - 1) - (cp)->tx_new[(i)] : \ + (cp)->tx_old[(i)] - (cp)->tx_new[(i)] - 1) + +#define CAS_ALIGN(addr, align) \ + (((unsigned long) (addr) + ((align) - 1UL)) & ~((align) - 1)) + +#define RX_FIFO_SIZE 16384 +#define EXPANSION_ROM_SIZE 65536 + +#define CAS_MC_EXACT_MATCH_SIZE 15 +#define CAS_MC_HASH_SIZE 256 +#define CAS_MC_HASH_MAX (CAS_MC_EXACT_MATCH_SIZE + \ + CAS_MC_HASH_SIZE) + +#define TX_TARGET_ABORT_LEN 0x20 +#define RX_SWIVEL_OFF_VAL 0x2 +#define RX_AE_FREEN_VAL(x) (RX_DESC_RINGN_SIZE(x) >> 1) +#define RX_AE_COMP_VAL (RX_COMP_RING_SIZE >> 1) +#define RX_BLANK_INTR_PKT_VAL 0x05 +#define RX_BLANK_INTR_TIME_VAL 0x0F +#define HP_TCP_THRESH_VAL 1530 /* reduce to enable reassembly */ + +#define RX_SPARE_COUNT (RX_DESC_RING_SIZE >> 1) +#define RX_SPARE_RECOVER_VAL (RX_SPARE_COUNT >> 2) + +#endif /* _CASSINI_H */ diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c index cdc07ccd7332..a6078ad9b654 100644 --- a/drivers/net/cs89x0.c +++ b/drivers/net/cs89x0.c @@ -140,6 +140,7 @@ #include #include +#include #if ALLOW_DMA #include #endif diff --git a/drivers/net/declance.c b/drivers/net/declance.c index 521c83137bf6..f130bdab3fd3 100644 --- a/drivers/net/declance.c +++ b/drivers/net/declance.c @@ -5,7 +5,7 @@ * * adopted from sunlance.c by Richard van den Berg * - * Copyright (C) 2002, 2003 Maciej W. Rozycki + * Copyright (C) 2002, 2003, 2005 Maciej W. Rozycki * * additional sources: * - PMAD-AA TURBOchannel Ethernet Module Functional Specification, @@ -57,13 +57,15 @@ #include #include +#include + #include #include #include #include #include +#include #include -#include static char version[] __devinitdata = "declance.c: v0.009 by Linux MIPS DECstation task force\n"; @@ -79,10 +81,6 @@ MODULE_LICENSE("GPL"); #define PMAD_LANCE 2 #define PMAX_LANCE 3 -#ifndef CONFIG_TC -unsigned long system_base; -unsigned long dmaptr; -#endif #define LE_CSR0 0 #define LE_CSR1 1 @@ -237,7 +235,7 @@ struct lance_init_block { /* * This works *only* for the ring descriptors */ -#define LANCE_ADDR(x) (PHYSADDR(x) >> 1) +#define LANCE_ADDR(x) (CPHYSADDR(x) >> 1) struct lance_private { struct net_device *next; @@ -697,12 +695,13 @@ out: spin_unlock(&lp->lock); } -static void lance_dma_merr_int(const int irq, void *dev_id, - struct pt_regs *regs) +static irqreturn_t lance_dma_merr_int(const int irq, void *dev_id, + struct pt_regs *regs) { struct net_device *dev = (struct net_device *) dev_id; printk("%s: DMA error\n", dev->name); + return IRQ_HANDLED; } static irqreturn_t @@ -1026,10 +1025,6 @@ static int __init dec_lance_init(const int type, const int slot) unsigned long esar_base; unsigned char *esar; -#ifndef CONFIG_TC - system_base = KN01_LANCE_BASE; -#endif - if (dec_lance_debug && version_printed++ == 0) printk(version); @@ -1062,16 +1057,16 @@ static int __init dec_lance_init(const int type, const int slot) switch (type) { #ifdef CONFIG_TC case ASIC_LANCE: - dev->base_addr = system_base + IOASIC_LANCE; + dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE); /* buffer space for the on-board LANCE shared memory */ /* * FIXME: ugly hack! 
*/ - dev->mem_start = KSEG1ADDR(0x00020000); + dev->mem_start = CKSEG1ADDR(0x00020000); dev->mem_end = dev->mem_start + 0x00020000; dev->irq = dec_interrupt[DEC_IRQ_LANCE]; - esar_base = system_base + IOASIC_ESAR; + esar_base = CKSEG1ADDR(dec_kn_slot_base + IOASIC_ESAR); /* Workaround crash with booting KN04 2.1k from Disk */ memset((void *)dev->mem_start, 0, @@ -1101,14 +1096,14 @@ static int __init dec_lance_init(const int type, const int slot) /* Setup I/O ASIC LANCE DMA. */ lp->dma_irq = dec_interrupt[DEC_IRQ_LANCE_MERR]; ioasic_write(IO_REG_LANCE_DMA_P, - PHYSADDR(dev->mem_start) << 3); + CPHYSADDR(dev->mem_start) << 3); break; case PMAD_LANCE: claim_tc_card(slot); - dev->mem_start = get_tc_base_addr(slot); + dev->mem_start = CKSEG1ADDR(get_tc_base_addr(slot)); dev->base_addr = dev->mem_start + 0x100000; dev->irq = get_tc_irq_nr(slot); esar_base = dev->mem_start + 0x1c0002; @@ -1137,9 +1132,9 @@ static int __init dec_lance_init(const int type, const int slot) case PMAX_LANCE: dev->irq = dec_interrupt[DEC_IRQ_LANCE]; - dev->base_addr = KN01_LANCE_BASE; - dev->mem_start = KN01_LANCE_BASE + 0x01000000; - esar_base = KN01_RTC_BASE + 1; + dev->base_addr = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE); + dev->mem_start = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE_MEM); + esar_base = CKSEG1ADDR(KN01_SLOT_BASE + KN01_ESAR + 1); lp->dma_irq = -1; /* diff --git a/drivers/net/e100.c b/drivers/net/e100.c index fbf1c06ec5c1..eb169a8e8773 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c @@ -903,8 +903,8 @@ static void mdio_write(struct net_device *netdev, int addr, int reg, int data) static void e100_get_defaults(struct nic *nic) { - struct param_range rfds = { .min = 16, .max = 256, .count = 256 }; - struct param_range cbs = { .min = 64, .max = 256, .count = 128 }; + struct param_range rfds = { .min = 16, .max = 256, .count = 64 }; + struct param_range cbs = { .min = 64, .max = 256, .count = 64 }; pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id); /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */ @@ -1007,213 +1007,25 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); } -/********************************************************/ -/* Micro code for 8086:1229 Rev 8 */ -/********************************************************/ - -/* Parameter values for the D101M B-step */ -#define D101M_CPUSAVER_TIMER_DWORD 78 -#define D101M_CPUSAVER_BUNDLE_DWORD 65 -#define D101M_CPUSAVER_MIN_SIZE_DWORD 126 - -#define D101M_B_RCVBUNDLE_UCODE \ -{\ -0x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \ -0x000C0001, 0x00101312, 0x000C0008, 0x00380216, \ -0x0010009C, 0x00204056, 0x002380CC, 0x00380056, \ -0x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \ -0x00380438, 0x00000000, 0x00140000, 0x00380555, \ -0x00308000, 0x00100662, 0x00100561, 0x000E0408, \ -0x00134861, 0x000C0002, 0x00103093, 0x00308000, \ -0x00100624, 0x00100561, 0x000E0408, 0x00100861, \ -0x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \ -0x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \ -0x003A0437, 0x00044010, 0x0038078A, 0x00000000, \ -0x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \ -0x00130824, 0x000C0001, 0x00101213, 0x00260C75, \ -0x00041000, 0x00010004, 0x00130826, 0x000C0006, \ -0x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x00000000, 
0x00000000, 0x00000000, 0x00000000, \ -0x00080600, 0x00101B10, 0x00050004, 0x00100826, \ -0x00101210, 0x00380C34, 0x00000000, 0x00000000, \ -0x0021155B, 0x00100099, 0x00206559, 0x0010009C, \ -0x00244559, 0x00130836, 0x000C0000, 0x00220C62, \ -0x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \ -0x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \ -0x00214C0E, 0x00380555, 0x00010004, 0x00041000, \ -0x00278C67, 0x00040800, 0x00018100, 0x003A0437, \ -0x00130826, 0x000C0001, 0x00220559, 0x00101313, \ -0x00380559, 0x00000000, 0x00000000, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x00000000, 0x00130831, 0x0010090B, 0x00124813, \ -0x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \ -0x003806A8, 0x00000000, 0x00000000, 0x00000000, \ -} - -/********************************************************/ -/* Micro code for 8086:1229 Rev 9 */ -/********************************************************/ - -/* Parameter values for the D101S */ -#define D101S_CPUSAVER_TIMER_DWORD 78 -#define D101S_CPUSAVER_BUNDLE_DWORD 67 -#define D101S_CPUSAVER_MIN_SIZE_DWORD 128 - -#define D101S_RCVBUNDLE_UCODE \ -{\ -0x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \ -0x000C0001, 0x00101312, 0x000C0008, 0x00380243, \ -0x0010009C, 0x00204056, 0x002380D0, 0x00380056, \ -0x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \ -0x0038047F, 0x00000000, 0x00140000, 0x003805A3, \ -0x00308000, 0x00100610, 0x00100561, 0x000E0408, \ -0x00134861, 0x000C0002, 0x00103093, 0x00308000, \ -0x00100624, 0x00100561, 0x000E0408, 0x00100861, \ -0x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \ -0x00380F90, 0x00080000, 0x00103090, 0x00380F90, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \ -0x003A047E, 0x00044010, 0x00380819, 0x00000000, \ -0x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \ -0x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \ -0x00101213, 0x00260FF7, 0x00041000, 0x00010004, \ -0x00130826, 0x000C0006, 0x00220700, 0x0013C926, \ -0x00101313, 0x00380700, 0x00000000, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x00080600, 0x00101B10, 0x00050004, 0x00100826, \ -0x00101210, 0x00380FB6, 0x00000000, 0x00000000, \ -0x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \ -0x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \ -0x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \ -0x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \ -0x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \ -0x00010004, 0x00041000, 0x00278FE9, 0x00040800, \ -0x00018100, 0x003A047E, 0x00130826, 0x000C0001, \ -0x002205A7, 0x00101313, 0x003805A7, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00130831, \ -0x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \ -0x00041000, 0x00010004, 0x00380700 \ -} - -/********************************************************/ -/* Micro code for the 8086:1229 Rev F/10 */ -/********************************************************/ - -/* Parameter values for the D102 E-step */ -#define D102_E_CPUSAVER_TIMER_DWORD 42 -#define D102_E_CPUSAVER_BUNDLE_DWORD 54 -#define D102_E_CPUSAVER_MIN_SIZE_DWORD 46 - -#define D102_E_RCVBUNDLE_UCODE \ -{\ -0x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \ -0x00E014B9, 0x00000000, 0x00000000, 0x00000000, \ -0x00E014BD, 0x00000000, 0x00000000, 0x00000000, \ -0x00E014D5, 0x00000000, 0x00000000, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x00E014C1, 0x00000000, 0x00000000, 0x00000000, \ -0x00000000, 0x00000000, 
0x00000000, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x00E014C8, 0x00000000, 0x00000000, 0x00000000, \ -0x00200600, 0x00E014EE, 0x00000000, 0x00000000, \ -0x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \ -0x00E00E43, 0x00000000, 0x00000000, 0x00000000, \ -0x00300006, 0x00E014FB, 0x00000000, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \ -0x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -0x00000000, 0x00000000, 0x00000000, 0x00000000, \ -} - static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb) { -/* *INDENT-OFF* */ - static struct { - u32 ucode[UCODE_SIZE + 1]; - u8 mac; - u8 timer_dword; - u8 bundle_dword; - u8 min_size_dword; - } ucode_opts[] = { - { D101M_B_RCVBUNDLE_UCODE, - mac_82559_D101M, - D101M_CPUSAVER_TIMER_DWORD, - D101M_CPUSAVER_BUNDLE_DWORD, - D101M_CPUSAVER_MIN_SIZE_DWORD }, - { D101S_RCVBUNDLE_UCODE, - mac_82559_D101S, - D101S_CPUSAVER_TIMER_DWORD, - D101S_CPUSAVER_BUNDLE_DWORD, - D101S_CPUSAVER_MIN_SIZE_DWORD }, - { D102_E_RCVBUNDLE_UCODE, - mac_82551_F, - D102_E_CPUSAVER_TIMER_DWORD, - D102_E_CPUSAVER_BUNDLE_DWORD, - D102_E_CPUSAVER_MIN_SIZE_DWORD }, - { D102_E_RCVBUNDLE_UCODE, - mac_82551_10, - D102_E_CPUSAVER_TIMER_DWORD, - D102_E_CPUSAVER_BUNDLE_DWORD, - D102_E_CPUSAVER_MIN_SIZE_DWORD }, - { {0}, 0, 0, 0, 0} - }, *opts; -/* *INDENT-ON* */ + int i; + static const u32 ucode[UCODE_SIZE] = { + /* NFS packets are misinterpreted as TCO packets and + * incorrectly routed to the BMC over SMBus. This + * microcode patch checks the fragmented IP bit in the + * NFS/UDP header to distinguish between NFS and TCO. */ + 0x0EF70E36, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF, + 0x1FFF1FFF, 0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, + 0x00906EFD, 0x00900EFD, 0x00E00EF8, + }; -#define BUNDLESMALL 1 -#define BUNDLEMAX 50 -#define INTDELAY 15000 - - opts = ucode_opts; - - /* do not load u-code for ICH devices */ - if (nic->flags & ich) - return; - - /* Search for ucode match against h/w rev_id */ - while (opts->mac) { - if (nic->mac == opts->mac) { - int i; - u32 *ucode = opts->ucode; - - /* Insert user-tunable settings */ - ucode[opts->timer_dword] &= 0xFFFF0000; - ucode[opts->timer_dword] |= - (u16) INTDELAY; - ucode[opts->bundle_dword] &= 0xFFFF0000; - ucode[opts->bundle_dword] |= (u16) BUNDLEMAX; - ucode[opts->min_size_dword] &= 0xFFFF0000; - ucode[opts->min_size_dword] |= - (BUNDLESMALL) ? 
0xFFFF : 0xFF80; - - for(i = 0; i < UCODE_SIZE; i++) - cb->u.ucode[i] = cpu_to_le32(ucode[i]); - cb->command = cpu_to_le16(cb_ucode); - return; - } - opts++; - } - - cb->command = cpu_to_le16(cb_nop); + if(nic->mac == mac_82551_F || nic->mac == mac_82551_10) { + for(i = 0; i < UCODE_SIZE; i++) + cb->u.ucode[i] = cpu_to_le32(ucode[i]); + cb->command = cpu_to_le16(cb_ucode); + } else + cb->command = cpu_to_le16(cb_nop); } static void e100_setup_iaaddr(struct nic *nic, struct cb *cb, @@ -2389,6 +2201,7 @@ static struct ethtool_ops e100_ethtool_ops = { .phys_id = e100_phys_id, .get_stats_count = e100_get_stats_count, .get_ethtool_stats = e100_get_ethtool_stats, + .get_perm_addr = ethtool_op_get_perm_addr, }; static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) @@ -2539,7 +2352,8 @@ static int __devinit e100_probe(struct pci_dev *pdev, e100_phy_init(nic); memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN); - if(!is_valid_ether_addr(netdev->dev_addr)) { + memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN); + if(!is_valid_ether_addr(netdev->perm_addr)) { DPRINTK(PROBE, ERR, "Invalid MAC address from " "EEPROM, aborting.\n"); err = -EAGAIN; diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h index 092757bc721f..3f653a93e1bc 100644 --- a/drivers/net/e1000/e1000.h +++ b/drivers/net/e1000/e1000.h @@ -72,6 +72,10 @@ #include #include #include +#ifdef CONFIG_E1000_MQ +#include +#include +#endif #define BAR_0 0 #define BAR_1 1 @@ -165,10 +169,33 @@ struct e1000_buffer { uint16_t next_to_watch; }; -struct e1000_ps_page { struct page *ps_page[MAX_PS_BUFFERS]; }; -struct e1000_ps_page_dma { uint64_t ps_page_dma[MAX_PS_BUFFERS]; }; +struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; }; +struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; }; -struct e1000_desc_ring { +struct e1000_tx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_buffer *buffer_info; + + struct e1000_buffer previous_buffer_info; + spinlock_t tx_lock; + uint16_t tdh; + uint16_t tdt; + uint64_t pkt; +}; + +struct e1000_rx_ring { /* pointer to the descriptor ring memory */ void *desc; /* physical address of the descriptor ring */ @@ -186,6 +213,10 @@ struct e1000_desc_ring { /* arrays of page information for packet split */ struct e1000_ps_page *ps_page; struct e1000_ps_page_dma *ps_page_dma; + + uint16_t rdh; + uint16_t rdt; + uint64_t pkt; }; #define E1000_DESC_UNUSED(R) \ @@ -227,9 +258,10 @@ struct e1000_adapter { unsigned long led_status; /* TX */ - struct e1000_desc_ring tx_ring; - struct e1000_buffer previous_buffer_info; - spinlock_t tx_lock; + struct e1000_tx_ring *tx_ring; /* One per active queue */ +#ifdef CONFIG_E1000_MQ + struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */ +#endif uint32_t txd_cmd; uint32_t tx_int_delay; uint32_t tx_abs_int_delay; @@ -246,19 +278,33 @@ struct e1000_adapter { /* RX */ #ifdef CONFIG_E1000_NAPI - boolean_t (*clean_rx) (struct e1000_adapter *adapter, int *work_done, - int work_to_do); + boolean_t (*clean_rx) (struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); #else - 
boolean_t (*clean_rx) (struct e1000_adapter *adapter); + boolean_t (*clean_rx) (struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); #endif - void (*alloc_rx_buf) (struct e1000_adapter *adapter); - struct e1000_desc_ring rx_ring; + void (*alloc_rx_buf) (struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); + struct e1000_rx_ring *rx_ring; /* One per active queue */ +#ifdef CONFIG_E1000_NAPI + struct net_device *polling_netdev; /* One per active queue */ +#endif +#ifdef CONFIG_E1000_MQ + struct net_device **cpu_netdev; /* per-cpu */ + struct call_async_data_struct rx_sched_call_data; + int cpu_for_queue[4]; +#endif + int num_queues; + uint64_t hw_csum_err; uint64_t hw_csum_good; + uint64_t rx_hdr_split; uint32_t rx_int_delay; uint32_t rx_abs_int_delay; boolean_t rx_csum; - boolean_t rx_ps; + unsigned int rx_ps_pages; uint32_t gorcl; uint64_t gorcl_old; uint16_t rx_ps_bsize0; @@ -278,8 +324,8 @@ struct e1000_adapter { struct e1000_phy_stats phy_stats; uint32_t test_icr; - struct e1000_desc_ring test_tx_ring; - struct e1000_desc_ring test_rx_ring; + struct e1000_tx_ring test_tx_ring; + struct e1000_rx_ring test_rx_ring; int msg_enable; diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c index f133ff0b0b94..6b9acc7f94a3 100644 --- a/drivers/net/e1000/e1000_ethtool.c +++ b/drivers/net/e1000/e1000_ethtool.c @@ -39,10 +39,10 @@ extern int e1000_up(struct e1000_adapter *adapter); extern void e1000_down(struct e1000_adapter *adapter); extern void e1000_reset(struct e1000_adapter *adapter); extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx); -extern int e1000_setup_rx_resources(struct e1000_adapter *adapter); -extern int e1000_setup_tx_resources(struct e1000_adapter *adapter); -extern void e1000_free_rx_resources(struct e1000_adapter *adapter); -extern void e1000_free_tx_resources(struct e1000_adapter *adapter); +extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter); extern void e1000_update_stats(struct e1000_adapter *adapter); struct e1000_stats { @@ -91,7 +91,8 @@ static const struct e1000_stats e1000_gstrings_stats[] = { { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) }, { "rx_long_byte_count", E1000_STAT(stats.gorcl) }, { "rx_csum_offload_good", E1000_STAT(hw_csum_good) }, - { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) } + { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }, + { "rx_header_split", E1000_STAT(rx_hdr_split) }, }; #define E1000_STATS_LEN \ sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats) @@ -546,8 +547,10 @@ e1000_set_eeprom(struct net_device *netdev, ret_val = e1000_write_eeprom(hw, first_word, last_word - first_word + 1, eeprom_buff); - /* Update the checksum over the first part of the EEPROM if needed */ - if((ret_val == 0) && first_word <= EEPROM_CHECKSUM_REG) + /* Update the checksum over the first part of the EEPROM if needed + * and flush shadow RAM for 82573 conrollers */ + if((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) || + (hw->mac_type == e1000_82573))) e1000_update_eeprom_checksum(hw); kfree(eeprom_buff); @@ -576,8 +579,8 @@ e1000_get_ringparam(struct net_device *netdev, { struct e1000_adapter *adapter = netdev_priv(netdev); e1000_mac_type mac_type = adapter->hw.mac_type; - struct e1000_desc_ring *txdr = &adapter->tx_ring; - 
struct e1000_desc_ring *rxdr = &adapter->rx_ring; + struct e1000_tx_ring *txdr = adapter->tx_ring; + struct e1000_rx_ring *rxdr = adapter->rx_ring; ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD : E1000_MAX_82544_RXD; @@ -597,20 +600,40 @@ e1000_set_ringparam(struct net_device *netdev, { struct e1000_adapter *adapter = netdev_priv(netdev); e1000_mac_type mac_type = adapter->hw.mac_type; - struct e1000_desc_ring *txdr = &adapter->tx_ring; - struct e1000_desc_ring *rxdr = &adapter->rx_ring; - struct e1000_desc_ring tx_old, tx_new, rx_old, rx_new; - int err; + struct e1000_tx_ring *txdr, *tx_old, *tx_new; + struct e1000_rx_ring *rxdr, *rx_old, *rx_new; + int i, err, tx_ring_size, rx_ring_size; + + tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_queues; + rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_queues; + + if (netif_running(adapter->netdev)) + e1000_down(adapter); tx_old = adapter->tx_ring; rx_old = adapter->rx_ring; + adapter->tx_ring = kmalloc(tx_ring_size, GFP_KERNEL); + if (!adapter->tx_ring) { + err = -ENOMEM; + goto err_setup_rx; + } + memset(adapter->tx_ring, 0, tx_ring_size); + + adapter->rx_ring = kmalloc(rx_ring_size, GFP_KERNEL); + if (!adapter->rx_ring) { + kfree(adapter->tx_ring); + err = -ENOMEM; + goto err_setup_rx; + } + memset(adapter->rx_ring, 0, rx_ring_size); + + txdr = adapter->tx_ring; + rxdr = adapter->rx_ring; + if((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - if(netif_running(adapter->netdev)) - e1000_down(adapter); - rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD); rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD)); @@ -621,11 +644,16 @@ e1000_set_ringparam(struct net_device *netdev, E1000_MAX_TXD : E1000_MAX_82544_TXD)); E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); + for (i = 0; i < adapter->num_queues; i++) { + txdr[i].count = txdr->count; + rxdr[i].count = rxdr->count; + } + if(netif_running(adapter->netdev)) { /* Try to get new resources before deleting old */ - if((err = e1000_setup_rx_resources(adapter))) + if ((err = e1000_setup_all_rx_resources(adapter))) goto err_setup_rx; - if((err = e1000_setup_tx_resources(adapter))) + if ((err = e1000_setup_all_tx_resources(adapter))) goto err_setup_tx; /* save the new, restore the old in order to free it, @@ -635,8 +663,10 @@ e1000_set_ringparam(struct net_device *netdev, tx_new = adapter->tx_ring; adapter->rx_ring = rx_old; adapter->tx_ring = tx_old; - e1000_free_rx_resources(adapter); - e1000_free_tx_resources(adapter); + e1000_free_all_rx_resources(adapter); + e1000_free_all_tx_resources(adapter); + kfree(tx_old); + kfree(rx_old); adapter->rx_ring = rx_new; adapter->tx_ring = tx_new; if((err = e1000_up(adapter))) @@ -645,7 +675,7 @@ e1000_set_ringparam(struct net_device *netdev, return 0; err_setup_tx: - e1000_free_rx_resources(adapter); + e1000_free_all_rx_resources(adapter); err_setup_rx: adapter->rx_ring = rx_old; adapter->tx_ring = tx_old; @@ -696,6 +726,11 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data) * Some bits that get toggled are ignored. 
*/ switch (adapter->hw.mac_type) { + /* there are several bits on newer hardware that are r/w */ + case e1000_82571: + case e1000_82572: + toggle = 0x7FFFF3FF; + break; case e1000_82573: toggle = 0x7FFFF033; break; @@ -898,8 +933,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data) static void e1000_free_desc_rings(struct e1000_adapter *adapter) { - struct e1000_desc_ring *txdr = &adapter->test_tx_ring; - struct e1000_desc_ring *rxdr = &adapter->test_rx_ring; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; struct pci_dev *pdev = adapter->pdev; int i; @@ -941,8 +976,8 @@ e1000_free_desc_rings(struct e1000_adapter *adapter) static int e1000_setup_desc_rings(struct e1000_adapter *adapter) { - struct e1000_desc_ring *txdr = &adapter->test_tx_ring; - struct e1000_desc_ring *rxdr = &adapter->test_rx_ring; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; struct pci_dev *pdev = adapter->pdev; uint32_t rctl; int size, i, ret_val; @@ -1245,6 +1280,8 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter) case e1000_82541_rev_2: case e1000_82547: case e1000_82547_rev_2: + case e1000_82571: + case e1000_82572: case e1000_82573: return e1000_integrated_phy_loopback(adapter); break; @@ -1340,8 +1377,8 @@ e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) static int e1000_run_loopback_test(struct e1000_adapter *adapter) { - struct e1000_desc_ring *txdr = &adapter->test_tx_ring; - struct e1000_desc_ring *rxdr = &adapter->test_rx_ring; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; struct pci_dev *pdev = adapter->pdev; int i, j, k, l, lc, good_cnt, ret_val=0; unsigned long time; @@ -1509,6 +1546,7 @@ e1000_diag_test(struct net_device *netdev, data[2] = 0; data[3] = 0; } + msleep_interruptible(4 * 1000); } static void @@ -1625,7 +1663,7 @@ e1000_phys_id(struct net_device *netdev, uint32_t data) if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ)) data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ); - if(adapter->hw.mac_type < e1000_82573) { + if(adapter->hw.mac_type < e1000_82571) { if(!adapter->blink_timer.function) { init_timer(&adapter->blink_timer); adapter->blink_timer.function = e1000_led_blink_callback; @@ -1739,6 +1777,7 @@ struct ethtool_ops e1000_ethtool_ops = { .phys_id = e1000_phys_id, .get_stats_count = e1000_get_stats_count, .get_ethtool_stats = e1000_get_ethtool_stats, + .get_perm_addr = ethtool_op_get_perm_addr, }; void e1000_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index 045f5426ab9a..8fc876da43b4 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c @@ -83,14 +83,14 @@ uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = static const uint16_t e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] = - { 8, 13, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, - 22, 24, 27, 30, 32, 35, 37, 40, 42, 44, 47, 49, 51, 54, 56, 58, - 32, 35, 38, 41, 44, 47, 50, 53, 55, 58, 61, 63, 66, 69, 71, 74, - 43, 47, 51, 54, 58, 61, 64, 67, 71, 74, 77, 80, 82, 85, 88, 90, - 57, 62, 66, 70, 74, 77, 81, 85, 88, 91, 94, 97, 100, 103, 106, 108, - 73, 78, 82, 87, 91, 95, 98, 102, 105, 109, 112, 114, 117, 119, 122, 124, - 91, 96, 101, 105, 109, 113, 116, 119, 122, 125, 127, 128, 128, 128, 128, 128, - 108, 113, 117, 121, 124, 127, 128, 128, 128, 128, 128, 128, 128, 
128, 128, 128}; + { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, + 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, + 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, + 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, + 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, + 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, + 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124, + 104, 109, 114, 118, 121, 124}; /****************************************************************************** @@ -286,7 +286,6 @@ e1000_set_mac_type(struct e1000_hw *hw) case E1000_DEV_ID_82546GB_FIBER: case E1000_DEV_ID_82546GB_SERDES: case E1000_DEV_ID_82546GB_PCIE: - case E1000_DEV_ID_82546GB_QUAD_COPPER: hw->mac_type = e1000_82546_rev_3; break; case E1000_DEV_ID_82541EI: @@ -305,8 +304,19 @@ e1000_set_mac_type(struct e1000_hw *hw) case E1000_DEV_ID_82547GI: hw->mac_type = e1000_82547_rev_2; break; + case E1000_DEV_ID_82571EB_COPPER: + case E1000_DEV_ID_82571EB_FIBER: + case E1000_DEV_ID_82571EB_SERDES: + hw->mac_type = e1000_82571; + break; + case E1000_DEV_ID_82572EI_COPPER: + case E1000_DEV_ID_82572EI_FIBER: + case E1000_DEV_ID_82572EI_SERDES: + hw->mac_type = e1000_82572; + break; case E1000_DEV_ID_82573E: case E1000_DEV_ID_82573E_IAMT: + case E1000_DEV_ID_82573L: hw->mac_type = e1000_82573; break; default: @@ -315,6 +325,8 @@ e1000_set_mac_type(struct e1000_hw *hw) } switch(hw->mac_type) { + case e1000_82571: + case e1000_82572: case e1000_82573: hw->eeprom_semaphore_present = TRUE; /* fall through */ @@ -351,6 +363,8 @@ e1000_set_media_type(struct e1000_hw *hw) switch (hw->device_id) { case E1000_DEV_ID_82545GM_SERDES: case E1000_DEV_ID_82546GB_SERDES: + case E1000_DEV_ID_82571EB_SERDES: + case E1000_DEV_ID_82572EI_SERDES: hw->media_type = e1000_media_type_internal_serdes; break; default: @@ -523,6 +537,8 @@ e1000_reset_hw(struct e1000_hw *hw) E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); E1000_WRITE_FLUSH(hw); /* fall through */ + case e1000_82571: + case e1000_82572: ret_val = e1000_get_auto_rd_done(hw); if(ret_val) /* We don't want to continue accessing MAC registers. */ @@ -683,6 +699,9 @@ e1000_init_hw(struct e1000_hw *hw) switch (hw->mac_type) { default: break; + case e1000_82571: + case e1000_82572: + ctrl |= (1 << 22); case e1000_82573: ctrl |= E1000_TXDCTL_COUNT_DESC; break; @@ -694,6 +713,26 @@ e1000_init_hw(struct e1000_hw *hw) e1000_enable_tx_pkt_filtering(hw); } + switch (hw->mac_type) { + default: + break; + case e1000_82571: + case e1000_82572: + ctrl = E1000_READ_REG(hw, TXDCTL1); + ctrl &= ~E1000_TXDCTL_WTHRESH; + ctrl |= E1000_TXDCTL_COUNT_DESC | E1000_TXDCTL_FULL_TX_DESC_WB; + ctrl |= (1 << 22); + E1000_WRITE_REG(hw, TXDCTL1, ctrl); + break; + } + + + + if (hw->mac_type == e1000_82573) { + uint32_t gcr = E1000_READ_REG(hw, GCR); + gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; + E1000_WRITE_REG(hw, GCR, gcr); + } /* Clear all of the statistics registers (clear on read). It is * important that we do this after we have tried to establish link @@ -878,6 +917,14 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw) DEBUGFUNC("e1000_setup_fiber_serdes_link"); + /* On 82571 and 82572 Fiber connections, SerDes loopback mode persists + * until explicitly turned off or a power cycle is performed. A read to + * the register does not indicate its status. Therefore, we ensure + * loopback mode is disabled during initialization. 
+ */ + if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) + E1000_WRITE_REG(hw, SCTL, E1000_DISABLE_SERDES_LOOPBACK); + /* On adapters with a MAC newer than 82544, SW Defineable pin 1 will be * set when the optics detect a signal. On older adapters, it will be * cleared when there is a signal. This applies to fiber media only. @@ -2943,6 +2990,8 @@ e1000_phy_reset(struct e1000_hw *hw) switch (hw->mac_type) { case e1000_82541_rev_2: + case e1000_82571: + case e1000_82572: ret_val = e1000_phy_hw_reset(hw); if(ret_val) return ret_val; @@ -2981,6 +3030,16 @@ e1000_detect_gig_phy(struct e1000_hw *hw) DEBUGFUNC("e1000_detect_gig_phy"); + /* The 82571 firmware may still be configuring the PHY. In this + * case, we cannot access the PHY until the configuration is done. So + * we explicitly set the PHY values. */ + if(hw->mac_type == e1000_82571 || + hw->mac_type == e1000_82572) { + hw->phy_id = IGP01E1000_I_PHY_ID; + hw->phy_type = e1000_phy_igp_2; + return E1000_SUCCESS; + } + /* Read the PHY ID Registers to identify which PHY is onboard. */ ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); if(ret_val) @@ -3334,6 +3393,21 @@ e1000_init_eeprom_params(struct e1000_hw *hw) eeprom->use_eerd = FALSE; eeprom->use_eewr = FALSE; break; + case e1000_82571: + case e1000_82572: + eeprom->type = e1000_eeprom_spi; + eeprom->opcode_bits = 8; + eeprom->delay_usec = 1; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->page_size = 32; + eeprom->address_bits = 16; + } else { + eeprom->page_size = 8; + eeprom->address_bits = 8; + } + eeprom->use_eerd = FALSE; + eeprom->use_eewr = FALSE; + break; case e1000_82573: eeprom->type = e1000_eeprom_spi; eeprom->opcode_bits = 8; @@ -3543,25 +3617,26 @@ e1000_acquire_eeprom(struct e1000_hw *hw) eecd = E1000_READ_REG(hw, EECD); if (hw->mac_type != e1000_82573) { - /* Request EEPROM Access */ - if(hw->mac_type > e1000_82544) { - eecd |= E1000_EECD_REQ; - E1000_WRITE_REG(hw, EECD, eecd); - eecd = E1000_READ_REG(hw, EECD); - while((!(eecd & E1000_EECD_GNT)) && - (i < E1000_EEPROM_GRANT_ATTEMPTS)) { - i++; - udelay(5); - eecd = E1000_READ_REG(hw, EECD); - } - if(!(eecd & E1000_EECD_GNT)) { - eecd &= ~E1000_EECD_REQ; + /* Request EEPROM Access */ + if(hw->mac_type > e1000_82544) { + eecd |= E1000_EECD_REQ; E1000_WRITE_REG(hw, EECD, eecd); - DEBUGOUT("Could not acquire EEPROM grant\n"); - return -E1000_ERR_EEPROM; + eecd = E1000_READ_REG(hw, EECD); + while((!(eecd & E1000_EECD_GNT)) && + (i < E1000_EEPROM_GRANT_ATTEMPTS)) { + i++; + udelay(5); + eecd = E1000_READ_REG(hw, EECD); + } + if(!(eecd & E1000_EECD_GNT)) { + eecd &= ~E1000_EECD_REQ; + E1000_WRITE_REG(hw, EECD, eecd); + DEBUGOUT("Could not acquire EEPROM grant\n"); + e1000_put_hw_eeprom_semaphore(hw); + return -E1000_ERR_EEPROM; + } } } - } /* Setup EEPROM for Read/Write */ @@ -4064,7 +4139,7 @@ e1000_write_eeprom(struct e1000_hw *hw, return -E1000_ERR_EEPROM; } - /* 82573 reads only through eerd */ + /* 82573 writes only through eewr */ if(eeprom->use_eewr == TRUE) return e1000_write_eeprom_eewr(hw, offset, words, data); @@ -4353,9 +4428,16 @@ e1000_read_mac_addr(struct e1000_hw * hw) hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF); hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8); } - if(((hw->mac_type == e1000_82546) || (hw->mac_type == e1000_82546_rev_3)) && - (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) + switch (hw->mac_type) { + default: + break; + case e1000_82546: + case e1000_82546_rev_3: + case e1000_82571: + if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) 
hw->perm_mac_addr[5] ^= 0x01; + break; + } for(i = 0; i < NODE_ADDRESS_SIZE; i++) hw->mac_addr[i] = hw->perm_mac_addr[i]; @@ -4385,6 +4467,12 @@ e1000_init_rx_addrs(struct e1000_hw *hw) e1000_rar_set(hw, hw->mac_addr, 0); rar_num = E1000_RAR_ENTRIES; + + /* Reserve a spot for the Locally Administered Address to work around + * an 82571 issue in which a reset on one port will reload the MAC on + * the other port. */ + if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE)) + rar_num -= 1; /* Zero out the other 15 receive addresses. */ DEBUGOUT("Clearing RAR[1-15]\n"); for(i = 1; i < rar_num; i++) { @@ -4427,6 +4515,12 @@ e1000_mc_addr_list_update(struct e1000_hw *hw, /* Clear RAR[1-15] */ DEBUGOUT(" Clearing RAR[1-15]\n"); num_rar_entry = E1000_RAR_ENTRIES; + /* Reserve a spot for the Locally Administered Address to work around + * an 82571 issue in which a reset on one port will reload the MAC on + * the other port. */ + if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE)) + num_rar_entry -= 1; + for(i = rar_used_count; i < num_rar_entry; i++) { E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); @@ -4984,7 +5078,6 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw) temp = E1000_READ_REG(hw, ICTXQEC); temp = E1000_READ_REG(hw, ICTXQMTC); temp = E1000_READ_REG(hw, ICRXDMTC); - } /****************************************************************************** @@ -5151,6 +5244,8 @@ e1000_get_bus_info(struct e1000_hw *hw) hw->bus_speed = e1000_bus_speed_unknown; hw->bus_width = e1000_bus_width_unknown; break; + case e1000_82571: + case e1000_82572: case e1000_82573: hw->bus_type = e1000_bus_type_pci_express; hw->bus_speed = e1000_bus_speed_2500; @@ -5250,6 +5345,7 @@ e1000_get_cable_length(struct e1000_hw *hw, int32_t ret_val; uint16_t agc_value = 0; uint16_t cur_agc, min_agc = IGP01E1000_AGC_LENGTH_TABLE_SIZE; + uint16_t max_agc = 0; uint16_t i, phy_data; uint16_t cable_length; @@ -5338,6 +5434,40 @@ e1000_get_cable_length(struct e1000_hw *hw, IGP01E1000_AGC_RANGE) : 0; *max_length = e1000_igp_cable_length_table[agc_value] + IGP01E1000_AGC_RANGE; + } else if (hw->phy_type == e1000_phy_igp_2) { + uint16_t agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = + {IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D}; + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + /* Getting bits 15:9, which represent the combination of coarse and + * fine gain values. The result is a number that can be put into + * the lookup table to obtain the approximate cable length. */ + cur_agc = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK; + + /* Remove min & max AGC values from calculation. */ + if (e1000_igp_2_cable_length_table[min_agc] > e1000_igp_2_cable_length_table[cur_agc]) + min_agc = cur_agc; + if (e1000_igp_2_cable_length_table[max_agc] < e1000_igp_2_cable_length_table[cur_agc]) + max_agc = cur_agc; + + agc_value += e1000_igp_2_cable_length_table[cur_agc]; + } + + agc_value -= (e1000_igp_2_cable_length_table[min_agc] + e1000_igp_2_cable_length_table[max_agc]); + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters. */ + *min_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? 
+ (agc_value - IGP02E1000_AGC_RANGE) : 0; + *max_length = agc_value + IGP02E1000_AGC_RANGE; } return E1000_SUCCESS; @@ -6465,6 +6595,8 @@ e1000_get_auto_rd_done(struct e1000_hw *hw) default: msec_delay(5); break; + case e1000_82571: + case e1000_82572: case e1000_82573: while(timeout) { if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) break; @@ -6494,10 +6626,31 @@ e1000_get_auto_rd_done(struct e1000_hw *hw) int32_t e1000_get_phy_cfg_done(struct e1000_hw *hw) { + int32_t timeout = PHY_CFG_TIMEOUT; + uint32_t cfg_mask = E1000_EEPROM_CFG_DONE; + DEBUGFUNC("e1000_get_phy_cfg_done"); - /* Simply wait for 10ms */ - msec_delay(10); + switch (hw->mac_type) { + default: + msec_delay(10); + break; + case e1000_82571: + case e1000_82572: + while (timeout) { + if (E1000_READ_REG(hw, EEMNGCTL) & cfg_mask) + break; + else + msec_delay(1); + timeout--; + } + + if (!timeout) { + DEBUGOUT("MNG configuration cycle has not completed.\n"); + return -E1000_ERR_RESET; + } + break; + } return E1000_SUCCESS; } @@ -6569,8 +6722,7 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw) return; swsm = E1000_READ_REG(hw, SWSM); - /* Release both semaphores. */ - swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + swsm &= ~(E1000_SWSM_SWESMBI); E1000_WRITE_REG(hw, SWSM, swsm); } @@ -6606,6 +6758,8 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw) * if this is the case. We read FWSM to determine the manageability mode. */ switch (hw->mac_type) { + case e1000_82571: + case e1000_82572: case e1000_82573: fwsm = E1000_READ_REG(hw, FWSM); if((fwsm & E1000_FWSM_MODE_MASK) != 0) diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h index 51c2b3a18b6f..4f2c196dc314 100644 --- a/drivers/net/e1000/e1000_hw.h +++ b/drivers/net/e1000/e1000_hw.h @@ -57,6 +57,8 @@ typedef enum { e1000_82541_rev_2, e1000_82547, e1000_82547_rev_2, + e1000_82571, + e1000_82572, e1000_82573, e1000_num_macs } e1000_mac_type; @@ -478,10 +480,16 @@ uint8_t e1000_arc_subsystem_valid(struct e1000_hw *hw); #define E1000_DEV_ID_82546GB_SERDES 0x107B #define E1000_DEV_ID_82546GB_PCIE 0x108A #define E1000_DEV_ID_82547EI 0x1019 +#define E1000_DEV_ID_82571EB_COPPER 0x105E +#define E1000_DEV_ID_82571EB_FIBER 0x105F +#define E1000_DEV_ID_82571EB_SERDES 0x1060 +#define E1000_DEV_ID_82572EI_COPPER 0x107D +#define E1000_DEV_ID_82572EI_FIBER 0x107E +#define E1000_DEV_ID_82572EI_SERDES 0x107F #define E1000_DEV_ID_82573E 0x108B #define E1000_DEV_ID_82573E_IAMT 0x108C +#define E1000_DEV_ID_82573L 0x109A -#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 #define NODE_ADDRESS_SIZE 6 #define ETH_LENGTH_OF_ADDRESS 6 @@ -833,6 +841,8 @@ struct e1000_ffvt_entry { #define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX #define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX +#define E1000_DISABLE_SERDES_LOOPBACK 0x0400 + /* Register Set. (82543, 82544) * * Registers are defined to be 32 bits and should be accessed as 32 bit values. 
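
The IGP2 cable-length hunk just above estimates length by sampling the AGC value on each of the four PHY channels, mapping each sample through e1000_igp_2_cable_length_table, discarding the shortest and longest per-channel estimates, and averaging the remaining two before applying the +/- IGP02E1000_AGC_RANGE tolerance. The standalone C sketch below restates that averaging scheme outside the driver; the table excerpt, the sample indices, and the helper names are illustrative assumptions, not code from this patch.

/* Sketch of the IGP2-style estimate: 'idx' holds already-masked AGC
 * table indices for channels A-D (the driver obtains these from PHY
 * register reads via IGP02E1000_AGC_LENGTH_SHIFT/MASK). */
#include <stdio.h>

#define CHANNELS  4
#define AGC_RANGE 15  /* matches the IGP02E1000_AGC_RANGE value set by this patch */

static const unsigned short table_excerpt[] = { 0, 3, 8, 21, 40, 60, 83, 104 };

static void estimate_length(const unsigned char idx[CHANNELS],
                            unsigned int *min_len, unsigned int *max_len)
{
    unsigned int sum = 0, lo = 0, hi = 0, i;

    for (i = 0; i < CHANNELS; i++) {
        unsigned int len = table_excerpt[idx[i]];

        sum += len;
        if (len < table_excerpt[idx[lo]])
            lo = i;               /* channel with the shortest estimate */
        if (len > table_excerpt[idx[hi]])
            hi = i;               /* channel with the longest estimate */
    }

    /* Drop the extremes, average the remaining two channels. */
    sum -= table_excerpt[idx[lo]] + table_excerpt[idx[hi]];
    sum /= CHANNELS - 2;

    *min_len = sum > AGC_RANGE ? sum - AGC_RANGE : 0;
    *max_len = sum + AGC_RANGE;
}

int main(void)
{
    const unsigned char idx[CHANNELS] = { 3, 4, 4, 5 };  /* sample readings */
    unsigned int lo, hi;

    estimate_length(idx, &lo, &hi);
    printf("estimated cable length: %u-%u meters\n", lo, hi);
    return 0;
}

With the sample indices above this prints an estimate of 25-55 meters, mirroring how the driver derives *min_length and *max_length from the averaged table value plus and minus the tolerance.
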
@@ -853,6 +863,7 @@ struct e1000_ffvt_entry { #define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ #define E1000_FLA 0x0001C /* Flash Access - RW */ #define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ #define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ #define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ #define E1000_FCT 0x00030 /* Flow Control Type - RW */ @@ -864,6 +875,12 @@ struct e1000_ffvt_entry { #define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ #define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ #define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ +#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ +#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */ +#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */ +#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */ +#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */ #define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ #define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ #define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ @@ -895,6 +912,12 @@ struct e1000_ffvt_entry { #define E1000_RDH 0x02810 /* RX Descriptor Head - RW */ #define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */ #define E1000_RDTR 0x02820 /* RX Delay Timer - RW */ +#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */ +#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */ +#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */ +#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */ +#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */ +#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */ #define E1000_RXDCTL 0x02828 /* RX Descriptor Control - RW */ #define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ #define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ @@ -980,15 +1003,15 @@ struct e1000_ffvt_entry { #define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ #define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ #define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ -#define E1000_IAC 0x4100 /* Interrupt Assertion Count */ -#define E1000_ICRXPTC 0x4104 /* Interrupt Cause Rx Packet Timer Expire Count */ -#define E1000_ICRXATC 0x4108 /* Interrupt Cause Rx Absolute Timer Expire Count */ -#define E1000_ICTXPTC 0x410C /* Interrupt Cause Tx Packet Timer Expire Count */ -#define E1000_ICTXATC 0x4110 /* Interrupt Cause Tx Absolute Timer Expire Count */ -#define E1000_ICTXQEC 0x4118 /* Interrupt Cause Tx Queue Empty Count */ -#define E1000_ICTXQMTC 0x411C /* Interrupt Cause Tx Queue Minimum Threshold Count */ -#define E1000_ICRXDMTC 0x4120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ -#define E1000_ICRXOC 0x4124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx 
Queue Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ #define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ #define E1000_RFCTL 0x05008 /* Receive Filter Control*/ #define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ @@ -1018,6 +1041,14 @@ struct e1000_ffvt_entry { #define E1000_FWSM 0x05B54 /* FW Semaphore */ #define E1000_FFLT_DBG 0x05F04 /* Debug Register */ #define E1000_HICR 0x08F00 /* Host Inteface Control */ + +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */ +#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ /* Register Set (82542) * * Some of the 82542 registers are located at different offsets than they are @@ -1032,6 +1063,7 @@ struct e1000_ffvt_entry { #define E1000_82542_CTRL_EXT E1000_CTRL_EXT #define E1000_82542_FLA E1000_FLA #define E1000_82542_MDIC E1000_MDIC +#define E1000_82542_SCTL E1000_SCTL #define E1000_82542_FCAL E1000_FCAL #define E1000_82542_FCAH E1000_FCAH #define E1000_82542_FCT E1000_FCT @@ -1049,6 +1081,18 @@ struct e1000_ffvt_entry { #define E1000_82542_RDLEN 0x00118 #define E1000_82542_RDH 0x00120 #define E1000_82542_RDT 0x00128 +#define E1000_82542_RDTR0 E1000_82542_RDTR +#define E1000_82542_RDBAL0 E1000_82542_RDBAL +#define E1000_82542_RDBAH0 E1000_82542_RDBAH +#define E1000_82542_RDLEN0 E1000_82542_RDLEN +#define E1000_82542_RDH0 E1000_82542_RDH +#define E1000_82542_RDT0 E1000_82542_RDT +#define E1000_82542_RDTR1 0x00130 +#define E1000_82542_RDBAL1 0x00138 +#define E1000_82542_RDBAH1 0x0013C +#define E1000_82542_RDLEN1 0x00140 +#define E1000_82542_RDH1 0x00148 +#define E1000_82542_RDT1 0x00150 #define E1000_82542_FCRTH 0x00160 #define E1000_82542_FCRTL 0x00168 #define E1000_82542_FCTTV E1000_FCTTV @@ -1197,6 +1241,13 @@ struct e1000_ffvt_entry { #define E1000_82542_ICRXOC E1000_ICRXOC #define E1000_82542_HICR E1000_HICR +#define E1000_82542_CPUVEC E1000_CPUVEC +#define E1000_82542_MRQC E1000_MRQC +#define E1000_82542_RETA E1000_RETA +#define E1000_82542_RSSRK E1000_RSSRK +#define E1000_82542_RSSIM E1000_RSSIM +#define E1000_82542_RSSIR E1000_RSSIR + /* Statistics counters collected by the MAC */ struct e1000_hw_stats { uint64_t crcerrs; @@ -1336,6 +1387,7 @@ struct e1000_hw { boolean_t serdes_link_down; boolean_t tbi_compatibility_en; boolean_t tbi_compatibility_on; + boolean_t laa_is_present; boolean_t phy_reset_disable; boolean_t fc_send_xon; boolean_t fc_strict_ieee; @@ -1374,6 +1426,7 @@ struct e1000_hw { #define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ #define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ #define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ #define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ @@ -1491,6 +1544,8 @@ struct e1000_hw { #define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 #define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 #define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 +#define E1000_CTRL_EXT_CANC 0x04000000 /* Interrupt delay cancellation */ +#define 
E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ #define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ #define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ @@ -1524,6 +1579,7 @@ struct e1000_hw { #define E1000_LEDCTL_LED2_BLINK 0x00800000 #define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 #define E1000_LEDCTL_LED3_MODE_SHIFT 24 +#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000 #define E1000_LEDCTL_LED3_IVRT 0x40000000 #define E1000_LEDCTL_LED3_BLINK 0x80000000 @@ -1784,6 +1840,16 @@ struct e1000_hw { #define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ #define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ +/* Multiple Receive Queue Control */ +#define E1000_MRQC_ENABLE_MASK 0x00000003 +#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001 +#define E1000_MRQC_ENABLE_RSS_INT 0x00000004 +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 /* Definitions for power management and wakeup registers */ /* Wake Up Control */ @@ -1928,6 +1994,7 @@ struct e1000_host_command_info { #define E1000_MDALIGN 4096 #define E1000_GCR_BEM32 0x00400000 +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 /* Function Active and Power State to MNG */ #define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 #define E1000_FACTPS_LAN0_VALID 0x00000004 @@ -1980,6 +2047,7 @@ struct e1000_host_command_info { /* EEPROM Word Offsets */ #define EEPROM_COMPAT 0x0003 #define EEPROM_ID_LED_SETTINGS 0x0004 +#define EEPROM_VERSION 0x0005 #define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. 
*/ #define EEPROM_PHY_CLASS_WORD 0x0007 #define EEPROM_INIT_CONTROL1_REG 0x000A @@ -1990,6 +2058,8 @@ struct e1000_host_command_info { #define EEPROM_FLASH_VERSION 0x0032 #define EEPROM_CHECKSUM_REG 0x003F +#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ + /* Word definitions for ID LED Settings */ #define ID_LED_RESERVED_0000 0x0000 #define ID_LED_RESERVED_FFFF 0xFFFF @@ -2108,6 +2178,8 @@ struct e1000_host_command_info { #define E1000_PBA_22K 0x0016 #define E1000_PBA_24K 0x0018 #define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_38K 0x0026 #define E1000_PBA_40K 0x0028 #define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ @@ -2592,11 +2664,11 @@ struct e1000_host_command_info { /* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */ #define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128 -#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 128 +#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113 /* The precision error of the cable length is +/- 10 meters */ #define IGP01E1000_AGC_RANGE 10 -#define IGP02E1000_AGC_RANGE 10 +#define IGP02E1000_AGC_RANGE 15 /* IGP01E1000 PCS Initialization register */ /* bits 3:6 in the PCS registers stores the channels polarity */ diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index ee687c902a20..6b72f6acdd54 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -43,7 +43,7 @@ char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; #else #define DRIVERNAPI "-NAPI" #endif -#define DRV_VERSION "6.0.60-k2"DRIVERNAPI +#define DRV_VERSION "6.1.16-k2"DRIVERNAPI char e1000_driver_version[] = DRV_VERSION; char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; @@ -80,6 +80,9 @@ static struct pci_device_id e1000_pci_tbl[] = { INTEL_E1000_ETHERNET_DEVICE(0x1026), INTEL_E1000_ETHERNET_DEVICE(0x1027), INTEL_E1000_ETHERNET_DEVICE(0x1028), + INTEL_E1000_ETHERNET_DEVICE(0x105E), + INTEL_E1000_ETHERNET_DEVICE(0x105F), + INTEL_E1000_ETHERNET_DEVICE(0x1060), INTEL_E1000_ETHERNET_DEVICE(0x1075), INTEL_E1000_ETHERNET_DEVICE(0x1076), INTEL_E1000_ETHERNET_DEVICE(0x1077), @@ -88,10 +91,13 @@ static struct pci_device_id e1000_pci_tbl[] = { INTEL_E1000_ETHERNET_DEVICE(0x107A), INTEL_E1000_ETHERNET_DEVICE(0x107B), INTEL_E1000_ETHERNET_DEVICE(0x107C), + INTEL_E1000_ETHERNET_DEVICE(0x107D), + INTEL_E1000_ETHERNET_DEVICE(0x107E), + INTEL_E1000_ETHERNET_DEVICE(0x107F), INTEL_E1000_ETHERNET_DEVICE(0x108A), INTEL_E1000_ETHERNET_DEVICE(0x108B), INTEL_E1000_ETHERNET_DEVICE(0x108C), - INTEL_E1000_ETHERNET_DEVICE(0x1099), + INTEL_E1000_ETHERNET_DEVICE(0x109A), /* required last entry */ {0,} }; @@ -102,10 +108,18 @@ int e1000_up(struct e1000_adapter *adapter); void e1000_down(struct e1000_adapter *adapter); void e1000_reset(struct e1000_adapter *adapter); int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx); -int e1000_setup_tx_resources(struct e1000_adapter *adapter); -int e1000_setup_rx_resources(struct e1000_adapter *adapter); -void e1000_free_tx_resources(struct e1000_adapter *adapter); -void e1000_free_rx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr); +int e1000_setup_rx_resources(struct e1000_adapter *adapter, + 
struct e1000_rx_ring *rxdr); +void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); void e1000_update_stats(struct e1000_adapter *adapter); /* Local Function Prototypes */ @@ -114,14 +128,22 @@ static int e1000_init_module(void); static void e1000_exit_module(void); static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); static void __devexit e1000_remove(struct pci_dev *pdev); +static int e1000_alloc_queues(struct e1000_adapter *adapter); +#ifdef CONFIG_E1000_MQ +static void e1000_setup_queue_mapping(struct e1000_adapter *adapter); +#endif static int e1000_sw_init(struct e1000_adapter *adapter); static int e1000_open(struct net_device *netdev); static int e1000_close(struct net_device *netdev); static void e1000_configure_tx(struct e1000_adapter *adapter); static void e1000_configure_rx(struct e1000_adapter *adapter); static void e1000_setup_rctl(struct e1000_adapter *adapter); -static void e1000_clean_tx_ring(struct e1000_adapter *adapter); -static void e1000_clean_rx_ring(struct e1000_adapter *adapter); +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter); +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter); +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); static void e1000_set_multi(struct net_device *netdev); static void e1000_update_phy_info(unsigned long data); static void e1000_watchdog(unsigned long data); @@ -132,19 +154,26 @@ static struct net_device_stats * e1000_get_stats(struct net_device *netdev); static int e1000_change_mtu(struct net_device *netdev, int new_mtu); static int e1000_set_mac(struct net_device *netdev, void *p); static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs); -static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter); +static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); #ifdef CONFIG_E1000_NAPI -static int e1000_clean(struct net_device *netdev, int *budget); +static int e1000_clean(struct net_device *poll_dev, int *budget); static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, int *work_done, int work_to_do); static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, int *work_done, int work_to_do); #else -static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter); -static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter); +static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); #endif -static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter); -static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter); +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); @@ -172,6 +201,11 @@ static int e1000_resume(struct pci_dev *pdev); static void e1000_netpoll (struct 
net_device *netdev); #endif +#ifdef CONFIG_E1000_MQ +/* for multiple Rx queues */ +void e1000_rx_schedule(void *data); +#endif + /* Exported from other modules */ extern void e1000_check_options(struct e1000_adapter *adapter); @@ -289,7 +323,7 @@ int e1000_up(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; - int err; + int i, err; /* hardware has been reset, we need to reload some things */ @@ -308,7 +342,8 @@ e1000_up(struct e1000_adapter *adapter) e1000_configure_tx(adapter); e1000_setup_rctl(adapter); e1000_configure_rx(adapter); - adapter->alloc_rx_buf(adapter); + for (i = 0; i < adapter->num_queues; i++) + adapter->alloc_rx_buf(adapter, &adapter->rx_ring[i]); #ifdef CONFIG_PCI_MSI if(adapter->hw.mac_type > e1000_82547_rev_2) { @@ -344,6 +379,9 @@ e1000_down(struct e1000_adapter *adapter) struct net_device *netdev = adapter->netdev; e1000_irq_disable(adapter); +#ifdef CONFIG_E1000_MQ + while (atomic_read(&adapter->rx_sched_call_data.count) != 0); +#endif free_irq(adapter->pdev->irq, netdev); #ifdef CONFIG_PCI_MSI if(adapter->hw.mac_type > e1000_82547_rev_2 && @@ -363,11 +401,10 @@ e1000_down(struct e1000_adapter *adapter) netif_stop_queue(netdev); e1000_reset(adapter); - e1000_clean_tx_ring(adapter); - e1000_clean_rx_ring(adapter); + e1000_clean_all_tx_rings(adapter); + e1000_clean_all_rx_rings(adapter); - /* If WoL is not enabled - * and management mode is not IAMT + /* If WoL is not enabled and management mode is not IAMT * Power down the PHY so no link is implied when interface is down */ if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 && adapter->hw.media_type == e1000_media_type_copper && @@ -398,6 +435,10 @@ e1000_reset(struct e1000_adapter *adapter) case e1000_82547_rev_2: pba = E1000_PBA_30K; break; + case e1000_82571: + case e1000_82572: + pba = E1000_PBA_38K; + break; case e1000_82573: pba = E1000_PBA_12K; break; @@ -475,6 +516,7 @@ e1000_probe(struct pci_dev *pdev, struct net_device *netdev; struct e1000_adapter *adapter; unsigned long mmio_start, mmio_len; + uint32_t ctrl_ext; uint32_t swsm; static int cards_found = 0; @@ -614,8 +656,9 @@ e1000_probe(struct pci_dev *pdev, if(e1000_read_mac_addr(&adapter->hw)) DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); + memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len); - if(!is_valid_ether_addr(netdev->dev_addr)) { + if(!is_valid_ether_addr(netdev->perm_addr)) { DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); err = -EIO; goto err_eeprom; @@ -687,6 +730,12 @@ e1000_probe(struct pci_dev *pdev, /* Let firmware know the driver has taken over */ switch(adapter->hw.mac_type) { + case e1000_82571: + case e1000_82572: + ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); + E1000_WRITE_REG(&adapter->hw, CTRL_EXT, + ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); + break; case e1000_82573: swsm = E1000_READ_REG(&adapter->hw, SWSM); E1000_WRITE_REG(&adapter->hw, SWSM, @@ -731,7 +780,11 @@ e1000_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); + uint32_t ctrl_ext; uint32_t manc, swsm; +#ifdef CONFIG_E1000_NAPI + int i; +#endif flush_scheduled_work(); @@ -745,6 +798,12 @@ e1000_remove(struct pci_dev *pdev) } switch(adapter->hw.mac_type) { + case e1000_82571: + case e1000_82572: + ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); + E1000_WRITE_REG(&adapter->hw, CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); + break; case e1000_82573: swsm = 
E1000_READ_REG(&adapter->hw, SWSM); E1000_WRITE_REG(&adapter->hw, SWSM, @@ -756,13 +815,27 @@ e1000_remove(struct pci_dev *pdev) } unregister_netdev(netdev); +#ifdef CONFIG_E1000_NAPI + for (i = 0; i < adapter->num_queues; i++) + __dev_put(&adapter->polling_netdev[i]); +#endif if(!e1000_check_phy_reset_block(&adapter->hw)) e1000_phy_hw_reset(&adapter->hw); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +#ifdef CONFIG_E1000_NAPI + kfree(adapter->polling_netdev); +#endif + iounmap(adapter->hw.hw_addr); pci_release_regions(pdev); +#ifdef CONFIG_E1000_MQ + free_percpu(adapter->cpu_netdev); + free_percpu(adapter->cpu_tx_ring); +#endif free_netdev(netdev); pci_disable_device(pdev); @@ -783,6 +856,9 @@ e1000_sw_init(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; +#ifdef CONFIG_E1000_NAPI + int i; +#endif /* PCI config space info */ @@ -840,13 +916,122 @@ e1000_sw_init(struct e1000_adapter *adapter) hw->master_slave = E1000_MASTER_SLAVE; } +#ifdef CONFIG_E1000_MQ + /* Number of supported queues */ + switch (hw->mac_type) { + case e1000_82571: + case e1000_82572: + adapter->num_queues = 2; + break; + default: + adapter->num_queues = 1; + break; + } + adapter->num_queues = min(adapter->num_queues, num_online_cpus()); +#else + adapter->num_queues = 1; +#endif + + if (e1000_alloc_queues(adapter)) { + DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + +#ifdef CONFIG_E1000_NAPI + for (i = 0; i < adapter->num_queues; i++) { + adapter->polling_netdev[i].priv = adapter; + adapter->polling_netdev[i].poll = &e1000_clean; + adapter->polling_netdev[i].weight = 64; + dev_hold(&adapter->polling_netdev[i]); + set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state); + } +#endif + +#ifdef CONFIG_E1000_MQ + e1000_setup_queue_mapping(adapter); +#endif + atomic_set(&adapter->irq_sem, 1); spin_lock_init(&adapter->stats_lock); - spin_lock_init(&adapter->tx_lock); return 0; } +/** + * e1000_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. The polling_netdev array is + * intended for Multiqueue, but should work fine with a single queue. 
+ **/ + +static int __devinit +e1000_alloc_queues(struct e1000_adapter *adapter) +{ + int size; + + size = sizeof(struct e1000_tx_ring) * adapter->num_queues; + adapter->tx_ring = kmalloc(size, GFP_KERNEL); + if (!adapter->tx_ring) + return -ENOMEM; + memset(adapter->tx_ring, 0, size); + + size = sizeof(struct e1000_rx_ring) * adapter->num_queues; + adapter->rx_ring = kmalloc(size, GFP_KERNEL); + if (!adapter->rx_ring) { + kfree(adapter->tx_ring); + return -ENOMEM; + } + memset(adapter->rx_ring, 0, size); + +#ifdef CONFIG_E1000_NAPI + size = sizeof(struct net_device) * adapter->num_queues; + adapter->polling_netdev = kmalloc(size, GFP_KERNEL); + if (!adapter->polling_netdev) { + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + return -ENOMEM; + } + memset(adapter->polling_netdev, 0, size); +#endif + + return E1000_SUCCESS; +} + +#ifdef CONFIG_E1000_MQ +static void __devinit +e1000_setup_queue_mapping(struct e1000_adapter *adapter) +{ + int i, cpu; + + adapter->rx_sched_call_data.func = e1000_rx_schedule; + adapter->rx_sched_call_data.info = adapter->netdev; + cpus_clear(adapter->rx_sched_call_data.cpumask); + + adapter->cpu_netdev = alloc_percpu(struct net_device *); + adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *); + + lock_cpu_hotplug(); + i = 0; + for_each_online_cpu(cpu) { + *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_queues]; + /* This is incomplete because we'd like to assign separate + * physical cpus to these netdev polling structures and + * avoid saturating a subset of cpus. + */ + if (i < adapter->num_queues) { + *per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i]; + adapter->cpu_for_queue[i] = cpu; + } else + *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL; + + i++; + } + unlock_cpu_hotplug(); +} +#endif + /** * e1000_open - Called when a network interface is made active * @netdev: network interface device structure @@ -868,12 +1053,12 @@ e1000_open(struct net_device *netdev) /* allocate transmit descriptors */ - if((err = e1000_setup_tx_resources(adapter))) + if ((err = e1000_setup_all_tx_resources(adapter))) goto err_setup_tx; /* allocate receive descriptors */ - if((err = e1000_setup_rx_resources(adapter))) + if ((err = e1000_setup_all_rx_resources(adapter))) goto err_setup_rx; if((err = e1000_up(adapter))) @@ -887,9 +1072,9 @@ e1000_open(struct net_device *netdev) return E1000_SUCCESS; err_up: - e1000_free_rx_resources(adapter); + e1000_free_all_rx_resources(adapter); err_setup_rx: - e1000_free_tx_resources(adapter); + e1000_free_all_tx_resources(adapter); err_setup_tx: e1000_reset(adapter); @@ -915,8 +1100,8 @@ e1000_close(struct net_device *netdev) e1000_down(adapter); - e1000_free_tx_resources(adapter); - e1000_free_rx_resources(adapter); + e1000_free_all_tx_resources(adapter); + e1000_free_all_rx_resources(adapter); if((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { @@ -951,14 +1136,15 @@ e1000_check_64k_bound(struct e1000_adapter *adapter, /** * e1000_setup_tx_resources - allocate Tx resources (Descriptors) * @adapter: board private structure + * @txdr: tx descriptor ring (for a specific queue) to setup * * Return 0 on success, negative on failure **/ int -e1000_setup_tx_resources(struct e1000_adapter *adapter) +e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr) { - struct e1000_desc_ring *txdr = &adapter->tx_ring; struct pci_dev *pdev = adapter->pdev; int size; @@ -970,6 +1156,7 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter) 
return -ENOMEM; } memset(txdr->buffer_info, 0, size); + memset(&txdr->previous_buffer_info, 0, sizeof(struct e1000_buffer)); /* round up to nearest 4K */ @@ -1018,10 +1205,40 @@ setup_tx_desc_die: txdr->next_to_use = 0; txdr->next_to_clean = 0; + spin_lock_init(&txdr->tx_lock); return 0; } +/** + * e1000_setup_all_tx_resources - wrapper to allocate Tx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ + +int +e1000_setup_all_tx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_queues; i++) { + err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); + if (err) { + DPRINTK(PROBE, ERR, + "Allocation for Tx Queue %u failed\n", i); + break; + } + } + + return err; +} + /** * e1000_configure_tx - Configure 8254x Transmit Unit after Reset * @adapter: board private structure @@ -1032,23 +1249,43 @@ setup_tx_desc_die: static void e1000_configure_tx(struct e1000_adapter *adapter) { - uint64_t tdba = adapter->tx_ring.dma; - uint32_t tdlen = adapter->tx_ring.count * sizeof(struct e1000_tx_desc); - uint32_t tctl, tipg; - - E1000_WRITE_REG(&adapter->hw, TDBAL, (tdba & 0x00000000ffffffffULL)); - E1000_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32)); - - E1000_WRITE_REG(&adapter->hw, TDLEN, tdlen); + uint64_t tdba; + struct e1000_hw *hw = &adapter->hw; + uint32_t tdlen, tctl, tipg, tarc; /* Setup the HW Tx Head and Tail descriptor pointers */ - E1000_WRITE_REG(&adapter->hw, TDH, 0); - E1000_WRITE_REG(&adapter->hw, TDT, 0); + switch (adapter->num_queues) { + case 2: + tdba = adapter->tx_ring[1].dma; + tdlen = adapter->tx_ring[1].count * + sizeof(struct e1000_tx_desc); + E1000_WRITE_REG(hw, TDBAL1, (tdba & 0x00000000ffffffffULL)); + E1000_WRITE_REG(hw, TDBAH1, (tdba >> 32)); + E1000_WRITE_REG(hw, TDLEN1, tdlen); + E1000_WRITE_REG(hw, TDH1, 0); + E1000_WRITE_REG(hw, TDT1, 0); + adapter->tx_ring[1].tdh = E1000_TDH1; + adapter->tx_ring[1].tdt = E1000_TDT1; + /* Fall Through */ + case 1: + default: + tdba = adapter->tx_ring[0].dma; + tdlen = adapter->tx_ring[0].count * + sizeof(struct e1000_tx_desc); + E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL)); + E1000_WRITE_REG(hw, TDBAH, (tdba >> 32)); + E1000_WRITE_REG(hw, TDLEN, tdlen); + E1000_WRITE_REG(hw, TDH, 0); + E1000_WRITE_REG(hw, TDT, 0); + adapter->tx_ring[0].tdh = E1000_TDH; + adapter->tx_ring[0].tdt = E1000_TDT; + break; + } /* Set the default values for the Tx Inter Packet Gap timer */ - switch (adapter->hw.mac_type) { + switch (hw->mac_type) { case e1000_82542_rev2_0: case e1000_82542_rev2_1: tipg = DEFAULT_82542_TIPG_IPGT; @@ -1056,67 +1293,81 @@ e1000_configure_tx(struct e1000_adapter *adapter) tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; break; default: - if(adapter->hw.media_type == e1000_media_type_fiber || - adapter->hw.media_type == e1000_media_type_internal_serdes) + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) tipg = DEFAULT_82543_TIPG_IPGT_FIBER; else tipg = DEFAULT_82543_TIPG_IPGT_COPPER; tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; } - E1000_WRITE_REG(&adapter->hw, TIPG, tipg); + E1000_WRITE_REG(hw, TIPG, tipg); /* Set the Tx Interrupt Delay register */ - 
E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay); - if(adapter->hw.mac_type >= e1000_82540) - E1000_WRITE_REG(&adapter->hw, TADV, adapter->tx_abs_int_delay); + E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay); + if (hw->mac_type >= e1000_82540) + E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay); /* Program the Transmit Control Register */ - tctl = E1000_READ_REG(&adapter->hw, TCTL); + tctl = E1000_READ_REG(hw, TCTL); tctl &= ~E1000_TCTL_CT; - tctl |= E1000_TCTL_EN | E1000_TCTL_PSP | + tctl |= E1000_TCTL_EN | E1000_TCTL_PSP | E1000_TCTL_RTLC | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); - E1000_WRITE_REG(&adapter->hw, TCTL, tctl); + E1000_WRITE_REG(hw, TCTL, tctl); - e1000_config_collision_dist(&adapter->hw); + if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) { + tarc = E1000_READ_REG(hw, TARC0); + tarc |= ((1 << 25) | (1 << 21)); + E1000_WRITE_REG(hw, TARC0, tarc); + tarc = E1000_READ_REG(hw, TARC1); + tarc |= (1 << 25); + if (tctl & E1000_TCTL_MULR) + tarc &= ~(1 << 28); + else + tarc |= (1 << 28); + E1000_WRITE_REG(hw, TARC1, tarc); + } + + e1000_config_collision_dist(hw); /* Setup Transmit Descriptor Settings for eop descriptor */ adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; - if(adapter->hw.mac_type < e1000_82543) + if (hw->mac_type < e1000_82543) adapter->txd_cmd |= E1000_TXD_CMD_RPS; else adapter->txd_cmd |= E1000_TXD_CMD_RS; /* Cache if we're 82544 running in PCI-X because we'll * need this to apply a workaround later in the send path. */ - if(adapter->hw.mac_type == e1000_82544 && - adapter->hw.bus_type == e1000_bus_type_pcix) + if (hw->mac_type == e1000_82544 && + hw->bus_type == e1000_bus_type_pcix) adapter->pcix_82544 = 1; } /** * e1000_setup_rx_resources - allocate Rx resources (Descriptors) * @adapter: board private structure + * @rxdr: rx descriptor ring (for a specific queue) to setup * * Returns 0 on success, negative on failure **/ int -e1000_setup_rx_resources(struct e1000_adapter *adapter) +e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr) { - struct e1000_desc_ring *rxdr = &adapter->rx_ring; struct pci_dev *pdev = adapter->pdev; int size, desc_len; size = sizeof(struct e1000_buffer) * rxdr->count; rxdr->buffer_info = vmalloc(size); - if(!rxdr->buffer_info) { + if (!rxdr->buffer_info) { DPRINTK(PROBE, ERR, "Unable to allocate memory for the receive descriptor ring\n"); return -ENOMEM; @@ -1156,13 +1407,13 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter) rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); - if(!rxdr->desc) { + if (!rxdr->desc) { + DPRINTK(PROBE, ERR, + "Unable to allocate memory for the receive descriptor ring\n"); setup_rx_desc_die: vfree(rxdr->buffer_info); kfree(rxdr->ps_page); kfree(rxdr->ps_page_dma); - DPRINTK(PROBE, ERR, - "Unable to allocate memory for the receive descriptor ring\n"); return -ENOMEM; } @@ -1174,9 +1425,12 @@ setup_rx_desc_die: "at %p\n", rxdr->size, rxdr->desc); /* Try again, without freeing the previous */ rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); - if(!rxdr->desc) { /* Failed allocation, critical failure */ + if (!rxdr->desc) { pci_free_consistent(pdev, rxdr->size, olddesc, olddma); + DPRINTK(PROBE, ERR, + "Unable to allocate memory " + "for the receive descriptor ring\n"); goto setup_rx_desc_die; } @@ -1188,10 +1442,7 @@ setup_rx_desc_die: DPRINTK(PROBE, ERR, "Unable to allocate aligned memory " "for the receive descriptor ring\n"); - vfree(rxdr->buffer_info); - 
kfree(rxdr->ps_page); - kfree(rxdr->ps_page_dma); - return -ENOMEM; + goto setup_rx_desc_die; } else { /* Free old allocation, new allocation was successful */ pci_free_consistent(pdev, rxdr->size, olddesc, olddma); @@ -1205,16 +1456,49 @@ setup_rx_desc_die: return 0; } +/** + * e1000_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * If this function returns with an error, then it's possible one or + * more of the rings is populated (while the rest are not). It is the + * callers duty to clean those orphaned rings. + * + * Return 0 on success, negative on failure + **/ + +int +e1000_setup_all_rx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_queues; i++) { + err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); + if (err) { + DPRINTK(PROBE, ERR, + "Allocation for Rx Queue %u failed\n", i); + break; + } + } + + return err; +} + /** * e1000_setup_rctl - configure the receive control registers * @adapter: Board private structure **/ - +#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ + (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) static void e1000_setup_rctl(struct e1000_adapter *adapter) { uint32_t rctl, rfctl; uint32_t psrctl = 0; +#ifdef CONFIG_E1000_PACKET_SPLIT + uint32_t pages = 0; +#endif rctl = E1000_READ_REG(&adapter->hw, RCTL); @@ -1235,7 +1519,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter) rctl |= E1000_RCTL_LPE; /* Setup buffer sizes */ - if(adapter->hw.mac_type == e1000_82573) { + if(adapter->hw.mac_type >= e1000_82571) { /* We can now specify buffers in 1K increments. * BSIZE and BSEX are ignored in this case. */ rctl |= adapter->rx_buffer_len << 0x11; @@ -1268,11 +1552,14 @@ e1000_setup_rctl(struct e1000_adapter *adapter) * followed by the page buffers. Therefore, skb->data is * sized to hold the largest protocol header. 
*/ - adapter->rx_ps = (adapter->hw.mac_type > e1000_82547_rev_2) - && (adapter->netdev->mtu - < ((3 * PAGE_SIZE) + adapter->rx_ps_bsize0)); + pages = PAGE_USE_COUNT(adapter->netdev->mtu); + if ((adapter->hw.mac_type > e1000_82547_rev_2) && (pages <= 3) && + PAGE_SIZE <= 16384) + adapter->rx_ps_pages = pages; + else + adapter->rx_ps_pages = 0; #endif - if(adapter->rx_ps) { + if (adapter->rx_ps_pages) { /* Configure extra packet-split registers */ rfctl = E1000_READ_REG(&adapter->hw, RFCTL); rfctl |= E1000_RFCTL_EXTEN; @@ -1284,12 +1571,19 @@ e1000_setup_rctl(struct e1000_adapter *adapter) psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT; - psrctl |= PAGE_SIZE >> - E1000_PSRCTL_BSIZE1_SHIFT; - psrctl |= PAGE_SIZE << - E1000_PSRCTL_BSIZE2_SHIFT; - psrctl |= PAGE_SIZE << - E1000_PSRCTL_BSIZE3_SHIFT; + + switch (adapter->rx_ps_pages) { + case 3: + psrctl |= PAGE_SIZE << + E1000_PSRCTL_BSIZE3_SHIFT; + case 2: + psrctl |= PAGE_SIZE << + E1000_PSRCTL_BSIZE2_SHIFT; + case 1: + psrctl |= PAGE_SIZE >> + E1000_PSRCTL_BSIZE1_SHIFT; + break; + } E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl); } @@ -1307,91 +1601,181 @@ e1000_setup_rctl(struct e1000_adapter *adapter) static void e1000_configure_rx(struct e1000_adapter *adapter) { - uint64_t rdba = adapter->rx_ring.dma; - uint32_t rdlen, rctl, rxcsum; + uint64_t rdba; + struct e1000_hw *hw = &adapter->hw; + uint32_t rdlen, rctl, rxcsum, ctrl_ext; +#ifdef CONFIG_E1000_MQ + uint32_t reta, mrqc; + int i; +#endif - if(adapter->rx_ps) { - rdlen = adapter->rx_ring.count * + if (adapter->rx_ps_pages) { + rdlen = adapter->rx_ring[0].count * sizeof(union e1000_rx_desc_packet_split); adapter->clean_rx = e1000_clean_rx_irq_ps; adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; } else { - rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc); + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); adapter->clean_rx = e1000_clean_rx_irq; adapter->alloc_rx_buf = e1000_alloc_rx_buffers; } /* disable receives while setting up the descriptors */ - rctl = E1000_READ_REG(&adapter->hw, RCTL); - E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN); + rctl = E1000_READ_REG(hw, RCTL); + E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN); /* set the Receive Delay Timer Register */ - E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay); + E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay); - if(adapter->hw.mac_type >= e1000_82540) { - E1000_WRITE_REG(&adapter->hw, RADV, adapter->rx_abs_int_delay); + if (hw->mac_type >= e1000_82540) { + E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay); if(adapter->itr > 1) - E1000_WRITE_REG(&adapter->hw, ITR, + E1000_WRITE_REG(hw, ITR, 1000000000 / (adapter->itr * 256)); } - /* Setup the Base and Length of the Rx Descriptor Ring */ - E1000_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL)); - E1000_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32)); + if (hw->mac_type >= e1000_82571) { + /* Reset delay timers after every interrupt */ + ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_CANC; + E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); + } - E1000_WRITE_REG(&adapter->hw, RDLEN, rdlen); + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring */ + switch (adapter->num_queues) { +#ifdef CONFIG_E1000_MQ + case 2: + rdba = adapter->rx_ring[1].dma; + E1000_WRITE_REG(hw, RDBAL1, (rdba & 0x00000000ffffffffULL)); + E1000_WRITE_REG(hw, RDBAH1, (rdba >> 32)); + E1000_WRITE_REG(hw, RDLEN1, rdlen); + 
E1000_WRITE_REG(hw, RDH1, 0); + E1000_WRITE_REG(hw, RDT1, 0); + adapter->rx_ring[1].rdh = E1000_RDH1; + adapter->rx_ring[1].rdt = E1000_RDT1; + /* Fall Through */ +#endif + case 1: + default: + rdba = adapter->rx_ring[0].dma; + E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL)); + E1000_WRITE_REG(hw, RDBAH, (rdba >> 32)); + E1000_WRITE_REG(hw, RDLEN, rdlen); + E1000_WRITE_REG(hw, RDH, 0); + E1000_WRITE_REG(hw, RDT, 0); + adapter->rx_ring[0].rdh = E1000_RDH; + adapter->rx_ring[0].rdt = E1000_RDT; + break; + } - /* Setup the HW Rx Head and Tail Descriptor Pointers */ - E1000_WRITE_REG(&adapter->hw, RDH, 0); - E1000_WRITE_REG(&adapter->hw, RDT, 0); +#ifdef CONFIG_E1000_MQ + if (adapter->num_queues > 1) { + uint32_t random[10]; + + get_random_bytes(&random[0], 40); + + if (hw->mac_type <= e1000_82572) { + E1000_WRITE_REG(hw, RSSIR, 0); + E1000_WRITE_REG(hw, RSSIM, 0); + } + + switch (adapter->num_queues) { + case 2: + default: + reta = 0x00800080; + mrqc = E1000_MRQC_ENABLE_RSS_2Q; + break; + } + + /* Fill out redirection table */ + for (i = 0; i < 32; i++) + E1000_WRITE_REG_ARRAY(hw, RETA, i, reta); + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + E1000_WRITE_REG_ARRAY(hw, RSSRK, i, random[i]); + + mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP); + E1000_WRITE_REG(hw, MRQC, mrqc); + } + + /* Multiqueue and packet checksumming are mutually exclusive. */ + if (hw->mac_type >= e1000_82571) { + rxcsum = E1000_READ_REG(hw, RXCSUM); + rxcsum |= E1000_RXCSUM_PCSD; + E1000_WRITE_REG(hw, RXCSUM, rxcsum); + } + +#else /* Enable 82543 Receive Checksum Offload for TCP and UDP */ - if(adapter->hw.mac_type >= e1000_82543) { - rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM); + if (hw->mac_type >= e1000_82543) { + rxcsum = E1000_READ_REG(hw, RXCSUM); if(adapter->rx_csum == TRUE) { rxcsum |= E1000_RXCSUM_TUOFL; - /* Enable 82573 IPv4 payload checksum for UDP fragments + /* Enable 82571 IPv4 payload checksum for UDP fragments * Must be used in conjunction with packet-split. 
*/ - if((adapter->hw.mac_type > e1000_82547_rev_2) && - (adapter->rx_ps)) { + if ((hw->mac_type >= e1000_82571) && + (adapter->rx_ps_pages)) { rxcsum |= E1000_RXCSUM_IPPCSE; } } else { rxcsum &= ~E1000_RXCSUM_TUOFL; /* don't need to clear IPPCSE as it defaults to 0 */ } - E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum); + E1000_WRITE_REG(hw, RXCSUM, rxcsum); } +#endif /* CONFIG_E1000_MQ */ - if (adapter->hw.mac_type == e1000_82573) - E1000_WRITE_REG(&adapter->hw, ERT, 0x0100); + if (hw->mac_type == e1000_82573) + E1000_WRITE_REG(hw, ERT, 0x0100); /* Enable Receives */ - E1000_WRITE_REG(&adapter->hw, RCTL, rctl); + E1000_WRITE_REG(hw, RCTL, rctl); } /** - * e1000_free_tx_resources - Free Tx Resources + * e1000_free_tx_resources - Free Tx Resources per Queue + * @adapter: board private structure + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ + +void +e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_tx_ring(adapter, tx_ring); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * e1000_free_all_tx_resources - Free Tx Resources for All Queues * @adapter: board private structure * * Free all transmit software resources **/ void -e1000_free_tx_resources(struct e1000_adapter *adapter) +e1000_free_all_tx_resources(struct e1000_adapter *adapter) { - struct pci_dev *pdev = adapter->pdev; + int i; - e1000_clean_tx_ring(adapter); - - vfree(adapter->tx_ring.buffer_info); - adapter->tx_ring.buffer_info = NULL; - - pci_free_consistent(pdev, adapter->tx_ring.size, - adapter->tx_ring.desc, adapter->tx_ring.dma); - - adapter->tx_ring.desc = NULL; + for (i = 0; i < adapter->num_queues; i++) + e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); } static inline void @@ -1414,21 +1798,22 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, /** * e1000_clean_tx_ring - Free Tx Buffers * @adapter: board private structure + * @tx_ring: ring to be cleaned **/ static void -e1000_clean_tx_ring(struct e1000_adapter *adapter) +e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) { - struct e1000_desc_ring *tx_ring = &adapter->tx_ring; struct e1000_buffer *buffer_info; unsigned long size; unsigned int i; /* Free all the Tx ring sk_buffs */ - if (likely(adapter->previous_buffer_info.skb != NULL)) { + if (likely(tx_ring->previous_buffer_info.skb != NULL)) { e1000_unmap_and_free_tx_resource(adapter, - &adapter->previous_buffer_info); + &tx_ring->previous_buffer_info); } for(i = 0; i < tx_ring->count; i++) { @@ -1446,24 +1831,39 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter) tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; - E1000_WRITE_REG(&adapter->hw, TDH, 0); - E1000_WRITE_REG(&adapter->hw, TDT, 0); + writel(0, adapter->hw.hw_addr + tx_ring->tdh); + writel(0, adapter->hw.hw_addr + tx_ring->tdt); +} + +/** + * e1000_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ + +static void +e1000_clean_all_tx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_queues; i++) + e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); } /** * e1000_free_rx_resources - Free Rx Resources * @adapter: board private structure + * @rx_ring: ring to clean the resources from * * Free all receive software resources **/ void 
-e1000_free_rx_resources(struct e1000_adapter *adapter) +e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) { - struct e1000_desc_ring *rx_ring = &adapter->rx_ring; struct pci_dev *pdev = adapter->pdev; - e1000_clean_rx_ring(adapter); + e1000_clean_rx_ring(adapter, rx_ring); vfree(rx_ring->buffer_info); rx_ring->buffer_info = NULL; @@ -1478,14 +1878,31 @@ e1000_free_rx_resources(struct e1000_adapter *adapter) } /** - * e1000_clean_rx_ring - Free Rx Buffers + * e1000_free_all_rx_resources - Free Rx Resources for All Queues * @adapter: board private structure + * + * Free all receive software resources + **/ + +void +e1000_free_all_rx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_queues; i++) + e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); +} + +/** + * e1000_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + * @rx_ring: ring to free buffers from **/ static void -e1000_clean_rx_ring(struct e1000_adapter *adapter) +e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) { - struct e1000_desc_ring *rx_ring = &adapter->rx_ring; struct e1000_buffer *buffer_info; struct e1000_ps_page *ps_page; struct e1000_ps_page_dma *ps_page_dma; @@ -1508,7 +1925,7 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter) dev_kfree_skb(buffer_info->skb); buffer_info->skb = NULL; - for(j = 0; j < PS_PAGE_BUFFERS; j++) { + for(j = 0; j < adapter->rx_ps_pages; j++) { if(!ps_page->ps_page[j]) break; pci_unmap_single(pdev, ps_page_dma->ps_page_dma[j], @@ -1534,8 +1951,22 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter) rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; - E1000_WRITE_REG(&adapter->hw, RDH, 0); - E1000_WRITE_REG(&adapter->hw, RDT, 0); + writel(0, adapter->hw.hw_addr + rx_ring->rdh); + writel(0, adapter->hw.hw_addr + rx_ring->rdt); +} + +/** + * e1000_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ + +static void +e1000_clean_all_rx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_queues; i++) + e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); } /* The 82542 2.0 (revision 2) needs to have the receive unit in reset @@ -1556,7 +1987,7 @@ e1000_enter_82542_rst(struct e1000_adapter *adapter) mdelay(5); if(netif_running(netdev)) - e1000_clean_rx_ring(adapter); + e1000_clean_all_rx_rings(adapter); } static void @@ -1576,7 +2007,7 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter) if(netif_running(netdev)) { e1000_configure_rx(adapter); - e1000_alloc_rx_buffers(adapter); + e1000_alloc_rx_buffers(adapter, &adapter->rx_ring[0]); } } @@ -1607,6 +2038,22 @@ e1000_set_mac(struct net_device *netdev, void *p) e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0); + /* With 82571 controllers, LAA may be overwritten (with the default) + * due to controller reset from the other port. */ + if (adapter->hw.mac_type == e1000_82571) { + /* activate the work around */ + adapter->hw.laa_is_present = 1; + + /* Hold a copy of the LAA in RAR[14] This is done so that + * between the time RAR[0] gets clobbered and the time it + * gets fixed (in e1000_watchdog), the actual LAA is in one + * of the RARs and no incoming packets directed to this port + * are dropped. 
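Taken together with the watchdog change further down, the 82571 LAA work-around amounts to keeping a shadow copy of the address in the last receive-address register and periodically restoring RAR[0]; a condensed sketch of the two halves (identifiers and logic lifted from the hunks, simplified):

	/* e1000_set_mac(): shadow the LAA in RAR[14] so incoming frames keep
	 * matching even if a reset from the other port clobbers RAR[0] */
	if (adapter->hw.mac_type == e1000_82571) {
		adapter->hw.laa_is_present = 1;
		e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
		              E1000_RAR_ENTRIES - 1);	/* RAR[14] */
	}

	/* e1000_watchdog_task(): put the LAA back into RAR[0] every period */
	if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
		e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);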
Eventaully the LAA will be in RAR[0] and + * RAR[14] */ + e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, + E1000_RAR_ENTRIES - 1); + } + if(adapter->hw.mac_type == e1000_82542_rev2_0) e1000_leave_82542_rst(adapter); @@ -1629,12 +2076,13 @@ e1000_set_multi(struct net_device *netdev) struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; struct dev_mc_list *mc_ptr; - unsigned long flags; uint32_t rctl; uint32_t hash_value; - int i; + int i, rar_entries = E1000_RAR_ENTRIES; - spin_lock_irqsave(&adapter->tx_lock, flags); + /* reserve RAR[14] for LAA over-write work-around */ + if (adapter->hw.mac_type == e1000_82571) + rar_entries--; /* Check for Promiscuous and All Multicast modes */ @@ -1659,11 +2107,12 @@ e1000_set_multi(struct net_device *netdev) /* load the first 14 multicast address into the exact filters 1-14 * RAR 0 is used for the station MAC adddress * if there are not 14 addresses, go ahead and clear the filters + * -- with 82571 controllers only 0-13 entries are filled here */ mc_ptr = netdev->mc_list; - for(i = 1; i < E1000_RAR_ENTRIES; i++) { - if(mc_ptr) { + for(i = 1; i < rar_entries; i++) { + if (mc_ptr) { e1000_rar_set(hw, mc_ptr->dmi_addr, i); mc_ptr = mc_ptr->next; } else { @@ -1686,8 +2135,6 @@ e1000_set_multi(struct net_device *netdev) if(hw->mac_type == e1000_82542_rev2_0) e1000_leave_82542_rst(adapter); - - spin_unlock_irqrestore(&adapter->tx_lock, flags); } /* Need to wait a few seconds after link up to get diagnostic information from @@ -1759,7 +2206,7 @@ static void e1000_watchdog_task(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; - struct e1000_desc_ring *txdr = &adapter->tx_ring; + struct e1000_tx_ring *txdr = &adapter->tx_ring[0]; uint32_t link; e1000_check_for_link(&adapter->hw); @@ -1818,8 +2265,8 @@ e1000_watchdog_task(struct e1000_adapter *adapter) e1000_update_adaptive(&adapter->hw); - if(!netif_carrier_ok(netdev)) { - if(E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { + if (adapter->num_queues == 1 && !netif_carrier_ok(netdev)) { + if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { /* We've lost link, so the controller stops DMA, * but we've got queued Tx work that's never going * to get done, so reset controller to flush Tx. @@ -1847,6 +2294,11 @@ e1000_watchdog_task(struct e1000_adapter *adapter) /* Force detection of hung controller every watchdog period */ adapter->detect_tx_hung = TRUE; + /* With 82571 controllers, LAA may be overwritten due to controller + * reset from the other port. 
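Because the last RAR entry is now reserved for that LAA shadow on 82571, the multicast exact-match fill in e1000_set_multi() runs over one fewer register; a condensed sketch of that loop (taken from the hunk above; the cleared-entry branch is unchanged context and elided here):

	int rar_entries = E1000_RAR_ENTRIES;

	if (hw->mac_type == e1000_82571)
		rar_entries--;		/* keep the last RAR for the LAA */

	/* RAR[0] stays the station address; fill the rest with exact-match
	 * multicast filters, leaving the overflow to the multicast hash */
	mc_ptr = netdev->mc_list;
	for (i = 1; i < rar_entries; i++) {
		if (mc_ptr) {
			e1000_rar_set(hw, mc_ptr->dmi_addr, i);
			mc_ptr = mc_ptr->next;
		} else {
			/* unused entries are cleared (unchanged code, elided) */
		}
	}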
Set the appropriate LAA in RAR[0] */ + if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present) + e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0); + /* Reset the timer */ mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); } @@ -1859,7 +2311,8 @@ e1000_watchdog_task(struct e1000_adapter *adapter) #define E1000_TX_FLAGS_VLAN_SHIFT 16 static inline int -e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb) +e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, + struct sk_buff *skb) { #ifdef NETIF_F_TSO struct e1000_context_desc *context_desc; @@ -1910,8 +2363,8 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb) cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); - i = adapter->tx_ring.next_to_use; - context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i); + i = tx_ring->next_to_use; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); context_desc->lower_setup.ip_fields.ipcss = ipcss; context_desc->lower_setup.ip_fields.ipcso = ipcso; @@ -1923,8 +2376,8 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb) context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; context_desc->cmd_and_length = cpu_to_le32(cmd_length); - if(++i == adapter->tx_ring.count) i = 0; - adapter->tx_ring.next_to_use = i; + if (++i == tx_ring->count) i = 0; + tx_ring->next_to_use = i; return 1; } @@ -1934,7 +2387,8 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb) } static inline boolean_t -e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) +e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, + struct sk_buff *skb) { struct e1000_context_desc *context_desc; unsigned int i; @@ -1943,8 +2397,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) if(likely(skb->ip_summed == CHECKSUM_HW)) { css = skb->h.raw - skb->data; - i = adapter->tx_ring.next_to_use; - context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i); + i = tx_ring->next_to_use; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); context_desc->upper_setup.tcp_fields.tucss = css; context_desc->upper_setup.tcp_fields.tucso = css + skb->csum; @@ -1952,8 +2406,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) context_desc->tcp_seg_setup.data = 0; context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); - if(unlikely(++i == adapter->tx_ring.count)) i = 0; - adapter->tx_ring.next_to_use = i; + if (unlikely(++i == tx_ring->count)) i = 0; + tx_ring->next_to_use = i; return TRUE; } @@ -1965,11 +2419,10 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) #define E1000_MAX_DATA_PER_TXD (1<tx_ring; struct e1000_buffer *buffer_info; unsigned int len = skb->len; unsigned int offset = 0, size, count = 0, i; @@ -2065,9 +2518,9 @@ e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb, } static inline void -e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags) +e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, + int tx_flags, int count) { - struct e1000_desc_ring *tx_ring = &adapter->tx_ring; struct e1000_tx_desc *tx_desc = NULL; struct e1000_buffer *buffer_info; uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; @@ -2113,7 +2566,7 @@ e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags) wmb(); tx_ring->next_to_use = i; - E1000_WRITE_REG(&adapter->hw, TDT, i); + writel(i, adapter->hw.hw_addr + tx_ring->tdt); } /** @@ -2206,6 +2659,7 @@ static int e1000_xmit_frame(struct sk_buff 
*skb, struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_tx_ring *tx_ring; unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; unsigned int tx_flags = 0; @@ -2218,7 +2672,13 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) unsigned int f; len -= skb->data_len; - if(unlikely(skb->len <= 0)) { +#ifdef CONFIG_E1000_MQ + tx_ring = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id()); +#else + tx_ring = adapter->tx_ring; +#endif + + if (unlikely(skb->len <= 0)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -2262,21 +2722,42 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) if(adapter->pcix_82544) count += nr_frags; - local_irq_save(flags); - if (!spin_trylock(&adapter->tx_lock)) { - /* Collision - tell upper layer to requeue */ - local_irq_restore(flags); - return NETDEV_TX_LOCKED; - } +#ifdef NETIF_F_TSO + /* TSO Workaround for 82571/2 Controllers -- if skb->data + * points to just header, pull a few bytes of payload from + * frags into skb->data */ + if (skb_shinfo(skb)->tso_size) { + uint8_t hdr_len; + hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); + if (skb->data_len && (hdr_len < (skb->len - skb->data_len)) && + (adapter->hw.mac_type == e1000_82571 || + adapter->hw.mac_type == e1000_82572)) { + unsigned int pull_size; + pull_size = min((unsigned int)4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + printk(KERN_ERR "__pskb_pull_tail failed.\n"); + dev_kfree_skb_any(skb); + return -EFAULT; + } + } + } +#endif + if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) ) e1000_transfer_dhcp_info(adapter, skb); + local_irq_save(flags); + if (!spin_trylock(&tx_ring->tx_lock)) { + /* Collision - tell upper layer to requeue */ + local_irq_restore(flags); + return NETDEV_TX_LOCKED; + } /* need: count + 2 desc gap to keep tail from touching * head, otherwise try next time */ - if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2)) { + if (unlikely(E1000_DESC_UNUSED(tx_ring) < count + 2)) { netif_stop_queue(netdev); - spin_unlock_irqrestore(&adapter->tx_lock, flags); + spin_unlock_irqrestore(&tx_ring->tx_lock, flags); return NETDEV_TX_BUSY; } @@ -2284,7 +2765,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) { netif_stop_queue(netdev); mod_timer(&adapter->tx_fifo_stall_timer, jiffies); - spin_unlock_irqrestore(&adapter->tx_lock, flags); + spin_unlock_irqrestore(&tx_ring->tx_lock, flags); return NETDEV_TX_BUSY; } } @@ -2294,37 +2775,37 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); } - first = adapter->tx_ring.next_to_use; + first = tx_ring->next_to_use; - tso = e1000_tso(adapter, skb); + tso = e1000_tso(adapter, tx_ring, skb); if (tso < 0) { dev_kfree_skb_any(skb); - spin_unlock_irqrestore(&adapter->tx_lock, flags); + spin_unlock_irqrestore(&tx_ring->tx_lock, flags); return NETDEV_TX_OK; } if (likely(tso)) tx_flags |= E1000_TX_FLAGS_TSO; - else if(likely(e1000_tx_csum(adapter, skb))) + else if (likely(e1000_tx_csum(adapter, tx_ring, skb))) tx_flags |= E1000_TX_FLAGS_CSUM; /* Old method was to assume IPv4 packet by default if TSO was enabled. - * 82573 hardware supports TSO capabilities for IPv6 as well... + * 82571 hardware supports TSO capabilities for IPv6 as well... * no longer assume, we must. 
*/ - if(likely(skb->protocol == ntohs(ETH_P_IP))) + if (likely(skb->protocol == ntohs(ETH_P_IP))) tx_flags |= E1000_TX_FLAGS_IPV4; - e1000_tx_queue(adapter, - e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss), - tx_flags); + e1000_tx_queue(adapter, tx_ring, tx_flags, + e1000_tx_map(adapter, tx_ring, skb, first, + max_per_txd, nr_frags, mss)); netdev->trans_start = jiffies; /* Make sure there is space in the ring for the next send. */ - if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < MAX_SKB_FRAGS + 2)) + if (unlikely(E1000_DESC_UNUSED(tx_ring) < MAX_SKB_FRAGS + 2)) netif_stop_queue(netdev); - spin_unlock_irqrestore(&adapter->tx_lock, flags); + spin_unlock_irqrestore(&tx_ring->tx_lock, flags); return NETDEV_TX_OK; } @@ -2388,9 +2869,18 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) return -EINVAL; } -#define MAX_STD_JUMBO_FRAME_SIZE 9216 +#define MAX_STD_JUMBO_FRAME_SIZE 9234 /* might want this to be bigger enum check... */ - if (adapter->hw.mac_type == e1000_82573 && + /* 82571 controllers limit jumbo frame size to 10500 bytes */ + if ((adapter->hw.mac_type == e1000_82571 || + adapter->hw.mac_type == e1000_82572) && + max_frame > MAX_STD_JUMBO_FRAME_SIZE) { + DPRINTK(PROBE, ERR, "MTU > 9216 bytes not supported " + "on 82571 and 82572 controllers.\n"); + return -EINVAL; + } + + if(adapter->hw.mac_type == e1000_82573 && max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { DPRINTK(PROBE, ERR, "Jumbo Frames not supported " "on 82573\n"); @@ -2578,6 +3068,29 @@ e1000_update_stats(struct e1000_adapter *adapter) spin_unlock_irqrestore(&adapter->stats_lock, flags); } +#ifdef CONFIG_E1000_MQ +void +e1000_rx_schedule(void *data) +{ + struct net_device *poll_dev, *netdev = data; + struct e1000_adapter *adapter = netdev->priv; + int this_cpu = get_cpu(); + + poll_dev = *per_cpu_ptr(adapter->cpu_netdev, this_cpu); + if (poll_dev == NULL) { + put_cpu(); + return; + } + + if (likely(netif_rx_schedule_prep(poll_dev))) + __netif_rx_schedule(poll_dev); + else + e1000_irq_enable(adapter); + + put_cpu(); +} +#endif + /** * e1000_intr - Interrupt Handler * @irq: interrupt number @@ -2592,8 +3105,8 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; uint32_t icr = E1000_READ_REG(hw, ICR); -#ifndef CONFIG_E1000_NAPI - unsigned int i; +#if defined(CONFIG_E1000_NAPI) && defined(CONFIG_E1000_MQ) || !defined(CONFIG_E1000_NAPI) + int i; #endif if(unlikely(!icr)) @@ -2605,17 +3118,31 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) } #ifdef CONFIG_E1000_NAPI - if(likely(netif_rx_schedule_prep(netdev))) { - - /* Disable interrupts and register for poll. The flush - of the posted write is intentionally left out. 
- */ - - atomic_inc(&adapter->irq_sem); - E1000_WRITE_REG(hw, IMC, ~0); - __netif_rx_schedule(netdev); + atomic_inc(&adapter->irq_sem); + E1000_WRITE_REG(hw, IMC, ~0); + E1000_WRITE_FLUSH(hw); +#ifdef CONFIG_E1000_MQ + if (atomic_read(&adapter->rx_sched_call_data.count) == 0) { + cpu_set(adapter->cpu_for_queue[0], + adapter->rx_sched_call_data.cpumask); + for (i = 1; i < adapter->num_queues; i++) { + cpu_set(adapter->cpu_for_queue[i], + adapter->rx_sched_call_data.cpumask); + atomic_inc(&adapter->irq_sem); + } + atomic_set(&adapter->rx_sched_call_data.count, i); + smp_call_async_mask(&adapter->rx_sched_call_data); + } else { + printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count)); } -#else +#else /* if !CONFIG_E1000_MQ */ + if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0]))) + __netif_rx_schedule(&adapter->polling_netdev[0]); + else + e1000_irq_enable(adapter); +#endif /* CONFIG_E1000_MQ */ + +#else /* if !CONFIG_E1000_NAPI */ /* Writing IMC and IMS is needed for 82547. Due to Hub Link bus being occupied, an interrupt de-assertion message is not able to be sent. @@ -2632,13 +3159,14 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) } for(i = 0; i < E1000_MAX_INTR; i++) - if(unlikely(!adapter->clean_rx(adapter) & - !e1000_clean_tx_irq(adapter))) + if(unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) & + !e1000_clean_tx_irq(adapter, adapter->tx_ring))) break; if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) e1000_irq_enable(adapter); -#endif + +#endif /* CONFIG_E1000_NAPI */ return IRQ_HANDLED; } @@ -2650,22 +3178,37 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) **/ static int -e1000_clean(struct net_device *netdev, int *budget) +e1000_clean(struct net_device *poll_dev, int *budget) { - struct e1000_adapter *adapter = netdev_priv(netdev); - int work_to_do = min(*budget, netdev->quota); - int tx_cleaned; - int work_done = 0; + struct e1000_adapter *adapter; + int work_to_do = min(*budget, poll_dev->quota); + int tx_cleaned, i = 0, work_done = 0; - tx_cleaned = e1000_clean_tx_irq(adapter); - adapter->clean_rx(adapter, &work_done, work_to_do); + /* Must NOT use netdev_priv macro here. 
*/ + adapter = poll_dev->priv; + + /* Keep link state information with original netdev */ + if (!netif_carrier_ok(adapter->netdev)) + goto quit_polling; + + while (poll_dev != &adapter->polling_netdev[i]) { + i++; + if (unlikely(i == adapter->num_queues)) + BUG(); + } + + tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]); + adapter->clean_rx(adapter, &adapter->rx_ring[i], + &work_done, work_to_do); *budget -= work_done; - netdev->quota -= work_done; + poll_dev->quota -= work_done; - if ((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) { /* If no Tx and not enough Rx work done, exit the polling mode */ - netif_rx_complete(netdev); + if((!tx_cleaned && (work_done == 0)) || + !netif_running(adapter->netdev)) { +quit_polling: + netif_rx_complete(poll_dev); e1000_irq_enable(adapter); return 0; } @@ -2680,9 +3223,9 @@ e1000_clean(struct net_device *netdev, int *budget) **/ static boolean_t -e1000_clean_tx_irq(struct e1000_adapter *adapter) +e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) { - struct e1000_desc_ring *tx_ring = &adapter->tx_ring; struct net_device *netdev = adapter->netdev; struct e1000_tx_desc *tx_desc, *eop_desc; struct e1000_buffer *buffer_info; @@ -2693,12 +3236,12 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter) eop = tx_ring->buffer_info[i].next_to_watch; eop_desc = E1000_TX_DESC(*tx_ring, eop); - while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { + while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { /* Premature writeback of Tx descriptors clear (free buffers * and unmap pci_mapping) previous_buffer_info */ - if (likely(adapter->previous_buffer_info.skb != NULL)) { + if (likely(tx_ring->previous_buffer_info.skb != NULL)) { e1000_unmap_and_free_tx_resource(adapter, - &adapter->previous_buffer_info); + &tx_ring->previous_buffer_info); } for(cleaned = FALSE; !cleaned; ) { @@ -2714,7 +3257,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter) #ifdef NETIF_F_TSO } else { if (cleaned) { - memcpy(&adapter->previous_buffer_info, + memcpy(&tx_ring->previous_buffer_info, buffer_info, sizeof(struct e1000_buffer)); memset(buffer_info, 0, @@ -2732,6 +3275,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter) if(unlikely(++i == tx_ring->count)) i = 0; } + + tx_ring->pkt++; eop = tx_ring->buffer_info[i].next_to_watch; eop_desc = E1000_TX_DESC(*tx_ring, eop); @@ -2739,15 +3284,15 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter) tx_ring->next_to_clean = i; - spin_lock(&adapter->tx_lock); + spin_lock(&tx_ring->tx_lock); if(unlikely(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev))) netif_wake_queue(netdev); - spin_unlock(&adapter->tx_lock); - if(adapter->detect_tx_hung) { + spin_unlock(&tx_ring->tx_lock); + if (adapter->detect_tx_hung) { /* Detect a transmit hang in hardware, this serializes the * check with the clearing of time_stamp and movement of i */ adapter->detect_tx_hung = FALSE; @@ -2771,8 +3316,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter) " next_to_watch <%x>\n" " jiffies <%lx>\n" " next_to_watch.status <%x>\n", - E1000_READ_REG(&adapter->hw, TDH), - E1000_READ_REG(&adapter->hw, TDT), + readl(adapter->hw.hw_addr + tx_ring->tdh), + readl(adapter->hw.hw_addr + tx_ring->tdt), tx_ring->next_to_use, i, (unsigned long long)tx_ring->buffer_info[i].dma, @@ -2784,12 +3329,10 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter) } } #ifdef NETIF_F_TSO - - if( unlikely(!(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && - time_after(jiffies, 
adapter->previous_buffer_info.time_stamp + HZ))) + if (unlikely(!(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && + time_after(jiffies, tx_ring->previous_buffer_info.time_stamp + HZ))) e1000_unmap_and_free_tx_resource( - adapter, &adapter->previous_buffer_info); - + adapter, &tx_ring->previous_buffer_info); #endif return cleaned; } @@ -2852,13 +3395,14 @@ e1000_rx_checksum(struct e1000_adapter *adapter, static boolean_t #ifdef CONFIG_E1000_NAPI -e1000_clean_rx_irq(struct e1000_adapter *adapter, int *work_done, - int work_to_do) +e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) #else -e1000_clean_rx_irq(struct e1000_adapter *adapter) +e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) #endif { - struct e1000_desc_ring *rx_ring = &adapter->rx_ring; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct e1000_rx_desc *rx_desc; @@ -2944,6 +3488,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter) } #endif /* CONFIG_E1000_NAPI */ netdev->last_rx = jiffies; + rx_ring->pkt++; next_desc: rx_desc->status = 0; @@ -2953,7 +3498,7 @@ next_desc: rx_desc = E1000_RX_DESC(*rx_ring, i); } rx_ring->next_to_clean = i; - adapter->alloc_rx_buf(adapter); + adapter->alloc_rx_buf(adapter, rx_ring); return cleaned; } @@ -2965,13 +3510,14 @@ next_desc: static boolean_t #ifdef CONFIG_E1000_NAPI -e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, int *work_done, - int work_to_do) +e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) #else -e1000_clean_rx_irq_ps(struct e1000_adapter *adapter) +e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) #endif { - struct e1000_desc_ring *rx_ring = &adapter->rx_ring; union e1000_rx_desc_packet_split *rx_desc; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; @@ -3027,7 +3573,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter) /* Good Receive */ skb_put(skb, length); - for(j = 0; j < PS_PAGE_BUFFERS; j++) { + for(j = 0; j < adapter->rx_ps_pages; j++) { if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j]))) break; @@ -3048,11 +3594,13 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter) rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); skb->protocol = eth_type_trans(skb, netdev); -#ifdef HAVE_RX_ZERO_COPY if(likely(rx_desc->wb.upper.header_status & - E1000_RXDPS_HDRSTAT_HDRSP)) + E1000_RXDPS_HDRSTAT_HDRSP)) { + adapter->rx_hdr_split++; +#ifdef HAVE_RX_ZERO_COPY skb_shinfo(skb)->zero_copy = TRUE; #endif + } #ifdef CONFIG_E1000_NAPI if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { vlan_hwaccel_receive_skb(skb, adapter->vlgrp, @@ -3071,6 +3619,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter) } #endif /* CONFIG_E1000_NAPI */ netdev->last_rx = jiffies; + rx_ring->pkt++; next_desc: rx_desc->wb.middle.status_error &= ~0xFF; @@ -3081,7 +3630,7 @@ next_desc: staterr = le32_to_cpu(rx_desc->wb.middle.status_error); } rx_ring->next_to_clean = i; - adapter->alloc_rx_buf(adapter); + adapter->alloc_rx_buf(adapter, rx_ring); return cleaned; } @@ -3092,9 +3641,9 @@ next_desc: **/ static void -e1000_alloc_rx_buffers(struct e1000_adapter *adapter) +e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) { - struct e1000_desc_ring *rx_ring = &adapter->rx_ring; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct 
e1000_rx_desc *rx_desc; @@ -3178,7 +3727,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter) * applicable for weak-ordered memory model archs, * such as IA-64). */ wmb(); - E1000_WRITE_REG(&adapter->hw, RDT, i); + writel(i, adapter->hw.hw_addr + rx_ring->rdt); } if(unlikely(++i == rx_ring->count)) i = 0; @@ -3194,9 +3743,9 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter) **/ static void -e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter) +e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) { - struct e1000_desc_ring *rx_ring = &adapter->rx_ring; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; union e1000_rx_desc_packet_split *rx_desc; @@ -3215,22 +3764,26 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter) rx_desc = E1000_RX_DESC_PS(*rx_ring, i); for(j = 0; j < PS_PAGE_BUFFERS; j++) { - if(unlikely(!ps_page->ps_page[j])) { - ps_page->ps_page[j] = - alloc_page(GFP_ATOMIC); - if(unlikely(!ps_page->ps_page[j])) - goto no_buffers; - ps_page_dma->ps_page_dma[j] = - pci_map_page(pdev, - ps_page->ps_page[j], - 0, PAGE_SIZE, - PCI_DMA_FROMDEVICE); - } - /* Refresh the desc even if buffer_addrs didn't - * change because each write-back erases this info. - */ - rx_desc->read.buffer_addr[j+1] = - cpu_to_le64(ps_page_dma->ps_page_dma[j]); + if (j < adapter->rx_ps_pages) { + if (likely(!ps_page->ps_page[j])) { + ps_page->ps_page[j] = + alloc_page(GFP_ATOMIC); + if (unlikely(!ps_page->ps_page[j])) + goto no_buffers; + ps_page_dma->ps_page_dma[j] = + pci_map_page(pdev, + ps_page->ps_page[j], + 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + } + /* Refresh the desc even if buffer_addrs didn't + * change because each write-back erases + * this info. + */ + rx_desc->read.buffer_addr[j+1] = + cpu_to_le64(ps_page_dma->ps_page_dma[j]); + } else + rx_desc->read.buffer_addr[j+1] = ~0; } skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN); @@ -3264,7 +3817,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter) * descriptors are 32 bytes...so we increment tail * twice as much. 
*/ - E1000_WRITE_REG(&adapter->hw, RDT, i<<1); + writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt); } if(unlikely(++i == rx_ring->count)) i = 0; @@ -3715,6 +4268,12 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state) } switch(adapter->hw.mac_type) { + case e1000_82571: + case e1000_82572: + ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); + E1000_WRITE_REG(&adapter->hw, CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); + break; case e1000_82573: swsm = E1000_READ_REG(&adapter->hw, SWSM); E1000_WRITE_REG(&adapter->hw, SWSM, @@ -3737,6 +4296,7 @@ e1000_resume(struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); uint32_t manc, ret_val, swsm; + uint32_t ctrl_ext; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); @@ -3762,6 +4322,12 @@ e1000_resume(struct pci_dev *pdev) } switch(adapter->hw.mac_type) { + case e1000_82571: + case e1000_82572: + ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); + E1000_WRITE_REG(&adapter->hw, CTRL_EXT, + ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); + break; case e1000_82573: swsm = E1000_READ_REG(&adapter->hw, SWSM); E1000_WRITE_REG(&adapter->hw, SWSM, @@ -3786,7 +4352,7 @@ e1000_netpoll(struct net_device *netdev) struct e1000_adapter *adapter = netdev_priv(netdev); disable_irq(adapter->pdev->irq); e1000_intr(adapter->pdev->irq, netdev, NULL); - e1000_clean_tx_irq(adapter); + e1000_clean_tx_irq(adapter, adapter->tx_ring); enable_irq(adapter->pdev->irq); } #endif diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c index 676247f9f1cc..38695d5b4637 100644 --- a/drivers/net/e1000/e1000_param.c +++ b/drivers/net/e1000/e1000_param.c @@ -306,7 +306,8 @@ e1000_check_options(struct e1000_adapter *adapter) .def = E1000_DEFAULT_TXD, .arg = { .r = { .min = E1000_MIN_TXD }} }; - struct e1000_desc_ring *tx_ring = &adapter->tx_ring; + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + int i; e1000_mac_type mac_type = adapter->hw.mac_type; opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_TXD : E1000_MAX_82544_TXD; @@ -319,6 +320,8 @@ e1000_check_options(struct e1000_adapter *adapter) } else { tx_ring->count = opt.def; } + for (i = 0; i < adapter->num_queues; i++) + tx_ring[i].count = tx_ring->count; } { /* Receive Descriptor Count */ struct e1000_option opt = { @@ -329,7 +332,8 @@ e1000_check_options(struct e1000_adapter *adapter) .def = E1000_DEFAULT_RXD, .arg = { .r = { .min = E1000_MIN_RXD }} }; - struct e1000_desc_ring *rx_ring = &adapter->rx_ring; + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; e1000_mac_type mac_type = adapter->hw.mac_type; opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD; @@ -342,6 +346,8 @@ e1000_check_options(struct e1000_adapter *adapter) } else { rx_ring->count = opt.def; } + for (i = 0; i < adapter->num_queues; i++) + rx_ring[i].count = rx_ring->count; } { /* Checksum Offload Enable/Disable */ struct e1000_option opt = { diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c index 87f522738bfc..f119ec4e89ea 100644 --- a/drivers/net/epic100.c +++ b/drivers/net/epic100.c @@ -1334,7 +1334,7 @@ static void epic_rx_err(struct net_device *dev, struct epic_private *ep) static int epic_poll(struct net_device *dev, int *budget) { struct epic_private *ep = dev->priv; - int work_done, orig_budget; + int work_done = 0, orig_budget; long ioaddr = dev->base_addr; orig_budget = (*budget > dev->quota) ? 
dev->quota : *budget; @@ -1343,7 +1343,7 @@ rx_action: epic_tx(dev, ep); - work_done = epic_rx(dev, *budget); + work_done += epic_rx(dev, *budget); epic_rx_err(dev, ep); diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index d6eefdb71c17..22aec6ed80f5 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -95,6 +95,8 @@ * of nv_remove * 0.42: 06 Aug 2005: Fix lack of link speed initialization * in the second (and later) nv_open call + * 0.43: 10 Aug 2005: Add support for tx checksum. + * 0.44: 20 Aug 2005: Add support for scatter gather and segmentation. * * Known bugs: * We suspect that on some hardware no TX done interrupts are generated. @@ -106,7 +108,7 @@ * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few * superfluous timer interrupts from the nic. */ -#define FORCEDETH_VERSION "0.41" +#define FORCEDETH_VERSION "0.44" #define DRV_NAME "forcedeth" #include @@ -145,6 +147,7 @@ #define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */ #define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */ #define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */ +#define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */ enum { NvRegIrqStatus = 0x000, @@ -241,6 +244,9 @@ enum { #define NVREG_TXRXCTL_IDLE 0x0008 #define NVREG_TXRXCTL_RESET 0x0010 #define NVREG_TXRXCTL_RXCHECK 0x0400 +#define NVREG_TXRXCTL_DESC_1 0 +#define NVREG_TXRXCTL_DESC_2 0x02100 +#define NVREG_TXRXCTL_DESC_3 0x02200 NvRegMIIStatus = 0x180, #define NVREG_MIISTAT_ERROR 0x0001 #define NVREG_MIISTAT_LINKCHANGE 0x0008 @@ -335,6 +341,10 @@ typedef union _ring_type { /* error and valid are the same for both */ #define NV_TX2_ERROR (1<<30) #define NV_TX2_VALID (1<<31) +#define NV_TX2_TSO (1<<28) +#define NV_TX2_TSO_SHIFT 14 +#define NV_TX2_CHECKSUM_L3 (1<<27) +#define NV_TX2_CHECKSUM_L4 (1<<26) #define NV_RX_DESCRIPTORVALID (1<<16) #define NV_RX_MISSEDFRAME (1<<17) @@ -417,14 +427,14 @@ typedef union _ring_type { /* * desc_ver values: - * This field has two purposes: - * - Newer nics uses a different ring layout. The layout is selected by - * comparing np->desc_ver with DESC_VER_xy. - * - It contains bits that are forced on when writing to NvRegTxRxControl. + * The nic supports three different descriptor types: + * - DESC_VER_1: Original + * - DESC_VER_2: support for jumbo frames. + * - DESC_VER_3: 64-bit format. 
*/ -#define DESC_VER_1 0x0 -#define DESC_VER_2 (0x02100|NVREG_TXRXCTL_RXCHECK) -#define DESC_VER_3 (0x02200|NVREG_TXRXCTL_RXCHECK) +#define DESC_VER_1 1 +#define DESC_VER_2 2 +#define DESC_VER_3 3 /* PHY defines */ #define PHY_OUI_MARVELL 0x5043 @@ -491,6 +501,7 @@ struct fe_priv { u32 orig_mac[2]; u32 irqmask; u32 desc_ver; + u32 txrxctl_bits; void __iomem *base; @@ -534,7 +545,7 @@ static inline struct fe_priv *get_nvpriv(struct net_device *dev) static inline u8 __iomem *get_hwbase(struct net_device *dev) { - return get_nvpriv(dev)->base; + return ((struct fe_priv *)netdev_priv(dev))->base; } static inline void pci_push(u8 __iomem *base) @@ -623,7 +634,7 @@ static int mii_rw(struct net_device *dev, int addr, int miireg, int value) static int phy_reset(struct net_device *dev) { - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); u32 miicontrol; unsigned int tries = 0; @@ -726,7 +737,7 @@ static int phy_init(struct net_device *dev) static void nv_start_rx(struct net_device *dev) { - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name); @@ -782,14 +793,14 @@ static void nv_stop_tx(struct net_device *dev) static void nv_txrx_reset(struct net_device *dev) { - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name); - writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->desc_ver, base + NvRegTxRxControl); + writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); pci_push(base); udelay(NV_TXRX_RESET_DELAY); - writel(NVREG_TXRXCTL_BIT2 | np->desc_ver, base + NvRegTxRxControl); + writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl); pci_push(base); } @@ -801,7 +812,7 @@ static void nv_txrx_reset(struct net_device *dev) */ static struct net_device_stats *nv_get_stats(struct net_device *dev) { - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); /* It seems that the nic always generates interrupts and doesn't * accumulate errors internally. 
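The descriptor-version rework above separates two things DESC_VER_x used to conflate: which ring layout the driver parses and which bits get written to NvRegTxRxControl. A condensed sketch of how the two now fit together, pieced from the probe and transmit hunks of this patch:

	/* nv_probe(): pick the ring layout and, separately, its register
	 * encoding plus the optional receive-checksum bit */
	np->desc_ver = DESC_VER_2;			/* jumbo-capable layout */
	np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;	/* register encoding for it */
	if (id->driver_data & DEV_HAS_CHECKSUM)
		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;

	/* every NvRegTxRxControl write now uses txrxctl_bits, e.g. the kick
	 * at the end of nv_start_xmit(): */
	writel(NVREG_TXRXCTL_KICK | np->txrxctl_bits,
	       get_hwbase(dev) + NvRegTxRxControl);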
Thus the current values in np->stats @@ -817,7 +828,7 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev) */ static int nv_alloc_rx(struct net_device *dev) { - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); unsigned int refill_rx = np->refill_rx; int nr; @@ -861,7 +872,7 @@ static int nv_alloc_rx(struct net_device *dev) static void nv_do_rx_refill(unsigned long data) { struct net_device *dev = (struct net_device *) data; - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); disable_irq(dev->irq); if (nv_alloc_rx(dev)) { @@ -875,7 +886,7 @@ static void nv_do_rx_refill(unsigned long data) static void nv_init_rx(struct net_device *dev) { - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); int i; np->cur_rx = RX_RING; @@ -889,15 +900,17 @@ static void nv_init_rx(struct net_device *dev) static void nv_init_tx(struct net_device *dev) { - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); int i; np->next_tx = np->nic_tx = 0; - for (i = 0; i < TX_RING; i++) + for (i = 0; i < TX_RING; i++) { if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) np->tx_ring.orig[i].FlagLen = 0; else np->tx_ring.ex[i].FlagLen = 0; + np->tx_skbuff[i] = NULL; + } } static int nv_init_ring(struct net_device *dev) @@ -907,21 +920,44 @@ static int nv_init_ring(struct net_device *dev) return nv_alloc_rx(dev); } +static void nv_release_txskb(struct net_device *dev, unsigned int skbnr) +{ + struct fe_priv *np = netdev_priv(dev); + struct sk_buff *skb = np->tx_skbuff[skbnr]; + unsigned int j, entry, fragments; + + dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d, skb %p\n", + dev->name, skbnr, np->tx_skbuff[skbnr]); + + entry = skbnr; + if ((fragments = skb_shinfo(skb)->nr_frags) != 0) { + for (j = fragments; j >= 1; j--) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[j-1]; + pci_unmap_page(np->pci_dev, np->tx_dma[entry], + frag->size, + PCI_DMA_TODEVICE); + entry = (entry - 1) % TX_RING; + } + } + pci_unmap_single(np->pci_dev, np->tx_dma[entry], + skb->len - skb->data_len, + PCI_DMA_TODEVICE); + dev_kfree_skb_irq(skb); + np->tx_skbuff[skbnr] = NULL; +} + static void nv_drain_tx(struct net_device *dev) { - struct fe_priv *np = get_nvpriv(dev); - int i; + struct fe_priv *np = netdev_priv(dev); + unsigned int i; + for (i = 0; i < TX_RING; i++) { if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) np->tx_ring.orig[i].FlagLen = 0; else np->tx_ring.ex[i].FlagLen = 0; if (np->tx_skbuff[i]) { - pci_unmap_single(np->pci_dev, np->tx_dma[i], - np->tx_skbuff[i]->len, - PCI_DMA_TODEVICE); - dev_kfree_skb(np->tx_skbuff[i]); - np->tx_skbuff[i] = NULL; + nv_release_txskb(dev, i); np->stats.tx_dropped++; } } @@ -929,7 +965,7 @@ static void nv_drain_tx(struct net_device *dev) static void nv_drain_rx(struct net_device *dev) { - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); int i; for (i = 0; i < RX_RING; i++) { if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) @@ -959,28 +995,69 @@ static void drain_ring(struct net_device *dev) */ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) { - struct fe_priv *np = get_nvpriv(dev); - int nr = np->next_tx % TX_RING; - - np->tx_skbuff[nr] = skb; - np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data,skb->len, - PCI_DMA_TODEVICE); - - if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) - np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]); - else { - 
np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32; - np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; - } + struct fe_priv *np = netdev_priv(dev); + u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); + unsigned int fragments = skb_shinfo(skb)->nr_frags; + unsigned int nr = (np->next_tx + fragments) % TX_RING; + unsigned int i; spin_lock_irq(&np->lock); - wmb(); - if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) - np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags ); + + if ((np->next_tx - np->nic_tx + fragments) > TX_LIMIT_STOP) { + spin_unlock_irq(&np->lock); + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + + np->tx_skbuff[nr] = skb; + + if (fragments) { + dprintk(KERN_DEBUG "%s: nv_start_xmit: buffer contains %d fragments\n", dev->name, fragments); + /* setup descriptors in reverse order */ + for (i = fragments; i >= 1; i--) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1]; + np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset, frag->size, + PCI_DMA_TODEVICE); + + if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { + np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]); + np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (frag->size-1) | np->tx_flags | tx_flags_extra); + } else { + np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32; + np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; + np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (frag->size-1) | np->tx_flags | tx_flags_extra); + } + + nr = (nr - 1) % TX_RING; + + if (np->desc_ver == DESC_VER_1) + tx_flags_extra &= ~NV_TX_LASTPACKET; + else + tx_flags_extra &= ~NV_TX2_LASTPACKET; + } + } + +#ifdef NETIF_F_TSO + if (skb_shinfo(skb)->tso_size) + tx_flags_extra |= NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT); else - np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags ); - dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission.\n", - dev->name, np->next_tx); +#endif + tx_flags_extra |= (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0); + + np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len-skb->data_len, + PCI_DMA_TODEVICE); + + if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { + np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]); + np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (skb->len-skb->data_len-1) | np->tx_flags | tx_flags_extra); + } else { + np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32; + np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; + np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (skb->len-skb->data_len-1) | np->tx_flags | tx_flags_extra); + } + + dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission. 
tx_flags_extra: %x\n", + dev->name, np->next_tx, tx_flags_extra); { int j; for (j=0; j<64; j++) { @@ -991,15 +1068,13 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) dprintk("\n"); } - np->next_tx++; + np->next_tx += 1 + fragments; dev->trans_start = jiffies; - if (np->next_tx - np->nic_tx >= TX_LIMIT_STOP) - netif_stop_queue(dev); spin_unlock_irq(&np->lock); - writel(NVREG_TXRXCTL_KICK|np->desc_ver, get_hwbase(dev) + NvRegTxRxControl); + writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); pci_push(get_hwbase(dev)); - return 0; + return NETDEV_TX_OK; } /* @@ -1009,9 +1084,10 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) */ static void nv_tx_done(struct net_device *dev) { - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); u32 Flags; - int i; + unsigned int i; + struct sk_buff *skb; while (np->nic_tx != np->next_tx) { i = np->nic_tx % TX_RING; @@ -1026,35 +1102,38 @@ static void nv_tx_done(struct net_device *dev) if (Flags & NV_TX_VALID) break; if (np->desc_ver == DESC_VER_1) { - if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION| - NV_TX_UNDERFLOW|NV_TX_ERROR)) { - if (Flags & NV_TX_UNDERFLOW) - np->stats.tx_fifo_errors++; - if (Flags & NV_TX_CARRIERLOST) - np->stats.tx_carrier_errors++; - np->stats.tx_errors++; - } else { - np->stats.tx_packets++; - np->stats.tx_bytes += np->tx_skbuff[i]->len; + if (Flags & NV_TX_LASTPACKET) { + skb = np->tx_skbuff[i]; + if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION| + NV_TX_UNDERFLOW|NV_TX_ERROR)) { + if (Flags & NV_TX_UNDERFLOW) + np->stats.tx_fifo_errors++; + if (Flags & NV_TX_CARRIERLOST) + np->stats.tx_carrier_errors++; + np->stats.tx_errors++; + } else { + np->stats.tx_packets++; + np->stats.tx_bytes += skb->len; + } + nv_release_txskb(dev, i); } } else { - if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION| - NV_TX2_UNDERFLOW|NV_TX2_ERROR)) { - if (Flags & NV_TX2_UNDERFLOW) - np->stats.tx_fifo_errors++; - if (Flags & NV_TX2_CARRIERLOST) - np->stats.tx_carrier_errors++; - np->stats.tx_errors++; - } else { - np->stats.tx_packets++; - np->stats.tx_bytes += np->tx_skbuff[i]->len; + if (Flags & NV_TX2_LASTPACKET) { + skb = np->tx_skbuff[i]; + if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION| + NV_TX2_UNDERFLOW|NV_TX2_ERROR)) { + if (Flags & NV_TX2_UNDERFLOW) + np->stats.tx_fifo_errors++; + if (Flags & NV_TX2_CARRIERLOST) + np->stats.tx_carrier_errors++; + np->stats.tx_errors++; + } else { + np->stats.tx_packets++; + np->stats.tx_bytes += skb->len; + } + nv_release_txskb(dev, i); } } - pci_unmap_single(np->pci_dev, np->tx_dma[i], - np->tx_skbuff[i]->len, - PCI_DMA_TODEVICE); - dev_kfree_skb_irq(np->tx_skbuff[i]); - np->tx_skbuff[i] = NULL; np->nic_tx++; } if (np->next_tx - np->nic_tx < TX_LIMIT_START) @@ -1067,7 +1146,7 @@ static void nv_tx_done(struct net_device *dev) */ static void nv_tx_timeout(struct net_device *dev) { - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); printk(KERN_INFO "%s: Got tx_timeout. 
irq: %08x\n", dev->name, @@ -1200,7 +1279,7 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen) static void nv_rx_process(struct net_device *dev) { - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); u32 Flags; for (;;) { @@ -1355,7 +1434,7 @@ static void set_bufsize(struct net_device *dev) */ static int nv_change_mtu(struct net_device *dev, int new_mtu) { - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); int old_mtu; if (new_mtu < 64 || new_mtu > np->pkt_limit) @@ -1408,7 +1487,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu) writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), base + NvRegRingSizes); pci_push(base); - writel(NVREG_TXRXCTL_KICK|np->desc_ver, get_hwbase(dev) + NvRegTxRxControl); + writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); pci_push(base); /* restart rx engine */ @@ -1440,7 +1519,7 @@ static void nv_copy_mac_to_hw(struct net_device *dev) */ static int nv_set_mac_address(struct net_device *dev, void *addr) { - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); struct sockaddr *macaddr = (struct sockaddr*)addr; if(!is_valid_ether_addr(macaddr->sa_data)) @@ -1475,7 +1554,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr) */ static void nv_set_multicast(struct net_device *dev) { - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); u32 addr[2]; u32 mask[2]; @@ -1535,7 +1614,7 @@ static void nv_set_multicast(struct net_device *dev) static int nv_update_linkspeed(struct net_device *dev) { - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); int adv, lpa; int newls = np->linkspeed; @@ -1705,7 +1784,7 @@ static void nv_link_irq(struct net_device *dev) static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs) { struct net_device *dev = (struct net_device *) data; - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); u32 events; int i; @@ -1777,7 +1856,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs) static void nv_do_nic_poll(unsigned long data) { struct net_device *dev = (struct net_device *) data; - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); disable_irq(dev->irq); @@ -1801,7 +1880,7 @@ static void nv_poll_controller(struct net_device *dev) static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); strcpy(info->driver, "forcedeth"); strcpy(info->version, FORCEDETH_VERSION); strcpy(info->bus_info, pci_name(np->pci_dev)); @@ -1809,7 +1888,7 @@ static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) { - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); wolinfo->supported = WAKE_MAGIC; spin_lock_irq(&np->lock); @@ -1820,7 +1899,7 @@ static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) { - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); spin_lock_irq(&np->lock); @@ 
-2021,7 +2100,7 @@ static int nv_get_regs_len(struct net_device *dev) static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) { - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); u32 *rbuf = buf; int i; @@ -2035,7 +2114,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void static int nv_nway_reset(struct net_device *dev) { - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); int ret; spin_lock_irq(&np->lock); @@ -2065,11 +2144,12 @@ static struct ethtool_ops ops = { .get_regs_len = nv_get_regs_len, .get_regs = nv_get_regs, .nway_reset = nv_nway_reset, + .get_perm_addr = ethtool_op_get_perm_addr, }; static int nv_open(struct net_device *dev) { - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); u8 __iomem *base = get_hwbase(dev); int ret, oom, i; @@ -2114,9 +2194,9 @@ static int nv_open(struct net_device *dev) /* 5) continue setup */ writel(np->linkspeed, base + NvRegLinkSpeed); writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3); - writel(np->desc_ver, base + NvRegTxRxControl); + writel(np->txrxctl_bits, base + NvRegTxRxControl); pci_push(base); - writel(NVREG_TXRXCTL_BIT1|np->desc_ver, base + NvRegTxRxControl); + writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); @@ -2205,7 +2285,7 @@ out_drain: static int nv_close(struct net_device *dev) { - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); u8 __iomem *base; spin_lock_irq(&np->lock); @@ -2261,7 +2341,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i if (!dev) goto out; - np = get_nvpriv(dev); + np = netdev_priv(dev); np->pci_dev = pci_dev; spin_lock_init(&np->lock); SET_MODULE_OWNER(dev); @@ -2313,19 +2393,32 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i if (pci_set_dma_mask(pci_dev, 0x0000007fffffffffULL)) { printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", pci_name(pci_dev)); + } else { + dev->features |= NETIF_F_HIGHDMA; } + np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; } else if (id->driver_data & DEV_HAS_LARGEDESC) { /* packet format 2: supports jumbo frames */ np->desc_ver = DESC_VER_2; + np->txrxctl_bits = NVREG_TXRXCTL_DESC_2; } else { /* original packet format */ np->desc_ver = DESC_VER_1; + np->txrxctl_bits = NVREG_TXRXCTL_DESC_1; } np->pkt_limit = NV_PKTLIMIT_1; if (id->driver_data & DEV_HAS_LARGEDESC) np->pkt_limit = NV_PKTLIMIT_2; + if (id->driver_data & DEV_HAS_CHECKSUM) { + np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; + dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; +#ifdef NETIF_F_TSO + dev->features |= NETIF_F_TSO; +#endif + } + err = -ENOMEM; np->base = ioremap(addr, NV_PCI_REGSZ); if (!np->base) @@ -2377,8 +2470,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); - if (!is_valid_ether_addr(dev->dev_addr)) { + if (!is_valid_ether_addr(dev->perm_addr)) { /* * Bad mac address. 
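The permanent-MAC support added above is three small pieces working together; a condensed sketch (identifiers from the patch, surrounding error handling omitted):

	/* nv_probe(): remember the factory address before anything can
	 * override dev->dev_addr, and validate that permanent copy */
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	if (!is_valid_ether_addr(dev->perm_addr)) {
		/* bad BIOS-provided address; fallback handling not shown */
	}

	/* ethtool_ops: expose it through the generic helper so the
	 * ETHTOOL_GPERMADDR ioctl works without driver-specific code */
	static struct ethtool_ops ops = {
		/* ... */
		.get_perm_addr = ethtool_op_get_perm_addr,
	};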
At least one bios sets the mac address * to 01:23:45:67:89:ab @@ -2403,9 +2497,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i np->wolenabled = 0; if (np->desc_ver == DESC_VER_1) { - np->tx_flags = NV_TX_LASTPACKET|NV_TX_VALID; + np->tx_flags = NV_TX_VALID; } else { - np->tx_flags = NV_TX2_LASTPACKET|NV_TX2_VALID; + np->tx_flags = NV_TX2_VALID; } np->irqmask = NVREG_IRQMASK_WANTED; if (id->driver_data & DEV_NEED_TIMERIRQ) @@ -2494,7 +2588,7 @@ out: static void __devexit nv_remove(struct pci_dev *pci_dev) { struct net_device *dev = pci_get_drvdata(pci_dev); - struct fe_priv *np = get_nvpriv(dev); + struct fe_priv *np = netdev_priv(dev); unregister_netdev(dev); @@ -2525,35 +2619,35 @@ static struct pci_device_id pci_tbl[] = { }, { /* nForce3 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, }, { /* nForce3 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, }, { /* nForce3 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, }, { /* nForce3 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM, }, { /* CK804 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, }, { /* CK804 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, }, { /* MCP04 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, }, { /* MCP04 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, }, { /* MCP51 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12), @@ -2565,11 +2659,11 @@ static struct pci_device_id pci_tbl[] = { }, { /* MCP55 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, }, { /* MCP55 Ethernet Controller */ 
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA, }, {0,}, }; diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 6518334b9280..ae5a2ed3b264 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -29,12 +29,7 @@ * define the configuration needed by the board are defined in a * board structure in arch/ppc/platforms (though I do not * discount the possibility that other architectures could one - * day be supported. One assumption the driver currently makes - * is that the PHY is configured in such a way to advertise all - * capabilities. This is a sensible default, and on certain - * PHYs, changing this default encounters substantial errata - * issues. Future versions may remove this requirement, but for - * now, it is best for the firmware to ensure this is the case. + * day be supported. * * The Gianfar Ethernet Controller uses a ring of buffer * descriptors. The beginning is indicated by a register @@ -47,7 +42,7 @@ * corresponding bit in the IMASK register is also set (if * interrupt coalescing is active, then the interrupt may not * happen immediately, but will wait until either a set number - * of frames or amount of time have passed.). In NAPI, the + * of frames or amount of time have passed). In NAPI, the * interrupt handler will signal there is work to be done, and * exit. Without NAPI, the packet(s) will be handled * immediately. Both methods will start at the last known empty @@ -75,6 +70,7 @@ #include #include #include +#include #include #include #include @@ -97,9 +93,11 @@ #include #include #include +#include +#include #include "gianfar.h" -#include "gianfar_phy.h" +#include "gianfar_mii.h" #define TX_TIMEOUT (1*HZ) #define SKB_ALLOC_TIMEOUT 1000000 @@ -113,9 +111,8 @@ #endif const char gfar_driver_name[] = "Gianfar Ethernet"; -const char gfar_driver_version[] = "1.1"; +const char gfar_driver_version[] = "1.2"; -int startup_gfar(struct net_device *dev); static int gfar_enet_open(struct net_device *dev); static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); static void gfar_timeout(struct net_device *dev); @@ -126,17 +123,13 @@ static int gfar_set_mac_address(struct net_device *dev); static int gfar_change_mtu(struct net_device *dev, int new_mtu); static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs); static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs); -static irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs); static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs); -static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs); -static void gfar_phy_change(void *data); -static void gfar_phy_timer(unsigned long data); static void adjust_link(struct net_device *dev); static void init_registers(struct net_device *dev); static int init_phy(struct net_device *dev); static int gfar_probe(struct device *device); static int gfar_remove(struct device *device); -void free_skb_resources(struct gfar_private *priv); +static void free_skb_resources(struct gfar_private *priv); static void gfar_set_multi(struct net_device *dev); static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); #ifdef CONFIG_GFAR_NAPI @@ -144,7 +137,6 @@ static int gfar_poll(struct net_device *dev, int *budget); #endif int gfar_clean_rx_ring(struct 
net_device *dev, int rx_work_limit); static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length); -static void gfar_phy_startup_timer(unsigned long data); static void gfar_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp); static void gfar_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); @@ -162,6 +154,9 @@ int gfar_uses_fcb(struct gfar_private *priv) else return 0; } + +/* Set up the ethernet device structure, private data, + * and anything else we need before we start */ static int gfar_probe(struct device *device) { u32 tempval; @@ -175,7 +170,7 @@ static int gfar_probe(struct device *device) einfo = (struct gianfar_platform_data *) pdev->dev.platform_data; - if (einfo == NULL) { + if (NULL == einfo) { printk(KERN_ERR "gfar %d: Missing additional data!\n", pdev->id); @@ -185,7 +180,7 @@ static int gfar_probe(struct device *device) /* Create an ethernet device instance */ dev = alloc_etherdev(sizeof (*priv)); - if (dev == NULL) + if (NULL == dev) return -ENOMEM; priv = netdev_priv(dev); @@ -207,20 +202,11 @@ static int gfar_probe(struct device *device) priv->regs = (struct gfar *) ioremap(r->start, sizeof (struct gfar)); - if (priv->regs == NULL) { + if (NULL == priv->regs) { err = -ENOMEM; goto regs_fail; } - /* Set the PHY base address */ - priv->phyregs = (struct gfar *) - ioremap(einfo->phy_reg_addr, sizeof (struct gfar)); - - if (priv->phyregs == NULL) { - err = -ENOMEM; - goto phy_regs_fail; - } - spin_lock_init(&priv->lock); dev_set_drvdata(device, dev); @@ -386,12 +372,10 @@ static int gfar_probe(struct device *device) return 0; register_fail: - iounmap((void *) priv->phyregs); -phy_regs_fail: iounmap((void *) priv->regs); regs_fail: free_netdev(dev); - return -ENOMEM; + return err; } static int gfar_remove(struct device *device) @@ -402,108 +386,41 @@ static int gfar_remove(struct device *device) dev_set_drvdata(device, NULL); iounmap((void *) priv->regs); - iounmap((void *) priv->phyregs); free_netdev(dev); return 0; } -/* Configure the PHY for dev. - * returns 0 if success. -1 if failure +/* Initializes driver's PHY state, and attaches to the PHY. + * Returns 0 on success. */ static int init_phy(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); - struct phy_info *curphy; - unsigned int timeout = PHY_INIT_TIMEOUT; - struct gfar *phyregs = priv->phyregs; - struct gfar_mii_info *mii_info; - int err; + uint gigabit_support = + priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? 
+ SUPPORTED_1000baseT_Full : 0; + struct phy_device *phydev; priv->oldlink = 0; priv->oldspeed = 0; priv->oldduplex = -1; - mii_info = kmalloc(sizeof(struct gfar_mii_info), - GFP_KERNEL); + phydev = phy_connect(dev, priv->einfo->bus_id, &adjust_link, 0); - if(NULL == mii_info) { - if (netif_msg_ifup(priv)) - printk(KERN_ERR "%s: Could not allocate mii_info\n", - dev->name); - return -ENOMEM; + if (IS_ERR(phydev)) { + printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); + return PTR_ERR(phydev); } - mii_info->speed = SPEED_1000; - mii_info->duplex = DUPLEX_FULL; - mii_info->pause = 0; - mii_info->link = 1; + /* Remove any features not supported by the controller */ + phydev->supported &= (GFAR_SUPPORTED | gigabit_support); + phydev->advertising = phydev->supported; - mii_info->advertising = (ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Full); - mii_info->autoneg = 1; - - spin_lock_init(&mii_info->mdio_lock); - - mii_info->mii_id = priv->einfo->phyid; - - mii_info->dev = dev; - - mii_info->mdio_read = &read_phy_reg; - mii_info->mdio_write = &write_phy_reg; - - priv->mii_info = mii_info; - - /* Reset the management interface */ - gfar_write(&phyregs->miimcfg, MIIMCFG_RESET); - - /* Setup the MII Mgmt clock speed */ - gfar_write(&phyregs->miimcfg, MIIMCFG_INIT_VALUE); - - /* Wait until the bus is free */ - while ((gfar_read(&phyregs->miimind) & MIIMIND_BUSY) && - timeout--) - cpu_relax(); - - if(timeout <= 0) { - printk(KERN_ERR "%s: The MII Bus is stuck!\n", - dev->name); - err = -1; - goto bus_fail; - } - - /* get info for this PHY */ - curphy = get_phy_info(priv->mii_info); - - if (curphy == NULL) { - if (netif_msg_ifup(priv)) - printk(KERN_ERR "%s: No PHY found\n", dev->name); - err = -1; - goto no_phy; - } - - mii_info->phyinfo = curphy; - - /* Run the commands which initialize the PHY */ - if(curphy->init) { - err = curphy->init(priv->mii_info); - - if (err) - goto phy_init_fail; - } + priv->phydev = phydev; return 0; - -phy_init_fail: -no_phy: -bus_fail: - kfree(mii_info); - - return err; } static void init_registers(struct net_device *dev) @@ -603,24 +520,13 @@ void stop_gfar(struct net_device *dev) struct gfar *regs = priv->regs; unsigned long flags; + phy_stop(priv->phydev); + /* Lock it down */ spin_lock_irqsave(&priv->lock, flags); - /* Tell the kernel the link is down */ - priv->mii_info->link = 0; - adjust_link(dev); - gfar_halt(dev); - if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) { - /* Clear any pending interrupts */ - mii_clear_phy_interrupt(priv->mii_info); - - /* Disable PHY Interrupts */ - mii_configure_phy_interrupt(priv->mii_info, - MII_INTERRUPT_DISABLED); - } - spin_unlock_irqrestore(&priv->lock, flags); /* Free the IRQs */ @@ -629,13 +535,7 @@ void stop_gfar(struct net_device *dev) free_irq(priv->interruptTransmit, dev); free_irq(priv->interruptReceive, dev); } else { - free_irq(priv->interruptTransmit, dev); - } - - if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) { - free_irq(priv->einfo->interruptPHY, dev); - } else { - del_timer_sync(&priv->phy_info_timer); + free_irq(priv->interruptTransmit, dev); } free_skb_resources(priv); @@ -649,7 +549,7 @@ void stop_gfar(struct net_device *dev) /* If there are any tx skbs or rx skbs still around, free them. 
* Then free tx_skbuff and rx_skbuff */ -void free_skb_resources(struct gfar_private *priv) +static void free_skb_resources(struct gfar_private *priv) { struct rxbd8 *rxbdp; struct txbd8 *txbdp; @@ -770,7 +670,7 @@ int startup_gfar(struct net_device *dev) (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) * priv->tx_ring_size, GFP_KERNEL); - if (priv->tx_skbuff == NULL) { + if (NULL == priv->tx_skbuff) { if (netif_msg_ifup(priv)) printk(KERN_ERR "%s: Could not allocate tx_skbuff\n", dev->name); @@ -785,7 +685,7 @@ int startup_gfar(struct net_device *dev) (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) * priv->rx_ring_size, GFP_KERNEL); - if (NULL == priv->rx_skbuff) { + if (NULL == priv->rx_skbuff) { if (netif_msg_ifup(priv)) printk(KERN_ERR "%s: Could not allocate rx_skbuff\n", dev->name); @@ -879,13 +779,7 @@ int startup_gfar(struct net_device *dev) } } - /* Set up the PHY change work queue */ - INIT_WORK(&priv->tq, gfar_phy_change, dev); - - init_timer(&priv->phy_info_timer); - priv->phy_info_timer.function = &gfar_phy_startup_timer; - priv->phy_info_timer.data = (unsigned long) priv->mii_info; - mod_timer(&priv->phy_info_timer, jiffies + HZ); + phy_start(priv->phydev); /* Configure the coalescing support */ if (priv->txcoalescing) @@ -933,11 +827,6 @@ tx_skb_fail: priv->tx_bd_base, gfar_read(&regs->tbase0)); - if (priv->mii_info->phyinfo->close) - priv->mii_info->phyinfo->close(priv->mii_info); - - kfree(priv->mii_info); - return err; } @@ -1035,7 +924,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) txbdp->status &= TXBD_WRAP; /* Set up checksumming */ - if ((dev->features & NETIF_F_IP_CSUM) + if ((dev->features & NETIF_F_IP_CSUM) && (CHECKSUM_HW == skb->ip_summed)) { fcb = gfar_add_fcb(skb, txbdp); gfar_tx_checksum(skb, fcb); @@ -1103,11 +992,9 @@ static int gfar_close(struct net_device *dev) struct gfar_private *priv = netdev_priv(dev); stop_gfar(dev); - /* Shutdown the PHY */ - if (priv->mii_info->phyinfo->close) - priv->mii_info->phyinfo->close(priv->mii_info); - - kfree(priv->mii_info); + /* Disconnect from the PHY */ + phy_disconnect(priv->phydev); + priv->phydev = NULL; netif_stop_queue(dev); @@ -1343,7 +1230,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp) while ((!skb) && timeout--) skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT); - if (skb == NULL) + if (NULL == skb) return NULL; /* We need the data buffer to be aligned properly.
We will reserve @@ -1490,7 +1377,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, struct gfar_private *priv = netdev_priv(dev); struct rxfcb *fcb = NULL; - if (skb == NULL) { + if (NULL == skb) { if (netif_msg_rx_err(priv)) printk(KERN_WARNING "%s: Missing skb!!.\n", dev->name); priv->stats.rx_dropped++; @@ -1718,131 +1605,9 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs) return IRQ_HANDLED; } -static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs) -{ - struct net_device *dev = (struct net_device *) dev_id; - struct gfar_private *priv = netdev_priv(dev); - - /* Clear the interrupt */ - mii_clear_phy_interrupt(priv->mii_info); - - /* Disable PHY interrupts */ - mii_configure_phy_interrupt(priv->mii_info, - MII_INTERRUPT_DISABLED); - - /* Schedule the phy change */ - schedule_work(&priv->tq); - - return IRQ_HANDLED; -} - -/* Scheduled by the phy_interrupt/timer to handle PHY changes */ -static void gfar_phy_change(void *data) -{ - struct net_device *dev = (struct net_device *) data; - struct gfar_private *priv = netdev_priv(dev); - int result = 0; - - /* Delay to give the PHY a chance to change the - * register state */ - msleep(1); - - /* Update the link, speed, duplex */ - result = priv->mii_info->phyinfo->read_status(priv->mii_info); - - /* Adjust the known status as long as the link - * isn't still coming up */ - if((0 == result) || (priv->mii_info->link == 0)) - adjust_link(dev); - - /* Reenable interrupts, if needed */ - if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) - mii_configure_phy_interrupt(priv->mii_info, - MII_INTERRUPT_ENABLED); -} - -/* Called every so often on systems that don't interrupt - * the core for PHY changes */ -static void gfar_phy_timer(unsigned long data) -{ - struct net_device *dev = (struct net_device *) data; - struct gfar_private *priv = netdev_priv(dev); - - schedule_work(&priv->tq); - - mod_timer(&priv->phy_info_timer, jiffies + - GFAR_PHY_CHANGE_TIME * HZ); -} - -/* Keep trying aneg for some time - * If, after GFAR_AN_TIMEOUT seconds, it has not - * finished, we switch to forced. - * Either way, once the process has completed, we either - * request the interrupt, or switch the timer over to - * using gfar_phy_timer to check status */ -static void gfar_phy_startup_timer(unsigned long data) -{ - int result; - static int secondary = GFAR_AN_TIMEOUT; - struct gfar_mii_info *mii_info = (struct gfar_mii_info *)data; - struct gfar_private *priv = netdev_priv(mii_info->dev); - - /* Configure the Auto-negotiation */ - result = mii_info->phyinfo->config_aneg(mii_info); - - /* If autonegotiation failed to start, and - * we haven't timed out, reset the timer, and return */ - if (result && secondary--) { - mod_timer(&priv->phy_info_timer, jiffies + HZ); - return; - } else if (result) { - /* Couldn't start autonegotiation. - * Try switching to forced */ - mii_info->autoneg = 0; - result = mii_info->phyinfo->config_aneg(mii_info); - - /* Forcing failed! 
Give up */ - if(result) { - if (netif_msg_link(priv)) - printk(KERN_ERR "%s: Forcing failed!\n", - mii_info->dev->name); - return; - } - } - - /* Kill the timer so it can be restarted */ - del_timer_sync(&priv->phy_info_timer); - - /* Grab the PHY interrupt, if necessary/possible */ - if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) { - if (request_irq(priv->einfo->interruptPHY, - phy_interrupt, - SA_SHIRQ, - "phy_interrupt", - mii_info->dev) < 0) { - if (netif_msg_intr(priv)) - printk(KERN_ERR "%s: Can't get IRQ %d (PHY)\n", - mii_info->dev->name, - priv->einfo->interruptPHY); - } else { - mii_configure_phy_interrupt(priv->mii_info, - MII_INTERRUPT_ENABLED); - return; - } - } - - /* Start the timer again, this time in order to - * handle a change in status */ - init_timer(&priv->phy_info_timer); - priv->phy_info_timer.function = &gfar_phy_timer; - priv->phy_info_timer.data = (unsigned long) mii_info->dev; - mod_timer(&priv->phy_info_timer, jiffies + - GFAR_PHY_CHANGE_TIME * HZ); -} - /* Called every time the controller might need to be made * aware of new link state. The PHY code conveys this - * information through variables in the priv structure, and this + * information through variables in the phydev structure, and this * function converts those variables into the appropriate * register values, and can bring down the device if needed. */ @@ -1850,84 +1615,68 @@ static void adjust_link(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); struct gfar *regs = priv->regs; - u32 tempval; - struct gfar_mii_info *mii_info = priv->mii_info; + unsigned long flags; + struct phy_device *phydev = priv->phydev; + int new_state = 0; + + spin_lock_irqsave(&priv->lock, flags); + if (phydev->link) { + u32 tempval = gfar_read(&regs->maccfg2); - if (mii_info->link) { /* Now we make sure that we can be in full duplex mode. * If not, we operate in half-duplex mode. */ - if (mii_info->duplex != priv->oldduplex) { - if (!(mii_info->duplex)) { - tempval = gfar_read(&regs->maccfg2); + if (phydev->duplex != priv->oldduplex) { + new_state = 1; + if (!(phydev->duplex)) tempval &= ~(MACCFG2_FULL_DUPLEX); - gfar_write(&regs->maccfg2, tempval); - - if (netif_msg_link(priv)) - printk(KERN_INFO "%s: Half Duplex\n", - dev->name); - } else { - tempval = gfar_read(&regs->maccfg2); + else tempval |= MACCFG2_FULL_DUPLEX; - gfar_write(&regs->maccfg2, tempval); - if (netif_msg_link(priv)) - printk(KERN_INFO "%s: Full Duplex\n", - dev->name); - } - - priv->oldduplex = mii_info->duplex; + priv->oldduplex = phydev->duplex; } - if (mii_info->speed != priv->oldspeed) { - switch (mii_info->speed) { + if (phydev->speed != priv->oldspeed) { + new_state = 1; + switch (phydev->speed) { case 1000: - tempval = gfar_read(&regs->maccfg2); tempval = ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); - gfar_write(&regs->maccfg2, tempval); break; case 100: case 10: - tempval = gfar_read(&regs->maccfg2); tempval = ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); - gfar_write(&regs->maccfg2, tempval); break; default: if (netif_msg_link(priv)) printk(KERN_WARNING - "%s: Ack! Speed (%d) is not 10/100/1000!\n", - dev->name, mii_info->speed); + "%s: Ack! Speed (%d) is not 10/100/1000!\n", + dev->name, phydev->speed); break; } - if (netif_msg_link(priv)) - printk(KERN_INFO "%s: Speed %dBT\n", dev->name, - mii_info->speed); - - priv->oldspeed = mii_info->speed; + priv->oldspeed = phydev->speed; } + gfar_write(&regs->maccfg2, tempval); + if (!priv->oldlink) { - if (netif_msg_link(priv)) - printk(KERN_INFO "%s: Link is up\n", dev->name); + new_state = 1; priv->oldlink = 1; - netif_carrier_on(dev); netif_schedule(dev); } - } else { - if (priv->oldlink) { - if (netif_msg_link(priv)) - printk(KERN_INFO "%s: Link is down\n", - dev->name); - priv->oldlink = 0; - priv->oldspeed = 0; - priv->oldduplex = -1; - netif_carrier_off(dev); - } + } else if (priv->oldlink) { + new_state = 1; + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; } -} + if (new_state && netif_msg_link(priv)) + phy_print_status(phydev); + + spin_unlock_irqrestore(&priv->lock, flags); +} /* Update the hash table based on the current list of multicast * addresses we subscribe to. Also, change the promiscuity of @@ -2122,12 +1871,23 @@ static struct device_driver gfar_driver = { static int __init gfar_init(void) { - return driver_register(&gfar_driver); + int err = gfar_mdio_init(); + + if (err) + return err; + + err = driver_register(&gfar_driver); + + if (err) + gfar_mdio_exit(); + + return err; } static void __exit gfar_exit(void) { driver_unregister(&gfar_driver); + gfar_mdio_exit(); } module_init(gfar_init); diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index 28af087d9fbb..c77ca6c0d04a 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h @@ -17,7 +17,6 @@ * * Still left to do: * -Add support for module parameters - * -Add support for ethtool -s * -Add patch for ethtool phys id */ #ifndef __GIANFAR_H @@ -37,7 +36,8 @@ #include #include #include -#include +#include +#include #include #include @@ -48,7 +48,8 @@ #include #include #include -#include "gianfar_phy.h" +#include +#include "gianfar_mii.h" /* The maximum number of packets to be handled in one call of gfar_poll */ #define GFAR_DEV_WEIGHT 64 @@ -73,7 +74,7 @@ #define PHY_INIT_TIMEOUT 100000 #define GFAR_PHY_CHANGE_TIME 2 -#define DEVICE_NAME "%s: Gianfar Ethernet Controller Version 1.1, " +#define DEVICE_NAME "%s: Gianfar Ethernet Controller Version 1.2, " #define DRV_NAME "gfar-enet" extern const char gfar_driver_name[]; extern const char gfar_driver_version[]; @@ -578,12 +579,7 @@ struct gfar { u32 hafdup; /* 0x.50c - Half Duplex Register */ u32 maxfrm; /* 0x.510 - Maximum Frame Length Register */ u8 res18[12]; - u32 miimcfg; /* 0x.520 - MII Management Configuration Register */ - u32 miimcom; /* 0x.524 - MII Management Command Register */ - u32 miimadd; /* 0x.528 - MII Management Address Register */ - u32 miimcon; /* 0x.52c - MII Management Control Register */ - u32 miimstat; /* 0x.530 - MII Management Status Register */ - u32 miimind; /* 0x.534 - MII Management Indicator Register */ + u8 gfar_mii_regs[24]; /* See gianfar_phy.h */ u8 res19[4]; u32 ifstat; /* 0x.53c - Interface Status Register */ u32 macstnaddr1; /* 0x.540 - Station Address Part 1 Register */ @@ -688,9 +684,6 @@ struct gfar_private { struct gfar *regs; /* Pointer to the GFAR memory mapped Registers */ u32 *hash_regs[16]; int hash_width; - struct gfar *phyregs; - struct work_struct tq; - struct timer_list phy_info_timer; struct net_device_stats stats; /* linux network statistics */ struct gfar_extra_stats extra_stats; spinlock_t lock; @@ -710,7 +703,8 @@ struct gfar_private { unsigned int interruptError; struct
gianfar_platform_data *einfo; - struct gfar_mii_info *mii_info; + struct phy_device *phydev; + struct mii_bus *mii_bus; int oldspeed; int oldduplex; int oldlink; @@ -732,4 +726,12 @@ extern inline void gfar_write(volatile unsigned *addr, u32 val) extern struct ethtool_ops *gfar_op_array[]; +extern irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs); +extern int startup_gfar(struct net_device *dev); +extern void stop_gfar(struct net_device *dev); +extern void gfar_halt(struct net_device *dev); +extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, + int enable, u32 regnum, u32 read); +void gfar_setup_stashing(struct net_device *dev); + #endif /* __GIANFAR_H */ diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c index a451de629197..68e3578e7613 100644 --- a/drivers/net/gianfar_ethtool.c +++ b/drivers/net/gianfar_ethtool.c @@ -39,17 +39,18 @@ #include #include #include +#include +#include #include "gianfar.h" #define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0)) -extern int startup_gfar(struct net_device *dev); -extern void stop_gfar(struct net_device *dev); -extern void gfar_halt(struct net_device *dev); extern void gfar_start(struct net_device *dev); extern int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); +#define GFAR_MAX_COAL_USECS 0xffff +#define GFAR_MAX_COAL_FRAMES 0xff static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 * buf); static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf); @@ -182,38 +183,32 @@ static void gfar_gdrvinfo(struct net_device *dev, struct drvinfo->eedump_len = 0; } + +static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct gfar_private *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + + if (NULL == phydev) + return -ENODEV; + + return phy_ethtool_sset(phydev, cmd); +} + + /* Return the current settings in the ethtool_cmd structure */ static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd) { struct gfar_private *priv = netdev_priv(dev); - uint gigabit_support = - priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? - SUPPORTED_1000baseT_Full : 0; - uint gigabit_advert = - priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? 
- ADVERTISED_1000baseT_Full: 0; + struct phy_device *phydev = priv->phydev; - cmd->supported = (SUPPORTED_10baseT_Half - | SUPPORTED_100baseT_Half - | SUPPORTED_100baseT_Full - | gigabit_support | SUPPORTED_Autoneg); - - /* For now, we always advertise everything */ - cmd->advertising = (ADVERTISED_10baseT_Half - | ADVERTISED_100baseT_Half - | ADVERTISED_100baseT_Full - | gigabit_advert | ADVERTISED_Autoneg); - - cmd->speed = priv->mii_info->speed; - cmd->duplex = priv->mii_info->duplex; - cmd->port = PORT_MII; - cmd->phy_address = priv->mii_info->mii_id; - cmd->transceiver = XCVR_EXTERNAL; - cmd->autoneg = AUTONEG_ENABLE; + if (NULL == phydev) + return -ENODEV; + cmd->maxtxpkt = priv->txcount; cmd->maxrxpkt = priv->rxcount; - return 0; + return phy_ethtool_gset(phydev, cmd); } /* Return the length of the register structure */ @@ -241,14 +236,14 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int use unsigned int count; /* The timer is different, depending on the interface speed */ - switch (priv->mii_info->speed) { - case 1000: + switch (priv->phydev->speed) { + case SPEED_1000: count = GFAR_GBIT_TIME; break; - case 100: + case SPEED_100: count = GFAR_100_TIME; break; - case 10: + case SPEED_10: default: count = GFAR_10_TIME; break; @@ -265,14 +260,14 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int tic unsigned int count; /* The timer is different, depending on the interface speed */ - switch (priv->mii_info->speed) { - case 1000: + switch (priv->phydev->speed) { + case SPEED_1000: count = GFAR_GBIT_TIME; break; - case 100: + case SPEED_100: count = GFAR_100_TIME; break; - case 10: + case SPEED_10: default: count = GFAR_10_TIME; break; @@ -292,6 +287,9 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) return -EOPNOTSUPP; + if (NULL == priv->phydev) + return -ENODEV; + cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, priv->rxtime); cvals->rx_max_coalesced_frames = priv->rxcount; @@ -348,6 +346,22 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals else priv->rxcoalescing = 1; + if (NULL == priv->phydev) + return -ENODEV; + + /* Check the bounds of the values */ + if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) { + pr_info("Coalescing is limited to %d microseconds\n", + GFAR_MAX_COAL_USECS); + return -EINVAL; + } + + if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) { + pr_info("Coalescing is limited to %d frames\n", + GFAR_MAX_COAL_FRAMES); + return -EINVAL; + } + priv->rxtime = gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs); priv->rxcount = cvals->rx_max_coalesced_frames; @@ -358,6 +372,19 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals else priv->txcoalescing = 1; + /* Check the bounds of the values */ + if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) { + pr_info("Coalescing is limited to %d microseconds\n", + GFAR_MAX_COAL_USECS); + return -EINVAL; + } + + if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) { + pr_info("Coalescing is limited to %d frames\n", + GFAR_MAX_COAL_FRAMES); + return -EINVAL; + } + priv->txtime = gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs); priv->txcount = cvals->tx_max_coalesced_frames; @@ -536,6 +563,7 @@ static void gfar_set_msglevel(struct net_device *dev, uint32_t data) struct ethtool_ops gfar_ethtool_ops = { .get_settings = gfar_gsettings, + .set_settings = gfar_ssettings, .get_drvinfo = gfar_gdrvinfo, 
.get_regs_len = gfar_reglen, .get_regs = gfar_get_regs, diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c new file mode 100644 index 000000000000..1eca1dbca7f1 --- /dev/null +++ b/drivers/net/gianfar_mii.c @@ -0,0 +1,219 @@ +/* + * drivers/net/gianfar_mii.c + * + * Gianfar Ethernet Driver -- MIIM bus implementation + * Provides Bus interface for MIIM regs + * + * Author: Andy Fleming + * Maintainer: Kumar Gala (kumar.gala@freescale.com) + * + * Copyright (c) 2002-2004 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "gianfar.h" +#include "gianfar_mii.h" + +/* Write value to the PHY at mii_id at register regnum, + * on the bus, waiting until the write is done before returning. + * All PHY configuration is done through the TSEC1 MIIM regs */ +int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) +{ + struct gfar_mii *regs = bus->priv; + + /* Set the PHY address and the register address we want to write */ + gfar_write(&regs->miimadd, (mii_id << 8) | regnum); + + /* Write out the value we want */ + gfar_write(&regs->miimcon, value); + + /* Wait for the transaction to finish */ + while (gfar_read(&regs->miimind) & MIIMIND_BUSY) + cpu_relax(); + + return 0; +} + +/* Read the bus for PHY at addr mii_id, register regnum, and + * return the value. Clears miimcom first. All PHY + * configuration has to be done through the TSEC1 MIIM regs */ +int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +{ + struct gfar_mii *regs = bus->priv; + u16 value; + + /* Set the PHY address and the register address we want to read */ + gfar_write(&regs->miimadd, (mii_id << 8) | regnum); + + /* Clear miimcom, and then initiate a read */ + gfar_write(&regs->miimcom, 0); + gfar_write(&regs->miimcom, MII_READ_COMMAND); + + /* Wait for the transaction to finish */ + while (gfar_read(&regs->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY)) + cpu_relax(); + + /* Grab the value of the register from miimstat */ + value = gfar_read(&regs->miimstat); + + return value; +} + + +/* Reset the MIIM registers, and wait for the bus to free */ +int gfar_mdio_reset(struct mii_bus *bus) +{ + struct gfar_mii *regs = bus->priv; + unsigned int timeout = PHY_INIT_TIMEOUT; + + spin_lock_bh(&bus->mdio_lock); + + /* Reset the management interface */ + gfar_write(&regs->miimcfg, MIIMCFG_RESET); + + /* Setup the MII Mgmt clock speed */ + gfar_write(&regs->miimcfg, MIIMCFG_INIT_VALUE); + + /* Wait until the bus is free */ + while ((gfar_read(&regs->miimind) & MIIMIND_BUSY) && + timeout--) + cpu_relax(); + + spin_unlock_bh(&bus->mdio_lock); + + if(timeout <= 0) { + printk(KERN_ERR "%s: The MII Bus is stuck!\n", + bus->name); + return -EBUSY; + } + + return 0; +} + + +int gfar_mdio_probe(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct gianfar_mdio_data *pdata; + struct gfar_mii *regs; + struct mii_bus *new_bus; + int err = 0; + + if (NULL == dev) + return -EINVAL; + + new_bus = kmalloc(sizeof(struct mii_bus), GFP_KERNEL); + + if (NULL == new_bus) + return -ENOMEM; + + new_bus->name = "Gianfar MII Bus", + new_bus->read = &gfar_mdio_read, + new_bus->write = &gfar_mdio_write, + new_bus->reset = &gfar_mdio_reset, + new_bus->id = pdev->id; + + pdata = (struct gianfar_mdio_data *)pdev->dev.platform_data; + + if (NULL == pdata) { + printk(KERN_ERR "gfar mdio %d: Missing platform data!\n", pdev->id); + return -ENODEV; + } + + /* Set the PHY base address */ + regs = (struct gfar_mii *) ioremap(pdata->paddr, + sizeof (struct gfar_mii)); + + if (NULL == regs) { + err = -ENOMEM; + goto reg_map_fail; + } + + new_bus->priv = regs; + + new_bus->irq = pdata->irq; + + new_bus->dev = dev; + dev_set_drvdata(dev, new_bus); + + err = mdiobus_register(new_bus); + + if (0 != err) { + printk (KERN_ERR "%s: Cannot register as MDIO bus\n", + new_bus->name); + goto bus_register_fail; + } + + return 0; + +bus_register_fail: + iounmap((void *) regs); +reg_map_fail: + kfree(new_bus); + + return err; +} + + +int gfar_mdio_remove(struct device *dev) +{ + struct mii_bus *bus = dev_get_drvdata(dev); + + mdiobus_unregister(bus); + + dev_set_drvdata(dev, NULL); + + iounmap((void *) (&bus->priv)); + bus->priv = NULL; + kfree(bus); + + return 0; +} + +static struct device_driver gianfar_mdio_driver = { + .name = "fsl-gianfar_mdio", + .bus = &platform_bus_type, + .probe = gfar_mdio_probe, + .remove = gfar_mdio_remove, +}; + +int __init gfar_mdio_init(void) +{ + return driver_register(&gianfar_mdio_driver); +} + +void __exit gfar_mdio_exit(void) +{ + driver_unregister(&gianfar_mdio_driver); +} diff --git a/drivers/net/gianfar_mii.h b/drivers/net/gianfar_mii.h new file mode 100644 index 000000000000..56e5665d5c9b --- /dev/null +++ b/drivers/net/gianfar_mii.h @@ -0,0 +1,45 @@ +/* + * drivers/net/gianfar_mii.h + * + * Gianfar Ethernet Driver -- MII Management Bus Implementation + * Driver for the MDIO bus controller in the
Gianfar register space + * + * Author: Andy Fleming + * Maintainer: Kumar Gala (kumar.gala@freescale.com) + * + * Copyright (c) 2002-2004 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ +#ifndef __GIANFAR_MII_H +#define __GIANFAR_MII_H + +#define MIIMIND_BUSY 0x00000001 +#define MIIMIND_NOTVALID 0x00000004 + +#define MII_READ_COMMAND 0x00000001 + +#define GFAR_SUPPORTED (SUPPORTED_10baseT_Half \ + | SUPPORTED_100baseT_Half \ + | SUPPORTED_100baseT_Full \ + | SUPPORTED_Autoneg \ + | SUPPORTED_MII) + +struct gfar_mii { + u32 miimcfg; /* 0x.520 - MII Management Config Register */ + u32 miimcom; /* 0x.524 - MII Management Command Register */ + u32 miimadd; /* 0x.528 - MII Management Address Register */ + u32 miimcon; /* 0x.52c - MII Management Control Register */ + u32 miimstat; /* 0x.530 - MII Management Status Register */ + u32 miimind; /* 0x.534 - MII Management Indicator Register */ +}; + +int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum); +int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value); +int __init gfar_mdio_init(void); +void __exit gfar_mdio_exit(void); +#endif /* GIANFAR_PHY_H */ diff --git a/drivers/net/gianfar_phy.c b/drivers/net/gianfar_phy.c deleted file mode 100644 index 7c965f268a82..000000000000 --- a/drivers/net/gianfar_phy.c +++ /dev/null @@ -1,661 +0,0 @@ -/* - * drivers/net/gianfar_phy.c - * - * Gianfar Ethernet Driver -- PHY handling - * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560 - * Based on 8260_io/fcc_enet.c - * - * Author: Andy Fleming - * Maintainer: Kumar Gala (kumar.gala@freescale.com) - * - * Copyright (c) 2002-2004 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "gianfar.h" -#include "gianfar_phy.h" - -static void config_genmii_advert(struct gfar_mii_info *mii_info); -static void genmii_setup_forced(struct gfar_mii_info *mii_info); -static void genmii_restart_aneg(struct gfar_mii_info *mii_info); -static int gbit_config_aneg(struct gfar_mii_info *mii_info); -static int genmii_config_aneg(struct gfar_mii_info *mii_info); -static int genmii_update_link(struct gfar_mii_info *mii_info); -static int genmii_read_status(struct gfar_mii_info *mii_info); -u16 phy_read(struct gfar_mii_info *mii_info, u16 regnum); -void phy_write(struct gfar_mii_info *mii_info, u16 regnum, u16 val); - -/* Write value to the PHY for this device to the register at regnum, */ -/* waiting until the write is done before it returns. 
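[Editor's aside, not part of the patch.] The two new files above only register a phylib MDIO bus; the MAC side consumes it through phy_connect()/phy_start()/phy_stop(), as the gianfar.c hunks earlier in this series show. A minimal consumer sketch of that pattern follows, using the 2.6.14-era phylib calls that appear in this patch; the myeth_* names, the "0:01" bus id string, and the msg_enable field are illustrative assumptions only, not code from the series.

	#include <linux/netdevice.h>
	#include <linux/phy.h>

	struct myeth_priv {			/* hypothetical private struct */
		struct phy_device *phydev;
		u32 msg_enable;
	};

	/* Called by phylib whenever link, speed or duplex changes; a real
	 * driver mirrors phydev->speed/duplex into MAC registers here,
	 * exactly as gianfar's rewritten adjust_link() does. */
	static void myeth_adjust_link(struct net_device *dev)
	{
		struct myeth_priv *priv = netdev_priv(dev);

		phy_print_status(priv->phydev);
	}

	static int myeth_init_phy(struct net_device *dev)
	{
		struct myeth_priv *priv = netdev_priv(dev);
		struct phy_device *phydev;

		/* "0:01" = MDIO bus id 0, PHY address 1 (illustrative) */
		phydev = phy_connect(dev, "0:01", &myeth_adjust_link, 0);
		if (IS_ERR(phydev))
			return PTR_ERR(phydev);

		priv->phydev = phydev;
		return 0;
	}

	/* open() then calls phy_start(priv->phydev); stop() calls
	 * phy_stop(); close() ends with phy_disconnect(). */

With that context, the deleted gianfar_phy.c below is the hand-rolled PHY state machine that phylib replaces.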
All PHY */ -/* configuration has to be done through the TSEC1 MIIM regs */ -void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value) -{ - struct gfar_private *priv = netdev_priv(dev); - struct gfar *regbase = priv->phyregs; - - /* Set the PHY address and the register address we want to write */ - gfar_write(&regbase->miimadd, (mii_id << 8) | regnum); - - /* Write out the value we want */ - gfar_write(&regbase->miimcon, value); - - /* Wait for the transaction to finish */ - while (gfar_read(&regbase->miimind) & MIIMIND_BUSY) - cpu_relax(); -} - -/* Reads from register regnum in the PHY for device dev, */ -/* returning the value. Clears miimcom first. All PHY */ -/* configuration has to be done through the TSEC1 MIIM regs */ -int read_phy_reg(struct net_device *dev, int mii_id, int regnum) -{ - struct gfar_private *priv = netdev_priv(dev); - struct gfar *regbase = priv->phyregs; - u16 value; - - /* Set the PHY address and the register address we want to read */ - gfar_write(&regbase->miimadd, (mii_id << 8) | regnum); - - /* Clear miimcom, and then initiate a read */ - gfar_write(&regbase->miimcom, 0); - gfar_write(&regbase->miimcom, MII_READ_COMMAND); - - /* Wait for the transaction to finish */ - while (gfar_read(&regbase->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY)) - cpu_relax(); - - /* Grab the value of the register from miimstat */ - value = gfar_read(&regbase->miimstat); - - return value; -} - -void mii_clear_phy_interrupt(struct gfar_mii_info *mii_info) -{ - if(mii_info->phyinfo->ack_interrupt) - mii_info->phyinfo->ack_interrupt(mii_info); -} - - -void mii_configure_phy_interrupt(struct gfar_mii_info *mii_info, u32 interrupts) -{ - mii_info->interrupts = interrupts; - if(mii_info->phyinfo->config_intr) - mii_info->phyinfo->config_intr(mii_info); -} - - -/* Writes MII_ADVERTISE with the appropriate values, after - * sanitizing advertise to make sure only supported features - * are advertised - */ -static void config_genmii_advert(struct gfar_mii_info *mii_info) -{ - u32 advertise; - u16 adv; - - /* Only allow advertising what this PHY supports */ - mii_info->advertising &= mii_info->phyinfo->features; - advertise = mii_info->advertising; - - /* Setup standard advertisement */ - adv = phy_read(mii_info, MII_ADVERTISE); - adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); - if (advertise & ADVERTISED_10baseT_Half) - adv |= ADVERTISE_10HALF; - if (advertise & ADVERTISED_10baseT_Full) - adv |= ADVERTISE_10FULL; - if (advertise & ADVERTISED_100baseT_Half) - adv |= ADVERTISE_100HALF; - if (advertise & ADVERTISED_100baseT_Full) - adv |= ADVERTISE_100FULL; - phy_write(mii_info, MII_ADVERTISE, adv); -} - -static void genmii_setup_forced(struct gfar_mii_info *mii_info) -{ - u16 ctrl; - u32 features = mii_info->phyinfo->features; - - ctrl = phy_read(mii_info, MII_BMCR); - - ctrl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPEED1000|BMCR_ANENABLE); - ctrl |= BMCR_RESET; - - switch(mii_info->speed) { - case SPEED_1000: - if(features & (SUPPORTED_1000baseT_Half - | SUPPORTED_1000baseT_Full)) { - ctrl |= BMCR_SPEED1000; - break; - } - mii_info->speed = SPEED_100; - case SPEED_100: - if (features & (SUPPORTED_100baseT_Half - | SUPPORTED_100baseT_Full)) { - ctrl |= BMCR_SPEED100; - break; - } - mii_info->speed = SPEED_10; - case SPEED_10: - if (features & (SUPPORTED_10baseT_Half - | SUPPORTED_10baseT_Full)) - break; - default: /* Unsupported speed!
*/ - printk(KERN_ERR "%s: Bad speed!\n", - mii_info->dev->name); - break; - } - - phy_write(mii_info, MII_BMCR, ctrl); -} - - -/* Enable and Restart Autonegotiation */ -static void genmii_restart_aneg(struct gfar_mii_info *mii_info) -{ - u16 ctl; - - ctl = phy_read(mii_info, MII_BMCR); - ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); - phy_write(mii_info, MII_BMCR, ctl); -} - - -static int gbit_config_aneg(struct gfar_mii_info *mii_info) -{ - u16 adv; - u32 advertise; - - if(mii_info->autoneg) { - /* Configure the ADVERTISE register */ - config_genmii_advert(mii_info); - advertise = mii_info->advertising; - - adv = phy_read(mii_info, MII_1000BASETCONTROL); - adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP | - MII_1000BASETCONTROL_HALFDUPLEXCAP); - if (advertise & SUPPORTED_1000baseT_Half) - adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP; - if (advertise & SUPPORTED_1000baseT_Full) - adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP; - phy_write(mii_info, MII_1000BASETCONTROL, adv); - - /* Start/Restart aneg */ - genmii_restart_aneg(mii_info); - } else - genmii_setup_forced(mii_info); - - return 0; -} - -static int marvell_config_aneg(struct gfar_mii_info *mii_info) -{ - /* The Marvell PHY has an errata which requires - * that certain registers get written in order - * to restart autonegotiation */ - phy_write(mii_info, MII_BMCR, BMCR_RESET); - - phy_write(mii_info, 0x1d, 0x1f); - phy_write(mii_info, 0x1e, 0x200c); - phy_write(mii_info, 0x1d, 0x5); - phy_write(mii_info, 0x1e, 0); - phy_write(mii_info, 0x1e, 0x100); - - gbit_config_aneg(mii_info); - - return 0; -} -static int genmii_config_aneg(struct gfar_mii_info *mii_info) -{ - if (mii_info->autoneg) { - config_genmii_advert(mii_info); - genmii_restart_aneg(mii_info); - } else - genmii_setup_forced(mii_info); - - return 0; -} - - -static int genmii_update_link(struct gfar_mii_info *mii_info) -{ - u16 status; - - /* Do a fake read */ - phy_read(mii_info, MII_BMSR); - - /* Read link and autonegotiation status */ - status = phy_read(mii_info, MII_BMSR); - if ((status & BMSR_LSTATUS) == 0) - mii_info->link = 0; - else - mii_info->link = 1; - - /* If we are autonegotiating, and not done, - * return an error */ - if (mii_info->autoneg && !(status & BMSR_ANEGCOMPLETE)) - return -EAGAIN; - - return 0; -} - -static int genmii_read_status(struct gfar_mii_info *mii_info) -{ - u16 status; - int err; - - /* Update the link, but return if there - * was an error */ - err = genmii_update_link(mii_info); - if (err) - return err; - - if (mii_info->autoneg) { - status = phy_read(mii_info, MII_LPA); - - if (status & (LPA_10FULL | LPA_100FULL)) - mii_info->duplex = DUPLEX_FULL; - else - mii_info->duplex = DUPLEX_HALF; - if (status & (LPA_100FULL | LPA_100HALF)) - mii_info->speed = SPEED_100; - else - mii_info->speed = SPEED_10; - mii_info->pause = 0; - } - /* On non-aneg, we assume what we put in BMCR is the speed, - * though magic-aneg shouldn't prevent this case from occurring - */ - - return 0; -} -static int marvell_read_status(struct gfar_mii_info *mii_info) -{ - u16 status; - int err; - - /* Update the link, but return if there - * was an error */ - err = genmii_update_link(mii_info); - if (err) - return err; - - /* If the link is up, read the speed and duplex */ - /* If we aren't autonegotiating, assume speeds - * are as set */ - if (mii_info->autoneg && mii_info->link) { - int speed; - status = phy_read(mii_info, MII_M1011_PHY_SPEC_STATUS); - -#if 0 - /* If speed and duplex aren't resolved, - * return an error. Isn't this handled - * by checking aneg? 
- */ - if ((status & MII_M1011_PHY_SPEC_STATUS_RESOLVED) == 0) - return -EAGAIN; -#endif - - /* Get the duplexity */ - if (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX) - mii_info->duplex = DUPLEX_FULL; - else - mii_info->duplex = DUPLEX_HALF; - - /* Get the speed */ - speed = status & MII_M1011_PHY_SPEC_STATUS_SPD_MASK; - switch(speed) { - case MII_M1011_PHY_SPEC_STATUS_1000: - mii_info->speed = SPEED_1000; - break; - case MII_M1011_PHY_SPEC_STATUS_100: - mii_info->speed = SPEED_100; - break; - default: - mii_info->speed = SPEED_10; - break; - } - mii_info->pause = 0; - } - - return 0; -} - - -static int cis820x_read_status(struct gfar_mii_info *mii_info) -{ - u16 status; - int err; - - /* Update the link, but return if there - * was an error */ - err = genmii_update_link(mii_info); - if (err) - return err; - - /* If the link is up, read the speed and duplex */ - /* If we aren't autonegotiating, assume speeds - * are as set */ - if (mii_info->autoneg && mii_info->link) { - int speed; - - status = phy_read(mii_info, MII_CIS8201_AUX_CONSTAT); - if (status & MII_CIS8201_AUXCONSTAT_DUPLEX) - mii_info->duplex = DUPLEX_FULL; - else - mii_info->duplex = DUPLEX_HALF; - - speed = status & MII_CIS8201_AUXCONSTAT_SPEED; - - switch (speed) { - case MII_CIS8201_AUXCONSTAT_GBIT: - mii_info->speed = SPEED_1000; - break; - case MII_CIS8201_AUXCONSTAT_100: - mii_info->speed = SPEED_100; - break; - default: - mii_info->speed = SPEED_10; - break; - } - } - - return 0; -} - -static int marvell_ack_interrupt(struct gfar_mii_info *mii_info) -{ - /* Clear the interrupts by reading the reg */ - phy_read(mii_info, MII_M1011_IEVENT); - - return 0; -} - -static int marvell_config_intr(struct gfar_mii_info *mii_info) -{ - if(mii_info->interrupts == MII_INTERRUPT_ENABLED) - phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_INIT); - else - phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_CLEAR); - - return 0; -} - -static int cis820x_init(struct gfar_mii_info *mii_info) -{ - phy_write(mii_info, MII_CIS8201_AUX_CONSTAT, - MII_CIS8201_AUXCONSTAT_INIT); - phy_write(mii_info, MII_CIS8201_EXT_CON1, - MII_CIS8201_EXTCON1_INIT); - - return 0; -} - -static int cis820x_ack_interrupt(struct gfar_mii_info *mii_info) -{ - phy_read(mii_info, MII_CIS8201_ISTAT); - - return 0; -} - -static int cis820x_config_intr(struct gfar_mii_info *mii_info) -{ - if(mii_info->interrupts == MII_INTERRUPT_ENABLED) - phy_write(mii_info, MII_CIS8201_IMASK, MII_CIS8201_IMASK_MASK); - else - phy_write(mii_info, MII_CIS8201_IMASK, 0); - - return 0; -} - -#define DM9161_DELAY 10 - -static int dm9161_read_status(struct gfar_mii_info *mii_info) -{ - u16 status; - int err; - - /* Update the link, but return if there - * was an error */ - err = genmii_update_link(mii_info); - if (err) - return err; - - /* If the link is up, read the speed and duplex */ - /* If we aren't autonegotiating, assume speeds - * are as set */ - if (mii_info->autoneg && mii_info->link) { - status = phy_read(mii_info, MII_DM9161_SCSR); - if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_100H)) - mii_info->speed = SPEED_100; - else - mii_info->speed = SPEED_10; - - if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_10F)) - mii_info->duplex = DUPLEX_FULL; - else - mii_info->duplex = DUPLEX_HALF; - } - - return 0; -} - - -static int dm9161_config_aneg(struct gfar_mii_info *mii_info) -{ - struct dm9161_private *priv = mii_info->priv; - - if(0 == priv->resetdone) - return -EAGAIN; - - return 0; -} - -static void dm9161_timer(unsigned long data) -{ - struct gfar_mii_info 
*mii_info = (struct gfar_mii_info *)data; - struct dm9161_private *priv = mii_info->priv; - u16 status = phy_read(mii_info, MII_BMSR); - - if (status & BMSR_ANEGCOMPLETE) { - priv->resetdone = 1; - } else - mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ); -} - -static int dm9161_init(struct gfar_mii_info *mii_info) -{ - struct dm9161_private *priv; - - /* Allocate the private data structure */ - priv = kmalloc(sizeof(struct dm9161_private), GFP_KERNEL); - - if (NULL == priv) - return -ENOMEM; - - mii_info->priv = priv; - - /* Reset is not done yet */ - priv->resetdone = 0; - - /* Isolate the PHY */ - phy_write(mii_info, MII_BMCR, BMCR_ISOLATE); - - /* Do not bypass the scrambler/descrambler */ - phy_write(mii_info, MII_DM9161_SCR, MII_DM9161_SCR_INIT); - - /* Clear 10BTCSR to default */ - phy_write(mii_info, MII_DM9161_10BTCSR, MII_DM9161_10BTCSR_INIT); - - /* Reconnect the PHY, and enable Autonegotiation */ - phy_write(mii_info, MII_BMCR, BMCR_ANENABLE); - - /* Start a timer for DM9161_DELAY seconds to wait - * for the PHY to be ready */ - init_timer(&priv->timer); - priv->timer.function = &dm9161_timer; - priv->timer.data = (unsigned long) mii_info; - mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ); - - return 0; -} - -static void dm9161_close(struct gfar_mii_info *mii_info) -{ - struct dm9161_private *priv = mii_info->priv; - - del_timer_sync(&priv->timer); - kfree(priv); -} - -#if 0 -static int dm9161_ack_interrupt(struct gfar_mii_info *mii_info) -{ - phy_read(mii_info, MII_DM9161_INTR); - - return 0; -} -#endif - -/* Cicada 820x */ -static struct phy_info phy_info_cis820x = { - 0x000fc440, - "Cicada Cis8204", - 0x000fffc0, - .features = MII_GBIT_FEATURES, - .init = &cis820x_init, - .config_aneg = &gbit_config_aneg, - .read_status = &cis820x_read_status, - .ack_interrupt = &cis820x_ack_interrupt, - .config_intr = &cis820x_config_intr, -}; - -static struct phy_info phy_info_dm9161 = { - .phy_id = 0x0181b880, - .name = "Davicom DM9161E", - .phy_id_mask = 0x0ffffff0, - .init = dm9161_init, - .config_aneg = dm9161_config_aneg, - .read_status = dm9161_read_status, - .close = dm9161_close, -}; - -static struct phy_info phy_info_marvell = { - .phy_id = 0x01410c00, - .phy_id_mask = 0xffffff00, - .name = "Marvell 88E1101/88E1111", - .features = MII_GBIT_FEATURES, - .config_aneg = &marvell_config_aneg, - .read_status = &marvell_read_status, - .ack_interrupt = &marvell_ack_interrupt, - .config_intr = &marvell_config_intr, -}; - -static struct phy_info phy_info_genmii= { - .phy_id = 0x00000000, - .phy_id_mask = 0x00000000, - .name = "Generic MII", - .features = MII_BASIC_FEATURES, - .config_aneg = genmii_config_aneg, - .read_status = genmii_read_status, -}; - -static struct phy_info *phy_info[] = { - &phy_info_cis820x, - &phy_info_marvell, - &phy_info_dm9161, - &phy_info_genmii, - NULL -}; - -u16 phy_read(struct gfar_mii_info *mii_info, u16 regnum) -{ - u16 retval; - unsigned long flags; - - spin_lock_irqsave(&mii_info->mdio_lock, flags); - retval = mii_info->mdio_read(mii_info->dev, mii_info->mii_id, regnum); - spin_unlock_irqrestore(&mii_info->mdio_lock, flags); - - return retval; -} - -void phy_write(struct gfar_mii_info *mii_info, u16 regnum, u16 val) -{ - unsigned long flags; - - spin_lock_irqsave(&mii_info->mdio_lock, flags); - mii_info->mdio_write(mii_info->dev, - mii_info->mii_id, - regnum, val); - spin_unlock_irqrestore(&mii_info->mdio_lock, flags); -} - -/* Use the PHY ID registers to determine what type of PHY is attached - * to device dev. 
return a struct phy_info structure describing that PHY - */ -struct phy_info * get_phy_info(struct gfar_mii_info *mii_info) -{ - u16 phy_reg; - u32 phy_ID; - int i; - struct phy_info *theInfo = NULL; - struct net_device *dev = mii_info->dev; - - /* Grab the bits from PHYIR1, and put them in the upper half */ - phy_reg = phy_read(mii_info, MII_PHYSID1); - phy_ID = (phy_reg & 0xffff) << 16; - - /* Grab the bits from PHYIR2, and put them in the lower half */ - phy_reg = phy_read(mii_info, MII_PHYSID2); - phy_ID |= (phy_reg & 0xffff); - - /* loop through all the known PHY types, and find one that */ - /* matches the ID we read from the PHY. */ - for (i = 0; phy_info[i]; i++) - if (phy_info[i]->phy_id == - (phy_ID & phy_info[i]->phy_id_mask)) { - theInfo = phy_info[i]; - break; - } - - /* This shouldn't happen, as we have generic PHY support */ - if (theInfo == NULL) { - printk("%s: PHY id %x is not supported!\n", dev->name, phy_ID); - return NULL; - } else { - printk("%s: PHY is %s (%x)\n", dev->name, theInfo->name, - phy_ID); - } - - return theInfo; -} diff --git a/drivers/net/gianfar_phy.h b/drivers/net/gianfar_phy.h deleted file mode 100644 index 1e9b3abf1e6d..000000000000 --- a/drivers/net/gianfar_phy.h +++ /dev/null @@ -1,213 +0,0 @@ -/* - * drivers/net/gianfar_phy.h - * - * Gianfar Ethernet Driver -- PHY handling - * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560 - * Based on 8260_io/fcc_enet.c - * - * Author: Andy Fleming - * Maintainer: Kumar Gala (kumar.gala@freescale.com) - * - * Copyright (c) 2002-2004 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- * - */ -#ifndef __GIANFAR_PHY_H -#define __GIANFAR_PHY_H - -#define MII_end ((u32)-2) -#define MII_read ((u32)-1) - -#define MIIMIND_BUSY 0x00000001 -#define MIIMIND_NOTVALID 0x00000004 - -#define GFAR_AN_TIMEOUT 2000 - -/* 1000BT control (Marvell & BCM54xx at least) */ -#define MII_1000BASETCONTROL 0x09 -#define MII_1000BASETCONTROL_FULLDUPLEXCAP 0x0200 -#define MII_1000BASETCONTROL_HALFDUPLEXCAP 0x0100 - -/* Cicada Extended Control Register 1 */ -#define MII_CIS8201_EXT_CON1 0x17 -#define MII_CIS8201_EXTCON1_INIT 0x0000 - -/* Cicada Interrupt Mask Register */ -#define MII_CIS8201_IMASK 0x19 -#define MII_CIS8201_IMASK_IEN 0x8000 -#define MII_CIS8201_IMASK_SPEED 0x4000 -#define MII_CIS8201_IMASK_LINK 0x2000 -#define MII_CIS8201_IMASK_DUPLEX 0x1000 -#define MII_CIS8201_IMASK_MASK 0xf000 - -/* Cicada Interrupt Status Register */ -#define MII_CIS8201_ISTAT 0x1a -#define MII_CIS8201_ISTAT_STATUS 0x8000 -#define MII_CIS8201_ISTAT_SPEED 0x4000 -#define MII_CIS8201_ISTAT_LINK 0x2000 -#define MII_CIS8201_ISTAT_DUPLEX 0x1000 - -/* Cicada Auxiliary Control/Status Register */ -#define MII_CIS8201_AUX_CONSTAT 0x1c -#define MII_CIS8201_AUXCONSTAT_INIT 0x0004 -#define MII_CIS8201_AUXCONSTAT_DUPLEX 0x0020 -#define MII_CIS8201_AUXCONSTAT_SPEED 0x0018 -#define MII_CIS8201_AUXCONSTAT_GBIT 0x0010 -#define MII_CIS8201_AUXCONSTAT_100 0x0008 - -/* 88E1011 PHY Status Register */ -#define MII_M1011_PHY_SPEC_STATUS 0x11 -#define MII_M1011_PHY_SPEC_STATUS_1000 0x8000 -#define MII_M1011_PHY_SPEC_STATUS_100 0x4000 -#define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000 -#define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000 -#define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800 -#define MII_M1011_PHY_SPEC_STATUS_LINK 0x0400 - -#define MII_M1011_IEVENT 0x13 -#define MII_M1011_IEVENT_CLEAR 0x0000 - -#define MII_M1011_IMASK 0x12 -#define MII_M1011_IMASK_INIT 0x6400 -#define MII_M1011_IMASK_CLEAR 0x0000 - -#define MII_DM9161_SCR 0x10 -#define MII_DM9161_SCR_INIT 0x0610 - -/* DM9161 Specified Configuration and Status Register */ -#define MII_DM9161_SCSR 0x11 -#define MII_DM9161_SCSR_100F 0x8000 -#define MII_DM9161_SCSR_100H 0x4000 -#define MII_DM9161_SCSR_10F 0x2000 -#define MII_DM9161_SCSR_10H 0x1000 - -/* DM9161 Interrupt Register */ -#define MII_DM9161_INTR 0x15 -#define MII_DM9161_INTR_PEND 0x8000 -#define MII_DM9161_INTR_DPLX_MASK 0x0800 -#define MII_DM9161_INTR_SPD_MASK 0x0400 -#define MII_DM9161_INTR_LINK_MASK 0x0200 -#define MII_DM9161_INTR_MASK 0x0100 -#define MII_DM9161_INTR_DPLX_CHANGE 0x0010 -#define MII_DM9161_INTR_SPD_CHANGE 0x0008 -#define MII_DM9161_INTR_LINK_CHANGE 0x0004 -#define MII_DM9161_INTR_INIT 0x0000 -#define MII_DM9161_INTR_STOP \ -(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \ - | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK) - -/* DM9161 10BT Configuration/Status */ -#define MII_DM9161_10BTCSR 0x12 -#define MII_DM9161_10BTCSR_INIT 0x7800 - -#define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | \ - SUPPORTED_10baseT_Full | \ - SUPPORTED_100baseT_Half | \ - SUPPORTED_100baseT_Full | \ - SUPPORTED_Autoneg | \ - SUPPORTED_TP | \ - SUPPORTED_MII) - -#define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \ - SUPPORTED_1000baseT_Half | \ - SUPPORTED_1000baseT_Full) - -#define MII_READ_COMMAND 0x00000001 - -#define MII_INTERRUPT_DISABLED 0x0 -#define MII_INTERRUPT_ENABLED 0x1 -/* Taken from mii_if_info and sungem_phy.h */ -struct gfar_mii_info { - /* Information about the PHY type */ - /* And management functions */ - struct phy_info *phyinfo; - - /* forced speed & duplex (no autoneg) - * partner speed & 
duplex & pause (autoneg) - */ - int speed; - int duplex; - int pause; - - /* The most recently read link state */ - int link; - - /* Enabled Interrupts */ - u32 interrupts; - - u32 advertising; - int autoneg; - int mii_id; - - /* private data pointer */ - /* For use by PHYs to maintain extra state */ - void *priv; - - /* Provided by host chip */ - struct net_device *dev; - - /* A lock to ensure that only one thing can read/write - * the MDIO bus at a time */ - spinlock_t mdio_lock; - - /* Provided by ethernet driver */ - int (*mdio_read) (struct net_device *dev, int mii_id, int reg); - void (*mdio_write) (struct net_device *dev, int mii_id, int reg, int val); -}; - -/* struct phy_info: a structure which defines attributes for a PHY - * - * id will contain a number which represents the PHY. During - * startup, the driver will poll the PHY to find out what its - * UID--as defined by registers 2 and 3--is. The 32-bit result - * gotten from the PHY will be ANDed with phy_id_mask to - * discard any bits which may change based on revision numbers - * unimportant to functionality - * - * There are 6 commands which take a gfar_mii_info structure. - * Each PHY must declare config_aneg, and read_status. - */ -struct phy_info { - u32 phy_id; - char *name; - unsigned int phy_id_mask; - u32 features; - - /* Called to initialize the PHY */ - int (*init)(struct gfar_mii_info *mii_info); - - /* Called to suspend the PHY for power */ - int (*suspend)(struct gfar_mii_info *mii_info); - - /* Reconfigures autonegotiation (or disables it) */ - int (*config_aneg)(struct gfar_mii_info *mii_info); - - /* Determines the negotiated speed and duplex */ - int (*read_status)(struct gfar_mii_info *mii_info); - - /* Clears any pending interrupts */ - int (*ack_interrupt)(struct gfar_mii_info *mii_info); - - /* Enables or disables interrupts */ - int (*config_intr)(struct gfar_mii_info *mii_info); - - /* Clears up any memory if needed */ - void (*close)(struct gfar_mii_info *mii_info); -}; - -struct phy_info *get_phy_info(struct gfar_mii_info *mii_info); -int read_phy_reg(struct net_device *dev, int mii_id, int regnum); -void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value); -void mii_clear_phy_interrupt(struct gfar_mii_info *mii_info); -void mii_configure_phy_interrupt(struct gfar_mii_info *mii_info, u32 interrupts); - -struct dm9161_private { - struct timer_list timer; - int resetdone; -}; - -#endif /* GIANFAR_PHY_H */ diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig index de087cd609d9..896aa02000d7 100644 --- a/drivers/net/hamradio/Kconfig +++ b/drivers/net/hamradio/Kconfig @@ -1,6 +1,7 @@ config MKISS tristate "Serial port KISS driver" depends on AX25 + select CRC16 ---help--- KISS is a protocol used for the exchange of data between a computer and a Terminal Node Controller (a small embedded system commonly diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 1756f0ed54cc..cb43a9d28774 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -144,7 +144,7 @@ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev) { struct bpqdev *bpq; - list_for_each_entry(bpq, &bpq_devices, bpq_list) { + list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list) { if (bpq->ethdev == dev) return bpq->axdev; } @@ -399,7 +399,7 @@ static void *bpq_seq_start(struct seq_file *seq, loff_t *pos) if (*pos == 0) return SEQ_START_TOKEN; - list_for_each_entry(bpqdev, &bpq_devices, bpq_list) { + 
list_for_each_entry_rcu(bpqdev, &bpq_devices, bpq_list) { if (i == *pos) return bpqdev; } @@ -418,7 +418,7 @@ static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos) p = ((struct bpqdev *)v)->bpq_list.next; return (p == &bpq_devices) ? NULL - : list_entry(p, struct bpqdev, bpq_list); + : rcu_dereference(list_entry(p, struct bpqdev, bpq_list)); } static void bpq_seq_stop(struct seq_file *seq, void *v) @@ -561,8 +561,6 @@ static int bpq_device_event(struct notifier_block *this,unsigned long event, voi if (!dev_is_ethdev(dev)) return NOTIFY_DONE; - rcu_read_lock(); - switch (event) { case NETDEV_UP: /* new ethernet device -> new BPQ interface */ if (bpq_get_ax25_dev(dev) == NULL) @@ -581,7 +579,6 @@ static int bpq_device_event(struct notifier_block *this,unsigned long event, voi default: break; } - rcu_read_unlock(); return NOTIFY_DONE; } diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index d9fe64b46f4b..85d6dc005be0 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -14,13 +14,14 @@ * * Copyright (C) Hans Alblas PE1AYX * Copyright (C) 2004, 05 Ralf Baechle DL5RB + * Copyright (C) 2004, 05 Thomas Osterried DL9SAU */ - #include #include #include #include #include +#include #include #include #include @@ -39,11 +40,6 @@ #include -#ifdef CONFIG_INET -#include -#include -#endif - #define AX_MTU 236 /* SLIP/KISS protocol characters. */ @@ -80,9 +76,13 @@ struct mkiss { int mode; int crcmode; /* MW: for FlexNet, SMACK etc. */ -#define CRC_MODE_NONE 0 -#define CRC_MODE_FLEX 1 -#define CRC_MODE_SMACK 2 + int crcauto; /* CRC auto mode */ + +#define CRC_MODE_NONE 0 +#define CRC_MODE_FLEX 1 +#define CRC_MODE_SMACK 2 +#define CRC_MODE_FLEX_TEST 3 +#define CRC_MODE_SMACK_TEST 4 atomic_t refcnt; struct semaphore dead_sem; @@ -151,6 +151,21 @@ static int check_crc_flex(unsigned char *cp, int size) return 0; } +static int check_crc_16(unsigned char *cp, int size) +{ + unsigned short crc = 0x0000; + + if (size < 3) + return -1; + + crc = crc16(0, cp, size); + + if (crc != 0x0000) + return -1; + + return 0; +} + /* * Standard encapsulation */ @@ -237,19 +252,42 @@ static void ax_bump(struct mkiss *ax) spin_lock_bh(&ax->buflock); if (ax->rbuff[0] > 0x0f) { - if (ax->rbuff[0] & 0x20) { - ax->crcmode = CRC_MODE_FLEX; - if (check_crc_flex(ax->rbuff, ax->rcount) < 0) { - ax->stats.rx_errors++; + if (ax->rbuff[0] & 0x80) { + if (check_crc_16(ax->rbuff, ax->rcount) < 0) { + ax->stats.rx_errors++; + spin_unlock_bh(&ax->buflock); + return; } + if (ax->crcmode != CRC_MODE_SMACK && ax->crcauto) { + printk(KERN_INFO + "mkiss: %s: Switchting to crc-smack\n", + ax->dev->name); + ax->crcmode = CRC_MODE_SMACK; + } ax->rcount -= 2; - /* dl9sau bugfix: the trailling two bytes flexnet crc - * will not be passed to the kernel. thus we have - * to correct the kissparm signature, because it - * indicates a crc but there's none + *ax->rbuff &= ~0x80; + } else if (ax->rbuff[0] & 0x20) { + if (check_crc_flex(ax->rbuff, ax->rcount) < 0) { + ax->stats.rx_errors++; + spin_unlock_bh(&ax->buflock); + return; + } + if (ax->crcmode != CRC_MODE_FLEX && ax->crcauto) { + printk(KERN_INFO + "mkiss: %s: Switchting to crc-flexnet\n", + ax->dev->name); + ax->crcmode = CRC_MODE_FLEX; + } + ax->rcount -= 2; + + /* + * dl9sau bugfix: the trailling two bytes flexnet crc + * will not be passed to the kernel. 
thus we have to + * correct the kissparm signature, because it indicates + * a crc but there's none */ - *ax->rbuff &= ~0x20; + *ax->rbuff &= ~0x20; } } spin_unlock_bh(&ax->buflock); @@ -417,20 +455,69 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len) p = icp; spin_lock_bh(&ax->buflock); - switch (ax->crcmode) { - unsigned short crc; + if ((*p & 0x0f) != 0) { + /* Configuration Command (kissparms(1). + * Protocol spec says: never append CRC. + * This fixes a very old bug in the linux + * kiss driver. -- dl9sau */ + switch (*p & 0xff) { + case 0x85: + /* command from userspace especially for us, + * not for delivery to the tnc */ + if (len > 1) { + int cmd = (p[1] & 0xff); + switch(cmd) { + case 3: + ax->crcmode = CRC_MODE_SMACK; + break; + case 2: + ax->crcmode = CRC_MODE_FLEX; + break; + case 1: + ax->crcmode = CRC_MODE_NONE; + break; + case 0: + default: + ax->crcmode = CRC_MODE_SMACK_TEST; + cmd = 0; + } + ax->crcauto = (cmd ? 0 : 1); + printk(KERN_INFO "mkiss: %s: crc mode %s %d\n", ax->dev->name, (len) ? "set to" : "is", cmd); + } + spin_unlock_bh(&ax->buflock); + netif_start_queue(dev); - case CRC_MODE_FLEX: - *p |= 0x20; - crc = calc_crc_flex(p, len); - count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len+2); - break; + return; + default: + count = kiss_esc(p, (unsigned char *)ax->xbuff, len); + } + } else { + unsigned short crc; + switch (ax->crcmode) { + case CRC_MODE_SMACK_TEST: + ax->crcmode = CRC_MODE_FLEX_TEST; + printk(KERN_INFO "mkiss: %s: Trying crc-smack\n", ax->dev->name); + // fall through + case CRC_MODE_SMACK: + *p |= 0x80; + crc = swab16(crc16(0, p, len)); + count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len+2); + break; + case CRC_MODE_FLEX_TEST: + ax->crcmode = CRC_MODE_NONE; + printk(KERN_INFO "mkiss: %s: Trying crc-flexnet\n", ax->dev->name); + // fall through + case CRC_MODE_FLEX: + *p |= 0x20; + crc = calc_crc_flex(p, len); + count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len+2); + break; + + default: + count = kiss_esc(p, (unsigned char *)ax->xbuff, len); + } + } - default: - count = kiss_esc(p, (unsigned char *)ax->xbuff, len); - break; - } - set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags); actual = ax->tty->driver->write(ax->tty, ax->xbuff, count); ax->stats.tx_packets++; @@ -439,8 +526,6 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len) ax->dev->trans_start = jiffies; ax->xleft = count - actual; ax->xhead = ax->xbuff + actual; - - spin_unlock_bh(&ax->buflock); } /* Encapsulate an AX.25 packet and kick it into a TTY queue. */ @@ -622,7 +707,7 @@ static void ax_setup(struct net_device *dev) * best way to fix this is to use a rwlock in the tty struct, but for now we * use a single global rwlock for all ttys in ppp line discipline. 
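The SMACK path added above leans on a standard property of the CRC-16 in lib/crc16.c: if the transmitter appends the CRC of the frame low byte first (which the swab16() in ax_encaps() appears to arrange for kiss_esc_crc(), though that helper is outside this hunk), then running crc16() over the frame plus its trailing CRC yields 0x0000, which is exactly the test check_crc_16() performs. A minimal userspace sketch of that round trip, using a bit-wise CRC-16/ARC routine assumed to match lib/crc16.c and an invented sample frame:

#include <assert.h>

/* CRC-16/ARC: poly 0xA001 (reflected 0x8005), init 0, no final xor;
 * assumed equivalent to the kernel's crc16(). */
static unsigned short crc16_arc(unsigned short crc, const unsigned char *p, int len)
{
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
	}
	return crc;
}

int main(void)
{
	/* hypothetical SMACK frame: command byte with bit 0x80 set, 3 data bytes */
	unsigned char frame[6] = { 0x80, 0x12, 0x34, 0x56 };
	int len = 4;
	unsigned short crc = crc16_arc(0, frame, len);

	frame[len] = crc & 0xff;	/* CRC appended low byte first */
	frame[len + 1] = crc >> 8;

	/* receive side, as in check_crc_16(): CRC over data + CRC must be 0 */
	assert(crc16_arc(0, frame, len + 2) == 0);
	return 0;
}

The mode numbering is the same throughout the patch: 0 selects auto-detection, 1 none, 2 FlexNet, 3 SMACK, whether it arrives through the new 0x85 configuration frame above or through the crc_force module parameter introduced further down.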
*/ -static rwlock_t disc_data_lock = RW_LOCK_UNLOCKED; +static DEFINE_RWLOCK(disc_data_lock); static struct mkiss *mkiss_get(struct tty_struct *tty) { @@ -643,6 +728,8 @@ static void mkiss_put(struct mkiss *ax) up(&ax->dead_sem); } +static int crc_force = 0; /* Can be overridden with insmod */ + static int mkiss_open(struct tty_struct *tty) { struct net_device *dev; @@ -682,6 +769,33 @@ static int mkiss_open(struct tty_struct *tty) if (register_netdev(dev)) goto out_free_buffers; + /* after register_netdev() - because else printk smashes the kernel */ + switch (crc_force) { + case 3: + ax->crcmode = CRC_MODE_SMACK; + printk(KERN_INFO "mkiss: %s: crc mode smack forced.\n", + ax->dev->name); + break; + case 2: + ax->crcmode = CRC_MODE_FLEX; + printk(KERN_INFO "mkiss: %s: crc mode flexnet forced.\n", + ax->dev->name); + break; + case 1: + ax->crcmode = CRC_MODE_NONE; + printk(KERN_INFO "mkiss: %s: crc mode disabled.\n", + ax->dev->name); + break; + case 0: + /* fall through */ + default: + crc_force = 0; + printk(KERN_INFO "mkiss: %s: crc mode is auto.\n", + ax->dev->name); + ax->crcmode = CRC_MODE_SMACK_TEST; + } + ax->crcauto = (crc_force ? 0 : 1); + netif_start_queue(dev); /* Done. We have linked the TTY line to a channel. */ @@ -765,7 +879,6 @@ static int mkiss_ioctl(struct tty_struct *tty, struct file *file, case SIOCSIFHWADDR: { char addr[AX25_ADDR_LEN]; -printk(KERN_INFO "In SIOCSIFHWADDR"); if (copy_from_user(&addr, (void __user *) arg, AX25_ADDR_LEN)) { @@ -864,6 +977,7 @@ out: } static struct tty_ldisc ax_ldisc = { + .owner = THIS_MODULE, .magic = TTY_LDISC_MAGIC, .name = "mkiss", .open = mkiss_open, @@ -904,6 +1018,8 @@ static void __exit mkiss_exit_driver(void) MODULE_AUTHOR("Ralf Baechle DL5RB "); MODULE_DESCRIPTION("KISS driver for AX.25 over TTYs"); +MODULE_PARM(crc_force, "i"); +MODULE_PARM_DESC(crc_force, "crc [0 = auto | 1 = none | 2 = flexnet | 3 = smack]"); MODULE_LICENSE("GPL"); MODULE_ALIAS_LDISC(N_AX25); diff --git a/drivers/net/hamradio/mkiss.h b/drivers/net/hamradio/mkiss.h deleted file mode 100644 index 4ab700478598..000000000000 --- a/drivers/net/hamradio/mkiss.h +++ /dev/null @@ -1,62 +0,0 @@ -/**************************************************************************** - * Defines for the Multi-KISS driver. - ****************************************************************************/ - -#define AX25_MAXDEV 16 /* MAX number of AX25 channels; - This can be overridden with - insmod -oax25_maxdev=nnn */ -#define AX_MTU 236 - -/* SLIP/KISS protocol characters. */ -#define END 0300 /* indicates end of frame */ -#define ESC 0333 /* indicates byte stuffing */ -#define ESC_END 0334 /* ESC ESC_END means END 'data' */ -#define ESC_ESC 0335 /* ESC ESC_ESC means ESC 'data' */ - -struct ax_disp { - int magic; - - /* Various fields. */ - struct tty_struct *tty; /* ptr to TTY structure */ - struct net_device *dev; /* easy for intr handling */ - - /* These are pointers to the malloc()ed frame buffers. */ - unsigned char *rbuff; /* receiver buffer */ - int rcount; /* received chars counter */ - unsigned char *xbuff; /* transmitter buffer */ - unsigned char *xhead; /* pointer to next byte to XMIT */ - int xleft; /* bytes left in XMIT queue */ - - /* SLIP interface statistics. */ - unsigned long rx_packets; /* inbound frames counter */ - unsigned long tx_packets; /* outbound frames counter */ - unsigned long rx_bytes; /* inbound bytes counter */ - unsigned long tx_bytes; /* outbound bytes counter */ - unsigned long rx_errors; /* Parity, etc. 
errors */ - unsigned long tx_errors; /* Planned stuff */ - unsigned long rx_dropped; /* No memory for skb */ - unsigned long tx_dropped; /* When MTU change */ - unsigned long rx_over_errors; /* Frame bigger then SLIP buf. */ - - /* Detailed SLIP statistics. */ - int mtu; /* Our mtu (to spot changes!) */ - int buffsize; /* Max buffers sizes */ - - - unsigned long flags; /* Flag values/ mode etc */ - /* long req'd: used by set_bit --RR */ -#define AXF_INUSE 0 /* Channel in use */ -#define AXF_ESCAPE 1 /* ESC received */ -#define AXF_ERROR 2 /* Parity, etc. error */ -#define AXF_KEEPTEST 3 /* Keepalive test flag */ -#define AXF_OUTWAIT 4 /* is outpacket was flag */ - - int mode; - int crcmode; /* MW: for FlexNet, SMACK etc. */ -#define CRC_MODE_NONE 0 -#define CRC_MODE_FLEX 1 -#define CRC_MODE_SMACK 2 - spinlock_t buflock; /* lock for rbuf and xbuf */ -}; - -#define AX25_MAGIC 0x5316 diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c index cf0ac6fda1a1..b71fab6e34f4 100644 --- a/drivers/net/hp100.c +++ b/drivers/net/hp100.c @@ -2517,10 +2517,8 @@ static int hp100_down_vg_link(struct net_device *dev) do { if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST) break; - if (!in_interrupt()) { - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(1); - } + if (!in_interrupt()) + schedule_timeout_interruptible(1); } while (time_after(time, jiffies)); if (time_after_eq(jiffies, time)) /* no signal->no logout */ @@ -2536,10 +2534,8 @@ static int hp100_down_vg_link(struct net_device *dev) do { if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST)) break; - if (!in_interrupt()) { - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(1); - } + if (!in_interrupt()) + schedule_timeout_interruptible(1); } while (time_after(time, jiffies)); #ifdef HP100_DEBUG @@ -2577,10 +2573,8 @@ static int hp100_down_vg_link(struct net_device *dev) do { if (!(hp100_inb(MAC_CFG_4) & HP100_MAC_SEL_ST)) break; - if (!in_interrupt()) { - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(1); - } + if (!in_interrupt()) + schedule_timeout_interruptible(1); } while (time_after(time, jiffies)); hp100_orb(HP100_AUTO_MODE, MAC_CFG_3); /* Autosel back on */ @@ -2591,10 +2585,8 @@ static int hp100_down_vg_link(struct net_device *dev) do { if ((hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST) == 0) break; - if (!in_interrupt()) { - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(1); - } + if (!in_interrupt()) + schedule_timeout_interruptible(1); } while (time_after(time, jiffies)); if (time_before_eq(time, jiffies)) { @@ -2606,10 +2598,8 @@ static int hp100_down_vg_link(struct net_device *dev) time = jiffies + (2 * HZ); /* This seems to take a while.... 
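A recurring change in this series (hp100 here, stir4200, ixgb and ns83820 later) folds the open-coded set_current_state(TASK_INTERRUPTIBLE) / schedule_timeout() pair into schedule_timeout_interruptible(). As a reminder of why this is a pure simplification rather than a behaviour change, here is a sketch of what the helper boils down to (mirroring the kernel helper, shown only for illustration):

/* illustrative definition of the helper used by the converted hunks */
signed long schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}

schedule_timeout_uninterruptible(), used in the ns83820 hunk below, is the TASK_UNINTERRUPTIBLE counterpart.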
*/ do { - if (!in_interrupt()) { - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(1); - } + if (!in_interrupt()) + schedule_timeout_interruptible(1); } while (time_after(time, jiffies)); return 0; @@ -2659,10 +2649,8 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin) do { if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST)) break; - if (!in_interrupt()) { - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(1); - } + if (!in_interrupt()) + schedule_timeout_interruptible(1); } while (time_after(time, jiffies)); /* Start an addressed training and optionally request promiscuous port */ @@ -2697,10 +2685,8 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin) do { if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST) break; - if (!in_interrupt()) { - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(1); - } + if (!in_interrupt()) + schedule_timeout_interruptible(1); } while (time_before(jiffies, time)); if (time_after_eq(jiffies, time)) { @@ -2723,10 +2709,8 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin) #endif break; } - if (!in_interrupt()) { - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(1); - } + if (!in_interrupt()) + schedule_timeout_interruptible(1); } while (time_after(time, jiffies)); } diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c index 0de3bb906174..14e9b6315f20 100644 --- a/drivers/net/ibm_emac/ibm_emac_core.c +++ b/drivers/net/ibm_emac/ibm_emac_core.c @@ -1875,6 +1875,9 @@ static int emac_init_device(struct ocp_device *ocpdev, struct ibm_ocp_mal *mal) rc = -ENODEV; goto bail; } + + /* Disable any PHY features not supported by the platform */ + ep->phy_mii.def->features &= ~emacdata->phy_feat_exc; /* Setup initial PHY config & startup aneg */ if (ep->phy_mii.def->ops->init) @@ -1882,6 +1885,34 @@ static int emac_init_device(struct ocp_device *ocpdev, struct ibm_ocp_mal *mal) netif_carrier_off(ndev); if (ep->phy_mii.def->features & SUPPORTED_Autoneg) ep->want_autoneg = 1; + else { + ep->want_autoneg = 0; + + /* Select highest supported speed/duplex */ + if (ep->phy_mii.def->features & SUPPORTED_1000baseT_Full) { + ep->phy_mii.speed = SPEED_1000; + ep->phy_mii.duplex = DUPLEX_FULL; + } else if (ep->phy_mii.def->features & + SUPPORTED_1000baseT_Half) { + ep->phy_mii.speed = SPEED_1000; + ep->phy_mii.duplex = DUPLEX_HALF; + } else if (ep->phy_mii.def->features & + SUPPORTED_100baseT_Full) { + ep->phy_mii.speed = SPEED_100; + ep->phy_mii.duplex = DUPLEX_FULL; + } else if (ep->phy_mii.def->features & + SUPPORTED_100baseT_Half) { + ep->phy_mii.speed = SPEED_100; + ep->phy_mii.duplex = DUPLEX_HALF; + } else if (ep->phy_mii.def->features & + SUPPORTED_10baseT_Full) { + ep->phy_mii.speed = SPEED_10; + ep->phy_mii.duplex = DUPLEX_FULL; + } else { + ep->phy_mii.speed = SPEED_10; + ep->phy_mii.duplex = DUPLEX_HALF; + } + } emac_start_link(ep, NULL); /* read the MAC Address */ diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index 32d5fabd4b10..a2c4dd4fb221 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c @@ -99,7 +99,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter*); #ifdef CONFIG_PROC_FS -#define IBMVETH_PROC_DIR "ibmveth" +#define IBMVETH_PROC_DIR "net/ibmveth" static struct proc_dir_entry *ibmveth_proc_dir; #endif @@ -1010,7 +1010,7 @@ static int __devexit ibmveth_remove(struct vio_dev 
*dev) #ifdef CONFIG_PROC_FS static void ibmveth_proc_register_driver(void) { - ibmveth_proc_dir = create_proc_entry(IBMVETH_PROC_DIR, S_IFDIR, proc_net); + ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, NULL); if (ibmveth_proc_dir) { SET_MODULE_OWNER(ibmveth_proc_dir); } @@ -1018,7 +1018,7 @@ static void ibmveth_proc_register_driver(void) static void ibmveth_proc_unregister_driver(void) { - remove_proc_entry(IBMVETH_PROC_DIR, proc_net); + remove_proc_entry(IBMVETH_PROC_DIR, NULL); } static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos) diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c index 15f207323d97..3961a754e920 100644 --- a/drivers/net/irda/stir4200.c +++ b/drivers/net/irda/stir4200.c @@ -678,10 +678,9 @@ static void turnaround_delay(const struct stir_cb *stir, long us) return; ticks = us / (1000000 / HZ); - if (ticks > 0) { - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(1 + ticks); - } else + if (ticks > 0) + schedule_timeout_interruptible(1 + ticks); + else udelay(us); } diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c index 6d9de626c967..651c5a6578fd 100644 --- a/drivers/net/irda/vlsi_ir.c +++ b/drivers/net/irda/vlsi_ir.c @@ -1875,11 +1875,11 @@ static int __init vlsi_mod_init(void) sirpulse = !!sirpulse; - /* create_proc_entry returns NULL if !CONFIG_PROC_FS. + /* proc_mkdir returns NULL if !CONFIG_PROC_FS. * Failure to create the procfs entry is handled like running * without procfs - it's not required for the driver to work. */ - vlsi_proc_root = create_proc_entry(PROC_DIR, S_IFDIR, NULL); + vlsi_proc_root = proc_mkdir(PROC_DIR, NULL); if (vlsi_proc_root) { /* protect registered procdir against module removal. * Because we are in the module init path there's no race diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c index 9d026ed77ddd..04e47189d830 100644 --- a/drivers/net/ixgb/ixgb_ethtool.c +++ b/drivers/net/ixgb/ixgb_ethtool.c @@ -645,11 +645,10 @@ ixgb_phys_id(struct net_device *netdev, uint32_t data) mod_timer(&adapter->blink_timer, jiffies); - set_current_state(TASK_INTERRUPTIBLE); - if(data) - schedule_timeout(data * HZ); + if (data) + schedule_timeout_interruptible(data * HZ); else - schedule_timeout(MAX_SCHEDULE_TIMEOUT); + schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT); del_timer_sync(&adapter->blink_timer); ixgb_led_off(&adapter->hw); @@ -723,6 +722,7 @@ struct ethtool_ops ixgb_ethtool_ops = { .phys_id = ixgb_phys_id, .get_stats_count = ixgb_get_stats_count, .get_ethtool_stats = ixgb_get_ethtool_stats, + .get_perm_addr = ethtool_op_get_perm_addr, }; void ixgb_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 89d6d69be382..176680cb153e 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -460,8 +460,9 @@ ixgb_probe(struct pci_dev *pdev, } ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr); + memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); - if(!is_valid_ether_addr(netdev->dev_addr)) { + if(!is_valid_ether_addr(netdev->perm_addr)) { err = -EIO; goto err_eeprom; } diff --git a/drivers/net/lance.c b/drivers/net/lance.c index b4929beb33b2..1d75ca0bb939 100644 --- a/drivers/net/lance.c +++ b/drivers/net/lance.c @@ -298,7 +298,7 @@ enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_ static unsigned char lance_need_isa_bounce_buffers = 1; static int lance_open(struct net_device *dev); -static void lance_init_ring(struct net_device 
*dev, int mode); +static void lance_init_ring(struct net_device *dev, gfp_t mode); static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev); static int lance_rx(struct net_device *dev); static irqreturn_t lance_interrupt(int irq, void *dev_id, struct pt_regs *regs); @@ -846,7 +846,7 @@ lance_purge_ring(struct net_device *dev) /* Initialize the LANCE Rx and Tx rings. */ static void -lance_init_ring(struct net_device *dev, int gfp) +lance_init_ring(struct net_device *dev, gfp_t gfp) { struct lance_private *lp = dev->priv; int i; diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c index 27f0d8ac4c40..309d254842cf 100644 --- a/drivers/net/lne390.c +++ b/drivers/net/lne390.c @@ -298,7 +298,7 @@ static int __init lne390_probe1(struct net_device *dev, int ioaddr) return 0; unmap: if (ei_status.reg0) - iounmap((void *)dev->mem_start); + iounmap(ei_status.mem); cleanup: free_irq(dev->irq, dev); return ret; diff --git a/drivers/net/mii.c b/drivers/net/mii.c index c33cb3dc942b..e42aa797f08b 100644 --- a/drivers/net/mii.c +++ b/drivers/net/mii.c @@ -207,6 +207,20 @@ int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) return 0; } +int mii_check_gmii_support(struct mii_if_info *mii) +{ + int reg; + + reg = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); + if (reg & BMSR_ESTATEN) { + reg = mii->mdio_read(mii->dev, mii->phy_id, MII_ESTATUS); + if (reg & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF)) + return 1; + } + + return 0; +} + int mii_link_ok (struct mii_if_info *mii) { /* first, a dummy read, needed to latch some MII phys */ @@ -394,5 +408,6 @@ EXPORT_SYMBOL(mii_ethtool_gset); EXPORT_SYMBOL(mii_ethtool_sset); EXPORT_SYMBOL(mii_check_link); EXPORT_SYMBOL(mii_check_media); +EXPORT_SYMBOL(mii_check_gmii_support); EXPORT_SYMBOL(generic_mii_ioctl); diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c new file mode 100644 index 000000000000..f79f7ee72ab8 --- /dev/null +++ b/drivers/net/mipsnet.c @@ -0,0 +1,371 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ + +#define DEBUG + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mipsnet.h" /* actual device IO mapping */ + +#define MIPSNET_VERSION "2005-06-20" + +#define mipsnet_reg_address(dev, field) (dev->base_addr + field_offset(field)) + +struct mipsnet_priv { + struct net_device_stats stats; +}; + +static struct platform_device *mips_plat_dev; + +static char mipsnet_string[] = "mipsnet"; + +/* + * Copy data from the MIPSNET rx data port + */ +static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata, + int len) +{ + uint32_t available_len = inl(mipsnet_reg_address(dev, rxDataCount)); + if (available_len < len) + return -EFAULT; + + for (; len > 0; len--, kdata++) { + *kdata = inb(mipsnet_reg_address(dev, rxDataBuffer)); + } + + return inl(mipsnet_reg_address(dev, rxDataCount)); +} + +static inline ssize_t mipsnet_put_todevice(struct net_device *dev, + struct sk_buff *skb) +{ + int count_to_go = skb->len; + char *buf_ptr = skb->data; + struct mipsnet_priv *mp = netdev_priv(dev); + + pr_debug("%s: %s(): telling MIPSNET txDataCount(%d)\n", + dev->name, __FUNCTION__, skb->len); + + outl(skb->len, mipsnet_reg_address(dev, txDataCount)); + + pr_debug("%s: %s(): sending data to MIPSNET txDataBuffer(%d)\n", + dev->name, __FUNCTION__, skb->len); + + for (; count_to_go; buf_ptr++, count_to_go--) { + outb(*buf_ptr, mipsnet_reg_address(dev, txDataBuffer)); + } + + mp->stats.tx_packets++; + mp->stats.tx_bytes += skb->len; + + return skb->len; +} + +static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev) +{ + pr_debug("%s:%s(): transmitting %d bytes\n", + dev->name, __FUNCTION__, skb->len); + + /* Only one packet at a time. Once TXDONE interrupt is serviced, the + * queue will be restarted. + */ + netif_stop_queue(dev); + mipsnet_put_todevice(dev, skb); + + return 0; +} + +static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count) +{ + struct sk_buff *skb; + size_t len = count; + struct mipsnet_priv *mp = netdev_priv(dev); + + if (!(skb = alloc_skb(len + 2, GFP_KERNEL))) { + mp->stats.rx_dropped++; + return -ENOMEM; + } + + skb_reserve(skb, 2); + if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len)) + return -EFAULT; + + skb->dev = dev; + skb->protocol = eth_type_trans(skb, dev); + skb->ip_summed = CHECKSUM_UNNECESSARY; + + pr_debug("%s:%s(): pushing RXed data to kernel\n", + dev->name, __FUNCTION__); + netif_rx(skb); + + mp->stats.rx_packets++; + mp->stats.rx_bytes += len; + + return count; +} + +static irqreturn_t +mipsnet_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = dev_id; + + irqreturn_t retval = IRQ_NONE; + uint64_t interruptFlags; + + if (irq == dev->irq) { + pr_debug("%s:%s(): irq %d for device\n", + dev->name, __FUNCTION__, irq); + + retval = IRQ_HANDLED; + + interruptFlags = + inl(mipsnet_reg_address(dev, interruptControl)); + pr_debug("%s:%s(): intCtl=0x%016llx\n", dev->name, + __FUNCTION__, interruptFlags); + + if (interruptFlags & MIPSNET_INTCTL_TXDONE) { + pr_debug("%s:%s(): got TXDone\n", + dev->name, __FUNCTION__); + outl(MIPSNET_INTCTL_TXDONE, + mipsnet_reg_address(dev, interruptControl)); + // only one packet at a time, we are done. 
+ netif_wake_queue(dev); + } else if (interruptFlags & MIPSNET_INTCTL_RXDONE) { + pr_debug("%s:%s(): got RX data\n", + dev->name, __FUNCTION__); + mipsnet_get_fromdev(dev, + inl(mipsnet_reg_address(dev, rxDataCount))); + pr_debug("%s:%s(): clearing RX int\n", + dev->name, __FUNCTION__); + outl(MIPSNET_INTCTL_RXDONE, + mipsnet_reg_address(dev, interruptControl)); + + } else if (interruptFlags & MIPSNET_INTCTL_TESTBIT) { + pr_debug("%s:%s(): got test interrupt\n", + dev->name, __FUNCTION__); + // TESTBIT is cleared on read. + // And takes effect after a write with 0 + outl(0, mipsnet_reg_address(dev, interruptControl)); + } else { + pr_debug("%s:%s(): no valid fags 0x%016llx\n", + dev->name, __FUNCTION__, interruptFlags); + // Maybe shared IRQ, just ignore, no clearing. + retval = IRQ_NONE; + } + + } else { + printk(KERN_INFO "%s: %s(): irq %d for unknown device\n", + dev->name, __FUNCTION__, irq); + retval = IRQ_NONE; + } + return retval; +} //mipsnet_interrupt() + +static int mipsnet_open(struct net_device *dev) +{ + int err; + pr_debug("%s: mipsnet_open\n", dev->name); + + err = request_irq(dev->irq, &mipsnet_interrupt, + SA_SHIRQ, dev->name, (void *) dev); + + if (err) { + pr_debug("%s: %s(): can't get irq %d\n", + dev->name, __FUNCTION__, dev->irq); + release_region(dev->base_addr, MIPSNET_IO_EXTENT); + return err; + } + + pr_debug("%s: %s(): got IO region at 0x%04lx and irq %d for dev.\n", + dev->name, __FUNCTION__, dev->base_addr, dev->irq); + + + netif_start_queue(dev); + + // test interrupt handler + outl(MIPSNET_INTCTL_TESTBIT, + mipsnet_reg_address(dev, interruptControl)); + + + return 0; +} + +static int mipsnet_close(struct net_device *dev) +{ + pr_debug("%s: %s()\n", dev->name, __FUNCTION__); + netif_stop_queue(dev); + return 0; +} + +static struct net_device_stats *mipsnet_get_stats(struct net_device *dev) +{ + struct mipsnet_priv *mp = netdev_priv(dev); + + return &mp->stats; +} + +static void mipsnet_set_mclist(struct net_device *dev) +{ + // we don't do anything + return; +} + +static int __init mipsnet_probe(struct device *dev) +{ + struct net_device *netdev; + int err; + + netdev = alloc_etherdev(sizeof(struct mipsnet_priv)); + if (!netdev) { + err = -ENOMEM; + goto out; + } + + dev_set_drvdata(dev, netdev); + + netdev->open = mipsnet_open; + netdev->stop = mipsnet_close; + netdev->hard_start_xmit = mipsnet_xmit; + netdev->get_stats = mipsnet_get_stats; + netdev->set_multicast_list = mipsnet_set_mclist; + + /* + * TODO: probe for these or load them from PARAM + */ + netdev->base_addr = 0x4200; + netdev->irq = MIPSCPU_INT_BASE + MIPSCPU_INT_MB0 + + inl(mipsnet_reg_address(netdev, interruptInfo)); + + // Get the io region now, get irq on open() + if (!request_region(netdev->base_addr, MIPSNET_IO_EXTENT, "mipsnet")) { + pr_debug("%s: %s(): IO region {start: 0x%04lux, len: %d} " + "for dev is not availble.\n", netdev->name, + __FUNCTION__, netdev->base_addr, MIPSNET_IO_EXTENT); + err = -EBUSY; + goto out_free_netdev; + } + + /* + * Lacking any better mechanism to allocate a MAC address we use a + * random one ... 
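The random_ether_addr() call that follows this comment is what actually fills in the MAC address. For reference, a sketch of what that helper conventionally does (not a quote of linux/etherdevice.h): it clears the multicast bit and sets the locally-administered bit, so a randomly generated address cannot collide with a vendor-assigned one.

/* sketch of the conventional random_ether_addr() behaviour */
static inline void random_ether_addr_sketch(unsigned char *addr)
{
	get_random_bytes(addr, 6);	/* ETH_ALEN */
	addr[0] &= 0xfe;		/* clear the multicast bit */
	addr[0] |= 0x02;		/* set the locally administered bit */
}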
+ */ + random_ether_addr(netdev->dev_addr); + + err = register_netdev(netdev); + if (err) { + printk(KERN_ERR "MIPSNet: failed to register netdev.\n"); + goto out_free_region; + } + + return 0; + +out_free_region: + release_region(netdev->base_addr, MIPSNET_IO_EXTENT); + +out_free_netdev: + free_netdev(netdev); + +out: + return err; +} + +static int __devexit mipsnet_device_remove(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + + unregister_netdev(dev); + release_region(dev->base_addr, MIPSNET_IO_EXTENT); + free_netdev(dev); + dev_set_drvdata(device, NULL); + + return 0; +} + +static struct device_driver mipsnet_driver = { + .name = mipsnet_string, + .bus = &platform_bus_type, + .probe = mipsnet_probe, + .remove = __devexit_p(mipsnet_device_remove), +}; + +static void mipsnet_platform_release(struct device *device) +{ + struct platform_device *pldev; + + /* free device */ + pldev = to_platform_device(device); + kfree(pldev); +} + +static int __init mipsnet_init_module(void) +{ + struct platform_device *pldev; + int err; + + printk(KERN_INFO "MIPSNet Ethernet driver. Version: %s. " + "(c)2005 MIPS Technologies, Inc.\n", MIPSNET_VERSION); + + if (driver_register(&mipsnet_driver)) { + printk(KERN_ERR "Driver registration failed\n"); + err = -ENODEV; + goto out; + } + + if (!(pldev = kmalloc (sizeof (*pldev), GFP_KERNEL))) { + err = -ENOMEM; + goto out_unregister_driver; + } + + memset (pldev, 0, sizeof (*pldev)); + pldev->name = mipsnet_string; + pldev->id = 0; + pldev->dev.release = mipsnet_platform_release; + + if (platform_device_register(pldev)) { + err = -ENODEV; + goto out_free_pldev; + } + + if (!pldev->dev.driver) { + /* + * The driver was not bound to this device, there was + * no hardware at this address. Unregister it, as the + * release fuction will take care of freeing the + * allocated structure + */ + platform_device_unregister (pldev); + } + + mips_plat_dev = pldev; + + return 0; + +out_free_pldev: + kfree(pldev); + +out_unregister_driver: + driver_unregister(&mipsnet_driver); +out: + return err; +} + +static void __exit mipsnet_exit_module(void) +{ + pr_debug("MIPSNet Ethernet driver exiting\n"); + + driver_unregister(&mipsnet_driver); +} + +module_init(mipsnet_init_module); +module_exit(mipsnet_exit_module); diff --git a/drivers/net/mipsnet.h b/drivers/net/mipsnet.h new file mode 100644 index 000000000000..878535953cb1 --- /dev/null +++ b/drivers/net/mipsnet.h @@ -0,0 +1,127 @@ +// +// +// Unpublished work (c) MIPS Technologies, Inc. All rights reserved. +// Unpublished rights reserved under the copyright laws of the U.S.A. and +// other countries. +// +// PROPRIETARY / SECRET CONFIDENTIAL INFORMATION OF MIPS TECHNOLOGIES, INC. +// FOR INTERNAL USE ONLY. +// +// Under no circumstances (contract or otherwise) may this information be +// disclosed to, or copied, modified or used by anyone other than employees +// or contractors of MIPS Technologies having a need to know. +// +// +//++ +// File: MIPS_Net.h +// +// Description: +// The definition of the emulated MIPSNET device's interface. +// +// Notes: This include file needs to work from a Linux device drivers. +// +//-- +// + +#ifndef __MIPSNET_H +#define __MIPSNET_H + +/* + * Id of this Net device, as seen by the core. 
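The MIPS_NET_DEV_ID constant defined just below packs the ASCII bytes 'M','I','P','S','N','E','T','0' from least to most significant byte of the 64-bit devId register. A hypothetical probe-time sanity check, assuming the two 32-bit halves can be read back at offsets 0x00 and 0x04 in that byte order (the driver above does not currently verify the ID):

/* hypothetical helper, not part of the patch */
static int mipsnet_check_id(unsigned long base)
{
	uint32_t id_lo = inl(base + 0);	/* expect 0x5350494d, "MIPS" */
	uint32_t id_hi = inl(base + 4);	/* expect 0x3054454e, "NET0" */

	if (id_lo != (uint32_t)MIPS_NET_DEV_ID ||
	    id_hi != (uint32_t)(MIPS_NET_DEV_ID >> 32))
		return -ENODEV;
	return 0;
}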
+ */
+#define MIPS_NET_DEV_ID ((uint64_t) \
+	((uint64_t)'M'<< 0)| \
+	((uint64_t)'I'<< 8)| \
+	((uint64_t)'P'<<16)| \
+	((uint64_t)'S'<<24)| \
+	((uint64_t)'N'<<32)| \
+	((uint64_t)'E'<<40)| \
+	((uint64_t)'T'<<48)| \
+	((uint64_t)'0'<<56))
+
+/*
+ * Net status/control block as seen by sw in the core.
+ * (Why not use bit fields? can't be bothered with cross-platform struct
+ * packing.)
+ */
+typedef struct _net_control_block {
+	/// dev info for probing
+	/// reads as MIPSNET%d where %d is some form of version
+	uint64_t devId;	/*0x00 */
+
+	/*
+	 * read only busy flag.
+	 * Set and cleared by the Net Device to indicate that an rx or a tx
+	 * is in progress.
+	 */
+	uint32_t busy;	/*0x08 */
+
+	/*
+	 * Set by the Net Device.
+	 * The device will set it once data has been received.
+	 * The value is the number of bytes that should be read from
+	 * rxDataBuffer. The value will decrease till 0 until all the data
+	 * from rxDataBuffer has been read.
+	 */
+	uint32_t rxDataCount;	/*0x0c */
+#define MIPSNET_MAX_RXTX_DATACOUNT (1<<16)
+
+	/*
+	 * Settable from the MIPS core, cleared by the Net Device.
+	 * The core should set the number of bytes it wants to send,
+	 * then it should write those bytes of data to txDataBuffer.
+	 * The device will clear txDataCount once it has been processed (not necessarily sent).
+	 */
+	uint32_t txDataCount;	/*0x10 */
+
+	/*
+	 * Interrupt control
+	 *
+	 * Used to clear the interrupt generated by this dev.
+	 * Write a 1 to clear the interrupt. (except bit31).
+	 *
+	 * Bit0 is set if it was a tx-done interrupt.
+	 * Bit1 is set when new rx-data is available.
+	 * Until this bit is cleared there will be no other RXs.
+	 *
+	 * Bit31 is used for testing, it clears after a read.
+	 * Writing 1 to this bit will cause an interrupt to be generated.
+	 * To clear the test interrupt, write 0 to this register.
+	 */
+	uint32_t interruptControl;	/*0x14 */
+#define MIPSNET_INTCTL_TXDONE		((uint32_t)(1<< 0))
+#define MIPSNET_INTCTL_RXDONE		((uint32_t)(1<< 1))
+#define MIPSNET_INTCTL_TESTBIT		((uint32_t)(1<<31))
+#define MIPSNET_INTCTL_ALLSOURCES	(MIPSNET_INTCTL_TXDONE|MIPSNET_INTCTL_RXDONE|MIPSNET_INTCTL_TESTBIT)
+
+	/*
+	 * Readonly core-specific interrupt info for the device to signal the core.
+	 * The meaning of the contents of this field might change.
+	 */
+	/*###\todo: the whole memIntf interrupt scheme is messy: the device should have
+	 * no control whatsoever of what VPE/register set is being used.
+	 * The MemIntf should only expose interrupt lines, and something in the
+	 * config should be responsible for the line<->core/vpe bindings.
+	 */
+	uint32_t interruptInfo;	/*0x18 */
+
+	/*
+	 * This is where the received data is read out.
+	 * There is more data to read until rxDataCount is 0.
+	 * Only 1 byte at this regs offset is used.
+	 */
+	uint32_t rxDataBuffer;	/*0x1c */
+
+	/*
+	 * This is where the data to transmit is written.
+	 * Data should be written for the amount specified in the txDataCount register.
+	 * Only 1 byte at this regs offset is used.
+ */ + uint32_t txDataBuffer; /*0x20 */ +} MIPS_T_NetControl; + +#define MIPSNET_IO_EXTENT 0x40 /* being generous */ + +#define field_offset(field) ((int)&((MIPS_T_NetControl*)(0))->field) + +#endif /* __MIPSNET_H */ diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c index f0996ce5c268..6c86dca62e2a 100644 --- a/drivers/net/myri_sbus.c +++ b/drivers/net/myri_sbus.c @@ -277,7 +277,7 @@ static void myri_init_rings(struct myri_eth *mp, int from_irq) struct recvq __iomem *rq = mp->rq; struct myri_rxd __iomem *rxd = &rq->myri_rxd[0]; struct net_device *dev = mp->dev; - int gfp_flags = GFP_KERNEL; + gfp_t gfp_flags = GFP_KERNEL; int i; if (from_irq || in_interrupt()) diff --git a/drivers/net/myri_sbus.h b/drivers/net/myri_sbus.h index 9391e55a5e92..47722f708a41 100644 --- a/drivers/net/myri_sbus.h +++ b/drivers/net/myri_sbus.h @@ -296,7 +296,7 @@ struct myri_eth { /* We use this to acquire receive skb's that we can DMA directly into. */ #define ALIGNED_RX_SKB_ADDR(addr) \ ((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr)) -static inline struct sk_buff *myri_alloc_skb(unsigned int length, int gfp_flags) +static inline struct sk_buff *myri_alloc_skb(unsigned int length, gfp_t gfp_flags) { struct sk_buff *skb; diff --git a/drivers/net/ne.c b/drivers/net/ne.c index d209a1556b2e..0de8fdd2aa86 100644 --- a/drivers/net/ne.c +++ b/drivers/net/ne.c @@ -54,6 +54,10 @@ static const char version2[] = #include #include +#if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938) +#include +#endif + #include "8390.h" #define DRV_NAME "ne" @@ -111,6 +115,9 @@ bad_clone_list[] __initdata = { {"E-LAN100", "E-LAN200", {0x00, 0x00, 0x5d}}, /* Broken ne1000 clones */ {"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */ {"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */ +#if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938) + {"RBHMA4X00-RTL8019", "RBHMA4X00/RTL8019", {0x00, 0x60, 0x0a}}, /* Toshiba built-in */ +#endif {"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */ {NULL,} }; @@ -226,6 +233,10 @@ struct net_device * __init ne_probe(int unit) sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); +#ifdef CONFIG_TOSHIBA_RBTX4938 + dev->base_addr = 0x07f20280; + dev->irq = RBTX4938_RTL_8019_IRQ; +#endif err = do_ne_probe(dev); if (err) goto out; @@ -506,6 +517,10 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr) ei_status.name = name; ei_status.tx_start_page = start_page; ei_status.stop_page = stop_page; +#if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938) + wordlength = 1; +#endif + #ifdef CONFIG_PLAT_OAKS32R ei_status.word16 = 0; #else diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c index f1c01ac29102..e531a4eedfee 100644 --- a/drivers/net/ne2k-pci.c +++ b/drivers/net/ne2k-pci.c @@ -372,6 +372,7 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev, printk("%2.2X%s", SA_prom[i], i == 5 ? 
".\n": ":"); dev->dev_addr[i] = SA_prom[i]; } + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); return 0; @@ -637,6 +638,7 @@ static struct ethtool_ops ne2k_pci_ethtool_ops = { .get_drvinfo = ne2k_pci_get_drvinfo, .get_tx_csum = ethtool_op_get_tx_csum, .get_sg = ethtool_op_get_sg, + .get_perm_addr = ethtool_op_get_perm_addr, }; static void __devexit ne2k_pci_remove_one (struct pci_dev *pdev) diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c index e64df4d0800b..a3c3fc9c0d8a 100644 --- a/drivers/net/ns83820.c +++ b/drivers/net/ns83820.c @@ -584,7 +584,7 @@ static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb) return 0; } -static inline int rx_refill(struct net_device *ndev, int gfp) +static inline int rx_refill(struct net_device *ndev, gfp_t gfp) { struct ns83820 *dev = PRIV(ndev); unsigned i; @@ -1632,8 +1632,7 @@ static void ns83820_run_bist(struct net_device *ndev, const char *name, u32 enab timed_out = 1; break; } - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(1); + schedule_timeout_uninterruptible(1); } if (status & fail) diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index d652e1eddb45..c7cca842e5ee 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c @@ -1832,7 +1832,7 @@ static void fill_multicast_tbl(int count, struct dev_mc_list *addrs, { struct dev_mc_list *mc_addr; - for (mc_addr = addrs; mc_addr && --count > 0; mc_addr = mc_addr->next) { + for (mc_addr = addrs; mc_addr && count-- > 0; mc_addr = mc_addr->next) { u_int position = ether_crc(6, mc_addr->dmi_addr); #ifndef final_version /* Verify multicast address. */ if ((mc_addr->dmi_addr[0] & 1) == 0) diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 113b68099216..70fe81a89df9 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c @@ -22,8 +22,8 @@ *************************************************************************/ #define DRV_NAME "pcnet32" -#define DRV_VERSION "1.30j" -#define DRV_RELDATE "29.04.2005" +#define DRV_VERSION "1.31a" +#define DRV_RELDATE "12.Sep.2005" #define PFX DRV_NAME ": " static const char *version = @@ -257,6 +257,9 @@ static int homepna[MAX_UNITS]; * v1.30h 24 Jun 2004 Don Fry correctly select auto, speed, duplex in bcr32. * v1.30i 28 Jun 2004 Don Fry change to use module_param. * v1.30j 29 Apr 2005 Don Fry fix skb/map leak with loopback test. + * v1.31 02 Sep 2005 Hubert WS Lin added set_ringparam(). + * v1.31a 12 Sep 2005 Hubert WS Lin set min ring size to 4 + * to allow loopback test to work unchanged. */ @@ -266,17 +269,17 @@ static int homepna[MAX_UNITS]; * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4). 
*/ #ifndef PCNET32_LOG_TX_BUFFERS -#define PCNET32_LOG_TX_BUFFERS 4 -#define PCNET32_LOG_RX_BUFFERS 5 +#define PCNET32_LOG_TX_BUFFERS 4 +#define PCNET32_LOG_RX_BUFFERS 5 +#define PCNET32_LOG_MAX_TX_BUFFERS 9 /* 2^9 == 512 */ +#define PCNET32_LOG_MAX_RX_BUFFERS 9 #endif #define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS)) -#define TX_RING_MOD_MASK (TX_RING_SIZE - 1) -#define TX_RING_LEN_BITS ((PCNET32_LOG_TX_BUFFERS) << 12) +#define TX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_TX_BUFFERS)) #define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS)) -#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) -#define RX_RING_LEN_BITS ((PCNET32_LOG_RX_BUFFERS) << 4) +#define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS)) #define PKT_BUF_SZ 1544 @@ -334,14 +337,14 @@ struct pcnet32_access { }; /* - * The first three fields of pcnet32_private are read by the ethernet device - * so we allocate the structure should be allocated by pci_alloc_consistent(). + * The first field of pcnet32_private is read by the ethernet device + * so the structure should be allocated using pci_alloc_consistent(). */ struct pcnet32_private { - /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */ - struct pcnet32_rx_head rx_ring[RX_RING_SIZE]; - struct pcnet32_tx_head tx_ring[TX_RING_SIZE]; struct pcnet32_init_block init_block; + /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */ + struct pcnet32_rx_head *rx_ring; + struct pcnet32_tx_head *tx_ring; dma_addr_t dma_addr; /* DMA address of beginning of this object, returned by pci_alloc_consistent */ @@ -349,13 +352,21 @@ struct pcnet32_private { structure */ const char *name; /* The saved address of a sent-in-place packet/buffer, for skfree(). */ - struct sk_buff *tx_skbuff[TX_RING_SIZE]; - struct sk_buff *rx_skbuff[RX_RING_SIZE]; - dma_addr_t tx_dma_addr[TX_RING_SIZE]; - dma_addr_t rx_dma_addr[RX_RING_SIZE]; + struct sk_buff **tx_skbuff; + struct sk_buff **rx_skbuff; + dma_addr_t *tx_dma_addr; + dma_addr_t *rx_dma_addr; struct pcnet32_access a; spinlock_t lock; /* Guard lock */ unsigned int cur_rx, cur_tx; /* The next free ring entry */ + unsigned int rx_ring_size; /* current rx ring size */ + unsigned int tx_ring_size; /* current tx ring size */ + unsigned int rx_mod_mask; /* rx ring modular mask */ + unsigned int tx_mod_mask; /* tx ring modular mask */ + unsigned short rx_len_bits; + unsigned short tx_len_bits; + dma_addr_t rx_ring_dma_addr; + dma_addr_t tx_ring_dma_addr; unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. 
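Looking back at the ethtool changes earlier in this series (ne2k-pci and ixgb above, and pcnet32 just below): saving the factory MAC in netdev->perm_addr and wiring up .get_perm_addr = ethtool_op_get_perm_addr lets userspace retrieve the permanent address even after dev_addr has been overridden. A rough userspace sketch of that query through the ETHTOOL_GPERMADDR ioctl (error handling trimmed; the interface name is taken from the command line):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	struct ethtool_perm_addr *epa;
	struct ifreq ifr;
	int fd, i;

	if (argc < 2)
		return 1;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	epa = calloc(1, sizeof(*epa) + 6);
	epa->cmd = ETHTOOL_GPERMADDR;
	epa->size = 6;			/* ETH_ALEN */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (char *)epa;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		for (i = 0; i < 6; i++)
			printf("%02x%c", epa->data[i], i == 5 ? '\n' : ':');

	free(epa);
	close(fd);
	return 0;
}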
*/ struct net_device_stats stats; char tx_full; @@ -397,6 +408,9 @@ static int pcnet32_get_regs_len(struct net_device *dev); static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *ptr); static void pcnet32_purge_tx_ring(struct net_device *dev); +static int pcnet32_alloc_ring(struct net_device *dev); +static void pcnet32_free_ring(struct net_device *dev); + enum pci_flags_bit { PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4, @@ -613,10 +627,62 @@ static void pcnet32_get_ringparam(struct net_device *dev, struct ethtool_ringpar { struct pcnet32_private *lp = dev->priv; - ering->tx_max_pending = TX_RING_SIZE - 1; - ering->tx_pending = lp->cur_tx - lp->dirty_tx; - ering->rx_max_pending = RX_RING_SIZE - 1; - ering->rx_pending = lp->cur_rx & RX_RING_MOD_MASK; + ering->tx_max_pending = TX_MAX_RING_SIZE - 1; + ering->tx_pending = lp->tx_ring_size - 1; + ering->rx_max_pending = RX_MAX_RING_SIZE - 1; + ering->rx_pending = lp->rx_ring_size - 1; +} + +static int pcnet32_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) +{ + struct pcnet32_private *lp = dev->priv; + unsigned long flags; + int i; + + if (ering->rx_mini_pending || ering->rx_jumbo_pending) + return -EINVAL; + + if (netif_running(dev)) + pcnet32_close(dev); + + spin_lock_irqsave(&lp->lock, flags); + pcnet32_free_ring(dev); + lp->tx_ring_size = min(ering->tx_pending, (unsigned int) TX_MAX_RING_SIZE); + lp->rx_ring_size = min(ering->rx_pending, (unsigned int) RX_MAX_RING_SIZE); + + /* set the minimum ring size to 4, to allow the loopback test to work + * unchanged. + */ + for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) { + if (lp->tx_ring_size <= (1 << i)) + break; + } + lp->tx_ring_size = (1 << i); + lp->tx_mod_mask = lp->tx_ring_size - 1; + lp->tx_len_bits = (i << 12); + + for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) { + if (lp->rx_ring_size <= (1 << i)) + break; + } + lp->rx_ring_size = (1 << i); + lp->rx_mod_mask = lp->rx_ring_size - 1; + lp->rx_len_bits = (i << 4); + + if (pcnet32_alloc_ring(dev)) { + pcnet32_free_ring(dev); + return -ENOMEM; + } + + spin_unlock_irqrestore(&lp->lock, flags); + + if (pcnet32_debug & NETIF_MSG_DRV) + printk(KERN_INFO PFX "Ring Param Settings: RX: %d, TX: %d\n", lp->rx_ring_size, lp->tx_ring_size); + + if (netif_running(dev)) + pcnet32_open(dev); + + return 0; } static void pcnet32_get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -948,6 +1014,7 @@ static struct ethtool_ops pcnet32_ethtool_ops = { .nway_reset = pcnet32_nway_reset, .get_link = pcnet32_get_link, .get_ringparam = pcnet32_get_ringparam, + .set_ringparam = pcnet32_set_ringparam, .get_tx_csum = ethtool_op_get_tx_csum, .get_sg = ethtool_op_get_sg, .get_tso = ethtool_op_get_tso, @@ -957,6 +1024,7 @@ static struct ethtool_ops pcnet32_ethtool_ops = { .phys_id = pcnet32_phys_id, .get_regs_len = pcnet32_get_regs_len, .get_regs = pcnet32_get_regs, + .get_perm_addr = ethtool_op_get_perm_addr, }; /* only probes for non-PCI devices, the rest are handled by @@ -1185,9 +1253,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) memcpy(dev->dev_addr, promaddr, 6); } } + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ - if (!is_valid_ether_addr(dev->dev_addr)) + if (!is_valid_ether_addr(dev->perm_addr)) memset(dev->dev_addr, 0, sizeof(dev->dev_addr)); if (pcnet32_debug & NETIF_MSG_PROBE) { @@ -1239,6 +1308,12 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) 
dev->priv = lp; lp->name = chipname; lp->shared_irq = shared; + lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ + lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */ + lp->tx_mod_mask = lp->tx_ring_size - 1; + lp->rx_mod_mask = lp->rx_ring_size - 1; + lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12); + lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4); lp->mii_if.full_duplex = fdx; lp->mii_if.phy_id_mask = 0x1f; lp->mii_if.reg_num_mask = 0x1f; @@ -1265,21 +1340,23 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) } lp->a = *a; + if (pcnet32_alloc_ring(dev)) { + ret = -ENOMEM; + goto err_free_ring; + } /* detect special T1/E1 WAN card by checking for MAC address */ if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 && dev->dev_addr[2] == 0x75) lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI; lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */ - lp->init_block.tlen_rlen = le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS); + lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits); for (i = 0; i < 6; i++) lp->init_block.phys_addr[i] = dev->dev_addr[i]; lp->init_block.filter[0] = 0x00000000; lp->init_block.filter[1] = 0x00000000; - lp->init_block.rx_ring = (u32)le32_to_cpu(lp->dma_addr + - offsetof(struct pcnet32_private, rx_ring)); - lp->init_block.tx_ring = (u32)le32_to_cpu(lp->dma_addr + - offsetof(struct pcnet32_private, tx_ring)); + lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr); + lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr); /* switch pcnet32 to 32bit mode */ a->write_bcr(ioaddr, 20, 2); @@ -1310,7 +1387,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) if (pcnet32_debug & NETIF_MSG_PROBE) printk(", failed to detect IRQ line.\n"); ret = -ENODEV; - goto err_free_consistent; + goto err_free_ring; } if (pcnet32_debug & NETIF_MSG_PROBE) printk(", probed IRQ %d.\n", dev->irq); @@ -1341,7 +1418,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) /* Fill in the generic fields of the device structure. 
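A note on the pcnet32_set_ringparam() added above: the requested descriptor counts are never used as-is. Each is clamped to the 512-entry maximum and rounded up to a power of two, with 4 as the floor so the loopback self-test keeps working. Restated as a standalone helper (the function name is invented for illustration):

static unsigned int pcnet32_round_ring_size(unsigned int requested,
					    unsigned int log_max)	/* 9 => max 512 */
{
	unsigned int i;

	if (requested > (1U << log_max))
		requested = 1U << log_max;

	for (i = 2; i <= log_max; i++)	/* start at 2: minimum ring of 4 */
		if (requested <= (1U << i))
			break;

	return 1U << i;
}

/* e.g. requested 100 -> 128, requested 1 -> 4, requested 600 -> 512 */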
*/ if (register_netdev(dev)) - goto err_free_consistent; + goto err_free_ring; if (pdev) { pci_set_drvdata(pdev, dev); @@ -1359,6 +1436,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) return 0; +err_free_ring: + pcnet32_free_ring(dev); err_free_consistent: pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); err_free_netdev: @@ -1369,6 +1448,86 @@ err_release_region: } +static int pcnet32_alloc_ring(struct net_device *dev) +{ + struct pcnet32_private *lp = dev->priv; + + if ((lp->tx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, + &lp->tx_ring_dma_addr)) == NULL) { + if (pcnet32_debug & NETIF_MSG_DRV) + printk(KERN_ERR PFX "Consistent memory allocation failed.\n"); + return -ENOMEM; + } + + if ((lp->rx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, + &lp->rx_ring_dma_addr)) == NULL) { + if (pcnet32_debug & NETIF_MSG_DRV) + printk(KERN_ERR PFX "Consistent memory allocation failed.\n"); + return -ENOMEM; + } + + if (!(lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size, GFP_ATOMIC))) { + if (pcnet32_debug & NETIF_MSG_DRV) + printk(KERN_ERR PFX "Memory allocation failed.\n"); + return -ENOMEM; + } + memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size); + + if (!(lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size, GFP_ATOMIC))) { + if (pcnet32_debug & NETIF_MSG_DRV) + printk(KERN_ERR PFX "Memory allocation failed.\n"); + return -ENOMEM; + } + memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size); + + if (!(lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size, GFP_ATOMIC))) { + if (pcnet32_debug & NETIF_MSG_DRV) + printk(KERN_ERR PFX "Memory allocation failed.\n"); + return -ENOMEM; + } + memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size); + + if (!(lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size, GFP_ATOMIC))) { + if (pcnet32_debug & NETIF_MSG_DRV) + printk(KERN_ERR PFX "Memory allocation failed.\n"); + return -ENOMEM; + } + memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size); + + return 0; +} + + +static void pcnet32_free_ring(struct net_device *dev) +{ + struct pcnet32_private *lp = dev->priv; + + kfree(lp->tx_skbuff); + lp->tx_skbuff = NULL; + + kfree(lp->rx_skbuff); + lp->rx_skbuff = NULL; + + kfree(lp->tx_dma_addr); + lp->tx_dma_addr = NULL; + + kfree(lp->rx_dma_addr); + lp->rx_dma_addr = NULL; + + if (lp->tx_ring) { + pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, + lp->tx_ring, lp->tx_ring_dma_addr); + lp->tx_ring = NULL; + } + + if (lp->rx_ring) { + pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, + lp->rx_ring, lp->rx_ring_dma_addr); + lp->rx_ring = NULL; + } +} + + static int pcnet32_open(struct net_device *dev) { @@ -1400,8 +1559,8 @@ pcnet32_open(struct net_device *dev) if (netif_msg_ifup(lp)) printk(KERN_DEBUG "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n", dev->name, dev->irq, - (u32) (lp->dma_addr + offsetof(struct pcnet32_private, tx_ring)), - (u32) (lp->dma_addr + offsetof(struct pcnet32_private, rx_ring)), + (u32) (lp->tx_ring_dma_addr), + (u32) (lp->rx_ring_dma_addr), (u32) (lp->dma_addr + offsetof(struct pcnet32_private, init_block))); /* set/reset autoselect bit */ @@ -1521,7 +1680,7 @@ pcnet32_open(struct net_device *dev) err_free_ring: /* free any allocated skbuffs */ - for (i = 0; i < RX_RING_SIZE; i++) { + for (i = 0; i < 
lp->rx_ring_size; i++) { lp->rx_ring[i].status = 0; if (lp->rx_skbuff[i]) { pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2, @@ -1531,6 +1690,9 @@ err_free_ring: lp->rx_skbuff[i] = NULL; lp->rx_dma_addr[i] = 0; } + + pcnet32_free_ring(dev); + /* * Switch back to 16bit mode to avoid problems with dumb * DOS packet driver after a warm reboot @@ -1562,7 +1724,7 @@ pcnet32_purge_tx_ring(struct net_device *dev) struct pcnet32_private *lp = dev->priv; int i; - for (i = 0; i < TX_RING_SIZE; i++) { + for (i = 0; i < lp->tx_ring_size; i++) { lp->tx_ring[i].status = 0; /* CPU owns buffer */ wmb(); /* Make sure adapter sees owner change */ if (lp->tx_skbuff[i]) { @@ -1587,7 +1749,7 @@ pcnet32_init_ring(struct net_device *dev) lp->cur_rx = lp->cur_tx = 0; lp->dirty_rx = lp->dirty_tx = 0; - for (i = 0; i < RX_RING_SIZE; i++) { + for (i = 0; i < lp->rx_ring_size; i++) { struct sk_buff *rx_skbuff = lp->rx_skbuff[i]; if (rx_skbuff == NULL) { if (!(rx_skbuff = lp->rx_skbuff[i] = dev_alloc_skb (PKT_BUF_SZ))) { @@ -1611,20 +1773,18 @@ pcnet32_init_ring(struct net_device *dev) } /* The Tx buffer address is filled in as needed, but we do need to clear * the upper ownership bit. */ - for (i = 0; i < TX_RING_SIZE; i++) { + for (i = 0; i < lp->tx_ring_size; i++) { lp->tx_ring[i].status = 0; /* CPU owns buffer */ wmb(); /* Make sure adapter sees owner change */ lp->tx_ring[i].base = 0; lp->tx_dma_addr[i] = 0; } - lp->init_block.tlen_rlen = le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS); + lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits); for (i = 0; i < 6; i++) lp->init_block.phys_addr[i] = dev->dev_addr[i]; - lp->init_block.rx_ring = (u32)le32_to_cpu(lp->dma_addr + - offsetof(struct pcnet32_private, rx_ring)); - lp->init_block.tx_ring = (u32)le32_to_cpu(lp->dma_addr + - offsetof(struct pcnet32_private, tx_ring)); + lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr); + lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr); wmb(); /* Make sure all changes are visible */ return 0; } @@ -1682,13 +1842,13 @@ pcnet32_tx_timeout (struct net_device *dev) printk(KERN_DEBUG " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.", lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "", lp->cur_rx); - for (i = 0 ; i < RX_RING_SIZE; i++) + for (i = 0 ; i < lp->rx_ring_size; i++) printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", le32_to_cpu(lp->rx_ring[i].base), (-le16_to_cpu(lp->rx_ring[i].buf_length)) & 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length), le16_to_cpu(lp->rx_ring[i].status)); - for (i = 0 ; i < TX_RING_SIZE; i++) + for (i = 0 ; i < lp->tx_ring_size; i++) printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", le32_to_cpu(lp->tx_ring[i].base), (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff, @@ -1729,7 +1889,7 @@ pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Fill in a Tx ring entry */ /* Mask to ring buffer boundary. */ - entry = lp->cur_tx & TX_RING_MOD_MASK; + entry = lp->cur_tx & lp->tx_mod_mask; /* Caution: the write order is important here, set the status * with the "ownership" bits last. 
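The ring arithmetic being converted here only works because the new ring sizes are always powers of two (guaranteed by the rounding above): cur_tx and dirty_tx are free-running counters, the descriptor index is the counter masked with size - 1, and the dirty/current distance is taken modulo twice the ring size so the out-of-sync test in the interrupt handler below can detect a wrapped pointer. A small worked example with invented values:

/* illustration only: 16-entry TX ring, so tx_mod_mask == 15 */
static void pcnet32_ring_math_example(void)
{
	unsigned int tx_ring_size = 16, tx_mod_mask = 15;
	unsigned int cur_tx = 35, dirty_tx = 30;

	unsigned int entry = cur_tx & tx_mod_mask;	/* 35 & 15 == 3 */
	unsigned int delta = (cur_tx - dirty_tx) &
			     (tx_mod_mask + tx_ring_size);	/* & 31: modulo 32, here 5 */

	/* delta > tx_ring_size would indicate an out-of-sync dirty pointer */
	(void)entry;
	(void)delta;
}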
*/ @@ -1753,7 +1913,7 @@ pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev) dev->trans_start = jiffies; - if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base != 0) { + if (lp->tx_ring[(entry+1) & lp->tx_mod_mask].base != 0) { lp->tx_full = 1; netif_stop_queue(dev); } @@ -1806,7 +1966,7 @@ pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs) int delta; while (dirty_tx != lp->cur_tx) { - int entry = dirty_tx & TX_RING_MOD_MASK; + int entry = dirty_tx & lp->tx_mod_mask; int status = (short)le16_to_cpu(lp->tx_ring[entry].status); if (status < 0) @@ -1864,18 +2024,18 @@ pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs) dirty_tx++; } - delta = (lp->cur_tx - dirty_tx) & (TX_RING_MOD_MASK + TX_RING_SIZE); - if (delta > TX_RING_SIZE) { + delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size); + if (delta > lp->tx_ring_size) { if (netif_msg_drv(lp)) printk(KERN_ERR "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n", dev->name, dirty_tx, lp->cur_tx, lp->tx_full); - dirty_tx += TX_RING_SIZE; - delta -= TX_RING_SIZE; + dirty_tx += lp->tx_ring_size; + delta -= lp->tx_ring_size; } if (lp->tx_full && netif_queue_stopped(dev) && - delta < TX_RING_SIZE - 2) { + delta < lp->tx_ring_size - 2) { /* The ring is no longer full, clear tbusy. */ lp->tx_full = 0; netif_wake_queue (dev); @@ -1932,8 +2092,8 @@ static int pcnet32_rx(struct net_device *dev) { struct pcnet32_private *lp = dev->priv; - int entry = lp->cur_rx & RX_RING_MOD_MASK; - int boguscnt = RX_RING_SIZE / 2; + int entry = lp->cur_rx & lp->rx_mod_mask; + int boguscnt = lp->rx_ring_size / 2; /* If we own the next entry, it's a new packet. Send it up. */ while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) { @@ -1998,12 +2158,12 @@ pcnet32_rx(struct net_device *dev) if (netif_msg_drv(lp)) printk(KERN_ERR "%s: Memory squeeze, deferring packet.\n", dev->name); - for (i = 0; i < RX_RING_SIZE; i++) + for (i = 0; i < lp->rx_ring_size; i++) if ((short)le16_to_cpu(lp->rx_ring[(entry+i) - & RX_RING_MOD_MASK].status) < 0) + & lp->rx_mod_mask].status) < 0) break; - if (i > RX_RING_SIZE -2) { + if (i > lp->rx_ring_size -2) { lp->stats.rx_dropped++; lp->rx_ring[entry].status |= le16_to_cpu(0x8000); wmb(); /* Make sure adapter sees owner change */ @@ -2041,7 +2201,7 @@ pcnet32_rx(struct net_device *dev) lp->rx_ring[entry].buf_length = le16_to_cpu(2-PKT_BUF_SZ); wmb(); /* Make sure owner changes after all others are visible */ lp->rx_ring[entry].status |= le16_to_cpu(0x8000); - entry = (++lp->cur_rx) & RX_RING_MOD_MASK; + entry = (++lp->cur_rx) & lp->rx_mod_mask; if (--boguscnt <= 0) break; /* don't stay in loop forever */ } @@ -2084,7 +2244,7 @@ pcnet32_close(struct net_device *dev) spin_lock_irqsave(&lp->lock, flags); /* free all allocated skbuffs */ - for (i = 0; i < RX_RING_SIZE; i++) { + for (i = 0; i < lp->rx_ring_size; i++) { lp->rx_ring[i].status = 0; wmb(); /* Make sure adapter sees owner change */ if (lp->rx_skbuff[i]) { @@ -2096,7 +2256,7 @@ pcnet32_close(struct net_device *dev) lp->rx_dma_addr[i] = 0; } - for (i = 0; i < TX_RING_SIZE; i++) { + for (i = 0; i < lp->tx_ring_size; i++) { lp->tx_ring[i].status = 0; /* CPU owns buffer */ wmb(); /* Make sure adapter sees owner change */ if (lp->tx_skbuff[i]) { @@ -2265,6 +2425,7 @@ static void __devexit pcnet32_remove_one(struct pci_dev *pdev) struct pcnet32_private *lp = dev->priv; unregister_netdev(dev); + pcnet32_free_ring(dev); release_region(dev->base_addr, PCNET32_TOTAL_SIZE); pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); 
free_netdev(dev); @@ -2340,6 +2501,7 @@ static void __exit pcnet32_cleanup_module(void) struct pcnet32_private *lp = pcnet32_dev->priv; next_dev = lp->next; unregister_netdev(pcnet32_dev); + pcnet32_free_ring(pcnet32_dev); release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE); pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); free_netdev(pcnet32_dev); diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 14f4de1a8180..c782a6329805 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -12,14 +12,6 @@ config PHYLIB devices. This option provides infrastructure for managing PHY devices. -config PHYCONTROL - bool " Support for automatically handling PHY state changes" - depends on PHYLIB - help - Adds code to perform all the work for keeping PHY link - state (speed/duplex/etc) up-to-date. Also handles - interrupts. - comment "MII PHY device drivers" depends on PHYLIB diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index d9e11f93bf3a..9209da9dde0d 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -242,10 +242,6 @@ EXPORT_SYMBOL(phy_sanitize_settings); * choose the next best ones from the ones selected, so we don't * care if ethtool tries to give us bad values * - * A note about the PHYCONTROL Layer. If you turn off - * CONFIG_PHYCONTROL, you will need to read the PHY status - * registers after this function completes, and update your - * controller manually. */ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd) { @@ -380,7 +376,6 @@ int phy_start_aneg(struct phy_device *phydev) err = phydev->drv->config_aneg(phydev); -#ifdef CONFIG_PHYCONTROL if (err < 0) goto out_unlock; @@ -395,14 +390,12 @@ int phy_start_aneg(struct phy_device *phydev) } out_unlock: -#endif spin_unlock(&phydev->lock); return err; } EXPORT_SYMBOL(phy_start_aneg); -#ifdef CONFIG_PHYCONTROL static void phy_change(void *data); static void phy_timer(unsigned long data); @@ -868,4 +861,3 @@ static void phy_timer(unsigned long data) mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ); } -#endif /* CONFIG_PHYCONTROL */ diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 33f7bdb5857c..6da1aa0706a1 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -101,7 +101,6 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr) return dev; } -#ifdef CONFIG_PHYCONTROL /* phy_prepare_link: * * description: Tells the PHY infrastructure to handle the @@ -160,8 +159,6 @@ void phy_disconnect(struct phy_device *phydev) } EXPORT_SYMBOL(phy_disconnect); -#endif /* CONFIG_PHYCONTROL */ - /* phy_attach: * * description: Called by drivers to attach to a particular PHY diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index 82f236cc3b9b..a842ecc60a34 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c @@ -1070,7 +1070,7 @@ static int __init pppoe_proc_init(void) { struct proc_dir_entry *p; - p = create_proc_entry("pppoe", S_IRUGO, proc_net); + p = create_proc_entry("net/pppoe", S_IRUGO, NULL); if (!p) return -ENOMEM; @@ -1142,7 +1142,7 @@ static void __exit pppoe_exit(void) dev_remove_pack(&pppoes_ptype); dev_remove_pack(&pppoed_ptype); unregister_netdevice_notifier(&pppoe_notifier); - remove_proc_entry("pppoe", proc_net); + remove_proc_entry("net/pppoe", NULL); proto_unregister(&pppoe_sk_proto); } diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index f0471d102e3c..159b56a56ef4 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -92,19 +92,18 @@ VERSION 
2.2LK <2005/01/25> #endif /* RTL8169_DEBUG */ #define R8169_MSG_DEFAULT \ - (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | \ - NETIF_MSG_IFDOWN) + (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) #define TX_BUFFS_AVAIL(tp) \ (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1) #ifdef CONFIG_R8169_NAPI #define rtl8169_rx_skb netif_receive_skb -#define rtl8169_rx_hwaccel_skb vlan_hwaccel_rx +#define rtl8169_rx_hwaccel_skb vlan_hwaccel_receive_skb #define rtl8169_rx_quota(count, quota) min(count, quota) #else #define rtl8169_rx_skb netif_rx -#define rtl8169_rx_hwaccel_skb vlan_hwaccel_receive_skb +#define rtl8169_rx_hwaccel_skb vlan_hwaccel_rx #define rtl8169_rx_quota(count, quota) count #endif @@ -1028,6 +1027,7 @@ static struct ethtool_ops rtl8169_ethtool_ops = { .get_strings = rtl8169_get_strings, .get_stats_count = rtl8169_get_stats_count, .get_ethtool_stats = rtl8169_get_ethtool_stats, + .get_perm_addr = ethtool_op_get_perm_addr, }; static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, int bitnum, @@ -1512,6 +1512,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Get MAC address. FIXME: read EEPROM */ for (i = 0; i < MAC_ADDR_LEN; i++) dev->dev_addr[i] = RTL_R8(MAC0 + i); + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); dev->open = rtl8169_open; dev->hard_start_xmit = rtl8169_start_xmit; diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c new file mode 100644 index 000000000000..12cde0604580 --- /dev/null +++ b/drivers/net/rionet.c @@ -0,0 +1,574 @@ +/* + * rionet - Ethernet driver over RapidIO messaging services + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define DRV_NAME "rionet" +#define DRV_VERSION "0.2" +#define DRV_AUTHOR "Matt Porter " +#define DRV_DESC "Ethernet over RapidIO" + +MODULE_AUTHOR(DRV_AUTHOR); +MODULE_DESCRIPTION(DRV_DESC); +MODULE_LICENSE("GPL"); + +#define RIONET_DEFAULT_MSGLEVEL \ + (NETIF_MSG_DRV | \ + NETIF_MSG_LINK | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) + +#define RIONET_DOORBELL_JOIN 0x1000 +#define RIONET_DOORBELL_LEAVE 0x1001 + +#define RIONET_MAILBOX 0 + +#define RIONET_TX_RING_SIZE CONFIG_RIONET_TX_SIZE +#define RIONET_RX_RING_SIZE CONFIG_RIONET_RX_SIZE + +static LIST_HEAD(rionet_peers); + +struct rionet_private { + struct rio_mport *mport; + struct sk_buff *rx_skb[RIONET_RX_RING_SIZE]; + struct sk_buff *tx_skb[RIONET_TX_RING_SIZE]; + struct net_device_stats stats; + int rx_slot; + int tx_slot; + int tx_cnt; + int ack_slot; + spinlock_t lock; + spinlock_t tx_lock; + u32 msg_enable; +}; + +struct rionet_peer { + struct list_head node; + struct rio_dev *rdev; + struct resource *res; +}; + +static int rionet_check = 0; +static int rionet_capable = 1; + +/* + * This is a fast lookup table for for translating TX + * Ethernet packets into a destination RIO device. It + * could be made into a hash table to save memory depending + * on system trade-offs. 
+ */ +static struct rio_dev *rionet_active[RIO_MAX_ROUTE_ENTRIES]; + +#define is_rionet_capable(pef, src_ops, dst_ops) \ + ((pef & RIO_PEF_INB_MBOX) && \ + (pef & RIO_PEF_INB_DOORBELL) && \ + (src_ops & RIO_SRC_OPS_DOORBELL) && \ + (dst_ops & RIO_DST_OPS_DOORBELL)) +#define dev_rionet_capable(dev) \ + is_rionet_capable(dev->pef, dev->src_ops, dev->dst_ops) + +#define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001) +#define RIONET_GET_DESTID(x) (*(u16 *)(x + 4)) + +static struct net_device_stats *rionet_stats(struct net_device *ndev) +{ + struct rionet_private *rnet = ndev->priv; + return &rnet->stats; +} + +static int rionet_rx_clean(struct net_device *ndev) +{ + int i; + int error = 0; + struct rionet_private *rnet = ndev->priv; + void *data; + + i = rnet->rx_slot; + + do { + if (!rnet->rx_skb[i]) + continue; + + if (!(data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX))) + break; + + rnet->rx_skb[i]->data = data; + skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE); + rnet->rx_skb[i]->dev = ndev; + rnet->rx_skb[i]->protocol = + eth_type_trans(rnet->rx_skb[i], ndev); + error = netif_rx(rnet->rx_skb[i]); + + if (error == NET_RX_DROP) { + rnet->stats.rx_dropped++; + } else if (error == NET_RX_BAD) { + if (netif_msg_rx_err(rnet)) + printk(KERN_WARNING "%s: bad rx packet\n", + DRV_NAME); + rnet->stats.rx_errors++; + } else { + rnet->stats.rx_packets++; + rnet->stats.rx_bytes += RIO_MAX_MSG_SIZE; + } + + } while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot); + + return i; +} + +static void rionet_rx_fill(struct net_device *ndev, int end) +{ + int i; + struct rionet_private *rnet = ndev->priv; + + i = rnet->rx_slot; + do { + rnet->rx_skb[i] = dev_alloc_skb(RIO_MAX_MSG_SIZE); + + if (!rnet->rx_skb[i]) + break; + + rio_add_inb_buffer(rnet->mport, RIONET_MAILBOX, + rnet->rx_skb[i]->data); + } while ((i = (i + 1) % RIONET_RX_RING_SIZE) != end); + + rnet->rx_slot = i; +} + +static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev, + struct rio_dev *rdev) +{ + struct rionet_private *rnet = ndev->priv; + + rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len); + rnet->tx_skb[rnet->tx_slot] = skb; + + rnet->stats.tx_packets++; + rnet->stats.tx_bytes += skb->len; + + if (++rnet->tx_cnt == RIONET_TX_RING_SIZE) + netif_stop_queue(ndev); + + ++rnet->tx_slot; + rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1); + + if (netif_msg_tx_queued(rnet)) + printk(KERN_INFO "%s: queued skb %8.8x len %8.8x\n", DRV_NAME, + (u32) skb, skb->len); + + return 0; +} + +static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + int i; + struct rionet_private *rnet = ndev->priv; + struct ethhdr *eth = (struct ethhdr *)skb->data; + u16 destid; + unsigned long flags; + + local_irq_save(flags); + if (!spin_trylock(&rnet->tx_lock)) { + local_irq_restore(flags); + return NETDEV_TX_LOCKED; + } + + if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) { + netif_stop_queue(ndev); + spin_unlock_irqrestore(&rnet->tx_lock, flags); + printk(KERN_ERR "%s: BUG! 
Tx Ring full when queue awake!\n", + ndev->name); + return NETDEV_TX_BUSY; + } + + if (eth->h_dest[0] & 0x01) { + for (i = 0; i < RIO_MAX_ROUTE_ENTRIES; i++) + if (rionet_active[i]) + rionet_queue_tx_msg(skb, ndev, + rionet_active[i]); + } else if (RIONET_MAC_MATCH(eth->h_dest)) { + destid = RIONET_GET_DESTID(eth->h_dest); + if (rionet_active[destid]) + rionet_queue_tx_msg(skb, ndev, rionet_active[destid]); + } + + spin_unlock_irqrestore(&rnet->tx_lock, flags); + + return 0; +} + +static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u16 tid, + u16 info) +{ + struct net_device *ndev = dev_id; + struct rionet_private *rnet = ndev->priv; + struct rionet_peer *peer; + + if (netif_msg_intr(rnet)) + printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x", + DRV_NAME, sid, tid, info); + if (info == RIONET_DOORBELL_JOIN) { + if (!rionet_active[sid]) { + list_for_each_entry(peer, &rionet_peers, node) { + if (peer->rdev->destid == sid) + rionet_active[sid] = peer->rdev; + } + rio_mport_send_doorbell(mport, sid, + RIONET_DOORBELL_JOIN); + } + } else if (info == RIONET_DOORBELL_LEAVE) { + rionet_active[sid] = NULL; + } else { + if (netif_msg_intr(rnet)) + printk(KERN_WARNING "%s: unhandled doorbell\n", + DRV_NAME); + } +} + +static void rionet_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot) +{ + int n; + struct net_device *ndev = dev_id; + struct rionet_private *rnet = (struct rionet_private *)ndev->priv; + + if (netif_msg_intr(rnet)) + printk(KERN_INFO "%s: inbound message event, mbox %d slot %d\n", + DRV_NAME, mbox, slot); + + spin_lock(&rnet->lock); + if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot) + rionet_rx_fill(ndev, n); + spin_unlock(&rnet->lock); +} + +static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot) +{ + struct net_device *ndev = dev_id; + struct rionet_private *rnet = ndev->priv; + + spin_lock(&rnet->lock); + + if (netif_msg_intr(rnet)) + printk(KERN_INFO + "%s: outbound message event, mbox %d slot %d\n", + DRV_NAME, mbox, slot); + + while (rnet->tx_cnt && (rnet->ack_slot != slot)) { + /* dma unmap single */ + dev_kfree_skb_irq(rnet->tx_skb[rnet->ack_slot]); + rnet->tx_skb[rnet->ack_slot] = NULL; + ++rnet->ack_slot; + rnet->ack_slot &= (RIONET_TX_RING_SIZE - 1); + rnet->tx_cnt--; + } + + if (rnet->tx_cnt < RIONET_TX_RING_SIZE) + netif_wake_queue(ndev); + + spin_unlock(&rnet->lock); +} + +static int rionet_open(struct net_device *ndev) +{ + int i, rc = 0; + struct rionet_peer *peer, *tmp; + u32 pwdcsr; + struct rionet_private *rnet = ndev->priv; + + if (netif_msg_ifup(rnet)) + printk(KERN_INFO "%s: open\n", DRV_NAME); + + if ((rc = rio_request_inb_dbell(rnet->mport, + (void *)ndev, + RIONET_DOORBELL_JOIN, + RIONET_DOORBELL_LEAVE, + rionet_dbell_event)) < 0) + goto out; + + if ((rc = rio_request_inb_mbox(rnet->mport, + (void *)ndev, + RIONET_MAILBOX, + RIONET_RX_RING_SIZE, + rionet_inb_msg_event)) < 0) + goto out; + + if ((rc = rio_request_outb_mbox(rnet->mport, + (void *)ndev, + RIONET_MAILBOX, + RIONET_TX_RING_SIZE, + rionet_outb_msg_event)) < 0) + goto out; + + /* Initialize inbound message ring */ + for (i = 0; i < RIONET_RX_RING_SIZE; i++) + rnet->rx_skb[i] = NULL; + rnet->rx_slot = 0; + rionet_rx_fill(ndev, 0); + + rnet->tx_slot = 0; + rnet->tx_cnt = 0; + rnet->ack_slot = 0; + + netif_carrier_on(ndev); + netif_start_queue(ndev); + + list_for_each_entry_safe(peer, tmp, &rionet_peers, node) { + if (!(peer->res = rio_request_outb_dbell(peer->rdev, + RIONET_DOORBELL_JOIN, + 
RIONET_DOORBELL_LEAVE))) + { + printk(KERN_ERR "%s: error requesting doorbells\n", + DRV_NAME); + continue; + } + + /* + * If device has initialized inbound doorbells, + * send a join message + */ + rio_read_config_32(peer->rdev, RIO_WRITE_PORT_CSR, &pwdcsr); + if (pwdcsr & RIO_DOORBELL_AVAIL) + rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN); + } + + out: + return rc; +} + +static int rionet_close(struct net_device *ndev) +{ + struct rionet_private *rnet = (struct rionet_private *)ndev->priv; + struct rionet_peer *peer, *tmp; + int i; + + if (netif_msg_ifup(rnet)) + printk(KERN_INFO "%s: close\n", DRV_NAME); + + netif_stop_queue(ndev); + netif_carrier_off(ndev); + + for (i = 0; i < RIONET_RX_RING_SIZE; i++) + if (rnet->rx_skb[i]) + kfree_skb(rnet->rx_skb[i]); + + list_for_each_entry_safe(peer, tmp, &rionet_peers, node) { + if (rionet_active[peer->rdev->destid]) { + rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE); + rionet_active[peer->rdev->destid] = NULL; + } + rio_release_outb_dbell(peer->rdev, peer->res); + } + + rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN, + RIONET_DOORBELL_LEAVE); + rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX); + rio_release_outb_mbox(rnet->mport, RIONET_MAILBOX); + + return 0; +} + +static void rionet_remove(struct rio_dev *rdev) +{ + struct net_device *ndev = NULL; + struct rionet_peer *peer, *tmp; + + unregister_netdev(ndev); + kfree(ndev); + + list_for_each_entry_safe(peer, tmp, &rionet_peers, node) { + list_del(&peer->node); + kfree(peer); + } +} + +static void rionet_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + struct rionet_private *rnet = ndev->priv; + + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->fw_version, "n/a"); + strcpy(info->bus_info, rnet->mport->name); +} + +static u32 rionet_get_msglevel(struct net_device *ndev) +{ + struct rionet_private *rnet = ndev->priv; + + return rnet->msg_enable; +} + +static void rionet_set_msglevel(struct net_device *ndev, u32 value) +{ + struct rionet_private *rnet = ndev->priv; + + rnet->msg_enable = value; +} + +static struct ethtool_ops rionet_ethtool_ops = { + .get_drvinfo = rionet_get_drvinfo, + .get_msglevel = rionet_get_msglevel, + .set_msglevel = rionet_set_msglevel, + .get_link = ethtool_op_get_link, +}; + +static int rionet_setup_netdev(struct rio_mport *mport) +{ + int rc = 0; + struct net_device *ndev = NULL; + struct rionet_private *rnet; + u16 device_id; + + /* Allocate our net_device structure */ + ndev = alloc_etherdev(sizeof(struct rionet_private)); + if (ndev == NULL) { + printk(KERN_INFO "%s: could not allocate ethernet device.\n", + DRV_NAME); + rc = -ENOMEM; + goto out; + } + + /* Set up private area */ + rnet = (struct rionet_private *)ndev->priv; + rnet->mport = mport; + + /* Set the default MAC address */ + device_id = rio_local_get_device_id(mport); + ndev->dev_addr[0] = 0x00; + ndev->dev_addr[1] = 0x01; + ndev->dev_addr[2] = 0x00; + ndev->dev_addr[3] = 0x01; + ndev->dev_addr[4] = device_id >> 8; + ndev->dev_addr[5] = device_id & 0xff; + + /* Fill in the driver function table */ + ndev->open = &rionet_open; + ndev->hard_start_xmit = &rionet_start_xmit; + ndev->stop = &rionet_close; + ndev->get_stats = &rionet_stats; + ndev->mtu = RIO_MAX_MSG_SIZE - 14; + ndev->features = NETIF_F_LLTX; + SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops); + + SET_MODULE_OWNER(ndev); + + spin_lock_init(&rnet->lock); + spin_lock_init(&rnet->tx_lock); + + rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL; + + rc = register_netdev(ndev); + 
if (rc != 0) + goto out; + + printk("%s: %s %s Version %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n", + ndev->name, + DRV_NAME, + DRV_DESC, + DRV_VERSION, + ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2], + ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]); + + out: + return rc; +} + +/* + * XXX Make multi-net safe + */ +static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id) +{ + int rc = -ENODEV; + u32 lpef, lsrc_ops, ldst_ops; + struct rionet_peer *peer; + + /* If local device is not rionet capable, give up quickly */ + if (!rionet_capable) + goto out; + + /* + * First time through, make sure local device is rionet + * capable, setup netdev, and set flags so this is skipped + * on later probes + */ + if (!rionet_check) { + rio_local_read_config_32(rdev->net->hport, RIO_PEF_CAR, &lpef); + rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR, + &lsrc_ops); + rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR, + &ldst_ops); + if (!is_rionet_capable(lpef, lsrc_ops, ldst_ops)) { + printk(KERN_ERR + "%s: local device is not network capable\n", + DRV_NAME); + rionet_check = 1; + rionet_capable = 0; + goto out; + } + + rc = rionet_setup_netdev(rdev->net->hport); + rionet_check = 1; + } + + /* + * If the remote device has mailbox/doorbell capabilities, + * add it to the peer list. + */ + if (dev_rionet_capable(rdev)) { + if (!(peer = kmalloc(sizeof(struct rionet_peer), GFP_KERNEL))) { + rc = -ENOMEM; + goto out; + } + peer->rdev = rdev; + list_add_tail(&peer->node, &rionet_peers); + } + + out: + return rc; +} + +static struct rio_device_id rionet_id_table[] = { + {RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)} +}; + +static struct rio_driver rionet_driver = { + .name = "rionet", + .id_table = rionet_id_table, + .probe = rionet_probe, + .remove = rionet_remove, +}; + +static int __init rionet_init(void) +{ + return rio_register_driver(&rionet_driver); +} + +static void __exit rionet_exit(void) +{ + rio_unregister_driver(&rionet_driver); +} + +module_init(rionet_init); +module_exit(rionet_exit); diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h index 7cefe5507b9e..00179bc3437f 100644 --- a/drivers/net/s2io-regs.h +++ b/drivers/net/s2io-regs.h @@ -814,6 +814,17 @@ typedef struct _XENA_dev_config { u64 rxgxs_ber_0; /* CHANGED */ u64 rxgxs_ber_1; /* CHANGED */ + u64 spi_control; +#define SPI_CONTROL_KEY(key) vBIT(key,0,4) +#define SPI_CONTROL_BYTECNT(cnt) vBIT(cnt,29,3) +#define SPI_CONTROL_CMD(cmd) vBIT(cmd,32,8) +#define SPI_CONTROL_ADDR(addr) vBIT(addr,40,24) +#define SPI_CONTROL_SEL1 BIT(4) +#define SPI_CONTROL_REQ BIT(7) +#define SPI_CONTROL_NACK BIT(5) +#define SPI_CONTROL_DONE BIT(6) + u64 spi_data; +#define SPI_DATA_WRITE(data,len) vBIT(data,0,len) } XENA_dev_config_t; #define XENA_REG_SPACE sizeof(XENA_dev_config_t) diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index dd451e099a4c..d303d162974f 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -65,9 +65,11 @@ #include "s2io.h" #include "s2io-regs.h" +#define DRV_VERSION "Version 2.0.9.1" + /* S2io Driver name & version. */ static char s2io_driver_name[] = "Neterion"; -static char s2io_driver_version[] = "Version 2.0.8.1"; +static char s2io_driver_version[] = DRV_VERSION; static inline int RXD_IS_UP2DT(RxD_t *rxdp) { @@ -307,6 +309,8 @@ static unsigned int indicate_max_pkts; #endif /* Frequency of Rx desc syncs expressed as power of 2 */ static unsigned int rxsync_frequency = 3; +/* Interrupt type. 
Values can be 0(INTA), 1(MSI), 2(MSI_X) */ +static unsigned int intr_type = 0; /* * S2IO device table. @@ -1396,8 +1400,13 @@ static int init_nic(struct s2io_nic *nic) writeq(val64, &bar0->rti_data1_mem); val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) | - RTI_DATA2_MEM_RX_UFC_B(0x2) | - RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80); + RTI_DATA2_MEM_RX_UFC_B(0x2) ; + if (nic->intr_type == MSI_X) + val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \ + RTI_DATA2_MEM_RX_UFC_D(0x40)); + else + val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \ + RTI_DATA2_MEM_RX_UFC_D(0x80)); writeq(val64, &bar0->rti_data2_mem); for (i = 0; i < config->rx_ring_num; i++) { @@ -1507,17 +1516,15 @@ static int init_nic(struct s2io_nic *nic) #define LINK_UP_DOWN_INTERRUPT 1 #define MAC_RMAC_ERR_TIMER 2 -#if defined(CONFIG_MSI_MODE) || defined(CONFIG_MSIX_MODE) -#define s2io_link_fault_indication(x) MAC_RMAC_ERR_TIMER -#else int s2io_link_fault_indication(nic_t *nic) { + if (nic->intr_type != INTA) + return MAC_RMAC_ERR_TIMER; if (nic->device_type == XFRAME_II_DEVICE) return LINK_UP_DOWN_INTERRUPT; else return MAC_RMAC_ERR_TIMER; } -#endif /** * en_dis_able_nic_intrs - Enable or Disable the interrupts @@ -1941,11 +1948,14 @@ static int start_nic(struct s2io_nic *nic) } /* Enable select interrupts */ - interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; - interruptible |= TX_PIC_INTR | RX_PIC_INTR; - interruptible |= TX_MAC_INTR | RX_MAC_INTR; - - en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS); + if (nic->intr_type != INTA) + en_dis_able_nic_intrs(nic, ENA_ALL_INTRS, DISABLE_INTRS); + else { + interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; + interruptible |= TX_PIC_INTR | RX_PIC_INTR; + interruptible |= TX_MAC_INTR | RX_MAC_INTR; + en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS); + } /* * With some switches, link might be already up at this point. 
@@ -2633,11 +2643,11 @@ static void tx_intr_handler(fifo_info_t *fifo_data) err = txdlp->Control_1 & TXD_T_CODE; if ((err >> 48) == 0xA) { DBG_PRINT(TX_DBG, "TxD returned due \ - to loss of link\n"); +to loss of link\n"); } else { DBG_PRINT(ERR_DBG, "***TxD error \ - %llx\n", err); +%llx\n", err); } } @@ -2854,6 +2864,9 @@ void s2io_reset(nic_t * sp) /* Set swapper to enable I/O register access */ s2io_set_swapper(sp); + /* Restore the MSIX table entries from local variables */ + restore_xmsi_data(sp); + /* Clear certain PCI/PCI-X fields after reset */ if (sp->device_type == XFRAME_II_DEVICE) { /* Clear parity err detect bit */ @@ -2983,8 +2996,9 @@ int s2io_set_swapper(nic_t * sp) SWAPPER_CTRL_RXD_W_FE | SWAPPER_CTRL_RXF_W_FE | SWAPPER_CTRL_XMSI_FE | - SWAPPER_CTRL_XMSI_SE | SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE); + if (sp->intr_type == INTA) + val64 |= SWAPPER_CTRL_XMSI_SE; writeq(val64, &bar0->swapper_ctrl); #else /* @@ -3005,8 +3019,9 @@ int s2io_set_swapper(nic_t * sp) SWAPPER_CTRL_RXD_W_SE | SWAPPER_CTRL_RXF_W_FE | SWAPPER_CTRL_XMSI_FE | - SWAPPER_CTRL_XMSI_SE | SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE); + if (sp->intr_type == INTA) + val64 |= SWAPPER_CTRL_XMSI_SE; writeq(val64, &bar0->swapper_ctrl); #endif val64 = readq(&bar0->swapper_ctrl); @@ -3028,6 +3043,201 @@ int s2io_set_swapper(nic_t * sp) return SUCCESS; } +int wait_for_msix_trans(nic_t *nic, int i) +{ + XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0; + u64 val64; + int ret = 0, cnt = 0; + + do { + val64 = readq(&bar0->xmsi_access); + if (!(val64 & BIT(15))) + break; + mdelay(1); + cnt++; + } while(cnt < 5); + if (cnt == 5) { + DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i); + ret = 1; + } + + return ret; +} + +void restore_xmsi_data(nic_t *nic) +{ + XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0; + u64 val64; + int i; + + for (i=0; i< MAX_REQUESTED_MSI_X; i++) { + writeq(nic->msix_info[i].addr, &bar0->xmsi_address); + writeq(nic->msix_info[i].data, &bar0->xmsi_data); + val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6)); + writeq(val64, &bar0->xmsi_access); + if (wait_for_msix_trans(nic, i)) { + DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); + continue; + } + } +} + +void store_xmsi_data(nic_t *nic) +{ + XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0; + u64 val64, addr, data; + int i; + + /* Store and display */ + for (i=0; i< MAX_REQUESTED_MSI_X; i++) { + val64 = (BIT(15) | vBIT(i, 26, 6)); + writeq(val64, &bar0->xmsi_access); + if (wait_for_msix_trans(nic, i)) { + DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); + continue; + } + addr = readq(&bar0->xmsi_address); + data = readq(&bar0->xmsi_data); + if (addr && data) { + nic->msix_info[i].addr = addr; + nic->msix_info[i].data = data; + } + } +} + +int s2io_enable_msi(nic_t *nic) +{ + XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0; + u16 msi_ctrl, msg_val; + struct config_param *config = &nic->config; + struct net_device *dev = nic->dev; + u64 val64, tx_mat, rx_mat; + int i, err; + + val64 = readq(&bar0->pic_control); + val64 &= ~BIT(1); + writeq(val64, &bar0->pic_control); + + err = pci_enable_msi(nic->pdev); + if (err) { + DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n", + nic->dev->name); + return err; + } + + /* + * Enable MSI and use MSI-1 in stead of the standard MSI-0 + * for interrupt handling. 
+ */ + pci_read_config_word(nic->pdev, 0x4c, &msg_val); + msg_val ^= 0x1; + pci_write_config_word(nic->pdev, 0x4c, msg_val); + pci_read_config_word(nic->pdev, 0x4c, &msg_val); + + pci_read_config_word(nic->pdev, 0x42, &msi_ctrl); + msi_ctrl |= 0x10; + pci_write_config_word(nic->pdev, 0x42, msi_ctrl); + + /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */ + tx_mat = readq(&bar0->tx_mat0_n[0]); + for (i=0; itx_fifo_num; i++) { + tx_mat |= TX_MAT_SET(i, 1); + } + writeq(tx_mat, &bar0->tx_mat0_n[0]); + + rx_mat = readq(&bar0->rx_mat); + for (i=0; irx_ring_num; i++) { + rx_mat |= RX_MAT_SET(i, 1); + } + writeq(rx_mat, &bar0->rx_mat); + + dev->irq = nic->pdev->irq; + return 0; +} + +int s2io_enable_msi_x(nic_t *nic) +{ + XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0; + u64 tx_mat, rx_mat; + u16 msi_control; /* Temp variable */ + int ret, i, j, msix_indx = 1; + + nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry), + GFP_KERNEL); + if (nic->entries == NULL) { + DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__); + return -ENOMEM; + } + memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); + + nic->s2io_entries = + kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry), + GFP_KERNEL); + if (nic->s2io_entries == NULL) { + DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__); + kfree(nic->entries); + return -ENOMEM; + } + memset(nic->s2io_entries, 0, + MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); + + for (i=0; i< MAX_REQUESTED_MSI_X; i++) { + nic->entries[i].entry = i; + nic->s2io_entries[i].entry = i; + nic->s2io_entries[i].arg = NULL; + nic->s2io_entries[i].in_use = 0; + } + + tx_mat = readq(&bar0->tx_mat0_n[0]); + for (i=0; iconfig.tx_fifo_num; i++, msix_indx++) { + tx_mat |= TX_MAT_SET(i, msix_indx); + nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i]; + nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE; + nic->s2io_entries[msix_indx].in_use = MSIX_FLG; + } + writeq(tx_mat, &bar0->tx_mat0_n[0]); + + if (!nic->config.bimodal) { + rx_mat = readq(&bar0->rx_mat); + for (j=0; jconfig.rx_ring_num; j++, msix_indx++) { + rx_mat |= RX_MAT_SET(j, msix_indx); + nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j]; + nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE; + nic->s2io_entries[msix_indx].in_use = MSIX_FLG; + } + writeq(rx_mat, &bar0->rx_mat); + } else { + tx_mat = readq(&bar0->tx_mat0_n[7]); + for (j=0; jconfig.rx_ring_num; j++, msix_indx++) { + tx_mat |= TX_MAT_SET(i, msix_indx); + nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j]; + nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE; + nic->s2io_entries[msix_indx].in_use = MSIX_FLG; + } + writeq(tx_mat, &bar0->tx_mat0_n[7]); + } + + ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X); + if (ret) { + DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name); + kfree(nic->entries); + kfree(nic->s2io_entries); + nic->entries = NULL; + nic->s2io_entries = NULL; + return -ENOMEM; + } + + /* + * To enable MSI-X, MSI also needs to be enabled, due to a bug + * in the herc NIC. 
(Temp change, needs to be removed later) + */ + pci_read_config_word(nic->pdev, 0x42, &msi_control); + msi_control |= 0x1; /* Enable MSI */ + pci_write_config_word(nic->pdev, 0x42, msi_control); + + return 0; +} + /* ********************************************************* * * Functions defined below concern the OS part of the driver * * ********************************************************* */ @@ -3048,6 +3258,8 @@ int s2io_open(struct net_device *dev) { nic_t *sp = dev->priv; int err = 0; + int i; + u16 msi_control; /* Temp variable */ /* * Make sure you have link off by default every time @@ -3064,13 +3276,55 @@ int s2io_open(struct net_device *dev) goto hw_init_failed; } + /* Store the values of the MSIX table in the nic_t structure */ + store_xmsi_data(sp); + /* After proper initialization of H/W, register ISR */ - err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ, - sp->name, dev); - if (err) { - DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n", - dev->name); - goto isr_registration_failed; + if (sp->intr_type == MSI) { + err = request_irq((int) sp->pdev->irq, s2io_msi_handle, + SA_SHIRQ, sp->name, dev); + if (err) { + DBG_PRINT(ERR_DBG, "%s: MSI registration \ +failed\n", dev->name); + goto isr_registration_failed; + } + } + if (sp->intr_type == MSI_X) { + for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) { + if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) { + sprintf(sp->desc1, "%s:MSI-X-%d-TX", + dev->name, i); + err = request_irq(sp->entries[i].vector, + s2io_msix_fifo_handle, 0, sp->desc1, + sp->s2io_entries[i].arg); + DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc1, + sp->msix_info[i].addr); + } else { + sprintf(sp->desc2, "%s:MSI-X-%d-RX", + dev->name, i); + err = request_irq(sp->entries[i].vector, + s2io_msix_ring_handle, 0, sp->desc2, + sp->s2io_entries[i].arg); + DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc2, + sp->msix_info[i].addr); + } + if (err) { + DBG_PRINT(ERR_DBG, "%s: MSI-X-%d registration \ +failed\n", dev->name, i); + DBG_PRINT(ERR_DBG, "Returned: %d\n", err); + goto isr_registration_failed; + } + sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS; + } + } + if (sp->intr_type == INTA) { + err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ, + sp->name, dev); + if (err) { + DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n", + dev->name); + goto isr_registration_failed; + } } if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) { @@ -3083,11 +3337,37 @@ int s2io_open(struct net_device *dev) return 0; setting_mac_address_failed: - free_irq(sp->pdev->irq, dev); + if (sp->intr_type != MSI_X) + free_irq(sp->pdev->irq, dev); isr_registration_failed: del_timer_sync(&sp->alarm_timer); + if (sp->intr_type == MSI_X) { + if (sp->device_type == XFRAME_II_DEVICE) { + for (i=1; (sp->s2io_entries[i].in_use == + MSIX_REGISTERED_SUCCESS); i++) { + int vector = sp->entries[i].vector; + void *arg = sp->s2io_entries[i].arg; + + free_irq(vector, arg); + } + pci_disable_msix(sp->pdev); + + /* Temp */ + pci_read_config_word(sp->pdev, 0x42, &msi_control); + msi_control &= 0xFFFE; /* Disable MSI */ + pci_write_config_word(sp->pdev, 0x42, msi_control); + } + } + else if (sp->intr_type == MSI) + pci_disable_msi(sp->pdev); s2io_reset(sp); hw_init_failed: + if (sp->intr_type == MSI_X) { + if (sp->entries) + kfree(sp->entries); + if (sp->s2io_entries) + kfree(sp->s2io_entries); + } return err; } @@ -3107,12 +3387,35 @@ hw_init_failed: int s2io_close(struct net_device *dev) { nic_t *sp = dev->priv; + int i; + u16 msi_control; + flush_scheduled_work(); 
netif_stop_queue(dev); /* Reset card, kill tasklet and free Tx and Rx buffers. */ s2io_card_down(sp); - free_irq(sp->pdev->irq, dev); + if (sp->intr_type == MSI_X) { + if (sp->device_type == XFRAME_II_DEVICE) { + for (i=1; (sp->s2io_entries[i].in_use == + MSIX_REGISTERED_SUCCESS); i++) { + int vector = sp->entries[i].vector; + void *arg = sp->s2io_entries[i].arg; + + free_irq(vector, arg); + } + pci_read_config_word(sp->pdev, 0x42, &msi_control); + msi_control &= 0xFFFE; /* Disable MSI */ + pci_write_config_word(sp->pdev, 0x42, msi_control); + + pci_disable_msix(sp->pdev); + } + } + else { + free_irq(sp->pdev->irq, dev); + if (sp->intr_type == MSI) + pci_disable_msi(sp->pdev); + } sp->device_close_flag = TRUE; /* Device is shut down. */ return 0; } @@ -3278,6 +3581,104 @@ s2io_alarm_handle(unsigned long data) mod_timer(&sp->alarm_timer, jiffies + HZ / 2); } +static irqreturn_t +s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = (struct net_device *) dev_id; + nic_t *sp = dev->priv; + int i; + int ret; + mac_info_t *mac_control; + struct config_param *config; + + atomic_inc(&sp->isr_cnt); + mac_control = &sp->mac_control; + config = &sp->config; + DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__); + + /* If Intr is because of Rx Traffic */ + for (i = 0; i < config->rx_ring_num; i++) + rx_intr_handler(&mac_control->rings[i]); + + /* If Intr is because of Tx Traffic */ + for (i = 0; i < config->tx_fifo_num; i++) + tx_intr_handler(&mac_control->fifos[i]); + + /* + * If the Rx buffer count is below the panic threshold then + * reallocate the buffers from the interrupt handler itself, + * else schedule a tasklet to reallocate the buffers. + */ + for (i = 0; i < config->rx_ring_num; i++) { + int rxb_size = atomic_read(&sp->rx_bufs_left[i]); + int level = rx_buffer_level(sp, rxb_size, i); + + if ((level == PANIC) && (!TASKLET_IN_USE)) { + DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name); + DBG_PRINT(INTR_DBG, "PANIC levels\n"); + if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) { + DBG_PRINT(ERR_DBG, "%s:Out of memory", + dev->name); + DBG_PRINT(ERR_DBG, " in ISR!!\n"); + clear_bit(0, (&sp->tasklet_status)); + atomic_dec(&sp->isr_cnt); + return IRQ_HANDLED; + } + clear_bit(0, (&sp->tasklet_status)); + } else if (level == LOW) { + tasklet_schedule(&sp->task); + } + } + + atomic_dec(&sp->isr_cnt); + return IRQ_HANDLED; +} + +static irqreturn_t +s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs) +{ + ring_info_t *ring = (ring_info_t *)dev_id; + nic_t *sp = ring->nic; + int rxb_size, level, rng_n; + + atomic_inc(&sp->isr_cnt); + rx_intr_handler(ring); + + rng_n = ring->ring_no; + rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]); + level = rx_buffer_level(sp, rxb_size, rng_n); + + if ((level == PANIC) && (!TASKLET_IN_USE)) { + int ret; + DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__); + DBG_PRINT(INTR_DBG, "PANIC levels\n"); + if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) { + DBG_PRINT(ERR_DBG, "Out of memory in %s", + __FUNCTION__); + clear_bit(0, (&sp->tasklet_status)); + return IRQ_HANDLED; + } + clear_bit(0, (&sp->tasklet_status)); + } else if (level == LOW) { + tasklet_schedule(&sp->task); + } + atomic_dec(&sp->isr_cnt); + + return IRQ_HANDLED; +} + +static irqreturn_t +s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs) +{ + fifo_info_t *fifo = (fifo_info_t *)dev_id; + nic_t *sp = fifo->nic; + + atomic_inc(&sp->isr_cnt); + tx_intr_handler(fifo); + atomic_dec(&sp->isr_cnt); + return IRQ_HANDLED; +} + static void 
s2io_txpic_intr_handle(nic_t *sp) { XENA_dev_config_t __iomem *bar0 = sp->bar0; @@ -3778,11 +4179,10 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev, { nic_t *sp = dev->priv; - strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name)); - strncpy(info->version, s2io_driver_version, - sizeof(s2io_driver_version)); - strncpy(info->fw_version, "", 32); - strncpy(info->bus_info, pci_name(sp->pdev), 32); + strncpy(info->driver, s2io_driver_name, sizeof(info->driver)); + strncpy(info->version, s2io_driver_version, sizeof(info->version)); + strncpy(info->fw_version, "", sizeof(info->fw_version)); + strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info)); info->regdump_len = XENA_REG_SPACE; info->eedump_len = XENA_EEPROM_SPACE; info->testinfo_len = S2IO_TEST_LEN; @@ -3978,29 +4378,53 @@ static int s2io_ethtool_setpause_data(struct net_device *dev, */ #define S2IO_DEV_ID 5 -static int read_eeprom(nic_t * sp, int off, u32 * data) +static int read_eeprom(nic_t * sp, int off, u64 * data) { int ret = -1; u32 exit_cnt = 0; u64 val64; XENA_dev_config_t __iomem *bar0 = sp->bar0; - val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) | - I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ | - I2C_CONTROL_CNTL_START; - SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF); + if (sp->device_type == XFRAME_I_DEVICE) { + val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) | + I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ | + I2C_CONTROL_CNTL_START; + SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF); - while (exit_cnt < 5) { - val64 = readq(&bar0->i2c_control); - if (I2C_CONTROL_CNTL_END(val64)) { - *data = I2C_CONTROL_GET_DATA(val64); - ret = 0; - break; + while (exit_cnt < 5) { + val64 = readq(&bar0->i2c_control); + if (I2C_CONTROL_CNTL_END(val64)) { + *data = I2C_CONTROL_GET_DATA(val64); + ret = 0; + break; + } + msleep(50); + exit_cnt++; } - msleep(50); - exit_cnt++; } + if (sp->device_type == XFRAME_II_DEVICE) { + val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 | + SPI_CONTROL_BYTECNT(0x3) | + SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off); + SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF); + val64 |= SPI_CONTROL_REQ; + SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF); + while (exit_cnt < 5) { + val64 = readq(&bar0->spi_control); + if (val64 & SPI_CONTROL_NACK) { + ret = 1; + break; + } else if (val64 & SPI_CONTROL_DONE) { + *data = readq(&bar0->spi_data); + *data &= 0xffffff; + ret = 0; + break; + } + msleep(50); + exit_cnt++; + } + } return ret; } @@ -4019,28 +4443,53 @@ static int read_eeprom(nic_t * sp, int off, u32 * data) * 0 on success, -1 on failure. 
*/ -static int write_eeprom(nic_t * sp, int off, u32 data, int cnt) +static int write_eeprom(nic_t * sp, int off, u64 data, int cnt) { int exit_cnt = 0, ret = -1; u64 val64; XENA_dev_config_t __iomem *bar0 = sp->bar0; - val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) | - I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) | - I2C_CONTROL_CNTL_START; - SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF); + if (sp->device_type == XFRAME_I_DEVICE) { + val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) | + I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) | + I2C_CONTROL_CNTL_START; + SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF); - while (exit_cnt < 5) { - val64 = readq(&bar0->i2c_control); - if (I2C_CONTROL_CNTL_END(val64)) { - if (!(val64 & I2C_CONTROL_NACK)) - ret = 0; - break; + while (exit_cnt < 5) { + val64 = readq(&bar0->i2c_control); + if (I2C_CONTROL_CNTL_END(val64)) { + if (!(val64 & I2C_CONTROL_NACK)) + ret = 0; + break; + } + msleep(50); + exit_cnt++; } - msleep(50); - exit_cnt++; } + if (sp->device_type == XFRAME_II_DEVICE) { + int write_cnt = (cnt == 8) ? 0 : cnt; + writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data); + + val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 | + SPI_CONTROL_BYTECNT(write_cnt) | + SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off); + SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF); + val64 |= SPI_CONTROL_REQ; + SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF); + while (exit_cnt < 5) { + val64 = readq(&bar0->spi_control); + if (val64 & SPI_CONTROL_NACK) { + ret = 1; + break; + } else if (val64 & SPI_CONTROL_DONE) { + ret = 0; + break; + } + msleep(50); + exit_cnt++; + } + } return ret; } @@ -4060,7 +4509,8 @@ static int write_eeprom(nic_t * sp, int off, u32 data, int cnt) static int s2io_ethtool_geeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 * data_buf) { - u32 data, i, valid; + u32 i, valid; + u64 data; nic_t *sp = dev->priv; eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16); @@ -4098,7 +4548,7 @@ static int s2io_ethtool_seeprom(struct net_device *dev, u8 * data_buf) { int len = eeprom->len, cnt = 0; - u32 valid = 0, data; + u64 valid = 0, data; nic_t *sp = dev->priv; if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) { @@ -4146,7 +4596,7 @@ static int s2io_ethtool_seeprom(struct net_device *dev, static int s2io_register_test(nic_t * sp, uint64_t * data) { XENA_dev_config_t __iomem *bar0 = sp->bar0; - u64 val64 = 0; + u64 val64 = 0, exp_val; int fail = 0; val64 = readq(&bar0->pif_rd_swapper_fb); @@ -4162,7 +4612,11 @@ static int s2io_register_test(nic_t * sp, uint64_t * data) } val64 = readq(&bar0->rx_queue_cfg); - if (val64 != 0x0808080808080808ULL) { + if (sp->device_type == XFRAME_II_DEVICE) + exp_val = 0x0404040404040404ULL; + else + exp_val = 0x0808080808080808ULL; + if (val64 != exp_val) { fail = 1; DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n"); } @@ -4190,7 +4644,7 @@ static int s2io_register_test(nic_t * sp, uint64_t * data) } *data = fail; - return 0; + return fail; } /** @@ -4209,58 +4663,83 @@ static int s2io_register_test(nic_t * sp, uint64_t * data) static int s2io_eeprom_test(nic_t * sp, uint64_t * data) { int fail = 0; - u32 ret_data; + u64 ret_data, org_4F0, org_7F0; + u8 saved_4F0 = 0, saved_7F0 = 0; + struct net_device *dev = sp->dev; /* Test Write Error at offset 0 */ - if (!write_eeprom(sp, 0, 0, 3)) - fail = 1; + /* Note that SPI interface allows write access to all areas + * of EEPROM. Hence doing all negative testing only for Xframe I. 
+ */ + if (sp->device_type == XFRAME_I_DEVICE) + if (!write_eeprom(sp, 0, 0, 3)) + fail = 1; + + /* Save current values at offsets 0x4F0 and 0x7F0 */ + if (!read_eeprom(sp, 0x4F0, &org_4F0)) + saved_4F0 = 1; + if (!read_eeprom(sp, 0x7F0, &org_7F0)) + saved_7F0 = 1; /* Test Write at offset 4f0 */ - if (write_eeprom(sp, 0x4F0, 0x01234567, 3)) + if (write_eeprom(sp, 0x4F0, 0x012345, 3)) fail = 1; if (read_eeprom(sp, 0x4F0, &ret_data)) fail = 1; - if (ret_data != 0x01234567) + if (ret_data != 0x012345) { + DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. Data written %llx Data read %llx\n", dev->name, (u64)0x12345, ret_data); fail = 1; + } /* Reset the EEPROM data go FFFF */ - write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3); + write_eeprom(sp, 0x4F0, 0xFFFFFF, 3); /* Test Write Request Error at offset 0x7c */ - if (!write_eeprom(sp, 0x07C, 0, 3)) + if (sp->device_type == XFRAME_I_DEVICE) + if (!write_eeprom(sp, 0x07C, 0, 3)) + fail = 1; + + /* Test Write Request at offset 0x7f0 */ + if (write_eeprom(sp, 0x7F0, 0x012345, 3)) + fail = 1; + if (read_eeprom(sp, 0x7F0, &ret_data)) fail = 1; - /* Test Write Request at offset 0x7fc */ - if (write_eeprom(sp, 0x7FC, 0x01234567, 3)) - fail = 1; - if (read_eeprom(sp, 0x7FC, &ret_data)) - fail = 1; - - if (ret_data != 0x01234567) + if (ret_data != 0x012345) { + DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. Data written %llx Data read %llx\n", dev->name, (u64)0x12345, ret_data); fail = 1; + } /* Reset the EEPROM data go FFFF */ - write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3); + write_eeprom(sp, 0x7F0, 0xFFFFFF, 3); - /* Test Write Error at offset 0x80 */ - if (!write_eeprom(sp, 0x080, 0, 3)) - fail = 1; + if (sp->device_type == XFRAME_I_DEVICE) { + /* Test Write Error at offset 0x80 */ + if (!write_eeprom(sp, 0x080, 0, 3)) + fail = 1; - /* Test Write Error at offset 0xfc */ - if (!write_eeprom(sp, 0x0FC, 0, 3)) - fail = 1; + /* Test Write Error at offset 0xfc */ + if (!write_eeprom(sp, 0x0FC, 0, 3)) + fail = 1; - /* Test Write Error at offset 0x100 */ - if (!write_eeprom(sp, 0x100, 0, 3)) - fail = 1; + /* Test Write Error at offset 0x100 */ + if (!write_eeprom(sp, 0x100, 0, 3)) + fail = 1; - /* Test Write Error at offset 4ec */ - if (!write_eeprom(sp, 0x4EC, 0, 3)) - fail = 1; + /* Test Write Error at offset 4ec */ + if (!write_eeprom(sp, 0x4EC, 0, 3)) + fail = 1; + } + + /* Restore values at offsets 0x4F0 and 0x7F0 */ + if (saved_4F0) + write_eeprom(sp, 0x4F0, org_4F0, 3); + if (saved_7F0) + write_eeprom(sp, 0x7F0, org_7F0, 3); *data = fail; - return 0; + return fail; } /** @@ -4342,7 +4821,7 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data) { XENA_dev_config_t __iomem *bar0 = sp->bar0; u64 val64; - int cnt, iteration = 0, test_pass = 0; + int cnt, iteration = 0, test_fail = 0; val64 = readq(&bar0->adapter_control); val64 &= ~ADAPTER_ECC_EN; @@ -4350,7 +4829,7 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data) val64 = readq(&bar0->mc_rldram_test_ctrl); val64 |= MC_RLDRAM_TEST_MODE; - writeq(val64, &bar0->mc_rldram_test_ctrl); + SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF); val64 = readq(&bar0->mc_rldram_mrs); val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE; @@ -4378,17 +4857,12 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data) } writeq(val64, &bar0->mc_rldram_test_d2); - val64 = (u64) (0x0000003fffff0000ULL); + val64 = (u64) (0x0000003ffffe0100ULL); writeq(val64, &bar0->mc_rldram_test_add); - - val64 = MC_RLDRAM_TEST_MODE; - writeq(val64, &bar0->mc_rldram_test_ctrl); - - val64 |= - MC_RLDRAM_TEST_MODE | 
MC_RLDRAM_TEST_WRITE | - MC_RLDRAM_TEST_GO; - writeq(val64, &bar0->mc_rldram_test_ctrl); + val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE | + MC_RLDRAM_TEST_GO; + SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF); for (cnt = 0; cnt < 5; cnt++) { val64 = readq(&bar0->mc_rldram_test_ctrl); @@ -4400,11 +4874,8 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data) if (cnt == 5) break; - val64 = MC_RLDRAM_TEST_MODE; - writeq(val64, &bar0->mc_rldram_test_ctrl); - - val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO; - writeq(val64, &bar0->mc_rldram_test_ctrl); + val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO; + SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF); for (cnt = 0; cnt < 5; cnt++) { val64 = readq(&bar0->mc_rldram_test_ctrl); @@ -4417,18 +4888,18 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data) break; val64 = readq(&bar0->mc_rldram_test_ctrl); - if (val64 & MC_RLDRAM_TEST_PASS) - test_pass = 1; + if (!(val64 & MC_RLDRAM_TEST_PASS)) + test_fail = 1; iteration++; } - if (!test_pass) - *data = 1; - else - *data = 0; + *data = test_fail; - return 0; + /* Bring the adapter out of test mode */ + SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF); + + return test_fail; } /** @@ -4932,7 +5403,7 @@ static void s2io_card_down(nic_t * sp) static int s2io_card_up(nic_t * sp) { - int i, ret; + int i, ret = 0; mac_info_t *mac_control; struct config_param *config; struct net_device *dev = (struct net_device *) sp->dev; @@ -4944,6 +5415,15 @@ static int s2io_card_up(nic_t * sp) return -ENODEV; } + if (sp->intr_type == MSI) + ret = s2io_enable_msi(sp); + else if (sp->intr_type == MSI_X) + ret = s2io_enable_msi_x(sp); + if (ret) { + DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name); + sp->intr_type = INTA; + } + /* * Initializing the Rx buffers. For now we are considering only 1 * Rx ring and initializing buffers into 30 Rx blocks @@ -5228,6 +5708,8 @@ static void s2io_init_pci(nic_t * sp) MODULE_AUTHOR("Raghavendra Koushik "); MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + module_param(tx_fifo_num, int, 0); module_param(rx_ring_num, int, 0); module_param_array(tx_fifo_len, uint, NULL, 0); @@ -5245,6 +5727,7 @@ module_param(bimodal, bool, 0); module_param(indicate_max_pkts, int, 0); #endif module_param(rxsync_frequency, int, 0); +module_param(intr_type, int, 0); /** * s2io_init_nic - Initialization of the adapter . @@ -5274,9 +5757,16 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) mac_info_t *mac_control; struct config_param *config; int mode; + u8 dev_intr_type = intr_type; #ifdef CONFIG_S2IO_NAPI - DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n"); + if (dev_intr_type != INTA) { + DBG_PRINT(ERR_DBG, "NAPI cannot be enabled when MSI/MSI-X \ +is enabled. Defaulting to INTA\n"); + dev_intr_type = INTA; + } + else + DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n"); #endif if ((ret = pci_enable_device(pdev))) { @@ -5303,10 +5793,35 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) return -ENOMEM; } - if (pci_request_regions(pdev, s2io_driver_name)) { - DBG_PRINT(ERR_DBG, "Request Regions failed\n"), - pci_disable_device(pdev); - return -ENODEV; + if ((dev_intr_type == MSI_X) && + ((pdev->device != PCI_DEVICE_ID_HERC_WIN) && + (pdev->device != PCI_DEVICE_ID_HERC_UNI))) { + DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. 
\ +Defaulting to INTA\n"); + dev_intr_type = INTA; + } + if (dev_intr_type != MSI_X) { + if (pci_request_regions(pdev, s2io_driver_name)) { + DBG_PRINT(ERR_DBG, "Request Regions failed\n"), + pci_disable_device(pdev); + return -ENODEV; + } + } + else { + if (!(request_mem_region(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0), s2io_driver_name))) { + DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n"); + pci_disable_device(pdev); + return -ENODEV; + } + if (!(request_mem_region(pci_resource_start(pdev, 2), + pci_resource_len(pdev, 2), s2io_driver_name))) { + DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n"); + release_mem_region(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + pci_disable_device(pdev); + return -ENODEV; + } } dev = alloc_etherdev(sizeof(nic_t)); @@ -5329,6 +5844,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) sp->pdev = pdev; sp->high_dma_flag = dma_flag; sp->device_enabled_once = FALSE; + sp->intr_type = dev_intr_type; if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) || (pdev->device == PCI_DEVICE_ID_HERC_UNI)) @@ -5336,6 +5852,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) else sp->device_type = XFRAME_I_DEVICE; + /* Initialize some PCI/PCI-X fields of the NIC. */ s2io_init_pci(sp); @@ -5571,12 +6088,23 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) if (sp->device_type & XFRAME_II_DEVICE) { DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ", dev->name); - DBG_PRINT(ERR_DBG, "(rev %d), %s", + DBG_PRINT(ERR_DBG, "(rev %d), Version %s", get_xena_rev_id(sp->pdev), s2io_driver_version); #ifdef CONFIG_2BUFF_MODE DBG_PRINT(ERR_DBG, ", Buffer mode %d",2); #endif + switch(sp->intr_type) { + case INTA: + DBG_PRINT(ERR_DBG, ", Intr type INTA"); + break; + case MSI: + DBG_PRINT(ERR_DBG, ", Intr type MSI"); + break; + case MSI_X: + DBG_PRINT(ERR_DBG, ", Intr type MSI-X"); + break; + } DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n"); DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n", @@ -5595,12 +6123,23 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) } else { DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ", dev->name); - DBG_PRINT(ERR_DBG, "(rev %d), %s", + DBG_PRINT(ERR_DBG, "(rev %d), Version %s", get_xena_rev_id(sp->pdev), s2io_driver_version); #ifdef CONFIG_2BUFF_MODE DBG_PRINT(ERR_DBG, ", Buffer mode %d",2); #endif + switch(sp->intr_type) { + case INTA: + DBG_PRINT(ERR_DBG, ", Intr type INTA"); + break; + case MSI: + DBG_PRINT(ERR_DBG, ", Intr type MSI"); + break; + case MSI_X: + DBG_PRINT(ERR_DBG, ", Intr type MSI-X"); + break; + } DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n"); DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n", sp->def_mac_addr[0].mac_addr[0], @@ -5644,7 +6183,14 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) mem_alloc_failed: free_shared_mem(sp); pci_disable_device(pdev); - pci_release_regions(pdev); + if (dev_intr_type != MSI_X) + pci_release_regions(pdev); + else { + release_mem_region(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + release_mem_region(pci_resource_start(pdev, 2), + pci_resource_len(pdev, 2)); + } pci_set_drvdata(pdev, NULL); free_netdev(dev); @@ -5678,7 +6224,14 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev) iounmap(sp->bar0); iounmap(sp->bar1); pci_disable_device(pdev); - pci_release_regions(pdev); + if (sp->intr_type != MSI_X) + pci_release_regions(pdev); + else { + 
release_mem_region(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + release_mem_region(pci_resource_start(pdev, 2), + pci_resource_len(pdev, 2)); + } pci_set_drvdata(pdev, NULL); free_netdev(dev); } diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 89151cb52181..1cc24b56760e 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h @@ -652,6 +652,30 @@ typedef struct { #define SMALL_BLK_CNT 30 #define LARGE_BLK_CNT 100 +/* + * Structure to keep track of the MSI-X vectors and the corresponding + * argument registered against each vector + */ +#define MAX_REQUESTED_MSI_X 17 +struct s2io_msix_entry +{ + u16 vector; + u16 entry; + void *arg; + + u8 type; +#define MSIX_FIFO_TYPE 1 +#define MSIX_RING_TYPE 2 + + u8 in_use; +#define MSIX_REGISTERED_SUCCESS 0xAA +}; + +struct msix_info_st { + u64 addr; + u64 data; +}; + /* Structure representing one instance of the NIC */ struct s2io_nic { #ifdef CONFIG_S2IO_NAPI @@ -719,13 +743,8 @@ struct s2io_nic { * a schedule task that will set the correct Link state once the * NIC's PHY has stabilized after a state change. */ -#ifdef INIT_TQUEUE - struct tq_struct rst_timer_task; - struct tq_struct set_link_task; -#else struct work_struct rst_timer_task; struct work_struct set_link_task; -#endif /* Flag that can be used to turn on or turn off the Rx checksum * offload feature. @@ -748,10 +767,23 @@ struct s2io_nic { atomic_t card_state; volatile unsigned long link_state; struct vlan_group *vlgrp; +#define MSIX_FLG 0xA5 + struct msix_entry *entries; + struct s2io_msix_entry *s2io_entries; + char desc1[35]; + char desc2[35]; + + struct msix_info_st msix_info[0x3f]; + #define XFRAME_I_DEVICE 1 #define XFRAME_II_DEVICE 2 u8 device_type; +#define INTA 0 +#define MSI 1 +#define MSI_X 2 + u8 intr_type; + spinlock_t rx_lock; atomic_t isr_cnt; }; @@ -886,6 +918,13 @@ static int s2io_poll(struct net_device *dev, int *budget); static void s2io_init_pci(nic_t * sp); int s2io_set_mac_addr(struct net_device *dev, u8 * addr); static void s2io_alarm_handle(unsigned long data); +static int s2io_enable_msi(nic_t *nic); +static irqreturn_t s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs); +static irqreturn_t +s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs); +static irqreturn_t +s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs); +int s2io_enable_msi_x(nic_t *nic); static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs); static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag); static struct ethtool_ops netdev_ethtool_ops; @@ -894,4 +933,5 @@ int s2io_set_swapper(nic_t * sp); static void s2io_card_down(nic_t *nic); static int s2io_card_up(nic_t *nic); int get_xena_rev_id(struct pci_dev *pdev); +void restore_xmsi_data(nic_t *nic); #endif /* _S2IO_H */ diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c index 7abd55a4fb21..aa4ca1821759 100644 --- a/drivers/net/sb1250-mac.c +++ b/drivers/net/sb1250-mac.c @@ -10,7 +10,7 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
@@ -118,8 +118,6 @@ MODULE_PARM_DESC(int_timeout, "Timeout value"); ********************************************************************* */ -typedef unsigned long sbmac_port_t; - typedef enum { sbmac_speed_auto, sbmac_speed_10, sbmac_speed_100, sbmac_speed_1000 } sbmac_speed_t; @@ -129,7 +127,7 @@ typedef enum { sbmac_duplex_auto, sbmac_duplex_half, typedef enum { sbmac_fc_auto, sbmac_fc_disabled, sbmac_fc_frame, sbmac_fc_collision, sbmac_fc_carrier } sbmac_fc_t; -typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on, +typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on, sbmac_state_broken } sbmac_state_t; @@ -144,17 +142,13 @@ typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on, #define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES) -#define SBMAC_READCSR(t) __raw_readq((unsigned long)t) -#define SBMAC_WRITECSR(t,v) __raw_writeq(v, (unsigned long)t) - - #define SBMAC_MAX_TXDESCR 32 #define SBMAC_MAX_RXDESCR 32 #define ETHER_ALIGN 2 #define ETHER_ADDR_LEN 6 -#define ENET_PACKET_SIZE 1518 -/*#define ENET_PACKET_SIZE 9216 */ +#define ENET_PACKET_SIZE 1518 +/*#define ENET_PACKET_SIZE 9216 */ /********************************************************************** * DMA Descriptor structure @@ -172,12 +166,12 @@ typedef unsigned long paddr_t; ********************************************************************* */ typedef struct sbmacdma_s { - - /* + + /* * This stuff is used to identify the channel and the registers * associated with it. */ - + struct sbmac_softc *sbdma_eth; /* back pointer to associated MAC */ int sbdma_channel; /* channel number */ int sbdma_txdir; /* direction (1=transmit) */ @@ -187,21 +181,21 @@ typedef struct sbmacdma_s { int sbdma_int_timeout; /* # usec rx/tx interrupt */ #endif - sbmac_port_t sbdma_config0; /* DMA config register 0 */ - sbmac_port_t sbdma_config1; /* DMA config register 1 */ - sbmac_port_t sbdma_dscrbase; /* Descriptor base address */ - sbmac_port_t sbdma_dscrcnt; /* Descriptor count register */ - sbmac_port_t sbdma_curdscr; /* current descriptor address */ - + volatile void __iomem *sbdma_config0; /* DMA config register 0 */ + volatile void __iomem *sbdma_config1; /* DMA config register 1 */ + volatile void __iomem *sbdma_dscrbase; /* Descriptor base address */ + volatile void __iomem *sbdma_dscrcnt; /* Descriptor count register */ + volatile void __iomem *sbdma_curdscr; /* current descriptor address */ + /* * This stuff is for maintenance of the ring */ - + sbdmadscr_t *sbdma_dscrtable; /* base of descriptor table */ sbdmadscr_t *sbdma_dscrtable_end; /* end of descriptor table */ - + struct sk_buff **sbdma_ctxtable; /* context table, one per descr */ - + paddr_t sbdma_dscrtable_phys; /* and also the phys addr */ sbdmadscr_t *sbdma_addptr; /* next dscr for sw to add */ sbdmadscr_t *sbdma_remptr; /* next dscr for sw to remove */ @@ -213,15 +207,15 @@ typedef struct sbmacdma_s { ********************************************************************* */ struct sbmac_softc { - + /* * Linux-specific things */ - + struct net_device *sbm_dev; /* pointer to linux device */ spinlock_t sbm_lock; /* spin lock */ struct timer_list sbm_timer; /* for monitoring MII */ - struct net_device_stats sbm_stats; + struct net_device_stats sbm_stats; int sbm_devflags; /* current device flags */ int sbm_phy_oldbmsr; @@ -229,31 +223,31 @@ struct sbmac_softc { int sbm_phy_oldk1stsr; int sbm_phy_oldlinkstat; int sbm_buffersize; - + unsigned char sbm_phys[2]; - + /* * Controller-specific things */ - - unsigned long sbm_base; /* 
MAC's base address */ + + volatile void __iomem *sbm_base; /* MAC's base address */ sbmac_state_t sbm_state; /* current state */ - - sbmac_port_t sbm_macenable; /* MAC Enable Register */ - sbmac_port_t sbm_maccfg; /* MAC Configuration Register */ - sbmac_port_t sbm_fifocfg; /* FIFO configuration register */ - sbmac_port_t sbm_framecfg; /* Frame configuration register */ - sbmac_port_t sbm_rxfilter; /* receive filter register */ - sbmac_port_t sbm_isr; /* Interrupt status register */ - sbmac_port_t sbm_imr; /* Interrupt mask register */ - sbmac_port_t sbm_mdio; /* MDIO register */ - + + volatile void __iomem *sbm_macenable; /* MAC Enable Register */ + volatile void __iomem *sbm_maccfg; /* MAC Configuration Register */ + volatile void __iomem *sbm_fifocfg; /* FIFO configuration register */ + volatile void __iomem *sbm_framecfg; /* Frame configuration register */ + volatile void __iomem *sbm_rxfilter; /* receive filter register */ + volatile void __iomem *sbm_isr; /* Interrupt status register */ + volatile void __iomem *sbm_imr; /* Interrupt mask register */ + volatile void __iomem *sbm_mdio; /* MDIO register */ + sbmac_speed_t sbm_speed; /* current speed */ sbmac_duplex_t sbm_duplex; /* current duplex */ sbmac_fc_t sbm_fc; /* current flow control setting */ - + unsigned char sbm_hwaddr[ETHER_ADDR_LEN]; - + sbmacdma_t sbm_txdma; /* for now, only use channel 0 */ sbmacdma_t sbm_rxdma; int rx_hw_checksum; @@ -302,6 +296,7 @@ static void sbmac_set_rx_mode(struct net_device *dev); static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static int sbmac_close(struct net_device *dev); static int sbmac_mii_poll(struct sbmac_softc *s,int noisy); +static int sbmac_mii_probe(struct net_device *dev); static void sbmac_mii_sync(struct sbmac_softc *s); static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitcnt); @@ -439,6 +434,9 @@ static uint64_t sbmac_orig_hwaddr[MAX_UNITS]; #define MII_BMCR 0x00 /* Basic mode control register (rw) */ #define MII_BMSR 0x01 /* Basic mode status register (ro) */ +#define MII_PHYIDR1 0x02 +#define MII_PHYIDR2 0x03 + #define MII_K1STSR 0x0A /* 1K Status Register (ro) */ #define MII_ANLPAR 0x05 /* Autonegotiation lnk partner abilities (rw) */ @@ -450,13 +448,13 @@ static uint64_t sbmac_orig_hwaddr[MAX_UNITS]; /********************************************************************** * SBMAC_MII_SYNC(s) - * + * * Synchronize with the MII - send a pattern of bits to the MII * that will guarantee that it is ready to accept a command. - * - * Input parameters: + * + * Input parameters: * s - sbmac structure - * + * * Return value: * nothing ********************************************************************* */ @@ -467,25 +465,25 @@ static void sbmac_mii_sync(struct sbmac_softc *s) uint64_t bits; int mac_mdio_genc; - mac_mdio_genc = SBMAC_READCSR(s->sbm_mdio) & M_MAC_GENC; - + mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC; + bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT; - - SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc); - + + __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio); + for (cnt = 0; cnt < 32; cnt++) { - SBMAC_WRITECSR(s->sbm_mdio,bits | M_MAC_MDC | mac_mdio_genc); - SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc); + __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio); } } /********************************************************************** * SBMAC_MII_SENDDATA(s,data,bitcnt) - * + * * Send some bits to the MII. 
The bits to be sent are right- * justified in the 'data' parameter. - * - * Input parameters: + * + * Input parameters: * s - sbmac structure * data - data to send * bitcnt - number of bits to send @@ -498,20 +496,20 @@ static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitc unsigned int curmask; int mac_mdio_genc; - mac_mdio_genc = SBMAC_READCSR(s->sbm_mdio) & M_MAC_GENC; - + mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC; + bits = M_MAC_MDIO_DIR_OUTPUT; - SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc); - + __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio); + curmask = 1 << (bitcnt - 1); - + for (i = 0; i < bitcnt; i++) { if (data & curmask) bits |= M_MAC_MDIO_OUT; else bits &= ~M_MAC_MDIO_OUT; - SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc); - SBMAC_WRITECSR(s->sbm_mdio,bits | M_MAC_MDC | mac_mdio_genc); - SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc); + __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio); curmask >>= 1; } } @@ -520,14 +518,14 @@ static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitc /********************************************************************** * SBMAC_MII_READ(s,phyaddr,regidx) - * + * * Read a PHY register. - * - * Input parameters: + * + * Input parameters: * s - sbmac structure * phyaddr - PHY's address * regidx = index of register to read - * + * * Return value: * value read, or 0 if an error occurred. ********************************************************************* */ @@ -543,9 +541,9 @@ static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx) * Synchronize ourselves so that the PHY knows the next * thing coming down is a command */ - + sbmac_mii_sync(s); - + /* * Send the data to the PHY. The sequence is * a "start" command (2 bits) @@ -553,59 +551,55 @@ static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx) * the PHY addr (5 bits) * the register index (5 bits) */ - + sbmac_mii_senddata(s,MII_COMMAND_START, 2); sbmac_mii_senddata(s,MII_COMMAND_READ, 2); sbmac_mii_senddata(s,phyaddr, 5); sbmac_mii_senddata(s,regidx, 5); - - mac_mdio_genc = SBMAC_READCSR(s->sbm_mdio) & M_MAC_GENC; - - /* + + mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC; + + /* * Switch the port around without a clock transition. */ - SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_INPUT | mac_mdio_genc); - + __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio); + /* * Send out a clock pulse to signal we want the status */ - - SBMAC_WRITECSR(s->sbm_mdio, - M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc); - SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_INPUT | mac_mdio_genc); - - /* + + __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio); + + /* * If an error occurred, the PHY will signal '1' back */ - error = SBMAC_READCSR(s->sbm_mdio) & M_MAC_MDIO_IN; - - /* + error = __raw_readq(s->sbm_mdio) & M_MAC_MDIO_IN; + + /* * Issue an 'idle' clock pulse, but keep the direction * the same. 
*/ - SBMAC_WRITECSR(s->sbm_mdio, - M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc); - SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_INPUT | mac_mdio_genc); - + __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio); + regval = 0; - + for (idx = 0; idx < 16; idx++) { regval <<= 1; - + if (error == 0) { - if (SBMAC_READCSR(s->sbm_mdio) & M_MAC_MDIO_IN) + if (__raw_readq(s->sbm_mdio) & M_MAC_MDIO_IN) regval |= 1; } - - SBMAC_WRITECSR(s->sbm_mdio, - M_MAC_MDIO_DIR_INPUT|M_MAC_MDC | mac_mdio_genc); - SBMAC_WRITECSR(s->sbm_mdio, - M_MAC_MDIO_DIR_INPUT | mac_mdio_genc); + + __raw_writeq(M_MAC_MDIO_DIR_INPUT|M_MAC_MDC | mac_mdio_genc, s->sbm_mdio); + __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio); } - + /* Switch back to output */ - SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc); - + __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, s->sbm_mdio); + if (error == 0) return regval; return 0; @@ -614,15 +608,15 @@ static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx) /********************************************************************** * SBMAC_MII_WRITE(s,phyaddr,regidx,regval) - * + * * Write a value to a PHY register. - * - * Input parameters: + * + * Input parameters: * s - sbmac structure * phyaddr - PHY to use * regidx - register within the PHY * regval - data to write to register - * + * * Return value: * nothing ********************************************************************* */ @@ -633,7 +627,7 @@ static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx, int mac_mdio_genc; sbmac_mii_sync(s); - + sbmac_mii_senddata(s,MII_COMMAND_START,2); sbmac_mii_senddata(s,MII_COMMAND_WRITE,2); sbmac_mii_senddata(s,phyaddr, 5); @@ -641,27 +635,27 @@ static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx, sbmac_mii_senddata(s,MII_COMMAND_ACK,2); sbmac_mii_senddata(s,regval,16); - mac_mdio_genc = SBMAC_READCSR(s->sbm_mdio) & M_MAC_GENC; + mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC; - SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc); + __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, s->sbm_mdio); } /********************************************************************** * SBDMA_INITCTX(d,s,chan,txrx,maxdescr) - * + * * Initialize a DMA channel context. Since there are potentially * eight DMA channels per MAC, it's nice to do this in a standard - * way. - * - * Input parameters: + * way. 
+ * + * Input parameters: * d - sbmacdma_t structure (DMA channel context) * s - sbmac_softc structure (pointer to a MAC) * chan - channel number (0..1 right now) * txrx - Identifies DMA_TX or DMA_RX for channel direction * maxdescr - number of descriptors - * + * * Return value: * nothing ********************************************************************* */ @@ -672,101 +666,87 @@ static void sbdma_initctx(sbmacdma_t *d, int txrx, int maxdescr) { - /* - * Save away interesting stuff in the structure + /* + * Save away interesting stuff in the structure */ - + d->sbdma_eth = s; d->sbdma_channel = chan; d->sbdma_txdir = txrx; - + #if 0 /* RMON clearing */ s->sbe_idx =(s->sbm_base - A_MAC_BASE_0)/MAC_SPACING; #endif - SBMAC_WRITECSR(IOADDR( - A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BYTES)), 0); - SBMAC_WRITECSR(IOADDR( - A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_COLLISIONS)), 0); - SBMAC_WRITECSR(IOADDR( - A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_LATE_COL)), 0); - SBMAC_WRITECSR(IOADDR( - A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_EX_COL)), 0); - SBMAC_WRITECSR(IOADDR( - A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_FCS_ERROR)), 0); - SBMAC_WRITECSR(IOADDR( - A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_ABORT)), 0); - SBMAC_WRITECSR(IOADDR( - A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BAD)), 0); - SBMAC_WRITECSR(IOADDR( - A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_GOOD)), 0); - SBMAC_WRITECSR(IOADDR( - A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_RUNT)), 0); - SBMAC_WRITECSR(IOADDR( - A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_OVERSIZE)), 0); - SBMAC_WRITECSR(IOADDR( - A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BYTES)), 0); - SBMAC_WRITECSR(IOADDR( - A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_MCAST)), 0); - SBMAC_WRITECSR(IOADDR( - A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BCAST)), 0); - SBMAC_WRITECSR(IOADDR( - A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BAD)), 0); - SBMAC_WRITECSR(IOADDR( - A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_GOOD)), 0); - SBMAC_WRITECSR(IOADDR( - A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_RUNT)), 0); - SBMAC_WRITECSR(IOADDR( - A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_OVERSIZE)), 0); - SBMAC_WRITECSR(IOADDR( - A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_FCS_ERROR)), 0); - SBMAC_WRITECSR(IOADDR( - A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_LENGTH_ERROR)), 0); - SBMAC_WRITECSR(IOADDR( - A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_CODE_ERROR)), 0); - SBMAC_WRITECSR(IOADDR( - A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_ALIGN_ERROR)), 0); + __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BYTES))); + __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_COLLISIONS))); + __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_LATE_COL))); + __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_EX_COL))); + __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_FCS_ERROR))); + __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_ABORT))); + __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BAD))); + __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_GOOD))); + __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_RUNT))); + __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_OVERSIZE))); + __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BYTES))); + __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_MCAST))); + __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BCAST))); + __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BAD))); + __raw_writeq(0, 
IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_GOOD))); + __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_RUNT))); + __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_OVERSIZE))); + __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_FCS_ERROR))); + __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_LENGTH_ERROR))); + __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_CODE_ERROR))); + __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_ALIGN_ERROR))); - /* - * initialize register pointers + /* + * initialize register pointers */ - - d->sbdma_config0 = + + d->sbdma_config0 = s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG0); - d->sbdma_config1 = + d->sbdma_config1 = s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG1); - d->sbdma_dscrbase = + d->sbdma_dscrbase = s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_BASE); - d->sbdma_dscrcnt = + d->sbdma_dscrcnt = s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT); - d->sbdma_curdscr = + d->sbdma_curdscr = s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR); - + /* * Allocate memory for the ring */ - + d->sbdma_maxdescr = maxdescr; - - d->sbdma_dscrtable = (sbdmadscr_t *) - kmalloc(d->sbdma_maxdescr*sizeof(sbdmadscr_t), GFP_KERNEL); - + + d->sbdma_dscrtable = (sbdmadscr_t *) + kmalloc((d->sbdma_maxdescr+1)*sizeof(sbdmadscr_t), GFP_KERNEL); + + /* + * The descriptor table must be aligned to at least 16 bytes or the + * MAC will corrupt it. + */ + d->sbdma_dscrtable = (sbdmadscr_t *) + ALIGN((unsigned long)d->sbdma_dscrtable, sizeof(sbdmadscr_t)); + memset(d->sbdma_dscrtable,0,d->sbdma_maxdescr*sizeof(sbdmadscr_t)); - + d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr; - + d->sbdma_dscrtable_phys = virt_to_phys(d->sbdma_dscrtable); - + /* * And context table */ - - d->sbdma_ctxtable = (struct sk_buff **) + + d->sbdma_ctxtable = (struct sk_buff **) kmalloc(d->sbdma_maxdescr*sizeof(struct sk_buff *), GFP_KERNEL); - + memset(d->sbdma_ctxtable,0,d->sbdma_maxdescr*sizeof(struct sk_buff *)); - + #ifdef CONFIG_SBMAC_COALESCE /* * Setup Rx/Tx DMA coalescing defaults @@ -777,7 +757,7 @@ static void sbdma_initctx(sbmacdma_t *d, } else { d->sbdma_int_pktcnt = 1; } - + if ( int_timeout ) { d->sbdma_int_timeout = int_timeout; } else { @@ -789,13 +769,13 @@ static void sbdma_initctx(sbmacdma_t *d, /********************************************************************** * SBDMA_CHANNEL_START(d) - * + * * Initialize the hardware registers for a DMA channel. 
- * - * Input parameters: + * + * Input parameters: * d - DMA channel to init (context must be previously init'd * rxtx - DMA_RX or DMA_TX depending on what type of channel - * + * * Return value: * nothing ********************************************************************* */ @@ -805,24 +785,21 @@ static void sbdma_channel_start(sbmacdma_t *d, int rxtx ) /* * Turn on the DMA channel */ - + #ifdef CONFIG_SBMAC_COALESCE - SBMAC_WRITECSR(d->sbdma_config1, - V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) | - 0); - SBMAC_WRITECSR(d->sbdma_config0, - M_DMA_EOP_INT_EN | + __raw_writeq(V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) | + 0, d->sbdma_config1); + __raw_writeq(M_DMA_EOP_INT_EN | V_DMA_RINGSZ(d->sbdma_maxdescr) | V_DMA_INT_PKTCNT(d->sbdma_int_pktcnt) | - 0); + 0, d->sbdma_config0); #else - SBMAC_WRITECSR(d->sbdma_config1,0); - SBMAC_WRITECSR(d->sbdma_config0, - V_DMA_RINGSZ(d->sbdma_maxdescr) | - 0); + __raw_writeq(0, d->sbdma_config1); + __raw_writeq(V_DMA_RINGSZ(d->sbdma_maxdescr) | + 0, d->sbdma_config0); #endif - SBMAC_WRITECSR(d->sbdma_dscrbase,d->sbdma_dscrtable_phys); + __raw_writeq(d->sbdma_dscrtable_phys, d->sbdma_dscrbase); /* * Initialize ring pointers @@ -834,12 +811,12 @@ static void sbdma_channel_start(sbmacdma_t *d, int rxtx ) /********************************************************************** * SBDMA_CHANNEL_STOP(d) - * + * * Initialize the hardware registers for a DMA channel. - * - * Input parameters: + * + * Input parameters: * d - DMA channel to init (context must be previously init'd - * + * * Return value: * nothing ********************************************************************* */ @@ -849,44 +826,44 @@ static void sbdma_channel_stop(sbmacdma_t *d) /* * Turn off the DMA channel */ - - SBMAC_WRITECSR(d->sbdma_config1,0); - - SBMAC_WRITECSR(d->sbdma_dscrbase,0); - - SBMAC_WRITECSR(d->sbdma_config0,0); - + + __raw_writeq(0, d->sbdma_config1); + + __raw_writeq(0, d->sbdma_dscrbase); + + __raw_writeq(0, d->sbdma_config0); + /* * Zero ring pointers */ - - d->sbdma_addptr = 0; - d->sbdma_remptr = 0; + + d->sbdma_addptr = NULL; + d->sbdma_remptr = NULL; } static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset) { unsigned long addr; unsigned long newaddr; - + addr = (unsigned long) skb->data; - + newaddr = (addr + power2 - 1) & ~(power2 - 1); - + skb_reserve(skb,newaddr-addr+offset); } /********************************************************************** * SBDMA_ADD_RCVBUFFER(d,sb) - * + * * Add a buffer to the specified DMA channel. For receive channels, * this queues a buffer for inbound packets. - * - * Input parameters: + * + * Input parameters: * d - DMA channel descriptor * sb - sk_buff to add, or NULL if we should allocate one - * + * * Return value: * 0 if buffer could not be added (ring is full) * 1 if buffer added successfully @@ -899,24 +876,24 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb) sbdmadscr_t *nextdsc; struct sk_buff *sb_new = NULL; int pktsize = ENET_PACKET_SIZE; - + /* get pointer to our current place in the ring */ - + dsc = d->sbdma_addptr; nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr); - + /* * figure out if the ring is full - if the next descriptor * is the same as the one that we're going to remove from * the ring, the ring is full */ - + if (nextdsc == d->sbdma_remptr) { return -ENOSPC; } - /* - * Allocate a sk_buff if we don't already have one. + /* + * Allocate a sk_buff if we don't already have one. * If we do have an sk_buff, reset it so that it's empty. 
* * Note: sk_buffs don't seem to be guaranteed to have any sort @@ -925,7 +902,7 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb) * * 1. the data does not start in the middle of a cache line. * 2. The data does not end in the middle of a cache line - * 3. The buffer can be aligned such that the IP addresses are + * 3. The buffer can be aligned such that the IP addresses are * naturally aligned. * * Remember, the SOCs MAC writes whole cache lines at a time, @@ -933,7 +910,7 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb) * data portion starts in the middle of a cache line, the SOC * DMA will trash the beginning (and ending) portions. */ - + if (sb == NULL) { sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN); if (sb_new == NULL) { @@ -949,23 +926,22 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb) } else { sb_new = sb; - /* + /* * nothing special to reinit buffer, it's already aligned * and sb->data already points to a good place. */ } - + /* - * fill in the descriptor + * fill in the descriptor */ - + #ifdef CONFIG_SBMAC_COALESCE /* * Do not interrupt per DMA transfer. */ dsc->dscr_a = virt_to_phys(sb_new->data) | - V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | - 0; + V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 0; #else dsc->dscr_a = virt_to_phys(sb_new->data) | V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | @@ -974,38 +950,38 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb) /* receiving: no options */ dsc->dscr_b = 0; - + /* - * fill in the context + * fill in the context */ - + d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb_new; - - /* - * point at next packet + + /* + * point at next packet */ - + d->sbdma_addptr = nextdsc; - - /* + + /* * Give the buffer to the DMA engine. */ - - SBMAC_WRITECSR(d->sbdma_dscrcnt,1); - + + __raw_writeq(1, d->sbdma_dscrcnt); + return 0; /* we did it */ } /********************************************************************** * SBDMA_ADD_TXBUFFER(d,sb) - * + * * Add a transmit buffer to the specified DMA channel, causing a * transmit to start. - * - * Input parameters: + * + * Input parameters: * d - DMA channel descriptor * sb - sk_buff to add - * + * * Return value: * 0 transmit queued successfully * otherwise error code @@ -1019,70 +995,70 @@ static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *sb) uint64_t phys; uint64_t ncb; int length; - + /* get pointer to our current place in the ring */ - + dsc = d->sbdma_addptr; nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr); - + /* * figure out if the ring is full - if the next descriptor * is the same as the one that we're going to remove from * the ring, the ring is full */ - + if (nextdsc == d->sbdma_remptr) { return -ENOSPC; } - + /* * Under Linux, it's not necessary to copy/coalesce buffers * like it is on NetBSD. We think they're all contiguous, * but that may not be true for GBE. */ - + length = sb->len; - + /* * fill in the descriptor. Note that the number of cache * blocks in the descriptor is the number of blocks * *spanned*, so we need to add in the offset (if any) * while doing the calculation. 
*/ - + phys = virt_to_phys(sb->data); ncb = NUMCACHEBLKS(length+(phys & (SMP_CACHE_BYTES - 1))); - dsc->dscr_a = phys | + dsc->dscr_a = phys | V_DMA_DSCRA_A_SIZE(ncb) | #ifndef CONFIG_SBMAC_COALESCE M_DMA_DSCRA_INTERRUPT | #endif M_DMA_ETHTX_SOP; - + /* transmitting: set outbound options and length */ dsc->dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) | V_DMA_DSCRB_PKT_SIZE(length); - + /* - * fill in the context + * fill in the context */ - + d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb; - - /* - * point at next packet + + /* + * point at next packet */ - + d->sbdma_addptr = nextdsc; - - /* + + /* * Give the buffer to the DMA engine. */ - - SBMAC_WRITECSR(d->sbdma_dscrcnt,1); - + + __raw_writeq(1, d->sbdma_dscrcnt); + return 0; /* we did it */ } @@ -1091,12 +1067,12 @@ static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *sb) /********************************************************************** * SBDMA_EMPTYRING(d) - * + * * Free all allocated sk_buffs on the specified DMA channel; - * - * Input parameters: + * + * Input parameters: * d - DMA channel - * + * * Return value: * nothing ********************************************************************* */ @@ -1105,7 +1081,7 @@ static void sbdma_emptyring(sbmacdma_t *d) { int idx; struct sk_buff *sb; - + for (idx = 0; idx < d->sbdma_maxdescr; idx++) { sb = d->sbdma_ctxtable[idx]; if (sb) { @@ -1118,13 +1094,13 @@ static void sbdma_emptyring(sbmacdma_t *d) /********************************************************************** * SBDMA_FILLRING(d) - * + * * Fill the specified DMA channel (must be receive channel) * with sk_buffs - * - * Input parameters: + * + * Input parameters: * d - DMA channel - * + * * Return value: * nothing ********************************************************************* */ @@ -1132,7 +1108,7 @@ static void sbdma_emptyring(sbmacdma_t *d) static void sbdma_fillring(sbmacdma_t *d) { int idx; - + for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++) { if (sbdma_add_rcvbuffer(d,NULL) != 0) break; @@ -1142,16 +1118,16 @@ static void sbdma_fillring(sbmacdma_t *d) /********************************************************************** * SBDMA_RX_PROCESS(sc,d) - * - * Process "completed" receive buffers on the specified DMA channel. - * Note that this isn't really ideal for priority channels, since - * it processes all of the packets on a given channel before - * returning. * - * Input parameters: + * Process "completed" receive buffers on the specified DMA channel. + * Note that this isn't really ideal for priority channels, since + * it processes all of the packets on a given channel before + * returning. 
+ * + * Input parameters: * sc - softc structure * d - DMA channel context - * + * * Return value: * nothing ********************************************************************* */ @@ -1163,56 +1139,56 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d) sbdmadscr_t *dsc; struct sk_buff *sb; int len; - + for (;;) { - /* + /* * figure out where we are (as an index) and where * the hardware is (also as an index) * - * This could be done faster if (for example) the + * This could be done faster if (for example) the * descriptor table was page-aligned and contiguous in * both virtual and physical memory -- you could then * just compare the low-order bits of the virtual address * (sbdma_remptr) and the physical address (sbdma_curdscr CSR) */ - + curidx = d->sbdma_remptr - d->sbdma_dscrtable; - hwidx = (int) (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - + hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t)); - + /* * If they're the same, that means we've processed all * of the descriptors up to (but not including) the one that * the hardware is working on right now. */ - + if (curidx == hwidx) break; - + /* * Otherwise, get the packet's sk_buff ptr back */ - + dsc = &(d->sbdma_dscrtable[curidx]); sb = d->sbdma_ctxtable[curidx]; d->sbdma_ctxtable[curidx] = NULL; - + len = (int)G_DMA_DSCRB_PKT_SIZE(dsc->dscr_b) - 4; - + /* * Check packet status. If good, process it. * If not, silently drop it and put it back on the * receive ring. */ - + if (!(dsc->dscr_a & M_DMA_ETHRX_BAD)) { - + /* * Add a new buffer to replace the old one. If we fail * to allocate a buffer, we're going to drop this * packet and put it right back on the receive ring. */ - + if (sbdma_add_rcvbuffer(d,NULL) == -ENOBUFS) { sc->sbm_stats.rx_dropped++; sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */ @@ -1221,7 +1197,7 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d) * Set length into the packet */ skb_put(sb,len); - + /* * Buffer has been replaced on the * receive ring. Pass the buffer to @@ -1240,7 +1216,7 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d) sb->ip_summed = CHECKSUM_NONE; } } - + netif_rx(sb); } } else { @@ -1251,14 +1227,14 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d) sc->sbm_stats.rx_errors++; sbdma_add_rcvbuffer(d,sb); } - - - /* + + + /* * .. and advance to the next buffer. */ - + d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); - + } } @@ -1266,17 +1242,17 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d) /********************************************************************** * SBDMA_TX_PROCESS(sc,d) - * - * Process "completed" transmit buffers on the specified DMA channel. + * + * Process "completed" transmit buffers on the specified DMA channel. * This is normally called within the interrupt service routine. * Note that this isn't really ideal for priority channels, since - * it processes all of the packets on a given channel before - * returning. + * it processes all of the packets on a given channel before + * returning. 
* - * Input parameters: + * Input parameters: * sc - softc structure * d - DMA channel context - * + * * Return value: * nothing ********************************************************************* */ @@ -1290,21 +1266,21 @@ static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d) unsigned long flags; spin_lock_irqsave(&(sc->sbm_lock), flags); - + for (;;) { - /* + /* * figure out where we are (as an index) and where * the hardware is (also as an index) * - * This could be done faster if (for example) the + * This could be done faster if (for example) the * descriptor table was page-aligned and contiguous in * both virtual and physical memory -- you could then * just compare the low-order bits of the virtual address * (sbdma_remptr) and the physical address (sbdma_curdscr CSR) */ - + curidx = d->sbdma_remptr - d->sbdma_dscrtable; - hwidx = (int) (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - + hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t)); /* @@ -1312,75 +1288,75 @@ static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d) * of the descriptors up to (but not including) the one that * the hardware is working on right now. */ - + if (curidx == hwidx) break; - + /* * Otherwise, get the packet's sk_buff ptr back */ - + dsc = &(d->sbdma_dscrtable[curidx]); sb = d->sbdma_ctxtable[curidx]; d->sbdma_ctxtable[curidx] = NULL; - + /* * Stats */ - + sc->sbm_stats.tx_bytes += sb->len; sc->sbm_stats.tx_packets++; - + /* * for transmits, we just free buffers. */ - + dev_kfree_skb_irq(sb); - - /* + + /* * .. and advance to the next buffer. */ d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); - + } - + /* * Decide if we should wake up the protocol or not. * Other drivers seem to do this when we reach a low * watermark on the transmit queue. */ - + netif_wake_queue(d->sbdma_eth->sbm_dev); - + spin_unlock_irqrestore(&(sc->sbm_lock), flags); - + } /********************************************************************** * SBMAC_INITCTX(s) - * + * * Initialize an Ethernet context structure - this is called * once per MAC on the 1250. Memory is allocated here, so don't * call it again from inside the ioctl routines that bring the * interface up/down - * - * Input parameters: + * + * Input parameters: * s - sbmac context structure - * + * * Return value: * 0 ********************************************************************* */ static int sbmac_initctx(struct sbmac_softc *s) { - - /* - * figure out the addresses of some ports + + /* + * figure out the addresses of some ports */ - + s->sbm_macenable = s->sbm_base + R_MAC_ENABLE; s->sbm_maccfg = s->sbm_base + R_MAC_CFG; s->sbm_fifocfg = s->sbm_base + R_MAC_THRSH_CFG; @@ -1397,29 +1373,29 @@ static int sbmac_initctx(struct sbmac_softc *s) s->sbm_phy_oldanlpar = 0; s->sbm_phy_oldk1stsr = 0; s->sbm_phy_oldlinkstat = 0; - + /* * Initialize the DMA channels. Right now, only one per MAC is used * Note: Only do this _once_, as it allocates memory from the kernel! 
*/ - + sbdma_initctx(&(s->sbm_txdma),s,0,DMA_TX,SBMAC_MAX_TXDESCR); sbdma_initctx(&(s->sbm_rxdma),s,0,DMA_RX,SBMAC_MAX_RXDESCR); - + /* * initial state is OFF */ - + s->sbm_state = sbmac_state_off; - + /* * Initial speed is (XXX TEMP) 10MBit/s HDX no FC */ - + s->sbm_speed = sbmac_speed_10; s->sbm_duplex = sbmac_duplex_half; s->sbm_fc = sbmac_fc_disabled; - + return 0; } @@ -1430,7 +1406,7 @@ static void sbdma_uninitctx(struct sbmacdma_s *d) kfree(d->sbdma_dscrtable); d->sbdma_dscrtable = NULL; } - + if (d->sbdma_ctxtable) { kfree(d->sbdma_ctxtable); d->sbdma_ctxtable = NULL; @@ -1447,12 +1423,12 @@ static void sbmac_uninitctx(struct sbmac_softc *sc) /********************************************************************** * SBMAC_CHANNEL_START(s) - * + * * Start packet processing on this MAC. - * - * Input parameters: + * + * Input parameters: * s - sbmac structure - * + * * Return value: * nothing ********************************************************************* */ @@ -1460,49 +1436,49 @@ static void sbmac_uninitctx(struct sbmac_softc *sc) static void sbmac_channel_start(struct sbmac_softc *s) { uint64_t reg; - sbmac_port_t port; + volatile void __iomem *port; uint64_t cfg,fifo,framecfg; int idx, th_value; - + /* * Don't do this if running */ if (s->sbm_state == sbmac_state_on) return; - + /* * Bring the controller out of reset, but leave it off. */ - - SBMAC_WRITECSR(s->sbm_macenable,0); - + + __raw_writeq(0, s->sbm_macenable); + /* * Ignore all received packets */ - - SBMAC_WRITECSR(s->sbm_rxfilter,0); - - /* + + __raw_writeq(0, s->sbm_rxfilter); + + /* * Calculate values for various control registers. */ - + cfg = M_MAC_RETRY_EN | - M_MAC_TX_HOLD_SOP_EN | + M_MAC_TX_HOLD_SOP_EN | V_MAC_TX_PAUSE_CNT_16K | M_MAC_AP_STAT_EN | M_MAC_FAST_SYNC | M_MAC_SS_EN | 0; - - /* + + /* * Be sure that RD_THRSH+WR_THRSH <= 32 for pass1 pars * and make sure that RD_THRSH + WR_THRSH <=128 for pass2 and above * Use a larger RD_THRSH for gigabit */ - if (periph_rev >= 2) + if (periph_rev >= 2) th_value = 64; - else + else th_value = 28; fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */ @@ -1520,51 +1496,51 @@ static void sbmac_channel_start(struct sbmac_softc *s) V_MAC_BACKOFF_SEL(1); /* - * Clear out the hash address map + * Clear out the hash address map */ - + port = s->sbm_base + R_MAC_HASH_BASE; for (idx = 0; idx < MAC_HASH_COUNT; idx++) { - SBMAC_WRITECSR(port,0); + __raw_writeq(0, port); port += sizeof(uint64_t); } - + /* * Clear out the exact-match table */ - + port = s->sbm_base + R_MAC_ADDR_BASE; for (idx = 0; idx < MAC_ADDR_COUNT; idx++) { - SBMAC_WRITECSR(port,0); + __raw_writeq(0, port); port += sizeof(uint64_t); } - + /* * Clear out the DMA Channel mapping table registers */ - + port = s->sbm_base + R_MAC_CHUP0_BASE; for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) { - SBMAC_WRITECSR(port,0); + __raw_writeq(0, port); port += sizeof(uint64_t); } port = s->sbm_base + R_MAC_CHLO0_BASE; for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) { - SBMAC_WRITECSR(port,0); + __raw_writeq(0, port); port += sizeof(uint64_t); } - + /* * Program the hardware address. It goes into the hardware-address * register as well as the first filter register. */ - + reg = sbmac_addr2reg(s->sbm_hwaddr); - + port = s->sbm_base + R_MAC_ADDR_BASE; - SBMAC_WRITECSR(port,reg); + __raw_writeq(reg, port); port = s->sbm_base + R_MAC_ETHERNET_ADDR; #ifdef CONFIG_SB1_PASS_1_WORKAROUNDS @@ -1573,108 +1549,105 @@ static void sbmac_channel_start(struct sbmac_softc *s) * destination address in the R_MAC_ETHERNET_ADDR register. 
* Set the value to zero. */ - SBMAC_WRITECSR(port,0); + __raw_writeq(0, port); #else - SBMAC_WRITECSR(port,reg); + __raw_writeq(reg, port); #endif - + /* * Set the receive filter for no packets, and write values * to the various config registers */ - - SBMAC_WRITECSR(s->sbm_rxfilter,0); - SBMAC_WRITECSR(s->sbm_imr,0); - SBMAC_WRITECSR(s->sbm_framecfg,framecfg); - SBMAC_WRITECSR(s->sbm_fifocfg,fifo); - SBMAC_WRITECSR(s->sbm_maccfg,cfg); - + + __raw_writeq(0, s->sbm_rxfilter); + __raw_writeq(0, s->sbm_imr); + __raw_writeq(framecfg, s->sbm_framecfg); + __raw_writeq(fifo, s->sbm_fifocfg); + __raw_writeq(cfg, s->sbm_maccfg); + /* * Initialize DMA channels (rings should be ok now) */ - + sbdma_channel_start(&(s->sbm_rxdma), DMA_RX); sbdma_channel_start(&(s->sbm_txdma), DMA_TX); - + /* * Configure the speed, duplex, and flow control */ sbmac_set_speed(s,s->sbm_speed); sbmac_set_duplex(s,s->sbm_duplex,s->sbm_fc); - + /* * Fill the receive ring */ - + sbdma_fillring(&(s->sbm_rxdma)); - - /* + + /* * Turn on the rest of the bits in the enable register - */ - - SBMAC_WRITECSR(s->sbm_macenable, - M_MAC_RXDMA_EN0 | + */ + + __raw_writeq(M_MAC_RXDMA_EN0 | M_MAC_TXDMA_EN0 | M_MAC_RX_ENABLE | - M_MAC_TX_ENABLE); - - + M_MAC_TX_ENABLE, s->sbm_macenable); + + #ifdef CONFIG_SBMAC_COALESCE /* * Accept any TX interrupt and EOP count/timer RX interrupts on ch 0 */ - SBMAC_WRITECSR(s->sbm_imr, - ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | - ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0)); + __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | + ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), s->sbm_imr); #else /* * Accept any kind of interrupt on TX and RX DMA channel 0 */ - SBMAC_WRITECSR(s->sbm_imr, - (M_MAC_INT_CHANNEL << S_MAC_TX_CH0) | - (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)); + __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) | + (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), s->sbm_imr); #endif - - /* - * Enable receiving unicasts and broadcasts - */ - - SBMAC_WRITECSR(s->sbm_rxfilter,M_MAC_UCAST_EN | M_MAC_BCAST_EN); - + /* - * we're running now. + * Enable receiving unicasts and broadcasts */ - + + __raw_writeq(M_MAC_UCAST_EN | M_MAC_BCAST_EN, s->sbm_rxfilter); + + /* + * we're running now. + */ + s->sbm_state = sbmac_state_on; - - /* - * Program multicast addresses + + /* + * Program multicast addresses */ - + sbmac_setmulti(s); - - /* - * If channel was in promiscuous mode before, turn that on + + /* + * If channel was in promiscuous mode before, turn that on */ - + if (s->sbm_devflags & IFF_PROMISC) { sbmac_promiscuous_mode(s,1); } - + } /********************************************************************** * SBMAC_CHANNEL_STOP(s) - * + * * Stop packet processing on this MAC. 
- * - * Input parameters: + * + * Input parameters: * s - sbmac structure - * + * * Return value: * nothing ********************************************************************* */ @@ -1682,49 +1655,49 @@ static void sbmac_channel_start(struct sbmac_softc *s) static void sbmac_channel_stop(struct sbmac_softc *s) { /* don't do this if already stopped */ - + if (s->sbm_state == sbmac_state_off) return; - + /* don't accept any packets, disable all interrupts */ - - SBMAC_WRITECSR(s->sbm_rxfilter,0); - SBMAC_WRITECSR(s->sbm_imr,0); - + + __raw_writeq(0, s->sbm_rxfilter); + __raw_writeq(0, s->sbm_imr); + /* Turn off ticker */ - + /* XXX */ - + /* turn off receiver and transmitter */ - - SBMAC_WRITECSR(s->sbm_macenable,0); - + + __raw_writeq(0, s->sbm_macenable); + /* We're stopped now. */ - + s->sbm_state = sbmac_state_off; - + /* * Stop DMA channels (rings should be ok now) */ - + sbdma_channel_stop(&(s->sbm_rxdma)); sbdma_channel_stop(&(s->sbm_txdma)); - + /* Empty the receive and transmit rings */ - + sbdma_emptyring(&(s->sbm_rxdma)); sbdma_emptyring(&(s->sbm_txdma)); - + } /********************************************************************** * SBMAC_SET_CHANNEL_STATE(state) - * + * * Set the channel's state ON or OFF - * - * Input parameters: + * + * Input parameters: * state - new state - * + * * Return value: * old state ********************************************************************* */ @@ -1732,43 +1705,43 @@ static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *sc, sbmac_state_t state) { sbmac_state_t oldstate = sc->sbm_state; - + /* * If same as previous state, return */ - + if (state == oldstate) { return oldstate; } - + /* - * If new state is ON, turn channel on + * If new state is ON, turn channel on */ - + if (state == sbmac_state_on) { sbmac_channel_start(sc); } else { sbmac_channel_stop(sc); } - + /* * Return previous state */ - + return oldstate; } /********************************************************************** * SBMAC_PROMISCUOUS_MODE(sc,onoff) - * + * * Turn on or off promiscuous mode - * - * Input parameters: + * + * Input parameters: * sc - softc * onoff - 1 to turn on, 0 to turn off - * + * * Return value: * nothing ********************************************************************* */ @@ -1776,30 +1749,30 @@ static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *sc, static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff) { uint64_t reg; - + if (sc->sbm_state != sbmac_state_on) return; - + if (onoff) { - reg = SBMAC_READCSR(sc->sbm_rxfilter); + reg = __raw_readq(sc->sbm_rxfilter); reg |= M_MAC_ALLPKT_EN; - SBMAC_WRITECSR(sc->sbm_rxfilter,reg); - } + __raw_writeq(reg, sc->sbm_rxfilter); + } else { - reg = SBMAC_READCSR(sc->sbm_rxfilter); + reg = __raw_readq(sc->sbm_rxfilter); reg &= ~M_MAC_ALLPKT_EN; - SBMAC_WRITECSR(sc->sbm_rxfilter,reg); + __raw_writeq(reg, sc->sbm_rxfilter); } } /********************************************************************** * SBMAC_SETIPHDR_OFFSET(sc,onoff) - * + * * Set the iphdr offset as 15 assuming ethernet encapsulation - * - * Input parameters: + * + * Input parameters: * sc - softc - * + * * Return value: * nothing ********************************************************************* */ @@ -1807,12 +1780,12 @@ static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff) static void sbmac_set_iphdr_offset(struct sbmac_softc *sc) { uint64_t reg; - + /* Hard code the off set to 15 for now */ - reg = SBMAC_READCSR(sc->sbm_rxfilter); + reg = __raw_readq(sc->sbm_rxfilter); reg &= 
~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15); - SBMAC_WRITECSR(sc->sbm_rxfilter,reg); - + __raw_writeq(reg, sc->sbm_rxfilter); + /* read system identification to determine revision */ if (periph_rev >= 2) { sc->rx_hw_checksum = ENABLE; @@ -1824,13 +1797,13 @@ static void sbmac_set_iphdr_offset(struct sbmac_softc *sc) /********************************************************************** * SBMAC_ADDR2REG(ptr) - * + * * Convert six bytes into the 64-bit register value that * we typically write into the SBMAC's address/mcast registers - * - * Input parameters: + * + * Input parameters: * ptr - pointer to 6 bytes - * + * * Return value: * register value ********************************************************************* */ @@ -1838,35 +1811,35 @@ static void sbmac_set_iphdr_offset(struct sbmac_softc *sc) static uint64_t sbmac_addr2reg(unsigned char *ptr) { uint64_t reg = 0; - + ptr += 6; - - reg |= (uint64_t) *(--ptr); + + reg |= (uint64_t) *(--ptr); reg <<= 8; - reg |= (uint64_t) *(--ptr); + reg |= (uint64_t) *(--ptr); reg <<= 8; - reg |= (uint64_t) *(--ptr); + reg |= (uint64_t) *(--ptr); reg <<= 8; - reg |= (uint64_t) *(--ptr); + reg |= (uint64_t) *(--ptr); reg <<= 8; - reg |= (uint64_t) *(--ptr); + reg |= (uint64_t) *(--ptr); reg <<= 8; - reg |= (uint64_t) *(--ptr); - + reg |= (uint64_t) *(--ptr); + return reg; } /********************************************************************** * SBMAC_SET_SPEED(s,speed) - * + * * Configure LAN speed for the specified MAC. * Warning: must be called when MAC is off! - * - * Input parameters: + * + * Input parameters: * s - sbmac structure * speed - speed to set MAC to (see sbmac_speed_t enum) - * + * * Return value: * 1 if successful * 0 indicates invalid parameters @@ -1880,31 +1853,31 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed) /* * Save new current values */ - + s->sbm_speed = speed; - + if (s->sbm_state == sbmac_state_on) return 0; /* save for next restart */ /* - * Read current register values + * Read current register values */ - - cfg = SBMAC_READCSR(s->sbm_maccfg); - framecfg = SBMAC_READCSR(s->sbm_framecfg); - + + cfg = __raw_readq(s->sbm_maccfg); + framecfg = __raw_readq(s->sbm_framecfg); + /* * Mask out the stuff we want to change */ - + cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL); framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH | M_MAC_SLOT_SIZE); - + /* * Now add in the new bits */ - + switch (speed) { case sbmac_speed_10: framecfg |= V_MAC_IFG_RX_10 | @@ -1913,7 +1886,7 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed) V_MAC_SLOT_SIZE_10; cfg |= V_MAC_SPEED_SEL_10MBPS; break; - + case sbmac_speed_100: framecfg |= V_MAC_IFG_RX_100 | V_MAC_IFG_TX_100 | @@ -1921,7 +1894,7 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed) V_MAC_SLOT_SIZE_100; cfg |= V_MAC_SPEED_SEL_100MBPS ; break; - + case sbmac_speed_1000: framecfg |= V_MAC_IFG_RX_1000 | V_MAC_IFG_TX_1000 | @@ -1929,34 +1902,34 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed) V_MAC_SLOT_SIZE_1000; cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN; break; - + case sbmac_speed_auto: /* XXX not implemented */ /* fall through */ default: return 0; } - + /* - * Send the bits back to the hardware + * Send the bits back to the hardware */ - - SBMAC_WRITECSR(s->sbm_framecfg,framecfg); - SBMAC_WRITECSR(s->sbm_maccfg,cfg); - + + __raw_writeq(framecfg, s->sbm_framecfg); + __raw_writeq(cfg, s->sbm_maccfg); + return 1; } /********************************************************************** * 
SBMAC_SET_DUPLEX(s,duplex,fc) - * + * * Set Ethernet duplex and flow control options for this MAC * Warning: must be called when MAC is off! - * - * Input parameters: + * + * Input parameters: * s - sbmac structure * duplex - duplex setting (see sbmac_duplex_t) * fc - flow control setting (see sbmac_fc_t) - * + * * Return value: * 1 if ok * 0 if an invalid parameter combination was specified @@ -1965,67 +1938,67 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed) static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc_t fc) { uint64_t cfg; - + /* * Save new current values */ - + s->sbm_duplex = duplex; s->sbm_fc = fc; - + if (s->sbm_state == sbmac_state_on) return 0; /* save for next restart */ - + /* - * Read current register values + * Read current register values */ - - cfg = SBMAC_READCSR(s->sbm_maccfg); - + + cfg = __raw_readq(s->sbm_maccfg); + /* * Mask off the stuff we're about to change */ - + cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN); - - + + switch (duplex) { case sbmac_duplex_half: switch (fc) { case sbmac_fc_disabled: cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED; break; - + case sbmac_fc_collision: cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED; break; - + case sbmac_fc_carrier: cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR; break; - + case sbmac_fc_auto: /* XXX not implemented */ - /* fall through */ + /* fall through */ case sbmac_fc_frame: /* not valid in half duplex */ default: /* invalid selection */ return 0; } break; - + case sbmac_duplex_full: switch (fc) { case sbmac_fc_disabled: cfg |= V_MAC_FC_CMD_DISABLED; break; - + case sbmac_fc_frame: cfg |= V_MAC_FC_CMD_ENABLED; break; - + case sbmac_fc_collision: /* not valid in full duplex */ case sbmac_fc_carrier: /* not valid in full duplex */ case sbmac_fc_auto: /* XXX not implemented */ - /* fall through */ + /* fall through */ default: return 0; } @@ -2034,13 +2007,13 @@ static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc /* XXX not implemented */ break; } - + /* - * Send the bits back to the hardware + * Send the bits back to the hardware */ - - SBMAC_WRITECSR(s->sbm_maccfg,cfg); - + + __raw_writeq(cfg, s->sbm_maccfg); + return 1; } @@ -2049,12 +2022,12 @@ static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc /********************************************************************** * SBMAC_INTR() - * + * * Interrupt handler for MAC interrupts - * - * Input parameters: + * + * Input parameters: * MAC structure - * + * * Return value: * nothing ********************************************************************* */ @@ -2066,27 +2039,27 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance,struct pt_regs *rgs) int handled = 0; for (;;) { - + /* * Read the ISR (this clears the bits in the real * register, except for counter addr) */ - - isr = SBMAC_READCSR(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR; - + + isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR; + if (isr == 0) break; handled = 1; - + /* * Transmits on channel 0 */ - + if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) { sbdma_tx_process(sc,&(sc->sbm_txdma)); } - + /* * Receives on channel 0 */ @@ -2106,8 +2079,8 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance,struct pt_regs *rgs) * EOP_SEEN here takes care of this case. 
* (EOP_SEEN is part of M_MAC_INT_CHANNEL << S_MAC_RX_CH0) */ - - + + if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) { sbdma_rx_process(sc,&(sc->sbm_rxdma)); } @@ -2118,29 +2091,29 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance,struct pt_regs *rgs) /********************************************************************** * SBMAC_START_TX(skb,dev) - * - * Start output on the specified interface. Basically, we + * + * Start output on the specified interface. Basically, we * queue as many buffers as we can until the ring fills up, or * we run off the end of the queue, whichever comes first. - * - * Input parameters: - * - * + * + * Input parameters: + * + * * Return value: * nothing ********************************************************************* */ static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); - + /* lock eth irq */ spin_lock_irq (&sc->sbm_lock); - + /* - * Put the buffer on the transmit ring. If we + * Put the buffer on the transmit ring. If we * don't have room, stop the queue. */ - + if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) { /* XXX save skb that we could not send */ netif_stop_queue(dev); @@ -2148,24 +2121,24 @@ static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev) return 1; } - + dev->trans_start = jiffies; - + spin_unlock_irq (&sc->sbm_lock); - + return 0; } /********************************************************************** * SBMAC_SETMULTI(sc) - * + * * Reprogram the multicast table into the hardware, given * the list of multicasts associated with the interface * structure. - * - * Input parameters: + * + * Input parameters: * sc - softc - * + * * Return value: * nothing ********************************************************************* */ @@ -2173,75 +2146,75 @@ static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev) static void sbmac_setmulti(struct sbmac_softc *sc) { uint64_t reg; - sbmac_port_t port; + volatile void __iomem *port; int idx; struct dev_mc_list *mclist; struct net_device *dev = sc->sbm_dev; - - /* + + /* * Clear out entire multicast table. We do this by nuking * the entire hash table and all the direct matches except - * the first one, which is used for our station address + * the first one, which is used for our station address */ - + for (idx = 1; idx < MAC_ADDR_COUNT; idx++) { port = sc->sbm_base + R_MAC_ADDR_BASE+(idx*sizeof(uint64_t)); - SBMAC_WRITECSR(port,0); + __raw_writeq(0, port); } - + for (idx = 0; idx < MAC_HASH_COUNT; idx++) { port = sc->sbm_base + R_MAC_HASH_BASE+(idx*sizeof(uint64_t)); - SBMAC_WRITECSR(port,0); + __raw_writeq(0, port); } - + /* * Clear the filter to say we don't want any multicasts. */ - - reg = SBMAC_READCSR(sc->sbm_rxfilter); + + reg = __raw_readq(sc->sbm_rxfilter); reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN); - SBMAC_WRITECSR(sc->sbm_rxfilter,reg); - + __raw_writeq(reg, sc->sbm_rxfilter); + if (dev->flags & IFF_ALLMULTI) { - /* - * Enable ALL multicasts. Do this by inverting the - * multicast enable bit. + /* + * Enable ALL multicasts. Do this by inverting the + * multicast enable bit. */ - reg = SBMAC_READCSR(sc->sbm_rxfilter); + reg = __raw_readq(sc->sbm_rxfilter); reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN); - SBMAC_WRITECSR(sc->sbm_rxfilter,reg); + __raw_writeq(reg, sc->sbm_rxfilter); return; } - - /* + + /* * Progam new multicast entries. For now, only use the * perfect filter. 
In the future we'll need to use the * hash filter if the perfect filter overflows */ - + /* XXX only using perfect filter for now, need to use hash * XXX if the table overflows */ - + idx = 1; /* skip station address */ mclist = dev->mc_list; while (mclist && (idx < MAC_ADDR_COUNT)) { reg = sbmac_addr2reg(mclist->dmi_addr); port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t)); - SBMAC_WRITECSR(port,reg); + __raw_writeq(reg, port); idx++; mclist = mclist->next; } - - /* + + /* * Enable the "accept multicast bits" if we programmed at least one - * multicast. + * multicast. */ - + if (idx > 1) { - reg = SBMAC_READCSR(sc->sbm_rxfilter); + reg = __raw_readq(sc->sbm_rxfilter); reg |= M_MAC_MCAST_EN; - SBMAC_WRITECSR(sc->sbm_rxfilter,reg); + __raw_writeq(reg, sc->sbm_rxfilter); } } @@ -2250,12 +2223,12 @@ static void sbmac_setmulti(struct sbmac_softc *sc) #if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) /********************************************************************** * SBMAC_PARSE_XDIGIT(str) - * + * * Parse a hex digit, returning its value - * - * Input parameters: + * + * Input parameters: * str - character - * + * * Return value: * hex value, or -1 if invalid ********************************************************************* */ @@ -2263,7 +2236,7 @@ static void sbmac_setmulti(struct sbmac_softc *sc) static int sbmac_parse_xdigit(char str) { int digit; - + if ((str >= '0') && (str <= '9')) digit = str - '0'; else if ((str >= 'a') && (str <= 'f')) @@ -2272,20 +2245,20 @@ static int sbmac_parse_xdigit(char str) digit = str - 'A' + 10; else return -1; - + return digit; } /********************************************************************** * SBMAC_PARSE_HWADDR(str,hwaddr) - * + * * Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte * Ethernet address. - * - * Input parameters: + * + * Input parameters: * str - string * hwaddr - pointer to hardware address - * + * * Return value: * 0 if ok, else -1 ********************************************************************* */ @@ -2294,7 +2267,7 @@ static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr) { int digit1,digit2; int idx = 6; - + while (*str && (idx > 0)) { digit1 = sbmac_parse_xdigit(*str); if (digit1 < 0) @@ -2302,7 +2275,7 @@ static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr) str++; if (!*str) return -1; - + if ((*str == ':') || (*str == '-')) { digit2 = digit1; digit1 = 0; @@ -2313,10 +2286,10 @@ static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr) return -1; str++; } - + *hwaddr++ = (digit1 << 4) | digit2; idx--; - + if (*str == '-') str++; if (*str == ':') @@ -2337,12 +2310,12 @@ static int sb1250_change_mtu(struct net_device *_dev, int new_mtu) /********************************************************************** * SBMAC_INIT(dev) - * + * * Attach routine - init hardware and hook ourselves into linux - * - * Input parameters: + * + * Input parameters: * dev - net_device structure - * + * * Return value: * status ********************************************************************* */ @@ -2354,53 +2327,53 @@ static int sbmac_init(struct net_device *dev, int idx) uint64_t ea_reg; int i; int err; - + sc = netdev_priv(dev); - + /* Determine controller base address */ - + sc->sbm_base = IOADDR(dev->base_addr); sc->sbm_dev = dev; sc->sbe_idx = idx; - + eaddr = sc->sbm_hwaddr; - - /* + + /* * Read the ethernet address. The firwmare left this programmed * for us in the ethernet address register for each mac. 
*/ - - ea_reg = SBMAC_READCSR(sc->sbm_base + R_MAC_ETHERNET_ADDR); - SBMAC_WRITECSR(sc->sbm_base + R_MAC_ETHERNET_ADDR, 0); + + ea_reg = __raw_readq(sc->sbm_base + R_MAC_ETHERNET_ADDR); + __raw_writeq(0, sc->sbm_base + R_MAC_ETHERNET_ADDR); for (i = 0; i < 6; i++) { eaddr[i] = (uint8_t) (ea_reg & 0xFF); ea_reg >>= 8; } - + for (i = 0; i < 6; i++) { dev->dev_addr[i] = eaddr[i]; } - - + + /* - * Init packet size + * Init packet size */ - + sc->sbm_buffersize = ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN; - /* + /* * Initialize context (get pointers to registers and stuff), then * allocate the memory for the descriptor tables. */ - + sbmac_initctx(sc); - + /* * Set up Linux device callins */ - + spin_lock_init(&(sc->sbm_lock)); - + dev->open = sbmac_open; dev->hard_start_xmit = sbmac_start_tx; dev->stop = sbmac_close; @@ -2419,7 +2392,7 @@ static int sbmac_init(struct net_device *dev, int idx) if (err) goto out_uninit; - if (periph_rev >= 2) { + if (sc->rx_hw_checksum == ENABLE) { printk(KERN_INFO "%s: enabling TCP rcv checksum\n", sc->sbm_dev->name); } @@ -2430,10 +2403,10 @@ static int sbmac_init(struct net_device *dev, int idx) * was being displayed) */ printk(KERN_INFO - "%s: SiByte Ethernet at 0x%08lX, address: %02X:%02X:%02X:%02X:%02X:%02X\n", + "%s: SiByte Ethernet at 0x%08lX, address: %02X:%02X:%02X:%02X:%02X:%02X\n", dev->name, dev->base_addr, eaddr[0],eaddr[1],eaddr[2],eaddr[3],eaddr[4],eaddr[5]); - + return 0; @@ -2447,54 +2420,86 @@ out_uninit: static int sbmac_open(struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); - + if (debug > 1) { printk(KERN_DEBUG "%s: sbmac_open() irq %d.\n", dev->name, dev->irq); } - - /* + + /* * map/route interrupt (clear status first, in case something * weird is pending; we haven't initialized the mac registers * yet) */ - SBMAC_READCSR(sc->sbm_isr); + __raw_readq(sc->sbm_isr); if (request_irq(dev->irq, &sbmac_intr, SA_SHIRQ, dev->name, dev)) return -EBUSY; /* - * Configure default speed + * Probe phy address + */ + + if(sbmac_mii_probe(dev) == -1) { + printk("%s: failed to probe PHY.\n", dev->name); + return -EINVAL; + } + + /* + * Configure default speed */ sbmac_mii_poll(sc,noisy_mii); - + /* * Turn on the channel */ sbmac_set_channel_state(sc,sbmac_state_on); - + /* * XXX Station address is in dev->dev_addr */ - + if (dev->if_port == 0) - dev->if_port = 0; - + dev->if_port = 0; + netif_start_queue(dev); - + sbmac_set_rx_mode(dev); - + /* Set the timer to check for link beat. 
*/ init_timer(&sc->sbm_timer); sc->sbm_timer.expires = jiffies + 2 * HZ/100; sc->sbm_timer.data = (unsigned long)dev; sc->sbm_timer.function = &sbmac_timer; add_timer(&sc->sbm_timer); - + return 0; } +static int sbmac_mii_probe(struct net_device *dev) +{ + int i; + struct sbmac_softc *s = netdev_priv(dev); + u16 bmsr, id1, id2; + u32 vendor, device; + + for (i=1; i<31; i++) { + bmsr = sbmac_mii_read(s, i, MII_BMSR); + if (bmsr != 0) { + s->sbm_phys[0] = i; + id1 = sbmac_mii_read(s, i, MII_PHYIDR1); + id2 = sbmac_mii_read(s, i, MII_PHYIDR2); + vendor = ((u32)id1 << 6) | ((id2 >> 10) & 0x3f); + device = (id2 >> 4) & 0x3f; + + printk(KERN_INFO "%s: found phy %d, vendor %06x part %02x\n", + dev->name, i, vendor, device); + return i; + } + } + return -1; +} static int sbmac_mii_poll(struct sbmac_softc *s,int noisy) @@ -2609,20 +2614,20 @@ static void sbmac_timer(unsigned long data) int mii_status; spin_lock_irq (&sc->sbm_lock); - + /* make IFF_RUNNING follow the MII status bit "Link established" */ mii_status = sbmac_mii_read(sc, sc->sbm_phys[0], MII_BMSR); - + if ( (mii_status & BMSR_LINKSTAT) != (sc->sbm_phy_oldlinkstat) ) { sc->sbm_phy_oldlinkstat = mii_status & BMSR_LINKSTAT; if (mii_status & BMSR_LINKSTAT) { netif_carrier_on(dev); } else { - netif_carrier_off(dev); + netif_carrier_off(dev); } } - + /* * Poll the PHY to see what speed we should be running at */ @@ -2640,9 +2645,9 @@ static void sbmac_timer(unsigned long data) sbmac_channel_start(sc); } } - + spin_unlock_irq (&sc->sbm_lock); - + sc->sbm_timer.expires = jiffies + next_tick; add_timer(&sc->sbm_timer); } @@ -2651,13 +2656,13 @@ static void sbmac_timer(unsigned long data) static void sbmac_tx_timeout (struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); - + spin_lock_irq (&sc->sbm_lock); - - + + dev->trans_start = jiffies; sc->sbm_stats.tx_errors++; - + spin_unlock_irq (&sc->sbm_lock); printk (KERN_WARNING "%s: Transmit timed out\n",dev->name); @@ -2670,13 +2675,13 @@ static struct net_device_stats *sbmac_get_stats(struct net_device *dev) { struct sbmac_softc *sc = netdev_priv(dev); unsigned long flags; - + spin_lock_irqsave(&sc->sbm_lock, flags); - + /* XXX update other stats here */ - + spin_unlock_irqrestore(&sc->sbm_lock, flags); - + return &sc->sbm_stats; } @@ -2693,8 +2698,8 @@ static void sbmac_set_rx_mode(struct net_device *dev) /* * Promiscuous changed. */ - - if (dev->flags & IFF_PROMISC) { + + if (dev->flags & IFF_PROMISC) { /* Unconditionally log net taps. */ msg_flag = 1; sbmac_promiscuous_mode(sc,1); @@ -2705,18 +2710,18 @@ static void sbmac_set_rx_mode(struct net_device *dev) } } spin_unlock_irqrestore(&sc->sbm_lock, flags); - + if (msg_flag) { printk(KERN_NOTICE "%s: Promiscuous mode %sabled.\n", dev->name,(msg_flag==1)?"en":"dis"); } - + /* * Program the multicasts. Do this every time. */ - + sbmac_setmulti(sc); - + } static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) @@ -2725,10 +2730,10 @@ static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) u16 *data = (u16 *)&rq->ifr_ifru; unsigned long flags; int retval; - + spin_lock_irqsave(&sc->sbm_lock, flags); retval = 0; - + switch(cmd) { case SIOCDEVPRIVATE: /* Get the address of the PHY in use. 
*/ data[0] = sc->sbm_phys[0] & 0x1f; @@ -2750,7 +2755,7 @@ static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) default: retval = -EOPNOTSUPP; } - + spin_unlock_irqrestore(&sc->sbm_lock, flags); return retval; } @@ -2781,7 +2786,7 @@ static int sbmac_close(struct net_device *dev) sbdma_emptyring(&(sc->sbm_txdma)); sbdma_emptyring(&(sc->sbm_rxdma)); - + return 0; } @@ -2793,13 +2798,13 @@ sbmac_setup_hwaddr(int chan,char *addr) { uint8_t eaddr[6]; uint64_t val; - sbmac_port_t port; + unsigned long port; port = A_MAC_CHANNEL_BASE(chan); sbmac_parse_hwaddr(addr,eaddr); val = sbmac_addr2reg(eaddr); - SBMAC_WRITECSR(IOADDR(port+R_MAC_ETHERNET_ADDR),val); - val = SBMAC_READCSR(IOADDR(port+R_MAC_ETHERNET_ADDR)); + __raw_writeq(val, IOADDR(port+R_MAC_ETHERNET_ADDR)); + val = __raw_readq(IOADDR(port+R_MAC_ETHERNET_ADDR)); } #endif @@ -2810,9 +2815,9 @@ sbmac_init_module(void) { int idx; struct net_device *dev; - sbmac_port_t port; + unsigned long port; int chip_max_units; - + /* * For bringup when not using the firmware, we can pre-fill * the MAC addresses using the environment variables @@ -2858,13 +2863,13 @@ sbmac_init_module(void) port = A_MAC_CHANNEL_BASE(idx); - /* + /* * The R_MAC_ETHERNET_ADDR register will be set to some nonzero * value for us by the firmware if we're going to use this MAC. * If we find a zero, skip this MAC. */ - sbmac_orig_hwaddr[idx] = SBMAC_READCSR(IOADDR(port+R_MAC_ETHERNET_ADDR)); + sbmac_orig_hwaddr[idx] = __raw_readq(IOADDR(port+R_MAC_ETHERNET_ADDR)); if (sbmac_orig_hwaddr[idx] == 0) { printk(KERN_DEBUG "sbmac: not configuring MAC at " "%lx\n", port); @@ -2876,7 +2881,7 @@ sbmac_init_module(void) */ dev = alloc_etherdev(sizeof(struct sbmac_softc)); - if (!dev) + if (!dev) return -ENOMEM; /* return ENOMEM */ printk(KERN_DEBUG "sbmac: configuring MAC at %lx\n", port); @@ -2886,8 +2891,7 @@ sbmac_init_module(void) dev->mem_end = 0; if (sbmac_init(dev, idx)) { port = A_MAC_CHANNEL_BASE(idx); - SBMAC_WRITECSR(IOADDR(port+R_MAC_ETHERNET_ADDR), - sbmac_orig_hwaddr[idx]); + __raw_writeq(sbmac_orig_hwaddr[idx], IOADDR(port+R_MAC_ETHERNET_ADDR)); free_netdev(dev); continue; } diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c index 9bc3b1c0dd6a..a4614df38a90 100644 --- a/drivers/net/sgiseeq.c +++ b/drivers/net/sgiseeq.c @@ -32,8 +32,6 @@ #include "sgiseeq.h" -static char *version = "sgiseeq.c: David S. 
Miller (dm@engr.sgi.com)\n"; - static char *sgiseeqstr = "SGI Seeq8003"; /* @@ -113,9 +111,9 @@ static struct net_device *root_sgiseeq_dev; static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs) { - hregs->rx_reset = HPC3_ERXRST_CRESET | HPC3_ERXRST_CLRIRQ; + hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ; udelay(20); - hregs->rx_reset = 0; + hregs->reset = 0; } static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs, @@ -252,7 +250,6 @@ void sgiseeq_dump_rings(void) #define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF) #define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2) -#define RDMACFG_INIT (HPC3_ERXDCFG_FRXDC | HPC3_ERXDCFG_FEOP | HPC3_ERXDCFG_FIRQ) static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp, struct sgiseeq_regs *sregs) @@ -274,8 +271,6 @@ static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp, sregs->tstat = TSTAT_INIT_SEEQ; } - hregs->rx_dconfig |= RDMACFG_INIT; - hregs->rx_ndptr = CPHYSADDR(sp->rx_desc); hregs->tx_ndptr = CPHYSADDR(sp->tx_desc); @@ -446,7 +441,7 @@ static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id, struct pt_regs *regs spin_lock(&sp->tx_lock); /* Ack the IRQ and set software state. */ - hregs->rx_reset = HPC3_ERXRST_CLRIRQ; + hregs->reset = HPC3_ERST_CLRIRQ; /* Always check for received packets. */ sgiseeq_rx(dev, sp, hregs, sregs); @@ -493,11 +488,13 @@ static int sgiseeq_close(struct net_device *dev) { struct sgiseeq_private *sp = netdev_priv(dev); struct sgiseeq_regs *sregs = sp->sregs; + unsigned int irq = dev->irq; netif_stop_queue(dev); /* Shutdown the Seeq. */ reset_hpc3_and_seeq(sp->hregs, sregs); + free_irq(irq, dev); return 0; } @@ -644,7 +641,7 @@ static inline void setup_rx_ring(struct sgiseeq_rx_desc *buf, int nbufs) #define ALIGNED(x) ((((unsigned long)(x)) + 0xf) & ~(0xf)) -static int sgiseeq_init(struct hpc3_regs* regs, int irq) +static int sgiseeq_init(struct hpc3_regs* hpcregs, int irq) { struct sgiseeq_init_block *sr; struct sgiseeq_private *sp; @@ -680,8 +677,8 @@ static int sgiseeq_init(struct hpc3_regs* regs, int irq) gpriv = sp; gdev = dev; #endif - sp->sregs = (struct sgiseeq_regs *) &hpc3c0->eth_ext[0]; - sp->hregs = &hpc3c0->ethregs; + sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0]; + sp->hregs = &hpcregs->ethregs; sp->name = sgiseeqstr; sp->mode = SEEQ_RCMD_RBCAST; @@ -698,6 +695,11 @@ static int sgiseeq_init(struct hpc3_regs* regs, int irq) setup_rx_ring(sp->rx_desc, SEEQ_RX_BUFFERS); setup_tx_ring(sp->tx_desc, SEEQ_TX_BUFFERS); + /* Setup PIO and DMA transfer timing */ + sp->hregs->pconfig = 0x161; + sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP | + HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026; + /* Reset the chip. */ hpc3_eth_reset(sp->hregs); @@ -724,7 +726,7 @@ static int sgiseeq_init(struct hpc3_regs* regs, int irq) goto err_out_free_page; } - printk(KERN_INFO "%s: SGI Seeq8003 ", dev->name); + printk(KERN_INFO "%s: %s ", dev->name, sgiseeqstr); for (i = 0; i < 6; i++) printk("%2.2x%c", dev->dev_addr[i], i == 5 ? 
'\n' : ':'); @@ -734,7 +736,7 @@ static int sgiseeq_init(struct hpc3_regs* regs, int irq) return 0; err_out_free_page: - free_page((unsigned long) sp); + free_page((unsigned long) sp->srings); err_out_free_dev: kfree(dev); @@ -744,8 +746,6 @@ err_out: static int __init sgiseeq_probe(void) { - printk(version); - /* On board adapter on 1st HPC is always present */ return sgiseeq_init(hpc3c0, SGI_ENET_IRQ); } @@ -754,15 +754,12 @@ static void __exit sgiseeq_exit(void) { struct net_device *next, *dev; struct sgiseeq_private *sp; - int irq; for (dev = root_sgiseeq_dev; dev; dev = next) { sp = (struct sgiseeq_private *) netdev_priv(dev); next = sp->next_module; - irq = dev->irq; unregister_netdev(dev); - free_irq(irq, dev); - free_page((unsigned long) sp); + free_page((unsigned long) sp->srings); free_netdev(dev); } } @@ -770,4 +767,6 @@ static void __exit sgiseeq_exit(void) module_init(sgiseeq_probe); module_exit(sgiseeq_exit); +MODULE_DESCRIPTION("SGI Seeq 8003 driver"); +MODULE_AUTHOR("Linux/MIPS Mailing List "); MODULE_LICENSE("GPL"); diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c index 2e72d79a143c..b18c92cb629e 100644 --- a/drivers/net/sk98lin/skge.c +++ b/drivers/net/sk98lin/skge.c @@ -235,7 +235,7 @@ static int SkDrvDeInitAdapter(SK_AC *pAC, int devNbr); * Extern Function Prototypes * ******************************************************************************/ -static const char SKRootName[] = "sk98lin"; +static const char SKRootName[] = "net/sk98lin"; static struct proc_dir_entry *pSkRootDir; extern struct file_operations sk_proc_fops; @@ -5242,20 +5242,20 @@ static int __init skge_init(void) { int error; - pSkRootDir = proc_mkdir(SKRootName, proc_net); + pSkRootDir = proc_mkdir(SKRootName, NULL); if (pSkRootDir) pSkRootDir->owner = THIS_MODULE; error = pci_register_driver(&skge_driver); if (error) - proc_net_remove(SKRootName); + remove_proc_entry(SKRootName, NULL); return error; } static void __exit skge_exit(void) { pci_unregister_driver(&skge_driver); - proc_net_remove(SKRootName); + remove_proc_entry(SKRootName, NULL); } diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 0208258e7826..572f121b1f4e 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -42,7 +42,7 @@ #include "skge.h" #define DRV_NAME "skge" -#define DRV_VERSION "1.0" +#define DRV_VERSION "1.1" #define PFX DRV_NAME " " #define DEFAULT_TX_RING_SIZE 128 @@ -105,41 +105,28 @@ static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F }; static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 }; -/* Don't need to look at whole 16K. - * last interesting register is descriptor poll timer. - */ -#define SKGE_REGS_LEN (29*128) - static int skge_get_regs_len(struct net_device *dev) { - return SKGE_REGS_LEN; + return 0x4000; } /* - * Returns copy of control register region - * I/O region is divided into banks and certain regions are unreadable + * Returns copy of whole control register region + * Note: skip RAM address register because accessing it will + * cause bus hangs! 
*/ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { const struct skge_port *skge = netdev_priv(dev); - unsigned long offs; const void __iomem *io = skge->hw->regs; - static const unsigned long bankmap - = (1<<0) | (1<<2) | (1<<8) | (1<<9) - | (1<<12) | (1<<13) | (1<<14) | (1<<15) | (1<<16) - | (1<<17) | (1<<20) | (1<<21) | (1<<22) | (1<<23) - | (1<<24) | (1<<25) | (1<<26) | (1<<27) | (1<<28); regs->version = 1; - for (offs = 0; offs < regs->len; offs += 128) { - u32 len = min_t(u32, 128, regs->len - offs); + memset(p, 0, regs->len); + memcpy_fromio(p, io, B3_RAM_ADDR); - if (bankmap & (1<<(offs/128))) - memcpy_fromio(p + offs, io + offs, len); - else - memset(p + offs, 0, len); - } + memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, + regs->len - B3_RI_WTO_R1); } /* Wake on Lan only supported on Yukon chps with rev 1 or above */ @@ -743,6 +730,7 @@ static struct ethtool_ops skge_ethtool_ops = { .phys_id = skge_phys_id, .get_stats_count = skge_get_stats_count, .get_ethtool_stats = skge_get_ethtool_stats, + .get_perm_addr = ethtool_op_get_perm_addr, }; /* @@ -775,17 +763,6 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base) return 0; } -static struct sk_buff *skge_rx_alloc(struct net_device *dev, unsigned int size) -{ - struct sk_buff *skb = dev_alloc_skb(size); - - if (likely(skb)) { - skb->dev = dev; - skb_reserve(skb, NET_IP_ALIGN); - } - return skb; -} - /* Allocate and setup a new buffer for receiving */ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, struct sk_buff *skb, unsigned int bufsize) @@ -858,16 +835,17 @@ static int skge_rx_fill(struct skge_port *skge) { struct skge_ring *ring = &skge->rx_ring; struct skge_element *e; - unsigned int bufsize = skge->rx_buf_size; e = ring->start; do { - struct sk_buff *skb = skge_rx_alloc(skge->netdev, bufsize); + struct sk_buff *skb; + skb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN); if (!skb) return -ENOMEM; - skge_rx_setup(skge, e, skb, bufsize); + skb_reserve(skb, NET_IP_ALIGN); + skge_rx_setup(skge, e, skb, skge->rx_buf_size); } while ( (e = e->next) != ring->start); ring->to_clean = ring->start; @@ -1666,6 +1644,22 @@ static void yukon_reset(struct skge_hw *hw, int port) | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); } +/* Apparently, early versions of Yukon-Lite had wrong chip_id? */ +static int is_yukon_lite_a0(struct skge_hw *hw) +{ + u32 reg; + int ret; + + if (hw->chip_id != CHIP_ID_YUKON) + return 0; + + reg = skge_read32(hw, B2_FAR); + skge_write8(hw, B2_FAR + 3, 0xff); + ret = (skge_read8(hw, B2_FAR + 3) != 0); + skge_write32(hw, B2_FAR, reg); + return ret; +} + static void yukon_mac_init(struct skge_hw *hw, int port) { struct skge_port *skge = netdev_priv(hw->dev[port]); @@ -1781,9 +1775,11 @@ static void yukon_mac_init(struct skge_hw *hw, int port) /* Configure Rx MAC FIFO */ skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK); reg = GMF_OPER_ON | GMF_RX_F_FL_ON; - if (hw->chip_id == CHIP_ID_YUKON_LITE && - hw->chip_rev >= CHIP_REV_YU_LITE_A3) + + /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. 
A0 only */ + if (is_yukon_lite_a0(hw)) reg &= ~GMF_RX_F_FL_ON; + skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg); /* @@ -2442,6 +2438,14 @@ static void yukon_set_multicast(struct net_device *dev) gma_write16(hw, port, GM_RX_CTRL, reg); } +static inline u16 phy_length(const struct skge_hw *hw, u32 status) +{ + if (hw->chip_id == CHIP_ID_GENESIS) + return status >> XMR_FS_LEN_SHIFT; + else + return status >> GMR_FS_LEN_SHIFT; +} + static inline int bad_phy_status(const struct skge_hw *hw, u32 status) { if (hw->chip_id == CHIP_ID_GENESIS) @@ -2451,16 +2455,81 @@ static inline int bad_phy_status(const struct skge_hw *hw, u32 status) (status & GMR_FS_RX_OK) == 0; } -static void skge_rx_error(struct skge_port *skge, int slot, - u32 control, u32 status) + +/* Get receive buffer from descriptor. + * Handles copy of small buffers and reallocation failures + */ +static inline struct sk_buff *skge_rx_get(struct skge_port *skge, + struct skge_element *e, + u32 control, u32 status, u16 csum) { - if (netif_msg_rx_err(skge)) - printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n", - skge->netdev->name, slot, control, status); + struct sk_buff *skb; + u16 len = control & BMU_BBC; + + if (unlikely(netif_msg_rx_status(skge))) + printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n", + skge->netdev->name, e - skge->rx_ring.start, + status, len); + + if (len > skge->rx_buf_size) + goto error; if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)) - skge->net_stats.rx_length_errors++; - else if (skge->hw->chip_id == CHIP_ID_GENESIS) { + goto error; + + if (bad_phy_status(skge->hw, status)) + goto error; + + if (phy_length(skge->hw, status) != len) + goto error; + + if (len < RX_COPY_THRESHOLD) { + skb = dev_alloc_skb(len + 2); + if (!skb) + goto resubmit; + + skb_reserve(skb, 2); + pci_dma_sync_single_for_cpu(skge->hw->pdev, + pci_unmap_addr(e, mapaddr), + len, PCI_DMA_FROMDEVICE); + memcpy(skb->data, e->skb->data, len); + pci_dma_sync_single_for_device(skge->hw->pdev, + pci_unmap_addr(e, mapaddr), + len, PCI_DMA_FROMDEVICE); + skge_rx_reuse(e, skge->rx_buf_size); + } else { + struct sk_buff *nskb; + nskb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN); + if (!nskb) + goto resubmit; + + pci_unmap_single(skge->hw->pdev, + pci_unmap_addr(e, mapaddr), + pci_unmap_len(e, maplen), + PCI_DMA_FROMDEVICE); + skb = e->skb; + prefetch(skb->data); + skge_rx_setup(skge, e, nskb, skge->rx_buf_size); + } + + skb_put(skb, len); + skb->dev = skge->netdev; + if (skge->rx_csum) { + skb->csum = csum; + skb->ip_summed = CHECKSUM_HW; + } + + skb->protocol = eth_type_trans(skb, skge->netdev); + + return skb; +error: + + if (netif_msg_rx_err(skge)) + printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n", + skge->netdev->name, e - skge->rx_ring.start, + control, status); + + if (skge->hw->chip_id == CHIP_ID_GENESIS) { if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR)) skge->net_stats.rx_length_errors++; if (status & XMR_FS_FRA_ERR) @@ -2475,56 +2544,10 @@ static void skge_rx_error(struct skge_port *skge, int slot, if (status & GMR_FS_CRC_ERR) skge->net_stats.rx_crc_errors++; } -} -/* Get receive buffer from descriptor. 
- * Handles copy of small buffers and reallocation failures - */ -static inline struct sk_buff *skge_rx_get(struct skge_port *skge, - struct skge_element *e, - unsigned int len) -{ - struct sk_buff *nskb, *skb; - - if (len < RX_COPY_THRESHOLD) { - nskb = skge_rx_alloc(skge->netdev, len + NET_IP_ALIGN); - if (unlikely(!nskb)) - return NULL; - - pci_dma_sync_single_for_cpu(skge->hw->pdev, - pci_unmap_addr(e, mapaddr), - len, PCI_DMA_FROMDEVICE); - memcpy(nskb->data, e->skb->data, len); - pci_dma_sync_single_for_device(skge->hw->pdev, - pci_unmap_addr(e, mapaddr), - len, PCI_DMA_FROMDEVICE); - - if (skge->rx_csum) { - struct skge_rx_desc *rd = e->desc; - nskb->csum = le16_to_cpu(rd->csum2); - nskb->ip_summed = CHECKSUM_HW; - } - skge_rx_reuse(e, skge->rx_buf_size); - return nskb; - } else { - nskb = skge_rx_alloc(skge->netdev, skge->rx_buf_size); - if (unlikely(!nskb)) - return NULL; - - pci_unmap_single(skge->hw->pdev, - pci_unmap_addr(e, mapaddr), - pci_unmap_len(e, maplen), - PCI_DMA_FROMDEVICE); - skb = e->skb; - if (skge->rx_csum) { - struct skge_rx_desc *rd = e->desc; - skb->csum = le16_to_cpu(rd->csum2); - skb->ip_summed = CHECKSUM_HW; - } - - skge_rx_setup(skge, e, nskb, skge->rx_buf_size); - return skb; - } +resubmit: + skge_rx_reuse(e, skge->rx_buf_size); + return NULL; } @@ -2540,32 +2563,16 @@ static int skge_poll(struct net_device *dev, int *budget) for (e = ring->to_clean; work_done < to_do; e = e->next) { struct skge_rx_desc *rd = e->desc; struct sk_buff *skb; - u32 control, len, status; + u32 control; rmb(); control = rd->control; if (control & BMU_OWN) break; - len = control & BMU_BBC; - status = rd->status; - - if (unlikely((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF) - || bad_phy_status(hw, status))) { - skge_rx_error(skge, e - ring->start, control, status); - skge_rx_reuse(e, skge->rx_buf_size); - continue; - } - - if (netif_msg_rx_status(skge)) - printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n", - dev->name, e - ring->start, rd->status, len); - - skb = skge_rx_get(skge, e, len); + skb = skge_rx_get(skge, e, control, rd->status, + le16_to_cpu(rd->csum2)); if (likely(skb)) { - skb_put(skb, len); - skb->protocol = eth_type_trans(skb, dev); - dev->last_rx = jiffies; netif_receive_skb(skb); @@ -2831,21 +2838,29 @@ static void skge_netpoll(struct net_device *dev) static int skge_set_mac_address(struct net_device *dev, void *p) { struct skge_port *skge = netdev_priv(dev); - struct sockaddr *addr = p; - int err = 0; + struct skge_hw *hw = skge->hw; + unsigned port = skge->port; + const struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - skge_down(dev); + spin_lock_bh(&hw->phy_lock); memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); - memcpy_toio(skge->hw->regs + B2_MAC_1 + skge->port*8, + memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN); - memcpy_toio(skge->hw->regs + B2_MAC_2 + skge->port*8, + memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN); - if (dev->flags & IFF_UP) - err = skge_up(dev); - return err; + + if (hw->chip_id == CHIP_ID_GENESIS) + xm_outaddr(hw, port, XM_SA, dev->dev_addr); + else { + gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr); + gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr); + } + spin_unlock_bh(&hw->phy_lock); + + return 0; } static const struct { @@ -3082,6 +3097,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, /* read the mac address */ memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); + 
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); /* device is off until link detection */ netif_carrier_off(dev); diff --git a/drivers/net/skge.h b/drivers/net/skge.h index efbf98c675d2..72c175b87a5a 100644 --- a/drivers/net/skge.h +++ b/drivers/net/skge.h @@ -953,6 +953,7 @@ enum { */ enum { XMR_FS_LEN = 0x3fff<<18, /* Bit 31..18: Rx Frame Length */ + XMR_FS_LEN_SHIFT = 18, XMR_FS_2L_VLAN = 1<<17, /* Bit 17: tagged wh 2Lev VLAN ID*/ XMR_FS_1_VLAN = 1<<16, /* Bit 16: tagged wh 1ev VLAN ID*/ XMR_FS_BC = 1<<15, /* Bit 15: Broadcast Frame */ @@ -1868,6 +1869,7 @@ enum { /* Receive Frame Status Encoding */ enum { GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */ + GMR_FS_LEN_SHIFT = 16, GMR_FS_VLAN = 1<<13, /* Bit 13: VLAN Packet */ GMR_FS_JABBER = 1<<12, /* Bit 12: Jabber Packet */ GMR_FS_UN_SIZE = 1<<11, /* Bit 11: Undersize Packet */ diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c index 88b89dc95c77..efdb179ecc8c 100644 --- a/drivers/net/starfire.c +++ b/drivers/net/starfire.c @@ -133,14 +133,18 @@ - finally added firmware (GPL'ed by Adaptec) - removed compatibility code for 2.2.x + LK1.4.2.1 (Ion Badulescu) + - fixed 32/64 bit issues on i386 + CONFIG_HIGHMEM + - added 32-bit padding to outgoing skb's, removed previous workaround + TODO: - fix forced speed/duplexing code (broken a long time ago, when somebody converted the driver to use the generic MII code) - fix VLAN support */ #define DRV_NAME "starfire" -#define DRV_VERSION "1.03+LK1.4.2" -#define DRV_RELDATE "January 19, 2005" +#define DRV_VERSION "1.03+LK1.4.2.1" +#define DRV_RELDATE "October 3, 2005" #include #include @@ -165,6 +169,14 @@ TODO: - fix forced speed/duplexing code (broken a long time ago, when * of length 1. If and when this is fixed, the #define below can be removed. */ #define HAS_BROKEN_FIRMWARE + +/* + * If using the broken firmware, data must be padded to the next 32-bit boundary. + */ +#ifdef HAS_BROKEN_FIRMWARE +#define PADDING_MASK 3 +#endif + /* * Define this if using the driver with the zero-copy patch */ @@ -257,9 +269,10 @@ static int full_duplex[MAX_UNITS] = {0, }; * This SUCKS. * We need a much better method to determine if dma_addr_t is 64-bit. */ -#if (defined(__i386__) && defined(CONFIG_HIGHMEM) && (LINUX_VERSION_CODE > 0x20500 || defined(CONFIG_HIGHMEM64G))) || defined(__x86_64__) || defined (__ia64__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) +#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) /* 64-bit dma_addr_t */ #define ADDR_64BITS /* This chip uses 64 bit addresses. 
*/ +#define netdrv_addr_t u64 #define cpu_to_dma(x) cpu_to_le64(x) #define dma_to_cpu(x) le64_to_cpu(x) #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit @@ -268,6 +281,7 @@ static int full_duplex[MAX_UNITS] = {0, }; #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit #define RX_DESC_ADDR_SIZE RxDescAddr64bit #else /* 32-bit dma_addr_t */ +#define netdrv_addr_t u32 #define cpu_to_dma(x) cpu_to_le32(x) #define dma_to_cpu(x) le32_to_cpu(x) #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit @@ -1333,21 +1347,10 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev) } #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) - { - int has_bad_length = 0; - - if (skb_first_frag_len(skb) == 1) - has_bad_length = 1; - else { - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) - if (skb_shinfo(skb)->frags[i].size == 1) { - has_bad_length = 1; - break; - } - } - - if (has_bad_length) - skb_checksum_help(skb, 0); + if (skb->ip_summed == CHECKSUM_HW) { + skb = skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK); + if (skb == NULL) + return NETDEV_TX_OK; } #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */ @@ -2127,13 +2130,12 @@ static int __init starfire_init (void) #endif #endif -#ifndef ADDR_64BITS /* we can do this test only at run-time... sigh */ - if (sizeof(dma_addr_t) == sizeof(u64)) { - printk("This driver has not been ported to this 64-bit architecture yet\n"); + if (sizeof(dma_addr_t) != sizeof(netdrv_addr_t)) { + printk("This driver has dma_addr_t issues, please send email to maintainer\n"); return -ENODEV; } -#endif /* not ADDR_64BITS */ + return pci_module_init (&starfire_driver); } diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c index f88f5e32b714..cfaf47c63c58 100644 --- a/drivers/net/sunbmac.c +++ b/drivers/net/sunbmac.c @@ -214,7 +214,8 @@ static void bigmac_init_rings(struct bigmac *bp, int from_irq) { struct bmac_init_block *bb = bp->bmac_block; struct net_device *dev = bp->dev; - int i, gfp_flags = GFP_KERNEL; + int i; + gfp_t gfp_flags = GFP_KERNEL; if (from_irq || in_interrupt()) gfp_flags = GFP_ATOMIC; diff --git a/drivers/net/sunbmac.h b/drivers/net/sunbmac.h index 5674003fc38a..b0dbc5187143 100644 --- a/drivers/net/sunbmac.h +++ b/drivers/net/sunbmac.h @@ -339,7 +339,7 @@ struct bigmac { #define ALIGNED_RX_SKB_ADDR(addr) \ ((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr)) -static inline struct sk_buff *big_mac_alloc_skb(unsigned int length, int gfp_flags) +static inline struct sk_buff *big_mac_alloc_skb(unsigned int length, gfp_t gfp_flags) { struct sk_buff *skb; diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c index d500a5771dbc..5de0554fd7c6 100644 --- a/drivers/net/sundance.c +++ b/drivers/net/sundance.c @@ -518,6 +518,7 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev, #else int bar = 1; #endif + int phy, phy_idx = 0; /* when built into the kernel, we only print version if device is found */ @@ -549,6 +550,7 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev, for (i = 0; i < 3; i++) ((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET)); + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); dev->base_addr = (unsigned long)ioaddr; dev->irq = irq; @@ -605,32 +607,30 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev, printk("%2.2x:", dev->dev_addr[i]); printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq); - if (1) { - int phy, phy_idx = 0; - np->phys[0] = 1; /* Default setting */ - np->mii_preamble_required++; - for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) { - int 
mii_status = mdio_read(dev, phy, MII_BMSR); - if (mii_status != 0xffff && mii_status != 0x0000) { - np->phys[phy_idx++] = phy; - np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE); - if ((mii_status & 0x0040) == 0) - np->mii_preamble_required++; - printk(KERN_INFO "%s: MII PHY found at address %d, status " - "0x%4.4x advertising %4.4x.\n", - dev->name, phy, mii_status, np->mii_if.advertising); - } + np->phys[0] = 1; /* Default setting */ + np->mii_preamble_required++; + for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) { + int mii_status = mdio_read(dev, phy, MII_BMSR); + int phyx = phy & 0x1f; + if (mii_status != 0xffff && mii_status != 0x0000) { + np->phys[phy_idx++] = phyx; + np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE); + if ((mii_status & 0x0040) == 0) + np->mii_preamble_required++; + printk(KERN_INFO "%s: MII PHY found at address %d, status " + "0x%4.4x advertising %4.4x.\n", + dev->name, phyx, mii_status, np->mii_if.advertising); } - np->mii_preamble_required--; - - if (phy_idx == 0) { - printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n", - dev->name, ioread32(ioaddr + ASICCtrl)); - goto err_out_unregister; - } - - np->mii_if.phy_id = np->phys[0]; } + np->mii_preamble_required--; + + if (phy_idx == 0) { + printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n", + dev->name, ioread32(ioaddr + ASICCtrl)); + goto err_out_unregister; + } + + np->mii_if.phy_id = np->phys[0]; /* Parse override configuration */ np->an_enable = 1; @@ -692,7 +692,7 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev, /* Reset the chip to erase previous misconfiguration. */ if (netif_msg_hw(np)) printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl)); - iowrite16(0x007f, ioaddr + ASICCtrl + 2); + iowrite16(0x00ff, ioaddr + ASICCtrl + 2); if (netif_msg_hw(np)) printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl)); @@ -1619,6 +1619,7 @@ static struct ethtool_ops ethtool_ops = { .get_link = get_link, .get_msglevel = get_msglevel, .set_msglevel = set_msglevel, + .get_perm_addr = ethtool_op_get_perm_addr, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h index ff8ae5f79970..13006d759ad8 100644 --- a/drivers/net/sungem.h +++ b/drivers/net/sungem.h @@ -1035,7 +1035,8 @@ struct gem { #define ALIGNED_RX_SKB_ADDR(addr) \ ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr)) -static __inline__ struct sk_buff *gem_alloc_skb(int size, int gfp_flags) +static __inline__ struct sk_buff *gem_alloc_skb(int size, + gfp_t gfp_flags) { struct sk_buff *skb = alloc_skb(size + 64, gfp_flags); diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 81f4aedf534c..1802c3b48799 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -67,8 +67,8 @@ #define DRV_MODULE_NAME "tg3" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "3.40" -#define DRV_MODULE_RELDATE "September 15, 2005" +#define DRV_MODULE_VERSION "3.42" +#define DRV_MODULE_RELDATE "Oct 3, 2005" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -3389,7 +3389,8 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id, struct tg3 *tp = netdev_priv(dev); struct tg3_hw_status *sblk = tp->hw_status; - if (sblk->status & SD_STATUS_UPDATED) { + if ((sblk->status & SD_STATUS_UPDATED) || + !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); return IRQ_RETVAL(1); @@ -5395,6 +5396,9 @@ 
static int tg3_set_mac_addr(struct net_device *dev, void *p) struct tg3 *tp = netdev_priv(dev); struct sockaddr *addr = p; + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); spin_lock_bh(&tp->lock); @@ -5806,6 +5810,13 @@ static int tg3_reset_hw(struct tg3 *tp) } memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE); + if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { + tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; + /* reset to prevent losing 1st rx packet intermittently */ + tw32_f(MAC_RX_MODE, RX_MODE_RESET); + udelay(10); + } + tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE; tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR); @@ -5937,7 +5948,7 @@ static int tg3_reset_hw(struct tg3 *tp) tw32(MAC_LED_CTRL, tp->led_ctrl); tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB); - if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) { + if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { tw32_f(MAC_RX_MODE, RX_MODE_RESET); udelay(10); } @@ -7360,12 +7371,17 @@ static int tg3_nway_reset(struct net_device *dev) if (!netif_running(dev)) return -EAGAIN; + if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) + return -EINVAL; + spin_lock_bh(&tp->lock); r = -EINVAL; tg3_readphy(tp, MII_BMCR, &bmcr); if (!tg3_readphy(tp, MII_BMCR, &bmcr) && - (bmcr & BMCR_ANENABLE)) { - tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART); + ((bmcr & BMCR_ANENABLE) || + (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) { + tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART | + BMCR_ANENABLE); r = 0; } spin_unlock_bh(&tp->lock); @@ -7927,19 +7943,32 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) struct tg3_rx_buffer_desc *desc; if (loopback_mode == TG3_MAC_LOOPBACK) { + /* HW errata - mac loopback fails in some cases on 5780. + * Normal traffic and PHY loopback are not affected by + * errata. + */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) + return 0; + mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII; tw32(MAC_MODE, mac_mode); } else if (loopback_mode == TG3_PHY_LOOPBACK) { + tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX | + BMCR_SPEED1000); + udelay(40); + /* reset to prevent losing 1st rx packet intermittently */ + if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { + tw32_f(MAC_RX_MODE, RX_MODE_RESET); + udelay(10); + tw32_f(MAC_RX_MODE, tp->rx_mode); + } mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII; if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) mac_mode &= ~MAC_MODE_LINK_POLARITY; tw32(MAC_MODE, mac_mode); - - tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX | - BMCR_SPEED1000); } else return -EINVAL; @@ -9255,8 +9284,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) static struct pci_device_id write_reorder_chipsets[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, - { PCI_DEVICE(PCI_VENDOR_ID_AMD, - PCI_DEVICE_ID_AMD_K8_NB) }, + { PCI_DEVICE(PCI_VENDOR_ID_VIA, + PCI_DEVICE_ID_VIA_8385_0) }, { }, }; u32 misc_ctrl_reg; @@ -9271,15 +9300,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->tg3_flags2 |= TG3_FLG2_SUN_570X; #endif - /* If we have an AMD 762 or K8 chipset, write - * reordering to the mailbox registers done by the host - * controller can cause major troubles. 
We read back from - * every mailbox register write to force the writes to be - * posted to the chip in order. - */ - if (pci_dev_present(write_reorder_chipsets)) - tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER; - /* Force memory write invalidate off. If we leave it on, * then on 5700_BX chips we have to enable a workaround. * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary @@ -9410,6 +9430,16 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0) tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; + /* If we have an AMD 762 or VIA K8T800 chipset, write + * reordering to the mailbox registers done by the host + * controller can cause major troubles. We read back from + * every mailbox register write to force the writes to be + * posted to the chip in order. + */ + if (pci_dev_present(write_reorder_chipsets) && + !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) + tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 && tp->pci_lat_timer < 64) { tp->pci_lat_timer = 64; @@ -10324,6 +10354,44 @@ static char * __devinit tg3_phy_string(struct tg3 *tp) }; } +static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) +{ + if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { + strcpy(str, "PCI Express"); + return str; + } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) { + u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f; + + strcpy(str, "PCIX:"); + + if ((clock_ctrl == 7) || + ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) == + GRC_MISC_CFG_BOARD_ID_5704CIOBE)) + strcat(str, "133MHz"); + else if (clock_ctrl == 0) + strcat(str, "33MHz"); + else if (clock_ctrl == 2) + strcat(str, "50MHz"); + else if (clock_ctrl == 4) + strcat(str, "66MHz"); + else if (clock_ctrl == 6) + strcat(str, "100MHz"); + else if (clock_ctrl == 7) + strcat(str, "133MHz"); + } else { + strcpy(str, "PCI:"); + if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) + strcat(str, "66MHz"); + else + strcat(str, "33MHz"); + } + if (tp->tg3_flags & TG3_FLAG_PCI_32BIT) + strcat(str, ":32-bit"); + else + strcat(str, ":64-bit"); + return str; +} + static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp) { struct pci_dev *peer; @@ -10386,6 +10454,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, struct net_device *dev; struct tg3 *tp; int i, err, pci_using_dac, pm_cap; + char str[40]; if (tg3_version_printed++ == 0) printk(KERN_INFO "%s", version); @@ -10631,16 +10700,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, pci_set_drvdata(pdev, dev); - printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ", + printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ", dev->name, tp->board_part_number, tp->pci_chip_rev_id, tg3_phy_string(tp), - ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""), - ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ? - ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") : - ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")), - ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"), + tg3_bus_string(tp, str), (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? 
"10/100" : "10/100/1000"); for (i = 0; i < 6; i++) diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index c184b773e585..2e733c60bfa4 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h @@ -2246,6 +2246,7 @@ struct tg3 { (X) == PHY_ID_BCM5411 || (X) == PHY_ID_BCM5701 || \ (X) == PHY_ID_BCM5703 || (X) == PHY_ID_BCM5704 || \ (X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \ + (X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5780 || \ (X) == PHY_ID_BCM8002) struct tg3_hw_stats *hw_stats; diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c index e7b001017b9a..9f491563944e 100644 --- a/drivers/net/tokenring/ibmtr.c +++ b/drivers/net/tokenring/ibmtr.c @@ -318,7 +318,7 @@ static void ibmtr_cleanup_card(struct net_device *dev) if (dev->base_addr) { outb(0,dev->base_addr+ADAPTRESET); - schedule_timeout(TR_RST_TIME); /* wait 50ms */ + schedule_timeout_uninterruptible(TR_RST_TIME); /* wait 50ms */ outb(0,dev->base_addr+ADAPTRESETREL); } @@ -531,7 +531,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr) if (!time_after(jiffies, timeout)) continue; DPRINTK( "Hardware timeout during initialization.\n"); iounmap(t_mmio); - kfree(ti); return -ENODEV; } ti->sram_phys = @@ -645,7 +644,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr) DPRINTK("Unknown shared ram paging info %01X\n", ti->shared_ram_paging); iounmap(t_mmio); - kfree(ti); return -ENODEV; break; } /*end switch shared_ram_paging */ @@ -675,7 +673,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr) "driver limit (%05x), adapter not started.\n", chk_base, ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE); iounmap(t_mmio); - kfree(ti); return -ENODEV; } else { /* seems cool, record what we have figured out */ ti->sram_base = new_base >> 12; @@ -690,7 +687,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr) DPRINTK("Could not grab irq %d. Halting Token Ring driver.\n", irq); iounmap(t_mmio); - kfree(ti); return -ENODEV; } /*?? Now, allocate some of the PIO PORTs for this driver.. */ @@ -699,7 +695,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr) DPRINTK("Could not grab PIO range. Halting driver.\n"); free_irq(dev->irq, dev); iounmap(t_mmio); - kfree(ti); return -EBUSY; } @@ -859,8 +854,7 @@ static int tok_init_card(struct net_device *dev) writeb(~INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN); outb(0, PIOaddr + ADAPTRESET); - current->state=TASK_UNINTERRUPTIBLE; - schedule_timeout(TR_RST_TIME); /* wait 50ms */ + schedule_timeout_uninterruptible(TR_RST_TIME); /* wait 50ms */ outb(0, PIOaddr + ADAPTRESETREL); #ifdef ENABLE_PAGING @@ -908,8 +902,8 @@ static int tok_open(struct net_device *dev) DPRINTK("Adapter is up and running\n"); return 0; } - current->state=TASK_INTERRUPTIBLE; - i=schedule_timeout(TR_RETRY_INTERVAL); /* wait 30 seconds */ + i=schedule_timeout_interruptible(TR_RETRY_INTERVAL); + /* wait 30 seconds */ if(i!=0) break; /*prob. 
a signal, like the i>24*HZ case above */ } outb(0, dev->base_addr + ADAPTRESET);/* kill pending interrupts*/ diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c index 9e7923192a49..05477d24fd49 100644 --- a/drivers/net/tokenring/olympic.c +++ b/drivers/net/tokenring/olympic.c @@ -1101,7 +1101,7 @@ static int olympic_close(struct net_device *dev) while(olympic_priv->srb_queued) { - t = schedule_timeout(60*HZ); + t = schedule_timeout_interruptible(60*HZ); if(signal_pending(current)) { printk(KERN_WARNING "%s: SRB timed out.\n",dev->name); diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c index 2e39bf1f7462..c1925590a0e1 100644 --- a/drivers/net/tokenring/tms380tr.c +++ b/drivers/net/tokenring/tms380tr.c @@ -1243,8 +1243,7 @@ void tms380tr_wait(unsigned long time) tmp = jiffies + time/(1000000/HZ); do { - current->state = TASK_INTERRUPTIBLE; - tmp = schedule_timeout(tmp); + tmp = schedule_timeout_interruptible(tmp); } while(time_after(tmp, jiffies)); #else udelay(time); diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c index 5db694c4eb02..683f14b01c06 100644 --- a/drivers/net/tulip/21142.c +++ b/drivers/net/tulip/21142.c @@ -172,7 +172,7 @@ void t21142_lnk_change(struct net_device *dev, int csr5) int i; for (i = 0; i < tp->mtable->leafcount; i++) if (tp->mtable->mleaf[i].media == dev->if_port) { - int startup = ! ((tp->chip_id == DC21143 && tp->revision == 65)); + int startup = ! ((tp->chip_id == DC21143 && (tp->revision == 48 || tp->revision == 65))); tp->cur_index = i; tulip_select_media(dev, startup); setup_done = 1; diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index a22d00198e4d..6b8eee8f7bfd 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c @@ -1787,10 +1787,15 @@ static void __init de21041_get_srom_info (struct de_private *de) /* DEC now has a specification but early board makers just put the address in the first EEPROM locations. 
*/ /* This does memcmp(eedata, eedata+16, 8) */ + +#ifndef CONFIG_MIPS_COBALT + for (i = 0; i < 8; i ++) if (ee_data[i] != ee_data[16+i]) sa_offset = 20; +#endif + /* store MAC address */ for (i = 0; i < 6; i ++) de->dev->dev_addr[i] = ee_data[i + sa_offset]; diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c index ecfa6f8805ce..4c76cb794bfb 100644 --- a/drivers/net/typhoon.c +++ b/drivers/net/typhoon.c @@ -419,10 +419,9 @@ typhoon_reset(void __iomem *ioaddr, int wait_type) TYPHOON_STATUS_WAITING_FOR_HOST) goto out; - if(wait_type == WaitSleep) { - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(1); - } else + if(wait_type == WaitSleep) + schedule_timeout_uninterruptible(1); + else udelay(TYPHOON_UDELAY); } diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index fc7738ffbfff..241871589283 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c @@ -490,6 +490,8 @@ struct rhine_private { u8 tx_thresh, rx_thresh; struct mii_if_info mii_if; + struct work_struct tx_timeout_task; + struct work_struct check_media_task; void __iomem *base; }; @@ -497,6 +499,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int location); static void mdio_write(struct net_device *dev, int phy_id, int location, int value); static int rhine_open(struct net_device *dev); static void rhine_tx_timeout(struct net_device *dev); +static void rhine_tx_timeout_task(struct net_device *dev); +static void rhine_check_media_task(struct net_device *dev); static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev); static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs); static void rhine_tx(struct net_device *dev); @@ -814,8 +818,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, for (i = 0; i < 6; i++) dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i); + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); - if (!is_valid_ether_addr(dev->dev_addr)) { + if (!is_valid_ether_addr(dev->perm_addr)) { rc = -EIO; printk(KERN_ERR "Invalid MAC address\n"); goto err_out_unmap; @@ -850,6 +855,12 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, if (rp->quirks & rqRhineI) dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; + INIT_WORK(&rp->tx_timeout_task, + (void (*)(void *))rhine_tx_timeout_task, dev); + + INIT_WORK(&rp->check_media_task, + (void (*)(void *))rhine_check_media_task, dev); + /* dev->name not defined before register_netdev()! */ rc = register_netdev(dev); if (rc) @@ -1076,6 +1087,11 @@ static void rhine_check_media(struct net_device *dev, unsigned int init_media) ioaddr + ChipCmd1); } +static void rhine_check_media_task(struct net_device *dev) +{ + rhine_check_media(dev, 0); +} + static void init_registers(struct net_device *dev) { struct rhine_private *rp = netdev_priv(dev); @@ -1129,8 +1145,8 @@ static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks) if (quirks & rqRhineI) { iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR - /* Can be called from ISR. Evil. */ - mdelay(1); + /* Do not call from ISR! 
*/ + msleep(1); /* 0x80 must be set immediately before turning it off */ iowrite8(0x80, ioaddr + MIICmd); @@ -1218,6 +1234,16 @@ static int rhine_open(struct net_device *dev) } static void rhine_tx_timeout(struct net_device *dev) +{ + struct rhine_private *rp = netdev_priv(dev); + + /* + * Move bulk of work outside of interrupt context + */ + schedule_work(&rp->tx_timeout_task); +} + +static void rhine_tx_timeout_task(struct net_device *dev) { struct rhine_private *rp = netdev_priv(dev); void __iomem *ioaddr = rp->base; @@ -1625,7 +1651,7 @@ static void rhine_error(struct net_device *dev, int intr_status) spin_lock(&rp->lock); if (intr_status & IntrLinkChange) - rhine_check_media(dev, 0); + schedule_work(&rp->check_media_task); if (intr_status & IntrStatsMax) { rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs); rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed); @@ -1829,6 +1855,7 @@ static struct ethtool_ops netdev_ethtool_ops = { .set_wol = rhine_set_wol, .get_sg = ethtool_op_get_sg, .get_tx_csum = ethtool_op_get_tx_csum, + .get_perm_addr = ethtool_op_get_perm_addr, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) @@ -1872,6 +1899,9 @@ static int rhine_close(struct net_device *dev) spin_unlock_irq(&rp->lock); free_irq(rp->pdev->irq, dev); + + flush_scheduled_work(); + free_rbufs(dev); free_tbufs(dev); free_ring(dev); diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 7ff814fd65d0..ae9e897c255e 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -1617,8 +1617,7 @@ static int get_wait_data(struct cosa_data *cosa) return r; } /* sleep if not ready to read */ - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(1); + schedule_timeout_interruptible(1); } printk(KERN_INFO "cosa: timeout in get_wait_data (status 0x%x)\n", cosa_getstatus(cosa)); @@ -1644,8 +1643,7 @@ static int put_wait_data(struct cosa_data *cosa, int data) } #if 0 /* sleep if not ready to read */ - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(1); + schedule_timeout_interruptible(1); #endif } printk(KERN_INFO "cosa%d: timeout in put_wait_data (status 0x%x)\n", diff --git a/drivers/net/wan/cycx_drv.c b/drivers/net/wan/cycx_drv.c index 9e56fc346ba4..e6d005726aad 100644 --- a/drivers/net/wan/cycx_drv.c +++ b/drivers/net/wan/cycx_drv.c @@ -109,7 +109,7 @@ static long cycx_2x_irq_options[] = { 7, 3, 5, 9, 10, 11, 12, 15 }; * < 0 error. * Context: process */ -int __init cycx_drv_init(void) +static int __init cycx_drv_init(void) { printk(KERN_INFO "%s v%u.%u %s\n", fullname, MOD_VERSION, MOD_RELEASE, copyright); @@ -119,7 +119,7 @@ int __init cycx_drv_init(void) /* Module 'remove' entry point. * o release all remaining system resources */ -void cycx_drv_cleanup(void) +static void cycx_drv_cleanup(void) { } @@ -184,8 +184,7 @@ int cycx_down(struct cycx_hw *hw) } /* Enable interrupt generation. */ -EXPORT_SYMBOL(cycx_inten); -void cycx_inten(struct cycx_hw *hw) +static void cycx_inten(struct cycx_hw *hw) { writeb(0, hw->dpmbase); } diff --git a/drivers/net/wan/cycx_main.c b/drivers/net/wan/cycx_main.c index 7b48064364dc..430b1f630fb4 100644 --- a/drivers/net/wan/cycx_main.c +++ b/drivers/net/wan/cycx_main.c @@ -103,7 +103,7 @@ static struct cycx_device *cycx_card_array; /* adapter data space */ * < 0 error. 
* Context: process */ -int __init cycx_init(void) +static int __init cycx_init(void) { int cnt, err = -ENOMEM; diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c index 02d57c0b4243..a631d1c2fa14 100644 --- a/drivers/net/wan/cycx_x25.c +++ b/drivers/net/wan/cycx_x25.c @@ -78,6 +78,7 @@ #define CYCLOMX_X25_DEBUG 1 +#include /* isdigit() */ #include /* return codes */ #include /* ARPHRD_HWX25 */ #include /* printk(), and other useful stuff */ @@ -418,7 +419,7 @@ static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev, /* Set channel timeouts (default if not specified) */ chan->idle_tmout = conf->idle_timeout ? conf->idle_timeout : 90; - } else if (is_digit(conf->addr[0])) { /* PVC */ + } else if (isdigit(conf->addr[0])) { /* PVC */ s16 lcn = dec_to_uint(conf->addr, 0); if (lcn >= card->u.x.lo_pvc && lcn <= card->u.x.hi_pvc) @@ -1531,7 +1532,7 @@ static unsigned dec_to_uint(u8 *str, int len) if (!len) len = strlen(str); - for (; len && is_digit(*str); ++str, --len) + for (; len && isdigit(*str); ++str, --len) val = (val * 10) + (*str - (unsigned) '0'); return val; diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index 520a77a798e2..2f61a47b4716 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -446,8 +446,8 @@ static inline unsigned int dscc4_tx_quiescent(struct dscc4_dev_priv *dpriv, return readl(dpriv->base_addr + CH0FTDA + dpriv->dev_id*4) == dpriv->ltda; } -int state_check(u32 state, struct dscc4_dev_priv *dpriv, struct net_device *dev, - const char *msg) +static int state_check(u32 state, struct dscc4_dev_priv *dpriv, + struct net_device *dev, const char *msg) { int ret = 0; @@ -466,8 +466,9 @@ int state_check(u32 state, struct dscc4_dev_priv *dpriv, struct net_device *dev, return ret; } -void dscc4_tx_print(struct net_device *dev, struct dscc4_dev_priv *dpriv, - char *msg) +static void dscc4_tx_print(struct net_device *dev, + struct dscc4_dev_priv *dpriv, + char *msg) { printk(KERN_DEBUG "%s: tx_current=%02d tx_dirty=%02d (%s)\n", dev->name, dpriv->tx_current, dpriv->tx_dirty, msg); @@ -507,7 +508,8 @@ static void dscc4_release_ring(struct dscc4_dev_priv *dpriv) } } -inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv, struct net_device *dev) +static inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv, + struct net_device *dev) { unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE; struct RxFD *rx_fd = dpriv->rx_fd + dirty; @@ -542,8 +544,7 @@ static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv, msg, i); goto done; } - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(10); + schedule_timeout_uninterruptible(10); rmb(); } while (++i > 0); printk(KERN_ERR "%s: %s timeout\n", dev->name, msg); @@ -588,8 +589,7 @@ static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv) (dpriv->iqtx[cur] & Xpr)) break; smp_rmb(); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(10); + schedule_timeout_uninterruptible(10); } while (++i > 0); return (i >= 0 ) ? i : -EAGAIN; @@ -1035,8 +1035,7 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr) /* Flush posted writes */ readl(ioaddr + GSTAR); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(10); + schedule_timeout_uninterruptible(10); for (i = 0; i < 16; i++) pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]); @@ -1894,7 +1893,7 @@ try: * It failed and locked solid. Thus the introduction of a dummy skb. * Problem is acknowledged in errata sheet DS5. 
Joy :o/ */ -struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv) +static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv) { struct sk_buff *skb; diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index 2c83cca34b86..7981a2c7906e 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -74,11 +74,11 @@ MODULE_LICENSE("GPL"); /* * Modules parameters and associated varaibles */ -int fst_txq_low = FST_LOW_WATER_MARK; -int fst_txq_high = FST_HIGH_WATER_MARK; -int fst_max_reads = 7; -int fst_excluded_cards = 0; -int fst_excluded_list[FST_MAX_CARDS]; +static int fst_txq_low = FST_LOW_WATER_MARK; +static int fst_txq_high = FST_HIGH_WATER_MARK; +static int fst_max_reads = 7; +static int fst_excluded_cards = 0; +static int fst_excluded_list[FST_MAX_CARDS]; module_param(fst_txq_low, int, 0); module_param(fst_txq_high, int, 0); @@ -572,13 +572,13 @@ static void do_bottom_half_rx(struct fst_card_info *card); static void fst_process_tx_work_q(unsigned long work_q); static void fst_process_int_work_q(unsigned long work_q); -DECLARE_TASKLET(fst_tx_task, fst_process_tx_work_q, 0); -DECLARE_TASKLET(fst_int_task, fst_process_int_work_q, 0); +static DECLARE_TASKLET(fst_tx_task, fst_process_tx_work_q, 0); +static DECLARE_TASKLET(fst_int_task, fst_process_int_work_q, 0); -struct fst_card_info *fst_card_array[FST_MAX_CARDS]; -spinlock_t fst_work_q_lock; -u64 fst_work_txq; -u64 fst_work_intq; +static struct fst_card_info *fst_card_array[FST_MAX_CARDS]; +static spinlock_t fst_work_q_lock; +static u64 fst_work_txq; +static u64 fst_work_intq; static void fst_q_work_item(u64 * queue, int card_index) @@ -980,8 +980,7 @@ fst_issue_cmd(struct fst_port_info *port, unsigned short cmd) /* Wait for any previous command to complete */ while (mbval > NAK) { spin_unlock_irqrestore(&card->card_lock, flags); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(1); + schedule_timeout_uninterruptible(1); spin_lock_irqsave(&card->card_lock, flags); if (++safety > 2000) { @@ -1498,7 +1497,7 @@ do_bottom_half_rx(struct fst_card_info *card) * The interrupt service routine * Dev_id is our fst_card_info pointer */ -irqreturn_t +static irqreturn_t fst_intr(int irq, void *dev_id, struct pt_regs *regs) { struct fst_card_info *card; diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index a5d6891c9d4c..e1601d35dced 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -330,7 +330,7 @@ static int pvc_close(struct net_device *dev) -int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { pvc_device *pvc = dev_to_pvc(dev); fr_proto_pvc_info info; diff --git a/drivers/net/wan/lmc/lmc_debug.c b/drivers/net/wan/lmc/lmc_debug.c index 9dccd9546a17..3b94352b0d03 100644 --- a/drivers/net/wan/lmc/lmc_debug.c +++ b/drivers/net/wan/lmc/lmc_debug.c @@ -8,10 +8,10 @@ /* * Prints out len, max to 80 octets using printk, 20 per line */ -void lmcConsoleLog(char *type, unsigned char *ucData, int iLen) -{ #ifdef DEBUG #ifdef LMC_PACKET_LOG +void lmcConsoleLog(char *type, unsigned char *ucData, int iLen) +{ int iNewLine = 1; char str[80], *pstr; @@ -43,26 +43,24 @@ void lmcConsoleLog(char *type, unsigned char *ucData, int iLen) } sprintf(pstr, "\n"); printk(str); -#endif -#endif } +#endif +#endif #ifdef DEBUG u_int32_t lmcEventLogIndex = 0; u_int32_t lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; -#endif void lmcEventLog (u_int32_t EventNum, u_int32_t arg2, 
u_int32_t arg3) { -#ifdef DEBUG lmcEventLogBuf[lmcEventLogIndex++] = EventNum; lmcEventLogBuf[lmcEventLogIndex++] = arg2; lmcEventLogBuf[lmcEventLogIndex++] = arg3; lmcEventLogBuf[lmcEventLogIndex++] = jiffies; lmcEventLogIndex &= (LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS) - 1; -#endif } +#endif /* DEBUG */ void lmc_trace(struct net_device *dev, char *msg){ #ifdef LMC_TRACE diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c index f55ce76b00ed..af8b55fdd9d9 100644 --- a/drivers/net/wan/lmc/lmc_media.c +++ b/drivers/net/wan/lmc/lmc_media.c @@ -47,14 +47,6 @@ * of the GNU General Public License version 2, incorporated herein by reference. */ -/* - * For lack of a better place, put the SSI cable stuff here. - */ -char *lmc_t1_cables[] = { - "V.10/RS423", "EIA530A", "reserved", "X.21", "V.35", - "EIA449/EIA530/V.36", "V.28/EIA232", "none", NULL -}; - /* * protocol independent method. */ diff --git a/drivers/net/wan/pc300.h b/drivers/net/wan/pc300.h index 73401b0f0151..2024b26b99e6 100644 --- a/drivers/net/wan/pc300.h +++ b/drivers/net/wan/pc300.h @@ -472,24 +472,8 @@ enum pc300_loopback_cmds { #ifdef __KERNEL__ /* Function Prototypes */ -int dma_buf_write(pc300_t *, int, ucchar *, int); -int dma_buf_read(pc300_t *, int, struct sk_buff *); void tx_dma_start(pc300_t *, int); -void rx_dma_start(pc300_t *, int); -void tx_dma_stop(pc300_t *, int); -void rx_dma_stop(pc300_t *, int); -int cpc_queue_xmit(struct sk_buff *, struct net_device *); -void cpc_net_rx(struct net_device *); -void cpc_sca_status(pc300_t *, int); -int cpc_change_mtu(struct net_device *, int); -int cpc_ioctl(struct net_device *, struct ifreq *, int); -int ch_config(pc300dev_t *); -int rx_config(pc300dev_t *); -int tx_config(pc300dev_t *); -void cpc_opench(pc300dev_t *); -void cpc_closech(pc300dev_t *); int cpc_open(struct net_device *dev); -int cpc_close(struct net_device *dev); int cpc_set_media(hdlc_device *, int); #endif /* __KERNEL__ */ diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c index 3e7753b10717..a3e65d1bc19b 100644 --- a/drivers/net/wan/pc300_drv.c +++ b/drivers/net/wan/pc300_drv.c @@ -291,6 +291,7 @@ static uclong detect_ram(pc300_t *); static void plx_init(pc300_t *); static void cpc_trace(struct net_device *, struct sk_buff *, char); static int cpc_attach(struct net_device *, unsigned short, unsigned short); +static int cpc_close(struct net_device *dev); #ifdef CONFIG_PC300_MLPPP void cpc_tty_init(pc300dev_t * dev); @@ -437,7 +438,7 @@ static void rx_dma_buf_check(pc300_t * card, int ch) printk("\n"); } -int dma_get_rx_frame_size(pc300_t * card, int ch) +static int dma_get_rx_frame_size(pc300_t * card, int ch) { volatile pcsca_bd_t __iomem *ptdescr; ucshort first_bd = card->chan[ch].rx_first_bd; @@ -462,7 +463,7 @@ int dma_get_rx_frame_size(pc300_t * card, int ch) * dma_buf_write: writes a frame to the Tx DMA buffers * NOTE: this function writes one frame at a time. */ -int dma_buf_write(pc300_t * card, int ch, ucchar * ptdata, int len) +static int dma_buf_write(pc300_t * card, int ch, ucchar * ptdata, int len) { int i, nchar; volatile pcsca_bd_t __iomem *ptdescr; @@ -503,7 +504,7 @@ int dma_buf_write(pc300_t * card, int ch, ucchar * ptdata, int len) * dma_buf_read: reads a frame from the Rx DMA buffers * NOTE: this function reads one frame at a time. 
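
The lmc_debug.c hunk that spans this point moves the #ifdef DEBUG / #ifdef LMC_PACKET_LOG guards from inside lmcConsoleLog() and lmcEventLog() to around their whole definitions, so the helpers and the event-log buffers are not built at all when the options are off, instead of compiling to empty stubs. A rough sketch of the resulting shape, using a hypothetical my_event_log() helper (callers would need the same guard, or a no-op macro for the !DEBUG case):

#include <linux/types.h>

#define LOG_SIZE 64                             /* illustrative, power of two */

/* Old shape: the function was always emitted and the #ifdef merely
 * emptied its body, so the buffer and the call overhead remained.
 * New shape: buffer and function exist only in DEBUG builds. */
#ifdef DEBUG
static u32 log_buf[LOG_SIZE];
static unsigned int log_idx;

void my_event_log(u32 event)
{
        log_buf[log_idx] = event;
        log_idx = (log_idx + 1) & (LOG_SIZE - 1);       /* wrap around */
}
#endif /* DEBUG */
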
*/ -int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb) +static int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb) { int nchar; pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; @@ -560,7 +561,7 @@ int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb) return (rcvd); } -void tx_dma_stop(pc300_t * card, int ch) +static void tx_dma_stop(pc300_t * card, int ch) { void __iomem *scabase = card->hw.scabase; ucchar drr_ena_bit = 1 << (5 + 2 * ch); @@ -571,7 +572,7 @@ void tx_dma_stop(pc300_t * card, int ch) cpc_writeb(scabase + DRR, drr_rst_bit & ~drr_ena_bit); } -void rx_dma_stop(pc300_t * card, int ch) +static void rx_dma_stop(pc300_t * card, int ch) { void __iomem *scabase = card->hw.scabase; ucchar drr_ena_bit = 1 << (4 + 2 * ch); @@ -582,7 +583,7 @@ void rx_dma_stop(pc300_t * card, int ch) cpc_writeb(scabase + DRR, drr_rst_bit & ~drr_ena_bit); } -void rx_dma_start(pc300_t * card, int ch) +static void rx_dma_start(pc300_t * card, int ch) { void __iomem *scabase = card->hw.scabase; pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; @@ -607,7 +608,7 @@ void rx_dma_start(pc300_t * card, int ch) /*************************/ /*** FALC Routines ***/ /*************************/ -void falc_issue_cmd(pc300_t * card, int ch, ucchar cmd) +static void falc_issue_cmd(pc300_t * card, int ch, ucchar cmd) { void __iomem *falcbase = card->hw.falcbase; unsigned long i = 0; @@ -622,7 +623,7 @@ void falc_issue_cmd(pc300_t * card, int ch, ucchar cmd) cpc_writeb(falcbase + F_REG(CMDR, ch), cmd); } -void falc_intr_enable(pc300_t * card, int ch) +static void falc_intr_enable(pc300_t * card, int ch) { pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; @@ -672,7 +673,7 @@ void falc_intr_enable(pc300_t * card, int ch) } } -void falc_open_timeslot(pc300_t * card, int ch, int timeslot) +static void falc_open_timeslot(pc300_t * card, int ch, int timeslot) { void __iomem *falcbase = card->hw.falcbase; ucchar tshf = card->chan[ch].falc.offset; @@ -688,7 +689,7 @@ void falc_open_timeslot(pc300_t * card, int ch, int timeslot) (0x80 >> (timeslot & 0x07))); } -void falc_close_timeslot(pc300_t * card, int ch, int timeslot) +static void falc_close_timeslot(pc300_t * card, int ch, int timeslot) { void __iomem *falcbase = card->hw.falcbase; ucchar tshf = card->chan[ch].falc.offset; @@ -704,7 +705,7 @@ void falc_close_timeslot(pc300_t * card, int ch, int timeslot) ~(0x80 >> (timeslot & 0x07))); } -void falc_close_all_timeslots(pc300_t * card, int ch) +static void falc_close_all_timeslots(pc300_t * card, int ch) { pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; @@ -726,7 +727,7 @@ void falc_close_all_timeslots(pc300_t * card, int ch) } } -void falc_open_all_timeslots(pc300_t * card, int ch) +static void falc_open_all_timeslots(pc300_t * card, int ch) { pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; @@ -758,7 +759,7 @@ void falc_open_all_timeslots(pc300_t * card, int ch) } } -void falc_init_timeslot(pc300_t * card, int ch) +static void falc_init_timeslot(pc300_t * card, int ch) { pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; @@ -776,7 +777,7 @@ void falc_init_timeslot(pc300_t * card, int ch) } } -void falc_enable_comm(pc300_t * card, int ch) +static void falc_enable_comm(pc300_t * card, int ch) { pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; falc_t *pfalc = (falc_t *) & chan->falc; @@ 
-792,7 +793,7 @@ void falc_enable_comm(pc300_t * card, int ch) ~((CPLD_REG1_FALC_DCD | CPLD_REG1_FALC_CTS) << (2 * ch))); } -void falc_disable_comm(pc300_t * card, int ch) +static void falc_disable_comm(pc300_t * card, int ch) { pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; falc_t *pfalc = (falc_t *) & chan->falc; @@ -806,7 +807,7 @@ void falc_disable_comm(pc300_t * card, int ch) ((CPLD_REG1_FALC_DCD | CPLD_REG1_FALC_CTS) << (2 * ch))); } -void falc_init_t1(pc300_t * card, int ch) +static void falc_init_t1(pc300_t * card, int ch) { pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; @@ -975,7 +976,7 @@ void falc_init_t1(pc300_t * card, int ch) falc_close_all_timeslots(card, ch); } -void falc_init_e1(pc300_t * card, int ch) +static void falc_init_e1(pc300_t * card, int ch) { pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; @@ -1155,7 +1156,7 @@ void falc_init_e1(pc300_t * card, int ch) falc_close_all_timeslots(card, ch); } -void falc_init_hdlc(pc300_t * card, int ch) +static void falc_init_hdlc(pc300_t * card, int ch) { void __iomem *falcbase = card->hw.falcbase; pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; @@ -1181,7 +1182,7 @@ void falc_init_hdlc(pc300_t * card, int ch) falc_intr_enable(card, ch); } -void te_config(pc300_t * card, int ch) +static void te_config(pc300_t * card, int ch) { pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; @@ -1241,7 +1242,7 @@ void te_config(pc300_t * card, int ch) CPC_UNLOCK(card, flags); } -void falc_check_status(pc300_t * card, int ch, unsigned char frs0) +static void falc_check_status(pc300_t * card, int ch, unsigned char frs0) { pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; @@ -1397,7 +1398,7 @@ void falc_check_status(pc300_t * card, int ch, unsigned char frs0) } } -void falc_update_stats(pc300_t * card, int ch) +static void falc_update_stats(pc300_t * card, int ch) { pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; @@ -1450,7 +1451,7 @@ void falc_update_stats(pc300_t * card, int ch) * the synchronizer and then sent to the system interface. *---------------------------------------------------------------------------- */ -void falc_remote_loop(pc300_t * card, int ch, int loop_on) +static void falc_remote_loop(pc300_t * card, int ch, int loop_on) { pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; @@ -1495,7 +1496,7 @@ void falc_remote_loop(pc300_t * card, int ch, int loop_on) * coding must be identical. *---------------------------------------------------------------------------- */ -void falc_local_loop(pc300_t * card, int ch, int loop_on) +static void falc_local_loop(pc300_t * card, int ch, int loop_on) { pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; falc_t *pfalc = (falc_t *) & chan->falc; @@ -1522,7 +1523,7 @@ void falc_local_loop(pc300_t * card, int ch, int loop_on) * looped. They are originated by the FALC-LH transmitter. 
*---------------------------------------------------------------------------- */ -void falc_payload_loop(pc300_t * card, int ch, int loop_on) +static void falc_payload_loop(pc300_t * card, int ch, int loop_on) { pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; @@ -1576,7 +1577,7 @@ void falc_payload_loop(pc300_t * card, int ch, int loop_on) * Description: Turns XLU bit off in the proper register *---------------------------------------------------------------------------- */ -void turn_off_xlu(pc300_t * card, int ch) +static void turn_off_xlu(pc300_t * card, int ch) { pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; @@ -1597,7 +1598,7 @@ void turn_off_xlu(pc300_t * card, int ch) * Description: Turns XLD bit off in the proper register *---------------------------------------------------------------------------- */ -void turn_off_xld(pc300_t * card, int ch) +static void turn_off_xld(pc300_t * card, int ch) { pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; @@ -1619,7 +1620,7 @@ void turn_off_xld(pc300_t * card, int ch) * to generate a LOOP activation code over a T1/E1 line. *---------------------------------------------------------------------------- */ -void falc_generate_loop_up_code(pc300_t * card, int ch) +static void falc_generate_loop_up_code(pc300_t * card, int ch) { pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; @@ -1652,7 +1653,7 @@ void falc_generate_loop_up_code(pc300_t * card, int ch) * to generate a LOOP deactivation code over a T1/E1 line. *---------------------------------------------------------------------------- */ -void falc_generate_loop_down_code(pc300_t * card, int ch) +static void falc_generate_loop_down_code(pc300_t * card, int ch) { pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; @@ -1682,7 +1683,7 @@ void falc_generate_loop_down_code(pc300_t * card, int ch) * it on the reception side. 
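
The pc300_drv.c hunks on either side of this point all apply one rule: functions with no users outside the file lose their global linkage and become static, and their extern prototypes are dropped from pc300.h. Where a newly static function is called before its definition, as with cpc_close(), a static forward declaration is added near the top of the file instead. A condensed sketch of that shape with hypothetical names (foo_open/foo_close stand in for a driver's open/close pair):

#include <linux/netdevice.h>
#include <linux/errno.h>

static int foo_close(struct net_device *dev);   /* forward declaration: used
                                                 * below before it is defined */

static int foo_open(struct net_device *dev)
{
        if (dev->irq == 0) {            /* illustrative failure path */
                foo_close(dev);
                return -ENODEV;
        }
        netif_start_queue(dev);
        return 0;
}

static int foo_close(struct net_device *dev)
{
        netif_stop_queue(dev);
        return 0;
}
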
*---------------------------------------------------------------------------- */ -void falc_pattern_test(pc300_t * card, int ch, unsigned int activate) +static void falc_pattern_test(pc300_t * card, int ch, unsigned int activate) { pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; @@ -1729,7 +1730,7 @@ void falc_pattern_test(pc300_t * card, int ch, unsigned int activate) * Description: This routine returns the bit error counter value *---------------------------------------------------------------------------- */ -ucshort falc_pattern_test_error(pc300_t * card, int ch) +static ucshort falc_pattern_test_error(pc300_t * card, int ch) { pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; falc_t *pfalc = (falc_t *) & chan->falc; @@ -1769,7 +1770,7 @@ cpc_trace(struct net_device *dev, struct sk_buff *skb_main, char rx_tx) netif_rx(skb); } -void cpc_tx_timeout(struct net_device *dev) +static void cpc_tx_timeout(struct net_device *dev) { pc300dev_t *d = (pc300dev_t *) dev->priv; pc300ch_t *chan = (pc300ch_t *) d->chan; @@ -1797,7 +1798,7 @@ void cpc_tx_timeout(struct net_device *dev) netif_wake_queue(dev); } -int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev) +static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev) { pc300dev_t *d = (pc300dev_t *) dev->priv; pc300ch_t *chan = (pc300ch_t *) d->chan; @@ -1880,7 +1881,7 @@ int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } -void cpc_net_rx(struct net_device *dev) +static void cpc_net_rx(struct net_device *dev) { pc300dev_t *d = (pc300dev_t *) dev->priv; pc300ch_t *chan = (pc300ch_t *) d->chan; @@ -2403,7 +2404,7 @@ static irqreturn_t cpc_intr(int irq, void *dev_id, struct pt_regs *regs) return IRQ_HANDLED; } -void cpc_sca_status(pc300_t * card, int ch) +static void cpc_sca_status(pc300_t * card, int ch) { ucchar ilar; void __iomem *scabase = card->hw.scabase; @@ -2495,7 +2496,7 @@ void cpc_sca_status(pc300_t * card, int ch) } } -void cpc_falc_status(pc300_t * card, int ch) +static void cpc_falc_status(pc300_t * card, int ch) { pc300ch_t *chan = &card->chan[ch]; falc_t *pfalc = (falc_t *) & chan->falc; @@ -2523,7 +2524,7 @@ void cpc_falc_status(pc300_t * card, int ch) CPC_UNLOCK(card, flags); } -int cpc_change_mtu(struct net_device *dev, int new_mtu) +static int cpc_change_mtu(struct net_device *dev, int new_mtu) { if ((new_mtu < 128) || (new_mtu > PC300_DEF_MTU)) return -EINVAL; @@ -2531,7 +2532,7 @@ int cpc_change_mtu(struct net_device *dev, int new_mtu) return 0; } -int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +static int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { hdlc_device *hdlc = dev_to_hdlc(dev); pc300dev_t *d = (pc300dev_t *) dev->priv; @@ -2856,7 +2857,7 @@ static int clock_rate_calc(uclong rate, uclong clock, int *br_io) } } -int ch_config(pc300dev_t * d) +static int ch_config(pc300dev_t * d) { pc300ch_t *chan = (pc300ch_t *) d->chan; pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; @@ -3004,7 +3005,7 @@ int ch_config(pc300dev_t * d) return 0; } -int rx_config(pc300dev_t * d) +static int rx_config(pc300dev_t * d) { pc300ch_t *chan = (pc300ch_t *) d->chan; pc300_t *card = (pc300_t *) chan->card; @@ -3035,7 +3036,7 @@ int rx_config(pc300dev_t * d) return 0; } -int tx_config(pc300dev_t * d) +static int tx_config(pc300dev_t * d) { pc300ch_t *chan = (pc300ch_t *) d->chan; pc300_t *card = (pc300_t *) chan->card; @@ -3098,7 +3099,7 @@ static int cpc_attach(struct net_device *dev, unsigned short 
encoding, return 0; } -void cpc_opench(pc300dev_t * d) +static void cpc_opench(pc300dev_t * d) { pc300ch_t *chan = (pc300ch_t *) d->chan; pc300_t *card = (pc300_t *) chan->card; @@ -3116,7 +3117,7 @@ void cpc_opench(pc300dev_t * d) cpc_readb(scabase + M_REG(CTL, ch)) & ~(CTL_RTS | CTL_DTR)); } -void cpc_closech(pc300dev_t * d) +static void cpc_closech(pc300dev_t * d) { pc300ch_t *chan = (pc300ch_t *) d->chan; pc300_t *card = (pc300_t *) chan->card; @@ -3173,7 +3174,7 @@ int cpc_open(struct net_device *dev) return 0; } -int cpc_close(struct net_device *dev) +static int cpc_close(struct net_device *dev) { hdlc_device *hdlc = dev_to_hdlc(dev); pc300dev_t *d = (pc300dev_t *) dev->priv; diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c index 8454bf6caaa7..52f26b9c69d2 100644 --- a/drivers/net/wan/pc300_tty.c +++ b/drivers/net/wan/pc300_tty.c @@ -112,10 +112,10 @@ typedef struct _st_cpc_tty_area { static struct tty_driver serial_drv; /* local variables */ -st_cpc_tty_area cpc_tty_area[CPC_TTY_NPORTS]; +static st_cpc_tty_area cpc_tty_area[CPC_TTY_NPORTS]; -int cpc_tty_cnt=0; /* number of intrfaces configured with MLPPP */ -int cpc_tty_unreg_flag = 0; +static int cpc_tty_cnt = 0; /* number of intrfaces configured with MLPPP */ +static int cpc_tty_unreg_flag = 0; /* TTY functions prototype */ static int cpc_tty_open(struct tty_struct *tty, struct file *flip); @@ -132,9 +132,9 @@ static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx); static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char); static void cpc_tty_signal_on(pc300dev_t *pc300dev, unsigned char); -int pc300_tiocmset(struct tty_struct *, struct file *, - unsigned int, unsigned int); -int pc300_tiocmget(struct tty_struct *, struct file *); +static int pc300_tiocmset(struct tty_struct *, struct file *, + unsigned int, unsigned int); +static int pc300_tiocmget(struct tty_struct *, struct file *); /* functions called by PC300 driver */ void cpc_tty_init(pc300dev_t *dev); @@ -538,8 +538,8 @@ static int cpc_tty_chars_in_buffer(struct tty_struct *tty) return(0); } -int pc300_tiocmset(struct tty_struct *tty, struct file *file, - unsigned int set, unsigned int clear) +static int pc300_tiocmset(struct tty_struct *tty, struct file *file, + unsigned int set, unsigned int clear) { st_cpc_tty_area *cpc_tty; @@ -565,7 +565,7 @@ int pc300_tiocmset(struct tty_struct *tty, struct file *file, return 0; } -int pc300_tiocmget(struct tty_struct *tty, struct file *file) +static int pc300_tiocmget(struct tty_struct *tty, struct file *file) { unsigned int result; unsigned char status; diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c index 3ac9a45b20fa..036adc4f8ba7 100644 --- a/drivers/net/wan/sdla.c +++ b/drivers/net/wan/sdla.c @@ -182,7 +182,7 @@ static char sdla_byte(struct net_device *dev, int addr) return(byte); } -void sdla_stop(struct net_device *dev) +static void sdla_stop(struct net_device *dev) { struct frad_local *flp; @@ -209,7 +209,7 @@ void sdla_stop(struct net_device *dev) } } -void sdla_start(struct net_device *dev) +static void sdla_start(struct net_device *dev) { struct frad_local *flp; @@ -247,7 +247,7 @@ void sdla_start(struct net_device *dev) * ***************************************************/ -int sdla_z80_poll(struct net_device *dev, int z80_addr, int jiffs, char resp1, char resp2) +static int sdla_z80_poll(struct net_device *dev, int z80_addr, int jiffs, char resp1, char resp2) { unsigned long start, done, now; char resp, *temp; @@ -505,7 +505,7 @@ static int sdla_cmd(struct 
net_device *dev, int cmd, short dlci, short flags, static int sdla_reconfig(struct net_device *dev); -int sdla_activate(struct net_device *slave, struct net_device *master) +static int sdla_activate(struct net_device *slave, struct net_device *master) { struct frad_local *flp; int i; @@ -527,7 +527,7 @@ int sdla_activate(struct net_device *slave, struct net_device *master) return(0); } -int sdla_deactivate(struct net_device *slave, struct net_device *master) +static int sdla_deactivate(struct net_device *slave, struct net_device *master) { struct frad_local *flp; int i; @@ -549,7 +549,7 @@ int sdla_deactivate(struct net_device *slave, struct net_device *master) return(0); } -int sdla_assoc(struct net_device *slave, struct net_device *master) +static int sdla_assoc(struct net_device *slave, struct net_device *master) { struct frad_local *flp; int i; @@ -585,7 +585,7 @@ int sdla_assoc(struct net_device *slave, struct net_device *master) return(0); } -int sdla_deassoc(struct net_device *slave, struct net_device *master) +static int sdla_deassoc(struct net_device *slave, struct net_device *master) { struct frad_local *flp; int i; @@ -613,7 +613,7 @@ int sdla_deassoc(struct net_device *slave, struct net_device *master) return(0); } -int sdla_dlci_conf(struct net_device *slave, struct net_device *master, int get) +static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, int get) { struct frad_local *flp; struct dlci_local *dlp; @@ -1324,7 +1324,7 @@ NOTE: This is rather a useless action right now, as the return(0); } -int sdla_change_mtu(struct net_device *dev, int new_mtu) +static int sdla_change_mtu(struct net_device *dev, int new_mtu) { struct frad_local *flp; @@ -1337,7 +1337,7 @@ int sdla_change_mtu(struct net_device *dev, int new_mtu) return(-EOPNOTSUPP); } -int sdla_set_config(struct net_device *dev, struct ifmap *map) +static int sdla_set_config(struct net_device *dev, struct ifmap *map) { struct frad_local *flp; int i; diff --git a/drivers/net/wan/sdla_fr.c b/drivers/net/wan/sdla_fr.c index 0497dbdb8631..7f1ce9d4333e 100644 --- a/drivers/net/wan/sdla_fr.c +++ b/drivers/net/wan/sdla_fr.c @@ -822,7 +822,7 @@ static int new_if(struct wan_device* wandev, struct net_device* dev, chan->card = card; /* verify media address */ - if (is_digit(conf->addr[0])) { + if (isdigit(conf->addr[0])) { dlci = dec_to_uint(conf->addr, 0); @@ -3456,7 +3456,7 @@ static unsigned int dec_to_uint (unsigned char* str, int len) if (!len) len = strlen(str); - for (val = 0; len && is_digit(*str); ++str, --len) + for (val = 0; len && isdigit(*str); ++str, --len) val = (val * 10) + (*str - (unsigned)'0'); return val; diff --git a/drivers/net/wan/sdla_x25.c b/drivers/net/wan/sdla_x25.c index 8a95d61a2f8f..63f846d6f3a6 100644 --- a/drivers/net/wan/sdla_x25.c +++ b/drivers/net/wan/sdla_x25.c @@ -957,7 +957,7 @@ static int new_if(struct wan_device* wandev, struct net_device* dev, chan->hold_timeout = (conf->hold_timeout) ? 
conf->hold_timeout : 10; - }else if (is_digit(conf->addr[0])){ /* PVC */ + }else if (isdigit(conf->addr[0])){ /* PVC */ int lcn = dec_to_uint(conf->addr, 0); if ((lcn >= card->u.x.lo_pvc) && (lcn <= card->u.x.hi_pvc)){ @@ -3875,7 +3875,7 @@ static unsigned int dec_to_uint (unsigned char* str, int len) if (!len) len = strlen(str); - for (val = 0; len && is_digit(*str); ++str, --len) + for (val = 0; len && isdigit(*str); ++str, --len) val = (val * 10) + (*str - (unsigned)'0'); return val; @@ -3896,9 +3896,9 @@ static unsigned int hex_to_uint (unsigned char* str, int len) for (val = 0; len; ++str, --len) { ch = *str; - if (is_digit(ch)) + if (isdigit(ch)) val = (val << 4) + (ch - (unsigned)'0'); - else if (is_hex_digit(ch)) + else if (isxdigit(ch)) val = (val << 4) + ((ch & 0xDF) - (unsigned)'A' + 10); else break; } diff --git a/drivers/net/wan/sdladrv.c b/drivers/net/wan/sdladrv.c index c8bc6da57a41..7c2cf2e76300 100644 --- a/drivers/net/wan/sdladrv.c +++ b/drivers/net/wan/sdladrv.c @@ -642,9 +642,7 @@ int sdla_mapmem (sdlahw_t* hw, unsigned long addr) * Enable interrupt generation. */ -EXPORT_SYMBOL(sdla_inten); - -int sdla_inten (sdlahw_t* hw) +static int sdla_inten (sdlahw_t* hw) { unsigned port = hw->port; int tmp, i; @@ -698,8 +696,7 @@ int sdla_inten (sdlahw_t* hw) * Disable interrupt generation. */ -EXPORT_SYMBOL(sdla_intde); - +#if 0 int sdla_intde (sdlahw_t* hw) { unsigned port = hw->port; @@ -748,14 +745,13 @@ int sdla_intde (sdlahw_t* hw) } return 0; } +#endif /* 0 */ /*============================================================================ * Acknowledge SDLA hardware interrupt. */ -EXPORT_SYMBOL(sdla_intack); - -int sdla_intack (sdlahw_t* hw) +static int sdla_intack (sdlahw_t* hw) { unsigned port = hw->port; int tmp; @@ -827,8 +823,7 @@ void read_S514_int_stat (sdlahw_t* hw, u32* int_status) * Generate an interrupt to adapter's CPU. */ -EXPORT_SYMBOL(sdla_intr); - +#if 0 int sdla_intr (sdlahw_t* hw) { unsigned port = hw->port; @@ -863,6 +858,7 @@ int sdla_intr (sdlahw_t* hw) } return 0; } +#endif /* 0 */ /*============================================================================ * Execute Adapter Command. 
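
The cycx, sdla_fr and sdla_x25 hunks drop the drivers' private is_digit()/is_hex_digit() helpers in favour of isdigit()/isxdigit() from <linux/ctype.h>; the only other change needed is the extra include. Note that isxdigit() also accepts 0-9, but because the decimal branch is tested first the hex parser's behaviour is unchanged. A small sketch of the resulting parser shape, modelled loosely on the dec_to_uint()/hex_to_uint() helpers touched by the patch (the function names here are illustrative):

#include <linux/ctype.h>
#include <linux/string.h>

/* Parse an unsigned decimal number; len == 0 means "NUL-terminated". */
static unsigned int parse_dec(const unsigned char *str, int len)
{
        unsigned int val = 0;

        if (!len)
                len = strlen((const char *)str);

        for (; len && isdigit(*str); ++str, --len)
                val = val * 10 + (*str - '0');

        return val;
}

/* Same idea for hexadecimal input. */
static unsigned int parse_hex(const unsigned char *str, int len)
{
        unsigned int val = 0;

        if (!len)
                len = strlen((const char *)str);

        for (; len; ++str, --len) {
                unsigned char ch = *str;

                if (isdigit(ch))
                        val = (val << 4) + (ch - '0');
                else if (isxdigit(ch))
                        val = (val << 4) + ((ch & 0xDF) - 'A' + 10);
                else
                        break;
        }
        return val;
}
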
diff --git a/drivers/net/wan/sdlamain.c b/drivers/net/wan/sdlamain.c index 74e151acef3e..7a8b22a7ea31 100644 --- a/drivers/net/wan/sdlamain.c +++ b/drivers/net/wan/sdlamain.c @@ -57,6 +57,7 @@ #include /* request_region(), release_region() */ #include /* WAN router definitions */ #include /* WANPIPE common user API definitions */ +#include #include #include /* phys_to_virt() */ @@ -1268,37 +1269,41 @@ unsigned long get_ip_address(struct net_device *dev, int option) struct in_ifaddr *ifaddr; struct in_device *in_dev; + unsigned long addr = 0; - if ((in_dev = __in_dev_get(dev)) == NULL){ - return 0; + rcu_read_lock(); + if ((in_dev = __in_dev_get_rcu(dev)) == NULL){ + goto out; } if ((ifaddr = in_dev->ifa_list)== NULL ){ - return 0; + goto out; } switch (option){ case WAN_LOCAL_IP: - return ifaddr->ifa_local; + addr = ifaddr->ifa_local; break; case WAN_POINTOPOINT_IP: - return ifaddr->ifa_address; + addr = ifaddr->ifa_address; break; case WAN_NETMASK_IP: - return ifaddr->ifa_mask; + addr = ifaddr->ifa_mask; break; case WAN_BROADCAST_IP: - return ifaddr->ifa_broadcast; + addr = ifaddr->ifa_broadcast; break; default: - return 0; + break; } - return 0; +out: + rcu_read_unlock(); + return addr; } void add_gateway(sdla_t *card, struct net_device *dev) diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c index b56a7b516d24..2d1bba06a085 100644 --- a/drivers/net/wan/syncppp.c +++ b/drivers/net/wan/syncppp.c @@ -221,7 +221,7 @@ static void sppp_clear_timeout(struct sppp *p) * here. */ -void sppp_input (struct net_device *dev, struct sk_buff *skb) +static void sppp_input (struct net_device *dev, struct sk_buff *skb) { struct ppp_header *h; struct sppp *sp = (struct sppp *)sppp_of(dev); @@ -355,8 +355,6 @@ done: return; } -EXPORT_SYMBOL(sppp_input); - /* * Handle transmit packets. */ @@ -769,7 +767,7 @@ static void sppp_cisco_input (struct sppp *sp, struct sk_buff *skb) u32 addr = 0, mask = ~0; /* FIXME: is the mask correct? */ #ifdef CONFIG_INET rcu_read_lock(); - if ((in_dev = __in_dev_get(dev)) != NULL) + if ((in_dev = __in_dev_get_rcu(dev)) != NULL) { for (ifa=in_dev->ifa_list; ifa != NULL; ifa=ifa->ifa_next) { @@ -990,7 +988,7 @@ EXPORT_SYMBOL(sppp_reopen); * the mtu is out of range. */ -int sppp_change_mtu(struct net_device *dev, int new_mtu) +static int sppp_change_mtu(struct net_device *dev, int new_mtu) { if(new_mtu<128||new_mtu>PPP_MTU||(dev->flags&IFF_UP)) return -EINVAL; @@ -998,8 +996,6 @@ int sppp_change_mtu(struct net_device *dev, int new_mtu) return 0; } -EXPORT_SYMBOL(sppp_change_mtu); - /** * sppp_do_ioctl - Ioctl handler for ppp/hdlc * @dev: Device subject to ioctl @@ -1456,7 +1452,7 @@ static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_t return 0; } -struct packet_type sppp_packet_type = { +static struct packet_type sppp_packet_type = { .type = __constant_htons(ETH_P_WAN_PPP), .func = sppp_rcv, }; diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 00a07f32a81e..7187958e40ca 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -243,7 +243,7 @@ config IPW_DEBUG config AIRO tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" - depends on NET_RADIO && ISA && (PCI || BROKEN) + depends on NET_RADIO && ISA_DMA_API && (PCI || BROKEN) ---help--- This is the standard Linux driver to support Cisco/Aironet ISA and PCI 802.11 wireless cards. 
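
The get_ip_address() rewrite in sdlamain.c below (and the matching one-liner in syncppp.c) converts the in_device lookup to the RCU variant: the pointer is obtained with __in_dev_get_rcu() and only dereferenced between rcu_read_lock() and rcu_read_unlock(), with every exit funnelled through a single unlock. A minimal sketch of that access pattern (the helper name and the choice of ifa_local are illustrative):

#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rcupdate.h>

/* Return the primary IPv4 address of @dev, or 0 if it has none. */
static u32 get_primary_addr(struct net_device *dev)
{
        struct in_device *in_dev;
        struct in_ifaddr *ifa;
        u32 addr = 0;

        rcu_read_lock();
        in_dev = __in_dev_get_rcu(dev);
        if (in_dev) {
                ifa = in_dev->ifa_list;         /* first configured address */
                if (ifa)
                        addr = ifa->ifa_local;
        }
        rcu_read_unlock();                      /* one unlock on every path */

        return addr;
}
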
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index 06998c2240d9..cb429e783749 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -1046,7 +1046,6 @@ static WifiCtlHdr wifictlhdr8023 = { } }; -#ifdef WIRELESS_EXT // Frequency list (map channels to frequencies) static const long frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484 }; @@ -1067,7 +1066,6 @@ typedef struct wep_key_t { /* List of Wireless Handlers (new API) */ static const struct iw_handler_def airo_handler_def; -#endif /* WIRELESS_EXT */ static const char version[] = "airo.c 0.6 (Ben Reed & Javier Achirica)"; @@ -1110,10 +1108,8 @@ static irqreturn_t airo_interrupt( int irq, void* dev_id, struct pt_regs static int airo_thread(void *data); static void timer_func( struct net_device *dev ); static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); -#ifdef WIRELESS_EXT static struct iw_statistics *airo_get_wireless_stats (struct net_device *dev); static void airo_read_wireless_stats (struct airo_info *local); -#endif /* WIRELESS_EXT */ #ifdef CISCO_EXT static int readrids(struct net_device *dev, aironet_ioctl *comp); static int writerids(struct net_device *dev, aironet_ioctl *comp); @@ -1187,12 +1183,10 @@ struct airo_info { int fid; } xmit, xmit11; struct net_device *wifidev; -#ifdef WIRELESS_EXT struct iw_statistics wstats; // wireless stats unsigned long scan_timestamp; /* Time started to scan */ struct iw_spy_data spy_data; struct iw_public_data wireless_data; -#endif /* WIRELESS_EXT */ #ifdef MICSUPPORT /* MIC stuff */ struct crypto_tfm *tfm; @@ -2527,7 +2521,8 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci, unsigned long mem_start, mem_len, aux_start, aux_len; int rc = -1; int i; - unsigned char *busaddroff,*vpackoff; + dma_addr_t busaddroff; + unsigned char *vpackoff; unsigned char __iomem *pciaddroff; mem_start = pci_resource_start(pci, 1); @@ -2570,7 +2565,7 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci, /* * Setup descriptor RX, TX, CONFIG */ - busaddroff = (unsigned char *)ai->shared_dma; + busaddroff = ai->shared_dma; pciaddroff = ai->pciaux + AUX_OFFSET; vpackoff = ai->shared; @@ -2579,7 +2574,7 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci, ai->rxfids[i].pending = 0; ai->rxfids[i].card_ram_off = pciaddroff; ai->rxfids[i].virtual_host_addr = vpackoff; - ai->rxfids[i].rx_desc.host_addr = (dma_addr_t) busaddroff; + ai->rxfids[i].rx_desc.host_addr = busaddroff; ai->rxfids[i].rx_desc.valid = 1; ai->rxfids[i].rx_desc.len = PKTSIZE; ai->rxfids[i].rx_desc.rdy = 0; @@ -2594,7 +2589,7 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci, ai->txfids[i].card_ram_off = pciaddroff; ai->txfids[i].virtual_host_addr = vpackoff; ai->txfids[i].tx_desc.valid = 1; - ai->txfids[i].tx_desc.host_addr = (dma_addr_t) busaddroff; + ai->txfids[i].tx_desc.host_addr = busaddroff; memcpy(ai->txfids[i].virtual_host_addr, &wifictlhdr8023, sizeof(wifictlhdr8023)); @@ -2607,8 +2602,8 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci, /* Rid descriptor setup */ ai->config_desc.card_ram_off = pciaddroff; ai->config_desc.virtual_host_addr = vpackoff; - ai->config_desc.rid_desc.host_addr = (dma_addr_t) busaddroff; - ai->ridbus = (dma_addr_t)busaddroff; + ai->config_desc.rid_desc.host_addr = busaddroff; + ai->ridbus = busaddroff; ai->config_desc.rid_desc.rid = 0; ai->config_desc.rid_desc.len = RIDSIZE; ai->config_desc.rid_desc.valid = 1; 
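
The mpi_map_card() hunk above stops passing the card's shared-memory bus address around as an unsigned char * and keeps it in dma_addr_t, the type that is actually guaranteed to be wide enough for bus addresses (on some configurations dma_addr_t is 64-bit while pointers are 32-bit, so the old casts could truncate). A minimal sketch of the intended usage with hypothetical descriptor names; only the "keep it in dma_addr_t" point mirrors the patch:

#include <linux/pci.h>
#include <linux/errno.h>

#define N_DESC          4
#define DESC_BYTES      2048

struct my_desc {
        dma_addr_t      host_addr;      /* bus address the device DMAs to */
        unsigned char   *virt;          /* CPU-visible mapping of the block */
};

static int map_descriptors(struct pci_dev *pdev, struct my_desc *desc)
{
        dma_addr_t busaddr;             /* no detour through a pointer type */
        unsigned char *vaddr;
        int i;

        vaddr = pci_alloc_consistent(pdev, N_DESC * DESC_BYTES, &busaddr);
        if (!vaddr)
                return -ENOMEM;

        for (i = 0; i < N_DESC; i++) {
                desc[i].host_addr = busaddr;    /* arithmetic stays dma_addr_t */
                desc[i].virt = vaddr;
                busaddr += DESC_BYTES;
                vaddr += DESC_BYTES;
        }
        return 0;
}
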
@@ -2647,9 +2642,7 @@ static void wifi_setup(struct net_device *dev) dev->get_stats = &airo_get_stats; dev->set_mac_address = &airo_set_mac_address; dev->do_ioctl = &airo_ioctl; -#ifdef WIRELESS_EXT dev->wireless_handlers = &airo_handler_def; -#endif /* WIRELESS_EXT */ dev->change_mtu = &airo_change_mtu; dev->open = &airo_open; dev->stop = &airo_close; @@ -2675,9 +2668,7 @@ static struct net_device *init_wifidev(struct airo_info *ai, dev->priv = ethdev->priv; dev->irq = ethdev->irq; dev->base_addr = ethdev->base_addr; -#ifdef WIRELESS_EXT dev->wireless_data = ethdev->wireless_data; -#endif /* WIRELESS_EXT */ memcpy(dev->dev_addr, ethdev->dev_addr, dev->addr_len); err = register_netdev(dev); if (err<0) { @@ -2755,11 +2746,9 @@ static struct net_device *_init_airo_card( unsigned short irq, int port, dev->set_multicast_list = &airo_set_multicast_list; dev->set_mac_address = &airo_set_mac_address; dev->do_ioctl = &airo_ioctl; -#ifdef WIRELESS_EXT dev->wireless_handlers = &airo_handler_def; ai->wireless_data.spy_data = &ai->spy_data; dev->wireless_data = &ai->wireless_data; -#endif /* WIRELESS_EXT */ dev->change_mtu = &airo_change_mtu; dev->open = &airo_open; dev->stop = &airo_close; @@ -5515,12 +5504,13 @@ static int airo_pci_resume(struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata(pdev); struct airo_info *ai = dev->priv; Resp rsp; + pci_power_t prev_state = pdev->current_state; - pci_set_power_state(pdev, 0); + pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); - pci_enable_wake(pdev, pci_choose_state(pdev, ai->power), 0); + pci_enable_wake(pdev, PCI_D0, 0); - if (ai->power.event > 1) { + if (prev_state != PCI_D1) { reset_card(dev, 0); mpi_init_descriptors(ai); setup_card(ai, dev->dev_addr, 0); @@ -5598,7 +5588,6 @@ static void __exit airo_cleanup_module( void ) remove_proc_entry("aironet", proc_root_driver); } -#ifdef WIRELESS_EXT /* * Initial Wireless Extension code for Aironet driver by : * Jean Tourrilhes - HPL - 17 November 00 @@ -7107,8 +7096,6 @@ static const struct iw_handler_def airo_handler_def = .get_wireless_stats = airo_get_wireless_stats, }; -#endif /* WIRELESS_EXT */ - /* * This defines the configuration part of the Wireless Extensions * Note : irq and spinlock protection will occur in the subroutines @@ -7187,7 +7174,6 @@ static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return rc; } -#ifdef WIRELESS_EXT /* * Get the Wireless stats out of the driver * Note : irq and spinlock protection will occur in the subroutines @@ -7260,7 +7246,6 @@ static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev) return &local->wstats; } -#endif /* WIRELESS_EXT */ #ifdef CISCO_EXT /* diff --git a/drivers/net/wireless/airport.c b/drivers/net/wireless/airport.c index 9d496703c465..7b321f7cf358 100644 --- a/drivers/net/wireless/airport.c +++ b/drivers/net/wireless/airport.c @@ -15,28 +15,11 @@ #define PFX DRIVER_NAME ": " #include - #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include +#include #include -#include -#include #include "orinoco.h" diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c index 587869d86eee..d57011028b72 100644 --- a/drivers/net/wireless/atmel.c +++ b/drivers/net/wireless/atmel.c @@ -618,12 +618,12 @@ static int atmel_lock_mac(struct atmel_private *priv); static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data); static void atmel_command_irq(struct atmel_private 
*priv); static int atmel_validate_channel(struct atmel_private *priv, int channel); -static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr *header, +static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr_4addr *header, u16 frame_len, u8 rssi); static void atmel_management_timer(u_long a); static void atmel_send_command(struct atmel_private *priv, int command, void *cmd, int cmd_size); static int atmel_send_command_wait(struct atmel_private *priv, int command, void *cmd, int cmd_size); -static void atmel_transmit_management_frame(struct atmel_private *priv, struct ieee80211_hdr *header, +static void atmel_transmit_management_frame(struct atmel_private *priv, struct ieee80211_hdr_4addr *header, u8 *body, int body_len); static u8 atmel_get_mib8(struct atmel_private *priv, u8 type, u8 index); @@ -827,7 +827,7 @@ static void tx_update_descriptor(struct atmel_private *priv, int is_bcast, u16 l static int start_tx (struct sk_buff *skb, struct net_device *dev) { struct atmel_private *priv = netdev_priv(dev); - struct ieee80211_hdr header; + struct ieee80211_hdr_4addr header; unsigned long flags; u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; u8 SNAP_RFC1024[6] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00}; @@ -902,7 +902,7 @@ static int start_tx (struct sk_buff *skb, struct net_device *dev) } static void atmel_transmit_management_frame(struct atmel_private *priv, - struct ieee80211_hdr *header, + struct ieee80211_hdr_4addr *header, u8 *body, int body_len) { u16 buff; @@ -917,7 +917,7 @@ static void atmel_transmit_management_frame(struct atmel_private *priv, tx_update_descriptor(priv, header->addr1[0] & 0x01, len, buff, TX_PACKET_TYPE_MGMT); } -static void fast_rx_path(struct atmel_private *priv, struct ieee80211_hdr *header, +static void fast_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr *header, u16 msdu_size, u16 rx_packet_loc, u32 crc) { /* fast path: unfragmented packet copy directly into skbuf */ @@ -990,7 +990,7 @@ static int probe_crc(struct atmel_private *priv, u16 packet_loc, u16 msdu_size) return (crc ^ 0xffffffff) == netcrc; } -static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr *header, +static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr *header, u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no, u8 frag_no, int more_frags) { u8 mac4[6]; @@ -1082,7 +1082,7 @@ static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr *heade static void rx_done_irq(struct atmel_private *priv) { int i; - struct ieee80211_hdr header; + struct ieee80211_hdr_4addr header; for (i = 0; atmel_rmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head)) == RX_DESC_FLAG_VALID && @@ -2650,7 +2650,7 @@ static void handle_beacon_probe(struct atmel_private *priv, u16 capability, u8 c static void send_authentication_request(struct atmel_private *priv, u8 *challenge, int challenge_len) { - struct ieee80211_hdr header; + struct ieee80211_hdr_4addr header; struct auth_body auth; header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH); @@ -2688,7 +2688,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc) { u8 *ssid_el_p; int bodysize; - struct ieee80211_hdr header; + struct ieee80211_hdr_4addr header; struct ass_req_format { u16 capability; u16 listen_interval; @@ -2738,7 +2738,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc) atmel_transmit_management_frame(priv, 
&header, (void *)&body, bodysize); } -static int is_frame_from_current_bss(struct atmel_private *priv, struct ieee80211_hdr *header) +static int is_frame_from_current_bss(struct atmel_private *priv, struct ieee80211_hdr_4addr *header) { if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS) return memcmp(header->addr3, priv->CurrentBSSID, 6) == 0; @@ -2788,7 +2788,7 @@ static int retrieve_bss(struct atmel_private *priv) } -static void store_bss_info(struct atmel_private *priv, struct ieee80211_hdr *header, +static void store_bss_info(struct atmel_private *priv, struct ieee80211_hdr_4addr *header, u16 capability, u16 beacon_period, u8 channel, u8 rssi, u8 ssid_len, u8 *ssid, int is_beacon) { @@ -3072,7 +3072,7 @@ static void atmel_smooth_qual(struct atmel_private *priv) } /* deals with incoming managment frames. */ -static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr *header, +static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr_4addr *header, u16 frame_len, u8 rssi) { u16 subtype; diff --git a/drivers/net/wireless/hermes.c b/drivers/net/wireless/hermes.c index 21c3d0d227e6..eba0d9d2b7c5 100644 --- a/drivers/net/wireless/hermes.c +++ b/drivers/net/wireless/hermes.c @@ -39,17 +39,10 @@ */ #include - #include -#include -#include -#include -#include -#include -#include #include -#include -#include +#include +#include #include "hermes.h" diff --git a/drivers/net/wireless/hermes.h b/drivers/net/wireless/hermes.h index 8c9e874c9118..ad28e3294360 100644 --- a/drivers/net/wireless/hermes.h +++ b/drivers/net/wireless/hermes.h @@ -30,9 +30,8 @@ * access to the hermes_t structure, and to the hardware */ -#include #include -#include +#include /* * Limits and constants @@ -192,13 +191,13 @@ #define HERMES_RXSTAT_WMP (0x6000) /* Wavelan-II Management Protocol frame */ struct hermes_tx_descriptor { - u16 status; - u16 reserved1; - u16 reserved2; - u32 sw_support; + __le16 status; + __le16 reserved1; + __le16 reserved2; + __le32 sw_support; u8 retry_count; u8 tx_rate; - u16 tx_control; + __le16 tx_control; } __attribute__ ((packed)); #define HERMES_TXSTAT_RETRYERR (0x0001) @@ -222,60 +221,60 @@ struct hermes_tx_descriptor { #define HERMES_INQ_SEC_STAT_AGERE (0xF202) struct hermes_tallies_frame { - u16 TxUnicastFrames; - u16 TxMulticastFrames; - u16 TxFragments; - u16 TxUnicastOctets; - u16 TxMulticastOctets; - u16 TxDeferredTransmissions; - u16 TxSingleRetryFrames; - u16 TxMultipleRetryFrames; - u16 TxRetryLimitExceeded; - u16 TxDiscards; - u16 RxUnicastFrames; - u16 RxMulticastFrames; - u16 RxFragments; - u16 RxUnicastOctets; - u16 RxMulticastOctets; - u16 RxFCSErrors; - u16 RxDiscards_NoBuffer; - u16 TxDiscardsWrongSA; - u16 RxWEPUndecryptable; - u16 RxMsgInMsgFragments; - u16 RxMsgInBadMsgFragments; + __le16 TxUnicastFrames; + __le16 TxMulticastFrames; + __le16 TxFragments; + __le16 TxUnicastOctets; + __le16 TxMulticastOctets; + __le16 TxDeferredTransmissions; + __le16 TxSingleRetryFrames; + __le16 TxMultipleRetryFrames; + __le16 TxRetryLimitExceeded; + __le16 TxDiscards; + __le16 RxUnicastFrames; + __le16 RxMulticastFrames; + __le16 RxFragments; + __le16 RxUnicastOctets; + __le16 RxMulticastOctets; + __le16 RxFCSErrors; + __le16 RxDiscards_NoBuffer; + __le16 TxDiscardsWrongSA; + __le16 RxWEPUndecryptable; + __le16 RxMsgInMsgFragments; + __le16 RxMsgInBadMsgFragments; /* Those last are probably not available in very old firmwares */ - u16 RxDiscards_WEPICVError; - u16 RxDiscards_WEPExcluded; + __le16 RxDiscards_WEPICVError; + __le16 
RxDiscards_WEPExcluded; } __attribute__ ((packed)); /* Grabbed from wlan-ng - Thanks Mark... - Jean II * This is the result of a scan inquiry command */ /* Structure describing info about an Access Point */ struct prism2_scan_apinfo { - u16 channel; /* Channel where the AP sits */ - u16 noise; /* Noise level */ - u16 level; /* Signal level */ + __le16 channel; /* Channel where the AP sits */ + __le16 noise; /* Noise level */ + __le16 level; /* Signal level */ u8 bssid[ETH_ALEN]; /* MAC address of the Access Point */ - u16 beacon_interv; /* Beacon interval */ - u16 capabilities; /* Capabilities */ - u16 essid_len; /* ESSID length */ + __le16 beacon_interv; /* Beacon interval */ + __le16 capabilities; /* Capabilities */ + __le16 essid_len; /* ESSID length */ u8 essid[32]; /* ESSID of the network */ u8 rates[10]; /* Bit rate supported */ - u16 proberesp_rate; /* Data rate of the response frame */ - u16 atim; /* ATIM window time, Kus (hostscan only) */ + __le16 proberesp_rate; /* Data rate of the response frame */ + __le16 atim; /* ATIM window time, Kus (hostscan only) */ } __attribute__ ((packed)); /* Same stuff for the Lucent/Agere card. * Thanks to h1kari - Jean II */ struct agere_scan_apinfo { - u16 channel; /* Channel where the AP sits */ - u16 noise; /* Noise level */ - u16 level; /* Signal level */ + __le16 channel; /* Channel where the AP sits */ + __le16 noise; /* Noise level */ + __le16 level; /* Signal level */ u8 bssid[ETH_ALEN]; /* MAC address of the Access Point */ - u16 beacon_interv; /* Beacon interval */ - u16 capabilities; /* Capabilities */ + __le16 beacon_interv; /* Beacon interval */ + __le16 capabilities; /* Capabilities */ /* bits: 0-ess, 1-ibss, 4-privacy [wep] */ - u16 essid_len; /* ESSID length */ + __le16 essid_len; /* ESSID length */ u8 essid[32]; /* ESSID of the network */ } __attribute__ ((packed)); @@ -283,16 +282,16 @@ struct agere_scan_apinfo { struct symbol_scan_apinfo { u8 channel; /* Channel where the AP sits */ u8 unknown1; /* 8 in 2.9x and 3.9x f/w, 0 otherwise */ - u16 noise; /* Noise level */ - u16 level; /* Signal level */ + __le16 noise; /* Noise level */ + __le16 level; /* Signal level */ u8 bssid[ETH_ALEN]; /* MAC address of the Access Point */ - u16 beacon_interv; /* Beacon interval */ - u16 capabilities; /* Capabilities */ + __le16 beacon_interv; /* Beacon interval */ + __le16 capabilities; /* Capabilities */ /* bits: 0-ess, 1-ibss, 4-privacy [wep] */ - u16 essid_len; /* ESSID length */ + __le16 essid_len; /* ESSID length */ u8 essid[32]; /* ESSID of the network */ - u16 rates[5]; /* Bit rate supported */ - u16 basic_rates; /* Basic rates bitmask */ + __le16 rates[5]; /* Bit rate supported */ + __le16 basic_rates; /* Basic rates bitmask */ u8 unknown2[6]; /* Always FF:FF:FF:FF:00:00 */ u8 unknown3[8]; /* Always 0, appeared in f/w 3.91-68 */ } __attribute__ ((packed)); @@ -312,7 +311,7 @@ union hermes_scan_info { #define HERMES_LINKSTATUS_ASSOC_FAILED (0x0006) struct hermes_linkstatus { - u16 linkstatus; /* Link status */ + __le16 linkstatus; /* Link status */ } __attribute__ ((packed)); struct hermes_response { @@ -321,8 +320,8 @@ struct hermes_response { /* "ID" structure - used for ESSID and station nickname */ struct hermes_idstring { - u16 len; - u16 val[16]; + __le16 len; + __le16 val[16]; } __attribute__ ((packed)); struct hermes_multicast { @@ -447,7 +446,7 @@ static inline void hermes_clear_words(struct hermes *hw, int off, unsigned count static inline int hermes_read_wordrec(hermes_t *hw, int bap, u16 rid, u16 *word) { - u16 rec; + __le16 
rec; int err; err = HERMES_READ_RECORD(hw, bap, rid, &rec); @@ -457,7 +456,7 @@ static inline int hermes_read_wordrec(hermes_t *hw, int bap, u16 rid, u16 *word) static inline int hermes_write_wordrec(hermes_t *hw, int bap, u16 rid, u16 word) { - u16 rec = cpu_to_le16(word); + __le16 rec = cpu_to_le16(word); return HERMES_WRITE_RECORD(hw, bap, rid, &rec); } diff --git a/drivers/net/wireless/hostap/hostap.c b/drivers/net/wireless/hostap/hostap.c index e7f5821b4942..6a96cd9f2685 100644 --- a/drivers/net/wireless/hostap/hostap.c +++ b/drivers/net/wireless/hostap/hostap.c @@ -716,9 +716,6 @@ static int prism2_close(struct net_device *dev) hostap_deauth_all_stas(dev, local->ap, 1); #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ - if (local->func->dev_close && local->func->dev_close(local)) - return 0; - if (dev == local->dev) { local->func->hw_shutdown(dev, HOSTAP_HW_ENABLE_CMDCOMPL); } @@ -766,9 +763,6 @@ static int prism2_open(struct net_device *dev) local->hw_downloading) return -ENODEV; - if (local->func->dev_open && local->func->dev_open(local)) - return 1; - if (!try_module_get(local->hw_module)) return -ENODEV; local->num_dev_open++; diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c index b0501243b175..ffac50899454 100644 --- a/drivers/net/wireless/hostap/hostap_80211_rx.c +++ b/drivers/net/wireless/hostap/hostap_80211_rx.c @@ -6,10 +6,10 @@ void hostap_dump_rx_80211(const char *name, struct sk_buff *skb, struct hostap_80211_rx_status *rx_stats) { - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; u16 fc; - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr_4addr *) skb->data; printk(KERN_DEBUG "%s: RX signal=%d noise=%d rate=%d len=%d " "jiffies=%ld\n", @@ -51,7 +51,7 @@ int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb, int hdrlen, phdrlen, head_need, tail_need; u16 fc; int prism_header, ret; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; iface = netdev_priv(dev); local = iface->local; @@ -70,7 +70,7 @@ int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb, phdrlen = 0; } - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr_4addr *) skb->data; fc = le16_to_cpu(hdr->frame_ctl); if (type == PRISM2_RX_MGMT && (fc & IEEE80211_FCTL_VERS)) { @@ -215,7 +215,7 @@ prism2_frag_cache_find(local_info_t *local, unsigned int seq, /* Called only as a tasklet (software IRQ) */ static struct sk_buff * -prism2_frag_cache_get(local_info_t *local, struct ieee80211_hdr *hdr) +prism2_frag_cache_get(local_info_t *local, struct ieee80211_hdr_4addr *hdr) { struct sk_buff *skb = NULL; u16 sc; @@ -229,7 +229,7 @@ prism2_frag_cache_get(local_info_t *local, struct ieee80211_hdr *hdr) if (frag == 0) { /* Reserve enough space to fit maximum frame length */ skb = dev_alloc_skb(local->dev->mtu + - sizeof(struct ieee80211_hdr) + + sizeof(struct ieee80211_hdr_4addr) + 8 /* LLC */ + 2 /* alignment */ + 8 /* WEP */ + ETH_ALEN /* WDS */); @@ -267,7 +267,7 @@ prism2_frag_cache_get(local_info_t *local, struct ieee80211_hdr *hdr) /* Called only as a tasklet (software IRQ) */ static int prism2_frag_cache_invalidate(local_info_t *local, - struct ieee80211_hdr *hdr) + struct ieee80211_hdr_4addr *hdr) { u16 sc; unsigned int seq; @@ -441,7 +441,7 @@ hostap_rx_frame_mgmt(local_info_t *local, struct sk_buff *skb, u16 stype) { if (local->iw_mode == IW_MODE_MASTER) { - hostap_update_sta_ps(local, (struct ieee80211_hdr *) + hostap_update_sta_ps(local, (struct ieee80211_hdr_4addr *) 
skb->data); } @@ -520,7 +520,7 @@ static inline struct net_device *prism2_rx_get_wds(local_info_t *local, static inline int -hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr *hdr, +hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr_4addr *hdr, u16 fc, struct net_device **wds) { /* FIX: is this really supposed to accept WDS frames only in Master @@ -579,13 +579,13 @@ static int hostap_is_eapol_frame(local_info_t *local, struct sk_buff *skb) { struct net_device *dev = local->dev; u16 fc, ethertype; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; u8 *pos; if (skb->len < 24) return 0; - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr_4addr *) skb->data; fc = le16_to_cpu(hdr->frame_ctl); /* check that the frame is unicast frame to us */ @@ -619,13 +619,13 @@ static inline int hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb, struct ieee80211_crypt_data *crypt) { - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; int res, hdrlen; if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL) return 0; - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr_4addr *) skb->data; hdrlen = hostap_80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); if (local->tkip_countermeasures && @@ -658,13 +658,13 @@ static inline int hostap_rx_frame_decrypt_msdu(local_info_t *local, struct sk_buff *skb, int keyidx, struct ieee80211_crypt_data *crypt) { - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; int res, hdrlen; if (crypt == NULL || crypt->ops->decrypt_msdu == NULL) return 0; - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr_4addr *) skb->data; hdrlen = hostap_80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); atomic_inc(&crypt->refcnt); @@ -689,7 +689,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, { struct hostap_interface *iface; local_info_t *local; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; size_t hdrlen; u16 fc, type, stype, sc; struct net_device *wds = NULL; @@ -716,7 +716,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, dev = local->ddev; iface = netdev_priv(dev); - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr_4addr *) skb->data; stats = hostap_get_stats(dev); if (skb->len < 10) @@ -737,7 +737,8 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, struct iw_quality wstats; wstats.level = rx_stats->signal; wstats.noise = rx_stats->noise; - wstats.updated = 6; /* No qual value */ + wstats.updated = IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_UPDATED + | IW_QUAL_QUAL_INVALID | IW_QUAL_DBM; /* Update spy records */ wireless_spy_update(dev, hdr->addr2, &wstats); } @@ -889,7 +890,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, if (local->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) && (keyidx = hostap_rx_frame_decrypt(local, skb, crypt)) < 0) goto rx_dropped; - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr_4addr *) skb->data; /* skb: hdr + (possibly fragmented) plaintext payload */ @@ -941,7 +942,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, /* this was the last fragment and the frame will be * delivered, so remove skb from fragment cache */ skb = frag_skb; - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr_4addr *) skb->data; prism2_frag_cache_invalidate(local, hdr); } @@ -952,7 +953,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, 
hostap_rx_frame_decrypt_msdu(local, skb, keyidx, crypt)) goto rx_dropped; - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr_4addr *) skb->data; if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !local->open_wep) { if (local->ieee_802_1x && hostap_is_eapol_frame(local, skb)) { diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c index 6358015f6526..9d24f8a38ac5 100644 --- a/drivers/net/wireless/hostap/hostap_80211_tx.c +++ b/drivers/net/wireless/hostap/hostap_80211_tx.c @@ -1,9 +1,9 @@ void hostap_dump_tx_80211(const char *name, struct sk_buff *skb) { - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; u16 fc; - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr_4addr *) skb->data; printk(KERN_DEBUG "%s: TX len=%d jiffies=%ld\n", name, skb->len, jiffies); @@ -41,7 +41,7 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev) struct hostap_interface *iface; local_info_t *local; int need_headroom, need_tailroom = 0; - struct ieee80211_hdr hdr; + struct ieee80211_hdr_4addr hdr; u16 fc, ethertype = 0; enum { WDS_NO = 0, WDS_OWN_FRAME, WDS_COMPLIANT_FRAME @@ -244,7 +244,7 @@ int hostap_mgmt_start_xmit(struct sk_buff *skb, struct net_device *dev) struct hostap_interface *iface; local_info_t *local; struct hostap_skb_tx_data *meta; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; u16 fc; iface = netdev_priv(dev); @@ -266,7 +266,7 @@ int hostap_mgmt_start_xmit(struct sk_buff *skb, struct net_device *dev) meta->iface = iface; if (skb->len >= IEEE80211_DATA_HDR3_LEN + sizeof(rfc1042_header) + 2) { - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr_4addr *) skb->data; fc = le16_to_cpu(hdr->frame_ctl); if (WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA && WLAN_FC_GET_STYPE(fc) == IEEE80211_STYPE_DATA) { @@ -289,7 +289,7 @@ struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb, { struct hostap_interface *iface; local_info_t *local; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; u16 fc; int hdr_len, res; @@ -303,7 +303,7 @@ struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb, if (local->tkip_countermeasures && crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) { - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr_4addr *) skb->data; if (net_ratelimit()) { printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " "TX packet to " MACSTR "\n", @@ -317,15 +317,15 @@ struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb, if (skb == NULL) return NULL; - if ((skb_headroom(skb) < crypt->ops->extra_prefix_len || - skb_tailroom(skb) < crypt->ops->extra_postfix_len) && - pskb_expand_head(skb, crypt->ops->extra_prefix_len, - crypt->ops->extra_postfix_len, GFP_ATOMIC)) { + if ((skb_headroom(skb) < crypt->ops->extra_mpdu_prefix_len || + skb_tailroom(skb) < crypt->ops->extra_mpdu_postfix_len) && + pskb_expand_head(skb, crypt->ops->extra_mpdu_prefix_len, + crypt->ops->extra_mpdu_postfix_len, GFP_ATOMIC)) { kfree_skb(skb); return NULL; } - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr_4addr *) skb->data; fc = le16_to_cpu(hdr->frame_ctl); hdr_len = hostap_80211_get_hdrlen(fc); @@ -360,7 +360,7 @@ int hostap_master_start_xmit(struct sk_buff *skb, struct net_device *dev) ap_tx_ret tx_ret; struct hostap_skb_tx_data *meta; int no_encrypt = 0; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; iface = netdev_priv(dev); local = iface->local; @@ -403,7 +403,7 @@ int 
hostap_master_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_ret = hostap_handle_sta_tx(local, &tx); skb = tx.skb; meta = (struct hostap_skb_tx_data *) skb->cb; - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr_4addr *) skb->data; fc = le16_to_cpu(hdr->frame_ctl); switch (tx_ret) { case AP_TX_CONTINUE: diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c index 930cef8367f2..9da94ab7f05f 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/hostap/hostap_ap.c @@ -591,14 +591,14 @@ static void hostap_ap_tx_cb(struct sk_buff *skb, int ok, void *data) { struct ap_data *ap = data; u16 fc; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; if (!ap->local->hostapd || !ap->local->apdev) { dev_kfree_skb(skb); return; } - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr_4addr *) skb->data; fc = le16_to_cpu(hdr->frame_ctl); /* Pass the TX callback frame to the hostapd; use 802.11 header version @@ -623,7 +623,7 @@ static void hostap_ap_tx_cb_auth(struct sk_buff *skb, int ok, void *data) { struct ap_data *ap = data; struct net_device *dev = ap->local->dev; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; u16 fc, *pos, auth_alg, auth_transaction, status; struct sta_info *sta = NULL; char *txt = NULL; @@ -633,7 +633,7 @@ static void hostap_ap_tx_cb_auth(struct sk_buff *skb, int ok, void *data) return; } - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr_4addr *) skb->data; fc = le16_to_cpu(hdr->frame_ctl); if (WLAN_FC_GET_TYPE(fc) != IEEE80211_FTYPE_MGMT || WLAN_FC_GET_STYPE(fc) != IEEE80211_STYPE_AUTH || @@ -692,7 +692,7 @@ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data) { struct ap_data *ap = data; struct net_device *dev = ap->local->dev; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; u16 fc, *pos, status; struct sta_info *sta = NULL; char *txt = NULL; @@ -702,7 +702,7 @@ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data) return; } - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr_4addr *) skb->data; fc = le16_to_cpu(hdr->frame_ctl); if (WLAN_FC_GET_TYPE(fc) != IEEE80211_FTYPE_MGMT || (WLAN_FC_GET_STYPE(fc) != IEEE80211_STYPE_ASSOC_RESP && @@ -757,12 +757,12 @@ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data) static void hostap_ap_tx_cb_poll(struct sk_buff *skb, int ok, void *data) { struct ap_data *ap = data; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; struct sta_info *sta; if (skb->len < 24) goto fail; - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr_4addr *) skb->data; if (ok) { spin_lock(&ap->sta_table_lock); sta = ap_get_sta(ap, hdr->addr1); @@ -918,7 +918,7 @@ static void prism2_send_mgmt(struct net_device *dev, { struct hostap_interface *iface; local_info_t *local; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; u16 fc; struct sk_buff *skb; struct hostap_skb_tx_data *meta; @@ -944,7 +944,7 @@ static void prism2_send_mgmt(struct net_device *dev, fc = type_subtype; hdrlen = hostap_80211_get_hdrlen(fc); - hdr = (struct ieee80211_hdr *) skb_put(skb, hdrlen); + hdr = (struct ieee80211_hdr_4addr *) skb_put(skb, hdrlen); if (body) memcpy(skb_put(skb, body_len), body, body_len); @@ -1256,14 +1256,14 @@ static char * ap_auth_make_challenge(struct ap_data *ap) } skb = dev_alloc_skb(WLAN_AUTH_CHALLENGE_LEN + - ap->crypt->extra_prefix_len + - 
ap->crypt->extra_postfix_len); + ap->crypt->extra_mpdu_prefix_len + + ap->crypt->extra_mpdu_postfix_len); if (skb == NULL) { kfree(tmpbuf); return NULL; } - skb_reserve(skb, ap->crypt->extra_prefix_len); + skb_reserve(skb, ap->crypt->extra_mpdu_prefix_len); memset(skb_put(skb, WLAN_AUTH_CHALLENGE_LEN), 0, WLAN_AUTH_CHALLENGE_LEN); if (ap->crypt->encrypt_mpdu(skb, 0, ap->crypt_priv)) { @@ -1272,7 +1272,7 @@ static char * ap_auth_make_challenge(struct ap_data *ap) return NULL; } - memcpy(tmpbuf, skb->data + ap->crypt->extra_prefix_len, + memcpy(tmpbuf, skb->data + ap->crypt->extra_mpdu_prefix_len, WLAN_AUTH_CHALLENGE_LEN); dev_kfree_skb(skb); @@ -1285,7 +1285,7 @@ static void handle_authen(local_info_t *local, struct sk_buff *skb, struct hostap_80211_rx_status *rx_stats) { struct net_device *dev = local->dev; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *) skb->data; size_t hdrlen; struct ap_data *ap = local->ap; char body[8 + WLAN_AUTH_CHALLENGE_LEN], *challenge = NULL; @@ -1498,7 +1498,7 @@ static void handle_assoc(local_info_t *local, struct sk_buff *skb, struct hostap_80211_rx_status *rx_stats, int reassoc) { struct net_device *dev = local->dev; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *) skb->data; char body[12], *p, *lpos; int len, left; u16 *pos; @@ -1705,7 +1705,7 @@ static void handle_deauth(local_info_t *local, struct sk_buff *skb, struct hostap_80211_rx_status *rx_stats) { struct net_device *dev = local->dev; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *) skb->data; char *body = (char *) (skb->data + IEEE80211_MGMT_HDR_LEN); int len; u16 reason_code, *pos; @@ -1746,7 +1746,7 @@ static void handle_disassoc(local_info_t *local, struct sk_buff *skb, struct hostap_80211_rx_status *rx_stats) { struct net_device *dev = local->dev; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *) skb->data; char *body = skb->data + IEEE80211_MGMT_HDR_LEN; int len; u16 reason_code, *pos; @@ -1784,7 +1784,7 @@ static void handle_disassoc(local_info_t *local, struct sk_buff *skb, /* Called only as a scheduled task for pending AP frames. */ static void ap_handle_data_nullfunc(local_info_t *local, - struct ieee80211_hdr *hdr) + struct ieee80211_hdr_4addr *hdr) { struct net_device *dev = local->dev; @@ -1801,7 +1801,7 @@ static void ap_handle_data_nullfunc(local_info_t *local, /* Called only as a scheduled task for pending AP frames. */ static void ap_handle_dropped_data(local_info_t *local, - struct ieee80211_hdr *hdr) + struct ieee80211_hdr_4addr *hdr) { struct net_device *dev = local->dev; struct sta_info *sta; @@ -1860,7 +1860,7 @@ static void pspoll_send_buffered(local_info_t *local, struct sta_info *sta, /* Called only as a scheduled task for pending AP frames. 
*/ static void handle_pspoll(local_info_t *local, - struct ieee80211_hdr *hdr, + struct ieee80211_hdr_4addr *hdr, struct hostap_80211_rx_status *rx_stats) { struct net_device *dev = local->dev; @@ -1979,7 +1979,7 @@ static void handle_wds_oper_queue(void *data) static void handle_beacon(local_info_t *local, struct sk_buff *skb, struct hostap_80211_rx_status *rx_stats) { - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *) skb->data; char *body = skb->data + IEEE80211_MGMT_HDR_LEN; int len, left; u16 *pos, beacon_int, capability; @@ -2137,11 +2137,11 @@ static void handle_ap_item(local_info_t *local, struct sk_buff *skb, struct net_device *dev = local->dev; #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ u16 fc, type, stype; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; /* FIX: should give skb->len to handler functions and check that the * buffer is long enough */ - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr_4addr *) skb->data; fc = le16_to_cpu(hdr->frame_ctl); type = WLAN_FC_GET_TYPE(fc); stype = WLAN_FC_GET_STYPE(fc); @@ -2258,7 +2258,7 @@ void hostap_rx(struct net_device *dev, struct sk_buff *skb, struct hostap_interface *iface; local_info_t *local; u16 fc; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; iface = netdev_priv(dev); local = iface->local; @@ -2268,7 +2268,7 @@ void hostap_rx(struct net_device *dev, struct sk_buff *skb, local->stats.rx_packets++; - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr_4addr *) skb->data; fc = le16_to_cpu(hdr->frame_ctl); if (local->ap->ap_policy == AP_OTHER_AP_SKIP_ALL && @@ -2289,7 +2289,7 @@ void hostap_rx(struct net_device *dev, struct sk_buff *skb, static void schedule_packet_send(local_info_t *local, struct sta_info *sta) { struct sk_buff *skb; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; struct hostap_80211_rx_status rx_stats; if (skb_queue_empty(&sta->tx_buf)) @@ -2302,7 +2302,7 @@ static void schedule_packet_send(local_info_t *local, struct sta_info *sta) return; } - hdr = (struct ieee80211_hdr *) skb_put(skb, 16); + hdr = (struct ieee80211_hdr_4addr *) skb_put(skb, 16); /* Generate a fake pspoll frame to start packet delivery */ hdr->frame_ctl = __constant_cpu_to_le16( @@ -2349,7 +2349,7 @@ static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[], qual[count].noise = HFA384X_LEVEL_TO_dBm(sta->last_rx_silence); qual[count].updated = sta->last_rx_updated; - sta->last_rx_updated = 0; + sta->last_rx_updated = IW_QUAL_DBM; count++; if (count >= buf_size) @@ -2467,7 +2467,7 @@ static int prism2_ap_translate_scan(struct net_device *dev, char *buffer) } #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ - sta->last_rx_updated = 0; + sta->last_rx_updated = IW_QUAL_DBM; /* To be continued, we should make good use of IWEVCUSTOM */ } @@ -2685,7 +2685,7 @@ ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx) struct sta_info *sta = NULL; struct sk_buff *skb = tx->skb; int set_tim, ret; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; struct hostap_skb_tx_data *meta; meta = (struct hostap_skb_tx_data *) skb->cb; @@ -2694,7 +2694,7 @@ ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx) meta->iface->type == HOSTAP_INTERFACE_STA) goto out; - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr_4addr *) skb->data; if (hdr->addr1[0] & 0x01) { /* broadcast/multicast frame - no AP 
related processing */ @@ -2821,10 +2821,10 @@ void hostap_handle_sta_release(void *ptr) void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb) { struct sta_info *sta; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; struct hostap_skb_tx_data *meta; - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr_4addr *) skb->data; meta = (struct hostap_skb_tx_data *) skb->cb; spin_lock(&local->ap->sta_table_lock); @@ -2892,7 +2892,7 @@ static void hostap_update_sta_ps2(local_info_t *local, struct sta_info *sta, /* Called only as a tasklet (software IRQ). Called for each RX frame to update * STA power saving state. pwrmgt is a flag from 802.11 frame_ctl field. */ -int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr *hdr) +int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr_4addr *hdr) { struct sta_info *sta; u16 fc; @@ -2925,12 +2925,12 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev, int ret; struct sta_info *sta; u16 fc, type, stype; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; if (local->ap == NULL) return AP_RX_CONTINUE; - hdr = (struct ieee80211_hdr *) skb->data; + hdr = (struct ieee80211_hdr_4addr *) skb->data; fc = le16_to_cpu(hdr->frame_ctl); type = WLAN_FC_GET_TYPE(fc); @@ -3058,7 +3058,7 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev, /* Called only as a tasklet (software IRQ) */ int hostap_handle_sta_crypto(local_info_t *local, - struct ieee80211_hdr *hdr, + struct ieee80211_hdr_4addr *hdr, struct ieee80211_crypt_data **crypt, void **sta_ptr) { @@ -3160,7 +3160,7 @@ int hostap_add_sta(struct ap_data *ap, u8 *sta_addr) /* Called only as a tasklet (software IRQ) */ int hostap_update_rx_stats(struct ap_data *ap, - struct ieee80211_hdr *hdr, + struct ieee80211_hdr_4addr *hdr, struct hostap_80211_rx_status *rx_stats) { struct sta_info *sta; @@ -3174,7 +3174,7 @@ int hostap_update_rx_stats(struct ap_data *ap, sta->last_rx_silence = rx_stats->noise; sta->last_rx_signal = rx_stats->signal; sta->last_rx_rate = rx_stats->rate; - sta->last_rx_updated = 7; + sta->last_rx_updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; if (rx_stats->rate == 10) sta->rx_count[0]++; else if (rx_stats->rate == 20) diff --git a/drivers/net/wireless/hostap/hostap_ap.h b/drivers/net/wireless/hostap/hostap_ap.h index 816a52bcea8f..6d00df69c2e3 100644 --- a/drivers/net/wireless/hostap/hostap_ap.h +++ b/drivers/net/wireless/hostap/hostap_ap.h @@ -233,7 +233,7 @@ struct hostap_tx_data { ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx); void hostap_handle_sta_release(void *ptr); void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb); -int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr *hdr); +int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr_4addr *hdr); typedef enum { AP_RX_CONTINUE, AP_RX_DROP, AP_RX_EXIT, AP_RX_CONTINUE_NOT_AUTHORIZED } ap_rx_ret; @@ -241,13 +241,13 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev, struct sk_buff *skb, struct hostap_80211_rx_status *rx_stats, int wds); -int hostap_handle_sta_crypto(local_info_t *local, struct ieee80211_hdr *hdr, +int hostap_handle_sta_crypto(local_info_t *local, struct ieee80211_hdr_4addr *hdr, struct ieee80211_crypt_data **crypt, void **sta_ptr); int hostap_is_sta_assoc(struct ap_data *ap, u8 *sta_addr); int hostap_is_sta_authorized(struct ap_data *ap, u8 *sta_addr); int hostap_add_sta(struct ap_data *ap, u8 
*sta_addr); -int hostap_update_rx_stats(struct ap_data *ap, struct ieee80211_hdr *hdr, +int hostap_update_rx_stats(struct ap_data *ap, struct ieee80211_hdr_4addr *hdr, struct hostap_80211_rx_status *rx_stats); void hostap_update_rates(local_info_t *local); void hostap_add_wds_links(local_info_t *local); diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c index faa83badf0a1..2643976a6677 100644 --- a/drivers/net/wireless/hostap/hostap_cs.c +++ b/drivers/net/wireless/hostap/hostap_cs.c @@ -492,42 +492,10 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr) } -static int prism2_pccard_dev_open(local_info_t *local) -{ - struct hostap_cs_priv *hw_priv = local->hw_priv; - hw_priv->link->open++; - return 0; -} - - -static int prism2_pccard_dev_close(local_info_t *local) -{ - struct hostap_cs_priv *hw_priv; - - if (local == NULL || local->hw_priv == NULL) - return 1; - hw_priv = local->hw_priv; - if (hw_priv->link == NULL) - return 1; - - if (!hw_priv->link->open) { - printk(KERN_WARNING "%s: prism2_pccard_dev_close(): " - "link not open?!\n", local->dev->name); - return 1; - } - - hw_priv->link->open--; - - return 0; -} - - static struct prism2_helper_functions prism2_pccard_funcs = { .card_present = prism2_pccard_card_present, .cor_sreset = prism2_pccard_cor_sreset, - .dev_open = prism2_pccard_dev_open, - .dev_close = prism2_pccard_dev_close, .genesis_reset = prism2_pccard_genesis_reset, .hw_type = HOSTAP_HW_PCCARD, }; @@ -597,13 +565,14 @@ static void prism2_detach(dev_link_t *link) *linkp = link->next; /* release net devices */ if (link->priv) { + struct hostap_cs_priv *hw_priv; struct net_device *dev; struct hostap_interface *iface; dev = link->priv; iface = netdev_priv(dev); - kfree(iface->local->hw_priv); - iface->local->hw_priv = NULL; + hw_priv = iface->local->hw_priv; prism2_free_local_data(dev); + kfree(hw_priv); } kfree(link); } @@ -883,6 +852,13 @@ static int prism2_event(event_t event, int priority, { dev_link_t *link = args->client_data; struct net_device *dev = (struct net_device *) link->priv; + int dev_open = 0; + + if (link->state & DEV_CONFIG) { + struct hostap_interface *iface = netdev_priv(dev); + if (iface && iface->local) + dev_open = iface->local->num_dev_open > 0; + } switch (event) { case CS_EVENT_CARD_INSERTION: @@ -911,7 +887,7 @@ static int prism2_event(event_t event, int priority, case CS_EVENT_RESET_PHYSICAL: PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_RESET_PHYSICAL\n", dev_info); if (link->state & DEV_CONFIG) { - if (link->open) { + if (dev_open) { netif_stop_queue(dev); netif_device_detach(dev); } @@ -931,8 +907,8 @@ static int prism2_event(event_t event, int priority, pcmcia_request_configuration(link->handle, &link->conf); prism2_hw_shutdown(dev, 1); - prism2_hw_config(dev, link->open ? 0 : 1); - if (link->open) { + prism2_hw_config(dev, dev_open ? 0 : 1); + if (dev_open) { netif_device_attach(dev); netif_start_queue(dev); } diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c index e533a663deda..59fc15572395 100644 --- a/drivers/net/wireless/hostap/hostap_hw.c +++ b/drivers/net/wireless/hostap/hostap_hw.c @@ -3322,6 +3322,18 @@ static void prism2_free_local_data(struct net_device *dev) iface = netdev_priv(dev); local = iface->local; + /* Unregister all netdevs before freeing local data. 
*/ + list_for_each_safe(ptr, n, &local->hostap_interfaces) { + iface = list_entry(ptr, struct hostap_interface, list); + if (iface->type == HOSTAP_INTERFACE_MASTER) { + /* special handling for this interface below */ + continue; + } + hostap_remove_interface(iface->dev, 0, 1); + } + + unregister_netdev(local->dev); + flush_scheduled_work(); if (timer_pending(&local->crypt_deinit_timer)) @@ -3382,15 +3394,6 @@ static void prism2_free_local_data(struct net_device *dev) prism2_download_free_data(local->dl_sec); #endif /* PRISM2_DOWNLOAD_SUPPORT */ - list_for_each_safe(ptr, n, &local->hostap_interfaces) { - iface = list_entry(ptr, struct hostap_interface, list); - if (iface->type == HOSTAP_INTERFACE_MASTER) { - /* special handling for this interface below */ - continue; - } - hostap_remove_interface(iface->dev, 0, 1); - } - prism2_clear_set_tim_queue(local); list_for_each_safe(ptr, n, &local->bss_list) { @@ -3403,7 +3406,6 @@ static void prism2_free_local_data(struct net_device *dev) kfree(local->last_scan_results); kfree(local->generic_elem); - unregister_netdev(local->dev); free_netdev(local->dev); } diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c index e720369a3515..53f5246c40aa 100644 --- a/drivers/net/wireless/hostap/hostap_ioctl.c +++ b/drivers/net/wireless/hostap/hostap_ioctl.c @@ -50,7 +50,8 @@ static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev) #endif /* in_atomic */ if (update && prism2_update_comms_qual(dev) == 0) - wstats->qual.updated = 7; + wstats->qual.updated = IW_QUAL_ALL_UPDATED | + IW_QUAL_DBM; wstats->qual.qual = local->comms_qual; wstats->qual.level = local->avg_signal; @@ -59,7 +60,7 @@ static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev) wstats->qual.qual = 0; wstats->qual.level = 0; wstats->qual.noise = 0; - wstats->qual.updated = 0; + wstats->qual.updated = IW_QUAL_ALL_INVALID; } return wstats; @@ -1827,13 +1828,6 @@ static char * __prism2_translate_scan(local_info_t *local, iwe.cmd = SIOCGIWAP; iwe.u.ap_addr.sa_family = ARPHRD_ETHER; memcpy(iwe.u.ap_addr.sa_data, bssid, ETH_ALEN); - /* FIX: - * I do not know how this is possible, but iwe_stream_add_event - * seems to re-order memcpy execution so that len is set only - * after copying.. Pre-setting len here "fixes" this, but real - * problems should be solved (after which these iwe.len - * settings could be removed from this function). 
*/ - iwe.len = IW_EV_ADDR_LEN; current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_ADDR_LEN); @@ -1843,7 +1837,6 @@ static char * __prism2_translate_scan(local_info_t *local, iwe.cmd = SIOCGIWESSID; iwe.u.data.length = ssid_len; iwe.u.data.flags = 1; - iwe.len = IW_EV_POINT_LEN + iwe.u.data.length; current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, ssid); memset(&iwe, 0, sizeof(iwe)); @@ -1859,7 +1852,6 @@ static char * __prism2_translate_scan(local_info_t *local, iwe.u.mode = IW_MODE_MASTER; else iwe.u.mode = IW_MODE_ADHOC; - iwe.len = IW_EV_UINT_LEN; current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_UINT_LEN); } @@ -1877,7 +1869,6 @@ static char * __prism2_translate_scan(local_info_t *local, if (chan > 0) { iwe.u.freq.m = freq_list[le16_to_cpu(chan - 1)] * 100000; iwe.u.freq.e = 1; - iwe.len = IW_EV_FREQ_LEN; current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_FREQ_LEN); } @@ -1894,7 +1885,10 @@ static char * __prism2_translate_scan(local_info_t *local, iwe.u.qual.noise = HFA384X_LEVEL_TO_dBm(le16_to_cpu(scan->anl)); } - iwe.len = IW_EV_QUAL_LEN; + iwe.u.qual.updated = IW_QUAL_LEVEL_UPDATED + | IW_QUAL_NOISE_UPDATED + | IW_QUAL_QUAL_INVALID + | IW_QUAL_DBM; current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_QUAL_LEN); } @@ -1906,7 +1900,6 @@ static char * __prism2_translate_scan(local_info_t *local, else iwe.u.data.flags = IW_ENCODE_DISABLED; iwe.u.data.length = 0; - iwe.len = IW_EV_POINT_LEN + iwe.u.data.length; current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, ""); /* TODO: add SuppRates into BSS table */ @@ -1930,7 +1923,7 @@ static char * __prism2_translate_scan(local_info_t *local, } /* TODO: add BeaconInt,resp_rate,atim into BSS table */ - buf = kmalloc(MAX_WPA_IE_LEN * 2 + 30, GFP_KERNEL); + buf = kmalloc(MAX_WPA_IE_LEN * 2 + 30, GFP_ATOMIC); if (buf && scan) { memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVCUSTOM; diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c index 025f8cdb5566..da0c80fb941c 100644 --- a/drivers/net/wireless/hostap/hostap_pci.c +++ b/drivers/net/wireless/hostap/hostap_pci.c @@ -59,11 +59,13 @@ static struct pci_device_id prism2_pci_id_table[] __devinitdata = { static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v) { struct hostap_interface *iface; + struct hostap_pci_priv *hw_priv; local_info_t *local; unsigned long flags; iface = netdev_priv(dev); local = iface->local; + hw_priv = local->hw_priv; spin_lock_irqsave(&local->lock, flags); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v); @@ -74,12 +76,14 @@ static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v) static inline u8 hfa384x_inb_debug(struct net_device *dev, int a) { struct hostap_interface *iface; + struct hostap_pci_priv *hw_priv; local_info_t *local; unsigned long flags; u8 v; iface = netdev_priv(dev); local = iface->local; + hw_priv = local->hw_priv; spin_lock_irqsave(&local->lock, flags); v = readb(hw_priv->mem_start + a); @@ -91,11 +95,13 @@ static inline u8 hfa384x_inb_debug(struct net_device *dev, int a) static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v) { struct hostap_interface *iface; + struct hostap_pci_priv *hw_priv; local_info_t *local; unsigned long flags; iface = netdev_priv(dev); local = iface->local; + hw_priv = local->hw_priv; spin_lock_irqsave(&local->lock, flags); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v); @@ -106,12 +112,14 @@ static inline void 
hfa384x_outw_debug(struct net_device *dev, int a, u16 v) static inline u16 hfa384x_inw_debug(struct net_device *dev, int a) { struct hostap_interface *iface; + struct hostap_pci_priv *hw_priv; local_info_t *local; unsigned long flags; u16 v; iface = netdev_priv(dev); local = iface->local; + hw_priv = local->hw_priv; spin_lock_irqsave(&local->lock, flags); v = readw(hw_priv->mem_start + a); @@ -277,8 +285,6 @@ static struct prism2_helper_functions prism2_pci_funcs = { .card_present = NULL, .cor_sreset = prism2_pci_cor_sreset, - .dev_open = NULL, - .dev_close = NULL, .genesis_reset = prism2_pci_genesis_reset, .hw_type = HOSTAP_HW_PCI, }; @@ -352,8 +358,6 @@ static int prism2_pci_probe(struct pci_dev *pdev, return hostap_hw_ready(dev); fail: - kfree(hw_priv); - if (irq_registered && dev) free_irq(dev->irq, dev); @@ -364,10 +368,8 @@ static int prism2_pci_probe(struct pci_dev *pdev, err_out_disable: pci_disable_device(pdev); - kfree(hw_priv); - if (local) - local->hw_priv = NULL; prism2_free_local_data(dev); + kfree(hw_priv); return -ENODEV; } @@ -392,9 +394,8 @@ static void prism2_pci_remove(struct pci_dev *pdev) free_irq(dev->irq, dev); mem_start = hw_priv->mem_start; - kfree(hw_priv); - iface->local->hw_priv = NULL; prism2_free_local_data(dev); + kfree(hw_priv); iounmap(mem_start); @@ -441,7 +442,7 @@ static int prism2_pci_resume(struct pci_dev *pdev) MODULE_DEVICE_TABLE(pci, prism2_pci_id_table); static struct pci_driver prism2_pci_drv_id = { - .name = "prism2_pci", + .name = "hostap_pci", .id_table = prism2_pci_id_table, .probe = prism2_pci_probe, .remove = prism2_pci_remove, diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c index 474ef83d813e..78d67b408b2f 100644 --- a/drivers/net/wireless/hostap/hostap_plx.c +++ b/drivers/net/wireless/hostap/hostap_plx.c @@ -328,8 +328,6 @@ static struct prism2_helper_functions prism2_plx_funcs = { .card_present = NULL, .cor_sreset = prism2_plx_cor_sreset, - .dev_open = NULL, - .dev_close = NULL, .genesis_reset = prism2_plx_genesis_reset, .hw_type = HOSTAP_HW_PLX, }; @@ -570,10 +568,8 @@ static int prism2_plx_probe(struct pci_dev *pdev, return hostap_hw_ready(dev); fail: - kfree(hw_priv); - if (local) - local->hw_priv = NULL; prism2_free_local_data(dev); + kfree(hw_priv); if (irq_registered && dev) free_irq(dev->irq, dev); @@ -606,9 +602,8 @@ static void prism2_plx_remove(struct pci_dev *pdev) if (dev->irq) free_irq(dev->irq, dev); - kfree(iface->local->hw_priv); - iface->local->hw_priv = NULL; prism2_free_local_data(dev); + kfree(hw_priv); pci_disable_device(pdev); } @@ -616,7 +611,7 @@ static void prism2_plx_remove(struct pci_dev *pdev) MODULE_DEVICE_TABLE(pci, prism2_plx_id_table); static struct pci_driver prism2_plx_drv_id = { - .name = "prism2_plx", + .name = "hostap_plx", .id_table = prism2_plx_id_table, .probe = prism2_plx_probe, .remove = prism2_plx_remove, diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h index cc061e1560d3..cfd801559492 100644 --- a/drivers/net/wireless/hostap/hostap_wlan.h +++ b/drivers/net/wireless/hostap/hostap_wlan.h @@ -552,8 +552,6 @@ struct prism2_helper_functions { * (hostap_{cs,plx,pci}.c */ int (*card_present)(local_info_t *local); void (*cor_sreset)(local_info_t *local); - int (*dev_open)(local_info_t *local); - int (*dev_close)(local_info_t *local); void (*genesis_reset)(local_info_t *local, int hcr); /* the following functions are from hostap_hw.c, but they may have some diff --git a/drivers/net/wireless/ipw2100.c 
b/drivers/net/wireless/ipw2100.c index 2414e6493aa5..ad7f8cd76db9 100644 --- a/drivers/net/wireless/ipw2100.c +++ b/drivers/net/wireless/ipw2100.c @@ -800,8 +800,7 @@ static int ipw2100_hw_send_command(struct ipw2100_priv *priv, * doesn't seem to have as many firmware restart cycles... * * As a test, we're sticking in a 1/100s delay here */ - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(HZ / 100); + schedule_timeout_uninterruptible(msecs_to_jiffies(10)); return 0; @@ -1256,8 +1255,7 @@ static int ipw2100_start_adapter(struct ipw2100_priv *priv) IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n"); i = 5000; do { - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(40 * HZ / 1000); + schedule_timeout_uninterruptible(msecs_to_jiffies(40)); /* Todo... wait for sync command ... */ read_register(priv->net_dev, IPW_REG_INTA, &inta); @@ -1411,8 +1409,7 @@ static int ipw2100_hw_phy_off(struct ipw2100_priv *priv) (val2 & IPW2100_COMMAND_PHY_OFF)) return 0; - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(HW_PHY_OFF_LOOP_DELAY); + schedule_timeout_uninterruptible(HW_PHY_OFF_LOOP_DELAY); } return -EIO; @@ -1466,7 +1463,7 @@ fail_up: static int ipw2100_hw_stop_adapter(struct ipw2100_priv *priv) { -#define HW_POWER_DOWN_DELAY (HZ / 10) +#define HW_POWER_DOWN_DELAY (msecs_to_jiffies(100)) struct host_command cmd = { .host_command = HOST_PRE_POWER_DOWN, @@ -1520,10 +1517,8 @@ static int ipw2100_hw_stop_adapter(struct ipw2100_priv *priv) printk(KERN_WARNING DRV_NAME ": " "%s: Power down command failed: Error %d\n", priv->net_dev->name, err); - else { - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(HW_POWER_DOWN_DELAY); - } + else + schedule_timeout_uninterruptible(HW_POWER_DOWN_DELAY); } priv->status &= ~STATUS_ENABLED; @@ -2953,7 +2948,7 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv) int next = txq->next; int i = 0; struct ipw2100_data_header *ipw_hdr; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_3addr *hdr; while (!list_empty(&priv->tx_pend_list)) { /* if there isn't enough space in TBD queue, then @@ -2989,7 +2984,7 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv) packet->index = txq->next; ipw_hdr = packet->info.d_struct.data; - hdr = (struct ieee80211_hdr *)packet->info.d_struct.txb-> + hdr = (struct ieee80211_hdr_3addr *)packet->info.d_struct.txb-> fragments[0]->data; if (priv->ieee->iw_mode == IW_MODE_INFRA) { @@ -3274,7 +3269,8 @@ static irqreturn_t ipw2100_interrupt(int irq, void *data, return IRQ_NONE; } -static int ipw2100_tx(struct ieee80211_txb *txb, struct net_device *dev) +static int ipw2100_tx(struct ieee80211_txb *txb, struct net_device *dev, + int pri) { struct ipw2100_priv *priv = ieee80211_priv(dev); struct list_head *element; diff --git a/drivers/net/wireless/ipw2100.h b/drivers/net/wireless/ipw2100.h index 2a3cdbd50168..c9e99ce15d66 100644 --- a/drivers/net/wireless/ipw2100.h +++ b/drivers/net/wireless/ipw2100.h @@ -808,7 +808,7 @@ struct ipw2100_priv { struct ipw2100_rx { union { unsigned char payload[IPW_RX_NIC_BUFFER_LENGTH]; - struct ieee80211_hdr header; + struct ieee80211_hdr_4addr header; u32 status; struct ipw2100_notification notification; struct ipw2100_cmd_header command; diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index b7f275c00de3..de4e6c23e4b8 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c @@ -4904,7 +4904,7 @@ static void ipw_rx(struct ipw_priv *priv) { struct ipw_rx_mem_buffer *rxb; struct ipw_rx_packet 
*pkt; - struct ieee80211_hdr *header; + struct ieee80211_hdr_4addr *header; u32 r, w, i; u8 network_packet; @@ -4967,8 +4967,9 @@ static void ipw_rx(struct ipw_priv *priv) #endif header = - (struct ieee80211_hdr *)(rxb->skb->data + - IPW_RX_FRAME_SIZE); + (struct ieee80211_hdr_4addr *)(rxb->skb-> + data + + IPW_RX_FRAME_SIZE); /* TODO: Check Ad-Hoc dest/source and make sure * that we are actually parsing these packets * correctly -- we should probably use the @@ -5317,8 +5318,6 @@ static int ipw_wx_set_freq(struct net_device *dev, IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m); return ipw_set_channel(priv, (u8) fwrq->m); - - return 0; } static int ipw_wx_get_freq(struct net_device *dev, @@ -6010,12 +6009,12 @@ static int ipw_wx_set_wireless_mode(struct net_device *dev, } if (priv->adapter == IPW_2915ABG) { - priv->ieee->abg_ture = 1; + priv->ieee->abg_true = 1; if (mode & IEEE_A) { band |= IEEE80211_52GHZ_BAND; modulation |= IEEE80211_OFDM_MODULATION; } else - priv->ieee->abg_ture = 0; + priv->ieee->abg_true = 0; } else { if (mode & IEEE_A) { IPW_WARNING("Attempt to set 2200BG into " @@ -6023,20 +6022,20 @@ static int ipw_wx_set_wireless_mode(struct net_device *dev, return -EINVAL; } - priv->ieee->abg_ture = 0; + priv->ieee->abg_true = 0; } if (mode & IEEE_B) { band |= IEEE80211_24GHZ_BAND; modulation |= IEEE80211_CCK_MODULATION; } else - priv->ieee->abg_ture = 0; + priv->ieee->abg_true = 0; if (mode & IEEE_G) { band |= IEEE80211_24GHZ_BAND; modulation |= IEEE80211_OFDM_MODULATION; } else - priv->ieee->abg_ture = 0; + priv->ieee->abg_true = 0; priv->ieee->mode = mode; priv->ieee->freq_band = band; @@ -6325,7 +6324,7 @@ we need to heavily modify the ieee80211_skb_to_txb. static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb) { - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) + struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *) txb->fragments[0]->data; int i = 0; struct tfd_frame *tfd; @@ -6448,7 +6447,7 @@ static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb) } static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb, - struct net_device *dev) + struct net_device *dev, int pri) { struct ipw_priv *priv = ieee80211_priv(dev); unsigned long flags; @@ -7108,7 +7107,7 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) printk(KERN_INFO DRV_NAME ": Detected Intel PRO/Wireless 2915ABG Network " "Connection\n"); - priv->ieee->abg_ture = 1; + priv->ieee->abg_true = 1; band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND; modulation = IEEE80211_OFDM_MODULATION | IEEE80211_CCK_MODULATION; @@ -7124,7 +7123,7 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ": Detected Intel PRO/Wireless 2200BG Network " "Connection\n"); - priv->ieee->abg_ture = 0; + priv->ieee->abg_true = 0; band = IEEE80211_24GHZ_BAND; modulation = IEEE80211_OFDM_MODULATION | IEEE80211_CCK_MODULATION; diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h index 5b00882133f9..e9cf32bf3e31 100644 --- a/drivers/net/wireless/ipw2200.h +++ b/drivers/net/wireless/ipw2200.h @@ -1654,12 +1654,12 @@ static const long ipw_frequencies[] = { #define IPW_MAX_CONFIG_RETRIES 10 -static inline u32 frame_hdr_len(struct ieee80211_hdr *hdr) +static inline u32 frame_hdr_len(struct ieee80211_hdr_4addr *hdr) { u32 retval; u16 fc; - retval = sizeof(struct ieee80211_hdr); + retval = sizeof(struct ieee80211_hdr_3addr); fc = le16_to_cpu(hdr->frame_ctl); /* diff --git 
a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c index ca6c03c89926..92793b958e32 100644 --- a/drivers/net/wireless/netwave_cs.c +++ b/drivers/net/wireless/netwave_cs.c @@ -57,9 +57,7 @@ #include #ifdef CONFIG_NET_RADIO #include -#if WIRELESS_EXT > 12 #include -#endif /* WIRELESS_EXT > 12 */ #endif #include @@ -225,10 +223,7 @@ static void update_stats(struct net_device *dev); static struct net_device_stats *netwave_get_stats(struct net_device *dev); /* Wireless extensions */ -#ifdef WIRELESS_EXT static struct iw_statistics* netwave_get_wireless_stats(struct net_device *dev); -#endif -static int netwave_ioctl(struct net_device *, struct ifreq *, int); static void set_multicast_list(struct net_device *dev); @@ -260,26 +255,7 @@ static dev_link_t *dev_list; because they generally can't be allocated dynamically. */ -#if WIRELESS_EXT <= 12 -/* Wireless extensions backward compatibility */ - -/* Part of iw_handler prototype we need */ -struct iw_request_info -{ - __u16 cmd; /* Wireless Extension command */ - __u16 flags; /* More to come ;-) */ -}; - -/* Wireless Extension Backward compatibility - Jean II - * If the new wireless device private ioctl range is not defined, - * default to standard device private ioctl range */ -#ifndef SIOCIWFIRSTPRIV -#define SIOCIWFIRSTPRIV SIOCDEVPRIVATE -#endif /* SIOCIWFIRSTPRIV */ - -#else /* WIRELESS_EXT <= 12 */ static const struct iw_handler_def netwave_handler_def; -#endif /* WIRELESS_EXT <= 12 */ #define SIOCGIPSNAP SIOCIWFIRSTPRIV + 1 /* Site Survey Snapshot */ @@ -319,9 +295,7 @@ typedef struct netwave_private { struct timer_list watchdog; /* To avoid blocking state */ struct site_survey nss; struct net_device_stats stats; -#ifdef WIRELESS_EXT struct iw_statistics iw_stats; /* Wireless stats */ -#endif } netwave_private; #ifdef NETWAVE_STATS @@ -353,7 +327,6 @@ static inline void wait_WOC(unsigned int iobase) while ((inb(iobase + NETWAVE_REG_ASR) & 0x8) != 0x8) ; } -#ifdef WIRELESS_EXT static void netwave_snapshot(netwave_private *priv, u_char __iomem *ramBase, kio_addr_t iobase) { u_short resultBuffer; @@ -376,9 +349,7 @@ static void netwave_snapshot(netwave_private *priv, u_char __iomem *ramBase, sizeof(struct site_survey)); } } -#endif -#ifdef WIRELESS_EXT /* * Function netwave_get_wireless_stats (dev) * @@ -411,7 +382,6 @@ static struct iw_statistics *netwave_get_wireless_stats(struct net_device *dev) return &priv->iw_stats; } -#endif /* * Function netwave_attach (void) @@ -471,13 +441,7 @@ static dev_link_t *netwave_attach(void) dev->get_stats = &netwave_get_stats; dev->set_multicast_list = &set_multicast_list; /* wireless extensions */ -#if WIRELESS_EXT <= 16 - dev->get_wireless_stats = &netwave_get_wireless_stats; -#endif /* WIRELESS_EXT <= 16 */ -#if WIRELESS_EXT > 12 dev->wireless_handlers = (struct iw_handler_def *)&netwave_handler_def; -#endif /* WIRELESS_EXT > 12 */ - dev->do_ioctl = &netwave_ioctl; dev->tx_timeout = &netwave_watchdog; dev->watchdog_timeo = TX_TIMEOUT; @@ -576,13 +540,8 @@ static int netwave_set_nwid(struct net_device *dev, /* Disable interrupts & save flags */ spin_lock_irqsave(&priv->spinlock, flags); -#if WIRELESS_EXT > 8 if(!wrqu->nwid.disabled) { domain = wrqu->nwid.value; -#else /* WIRELESS_EXT > 8 */ - if(wrqu->nwid.on) { - domain = wrqu->nwid.nwid; -#endif /* WIRELESS_EXT > 8 */ printk( KERN_DEBUG "Setting domain to 0x%x%02x\n", (domain >> 8) & 0x01, domain & 0xff); wait_WOC(iobase); @@ -606,15 +565,9 @@ static int netwave_get_nwid(struct net_device *dev, union iwreq_data *wrqu, char *extra) { 
-#if WIRELESS_EXT > 8 wrqu->nwid.value = domain; wrqu->nwid.disabled = 0; wrqu->nwid.fixed = 1; -#else /* WIRELESS_EXT > 8 */ - wrqu->nwid.nwid = domain; - wrqu->nwid.on = 1; -#endif /* WIRELESS_EXT > 8 */ - return 0; } @@ -657,17 +610,11 @@ static int netwave_get_scramble(struct net_device *dev, { key[1] = scramble_key & 0xff; key[0] = (scramble_key>>8) & 0xff; -#if WIRELESS_EXT > 8 wrqu->encoding.flags = IW_ENCODE_ENABLED; wrqu->encoding.length = 2; -#else /* WIRELESS_EXT > 8 */ - wrqu->encoding.method = 1; -#endif /* WIRELESS_EXT > 8 */ - return 0; } -#if WIRELESS_EXT > 8 /* * Wireless Handler : get mode */ @@ -683,7 +630,6 @@ static int netwave_get_mode(struct net_device *dev, return 0; } -#endif /* WIRELESS_EXT > 8 */ /* * Wireless Handler : get range info @@ -702,11 +648,9 @@ static int netwave_get_range(struct net_device *dev, /* Set all the info we don't care or don't know about to zero */ memset(range, 0, sizeof(struct iw_range)); -#if WIRELESS_EXT > 10 /* Set the Wireless Extension versions */ range->we_version_compiled = WIRELESS_EXT; range->we_version_source = 9; /* Nothing for us in v10 and v11 */ -#endif /* WIRELESS_EXT > 10 */ /* Set information in the range struct */ range->throughput = 450 * 1000; /* don't argue on this ! */ @@ -720,16 +664,12 @@ static int netwave_get_range(struct net_device *dev, range->max_qual.level = 255; range->max_qual.noise = 0; -#if WIRELESS_EXT > 7 range->num_bitrates = 1; range->bitrate[0] = 1000000; /* 1 Mb/s */ -#endif /* WIRELESS_EXT > 7 */ -#if WIRELESS_EXT > 8 range->encoding_size[0] = 2; /* 16 bits scrambling */ range->num_encoding_sizes = 1; range->max_encoding_tokens = 1; /* Only one key possible */ -#endif /* WIRELESS_EXT > 8 */ return ret; } @@ -775,8 +715,6 @@ static const struct iw_priv_args netwave_private_args[] = { "getsitesurvey" }, }; -#if WIRELESS_EXT > 12 - static const iw_handler netwave_handler[] = { NULL, /* SIOCSIWNAME */ @@ -839,131 +777,8 @@ static const struct iw_handler_def netwave_handler_def = .standard = (iw_handler *) netwave_handler, .private = (iw_handler *) netwave_private_handler, .private_args = (struct iw_priv_args *) netwave_private_args, -#if WIRELESS_EXT > 16 .get_wireless_stats = netwave_get_wireless_stats, -#endif /* WIRELESS_EXT > 16 */ }; -#endif /* WIRELESS_EXT > 12 */ - -/* - * Function netwave_ioctl (dev, rq, cmd) - * - * Perform ioctl : config & info stuff - * This is the stuff that are treated the wireless extensions (iwconfig) - * - */ -static int netwave_ioctl(struct net_device *dev, /* ioctl device */ - struct ifreq *rq, /* Data passed */ - int cmd) /* Ioctl number */ -{ - int ret = 0; -#ifdef WIRELESS_EXT -#if WIRELESS_EXT <= 12 - struct iwreq *wrq = (struct iwreq *) rq; -#endif -#endif - - DEBUG(0, "%s: ->netwave_ioctl(cmd=0x%X)\n", dev->name, cmd); - - /* Look what is the request */ - switch(cmd) { - /* --------------- WIRELESS EXTENSIONS --------------- */ -#ifdef WIRELESS_EXT -#if WIRELESS_EXT <= 12 - case SIOCGIWNAME: - netwave_get_name(dev, NULL, &(wrq->u), NULL); - break; - case SIOCSIWNWID: - ret = netwave_set_nwid(dev, NULL, &(wrq->u), NULL); - break; - case SIOCGIWNWID: - ret = netwave_get_nwid(dev, NULL, &(wrq->u), NULL); - break; -#if WIRELESS_EXT > 8 /* Note : The API did change... 
*/ - case SIOCGIWENCODE: - /* Get scramble key */ - if(wrq->u.encoding.pointer != (caddr_t) 0) - { - char key[2]; - ret = netwave_get_scramble(dev, NULL, &(wrq->u), key); - if(copy_to_user(wrq->u.encoding.pointer, key, 2)) - ret = -EFAULT; - } - break; - case SIOCSIWENCODE: - /* Set scramble key */ - if(wrq->u.encoding.pointer != (caddr_t) 0) - { - char key[2]; - if(copy_from_user(key, wrq->u.encoding.pointer, 2)) - { - ret = -EFAULT; - break; - } - ret = netwave_set_scramble(dev, NULL, &(wrq->u), key); - } - break; - case SIOCGIWMODE: - /* Mode of operation */ - ret = netwave_get_mode(dev, NULL, &(wrq->u), NULL); - break; -#else /* WIRELESS_EXT > 8 */ - case SIOCGIWENCODE: - /* Get scramble key */ - ret = netwave_get_scramble(dev, NULL, &(wrq->u), - (char *) &wrq->u.encoding.code); - break; - case SIOCSIWENCODE: - /* Set scramble key */ - ret = netwave_set_scramble(dev, NULL, &(wrq->u), - (char *) &wrq->u.encoding.code); - break; -#endif /* WIRELESS_EXT > 8 */ - case SIOCGIWRANGE: - /* Basic checking... */ - if(wrq->u.data.pointer != (caddr_t) 0) { - struct iw_range range; - ret = netwave_get_range(dev, NULL, &(wrq->u), (char *) &range); - if (copy_to_user(wrq->u.data.pointer, &range, - sizeof(struct iw_range))) - ret = -EFAULT; - } - break; - case SIOCGIWPRIV: - /* Basic checking... */ - if(wrq->u.data.pointer != (caddr_t) 0) { - /* Set the number of ioctl available */ - wrq->u.data.length = sizeof(netwave_private_args) / sizeof(netwave_private_args[0]); - - /* Copy structure to the user buffer */ - if(copy_to_user(wrq->u.data.pointer, - (u_char *) netwave_private_args, - sizeof(netwave_private_args))) - ret = -EFAULT; - } - break; - case SIOCGIPSNAP: - if(wrq->u.data.pointer != (caddr_t) 0) { - char buffer[sizeof( struct site_survey)]; - ret = netwave_get_snap(dev, NULL, &(wrq->u), buffer); - /* Copy structure to the user buffer */ - if(copy_to_user(wrq->u.data.pointer, - buffer, - sizeof( struct site_survey))) - { - printk(KERN_DEBUG "Bad buffer!\n"); - break; - } - } - break; -#endif /* WIRELESS_EXT <= 12 */ -#endif /* WIRELESS_EXT */ - default: - ret = -EOPNOTSUPP; - } - - return ret; -} /* * Function netwave_pcmcia_config (link) diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c index 8de49fe57233..d3d4ec9e242e 100644 --- a/drivers/net/wireless/orinoco.c +++ b/drivers/net/wireless/orinoco.c @@ -77,30 +77,16 @@ #define DRIVER_NAME "orinoco" #include - #include #include #include -#include -#include -#include -#include -#include #include -#include #include #include #include #include #include -#include - -#include -#include -#include - -#include "hermes.h" #include "hermes_rid.h" #include "orinoco.h" @@ -137,7 +123,7 @@ MODULE_PARM_DESC(force_monitor, "Allow monitor mode for all firmware versions"); /* We do this this way to avoid ifdefs in the actual code */ #ifdef WIRELESS_SPY -#define SPY_NUMBER(priv) (priv->spy_number) +#define SPY_NUMBER(priv) (priv->spy_data.spy_number) #else #define SPY_NUMBER(priv) 0 #endif /* WIRELESS_SPY */ @@ -216,31 +202,32 @@ static struct { /********************************************************************/ /* Used in Event handling. 
- * We avoid nested structres as they break on ARM -- Moustafa */ + * We avoid nested structures as they break on ARM -- Moustafa */ struct hermes_tx_descriptor_802_11 { /* hermes_tx_descriptor */ - u16 status; - u16 reserved1; - u16 reserved2; - u32 sw_support; + __le16 status; + __le16 reserved1; + __le16 reserved2; + __le32 sw_support; u8 retry_count; u8 tx_rate; - u16 tx_control; + __le16 tx_control; - /* ieee802_11_hdr */ - u16 frame_ctl; - u16 duration_id; + /* ieee80211_hdr */ + __le16 frame_ctl; + __le16 duration_id; u8 addr1[ETH_ALEN]; u8 addr2[ETH_ALEN]; u8 addr3[ETH_ALEN]; - u16 seq_ctl; + __le16 seq_ctl; u8 addr4[ETH_ALEN]; - u16 data_len; + + __le16 data_len; /* ethhdr */ - unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ - unsigned char h_source[ETH_ALEN]; /* source ether addr */ - unsigned short h_proto; /* packet type ID field */ + u8 h_dest[ETH_ALEN]; /* destination eth addr */ + u8 h_source[ETH_ALEN]; /* source ether addr */ + __be16 h_proto; /* packet type ID field */ /* p8022_hdr */ u8 dsap; @@ -248,31 +235,31 @@ struct hermes_tx_descriptor_802_11 { u8 ctrl; u8 oui[3]; - u16 ethertype; + __be16 ethertype; } __attribute__ ((packed)); /* Rx frame header except compatibility 802.3 header */ struct hermes_rx_descriptor { /* Control */ - u16 status; - u32 time; + __le16 status; + __le32 time; u8 silence; u8 signal; u8 rate; u8 rxflow; - u32 reserved; + __le32 reserved; /* 802.11 header */ - u16 frame_ctl; - u16 duration_id; + __le16 frame_ctl; + __le16 duration_id; u8 addr1[ETH_ALEN]; u8 addr2[ETH_ALEN]; u8 addr3[ETH_ALEN]; - u16 seq_ctl; + __le16 seq_ctl; u8 addr4[ETH_ALEN]; /* Data length */ - u16 data_len; + __le16 data_len; } __attribute__ ((packed)); /********************************************************************/ @@ -396,14 +383,14 @@ static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev) /* If a spy address is defined, we report stats of the * first spy address - Jean II */ if (SPY_NUMBER(priv)) { - wstats->qual.qual = priv->spy_stat[0].qual; - wstats->qual.level = priv->spy_stat[0].level; - wstats->qual.noise = priv->spy_stat[0].noise; - wstats->qual.updated = priv->spy_stat[0].updated; + wstats->qual.qual = priv->spy_data.spy_stat[0].qual; + wstats->qual.level = priv->spy_data.spy_stat[0].level; + wstats->qual.noise = priv->spy_data.spy_stat[0].noise; + wstats->qual.updated = priv->spy_data.spy_stat[0].updated; } } else { struct { - u16 qual, signal, noise; + __le16 qual, signal, noise; } __attribute__ ((packed)) cq; err = HERMES_READ_RECORD(hw, USER_BAP, @@ -503,9 +490,12 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } - /* Length of the packet body */ - /* FIXME: what if the skb is smaller than this? 
*/ - len = max_t(int,skb->len - ETH_HLEN, ETH_ZLEN - ETH_HLEN); + /* Check packet length, pad short packets, round up odd length */ + len = max_t(int, ALIGN(skb->len, 2), ETH_ZLEN); + skb = skb_padto(skb, len); + if (skb == NULL) + goto fail; + len -= ETH_HLEN; eh = (struct ethhdr *)skb->data; @@ -557,8 +547,7 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev) p = skb->data; } - /* Round up for odd length packets */ - err = hermes_bap_pwrite(hw, USER_BAP, p, ALIGN(data_len, 2), + err = hermes_bap_pwrite(hw, USER_BAP, p, data_len, txfid, data_off); if (err) { printk(KERN_ERR "%s: Error %d writing packet to BAP\n", @@ -574,8 +563,9 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev) txfid, NULL); if (err) { netif_start_queue(dev); - printk(KERN_ERR "%s: Error %d transmitting packet\n", - dev->name, err); + if (net_ratelimit()) + printk(KERN_ERR "%s: Error %d transmitting packet\n", + dev->name, err); stats->tx_errors++; goto fail; } @@ -629,16 +619,17 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw) struct orinoco_private *priv = netdev_priv(dev); struct net_device_stats *stats = &priv->stats; u16 fid = hermes_read_regn(hw, TXCOMPLFID); + u16 status; struct hermes_tx_descriptor_802_11 hdr; int err = 0; if (fid == DUMMY_FID) return; /* Nothing's really happened */ - /* Read the frame header */ + /* Read part of the frame header - we need status and addr1 */ err = hermes_bap_pread(hw, IRQ_BAP, &hdr, - sizeof(struct hermes_tx_descriptor) + - sizeof(struct ieee80211_hdr), + offsetof(struct hermes_tx_descriptor_802_11, + addr2), fid, 0); hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID); @@ -658,8 +649,8 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw) * exceeded, because that's the only status that really mean * that this particular node went away. * Other errors means that *we* screwed up. - Jean II */ - hdr.status = le16_to_cpu(hdr.status); - if (hdr.status & (HERMES_TXSTAT_RETRYERR | HERMES_TXSTAT_AGEDERR)) { + status = le16_to_cpu(hdr.status); + if (status & (HERMES_TXSTAT_RETRYERR | HERMES_TXSTAT_AGEDERR)) { union iwreq_data wrqu; /* Copy 802.11 dest address. @@ -718,18 +709,13 @@ static inline int is_ethersnap(void *_hdr) static inline void orinoco_spy_gather(struct net_device *dev, u_char *mac, int level, int noise) { - struct orinoco_private *priv = netdev_priv(dev); - int i; - - /* Gather wireless spy statistics: for each packet, compare the - * source address with out list, and if match, get the stats... */ - for (i = 0; i < priv->spy_number; i++) - if (!memcmp(mac, priv->spy_address[i], ETH_ALEN)) { - priv->spy_stat[i].level = level - 0x95; - priv->spy_stat[i].noise = noise - 0x95; - priv->spy_stat[i].qual = (level > noise) ? (level - noise) : 0; - priv->spy_stat[i].updated = 7; - } + struct iw_quality wstats; + wstats.level = level - 0x95; + wstats.noise = noise - 0x95; + wstats.qual = (level > noise) ? 
(level - noise) : 0; + wstats.updated = 7; + /* Update spy records */ + wireless_spy_update(dev, mac, &wstats); } static void orinoco_stat_gather(struct net_device *dev, @@ -1050,7 +1036,7 @@ static void orinoco_join_ap(struct net_device *dev) unsigned long flags; struct join_req { u8 bssid[ETH_ALEN]; - u16 channel; + __le16 channel; } __attribute__ ((packed)) req; const int atom_len = offsetof(struct prism2_scan_apinfo, atim); struct prism2_scan_apinfo *atom = NULL; @@ -1065,7 +1051,7 @@ static void orinoco_join_ap(struct net_device *dev) return; if (orinoco_lock(priv, &flags) != 0) - goto out; + goto fail_lock; /* Sanity checks in case user changed something in the meantime */ if (! priv->bssid_fixed) @@ -1110,8 +1096,10 @@ static void orinoco_join_ap(struct net_device *dev) printk(KERN_ERR "%s: Error issuing join request\n", dev->name); out: - kfree(buf); orinoco_unlock(priv, &flags); + + fail_lock: + kfree(buf); } /* Send new BSSID to userspace */ @@ -1129,12 +1117,14 @@ static void orinoco_send_wevents(struct net_device *dev) err = hermes_read_ltv(hw, IRQ_BAP, HERMES_RID_CURRENTBSSID, ETH_ALEN, NULL, wrqu.ap_addr.sa_data); if (err != 0) - return; + goto out; wrqu.ap_addr.sa_family = ARPHRD_ETHER; /* Send event to user space */ wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); + + out: orinoco_unlock(priv, &flags); } @@ -1143,8 +1133,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw) struct orinoco_private *priv = netdev_priv(dev); u16 infofid; struct { - u16 len; - u16 type; + __le16 len; + __le16 type; } __attribute__ ((packed)) info; int len, type; int err; @@ -2458,8 +2448,11 @@ struct net_device *alloc_orinocodev(int sizeof_card, dev->watchdog_timeo = HZ; /* 1 second timeout */ dev->get_stats = orinoco_get_stats; dev->ethtool_ops = &orinoco_ethtool_ops; - dev->get_wireless_stats = orinoco_get_wireless_stats; dev->wireless_handlers = (struct iw_handler_def *)&orinoco_handler_def; +#ifdef WIRELESS_SPY + priv->wireless_data.spy_data = &priv->spy_data; + dev->wireless_data = &priv->wireless_data; +#endif dev->change_mtu = orinoco_change_mtu; dev->set_multicast_list = orinoco_set_multicast_list; /* we use the default eth_mac_addr for setting the MAC addr */ @@ -2831,7 +2824,7 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev, } } - if ((priv->iw_mode == IW_MODE_ADHOC) && (priv->spy_number == 0)){ + if ((priv->iw_mode == IW_MODE_ADHOC) && (!SPY_NUMBER(priv))){ /* Quality stats meaningless in ad-hoc mode */ } else { range->max_qual.qual = 0x8b - 0x2f; @@ -2878,6 +2871,14 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev, range->min_r_time = 0; range->max_r_time = 65535 * 1000; /* ??? 
*/ + /* Event capability (kernel) */ + IW_EVENT_CAPA_SET_KERNEL(range->event_capa); + /* Event capability (driver) */ + IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWTHRSPY); + IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP); + IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN); + IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP); + TRACE_EXIT(dev->name); return 0; @@ -3837,92 +3838,6 @@ static int orinoco_ioctl_getrid(struct net_device *dev, return err; } -/* Spy is used for link quality/strength measurements in Ad-Hoc mode - * Jean II */ -static int orinoco_ioctl_setspy(struct net_device *dev, - struct iw_request_info *info, - struct iw_point *srq, - char *extra) - -{ - struct orinoco_private *priv = netdev_priv(dev); - struct sockaddr *address = (struct sockaddr *) extra; - int number = srq->length; - int i; - unsigned long flags; - - /* Make sure nobody mess with the structure while we do */ - if (orinoco_lock(priv, &flags) != 0) - return -EBUSY; - - /* orinoco_lock() doesn't disable interrupts, so make sure the - * interrupt rx path don't get confused while we copy */ - priv->spy_number = 0; - - if (number > 0) { - /* Extract the addresses */ - for (i = 0; i < number; i++) - memcpy(priv->spy_address[i], address[i].sa_data, - ETH_ALEN); - /* Reset stats */ - memset(priv->spy_stat, 0, - sizeof(struct iw_quality) * IW_MAX_SPY); - /* Set number of addresses */ - priv->spy_number = number; - } - - /* Now, let the others play */ - orinoco_unlock(priv, &flags); - - /* Do NOT call commit handler */ - return 0; -} - -static int orinoco_ioctl_getspy(struct net_device *dev, - struct iw_request_info *info, - struct iw_point *srq, - char *extra) -{ - struct orinoco_private *priv = netdev_priv(dev); - struct sockaddr *address = (struct sockaddr *) extra; - int number; - int i; - unsigned long flags; - - if (orinoco_lock(priv, &flags) != 0) - return -EBUSY; - - number = priv->spy_number; - /* Create address struct */ - for (i = 0; i < number; i++) { - memcpy(address[i].sa_data, priv->spy_address[i], ETH_ALEN); - address[i].sa_family = AF_UNIX; - } - if (number > 0) { - /* Create address struct */ - for (i = 0; i < number; i++) { - memcpy(address[i].sa_data, priv->spy_address[i], - ETH_ALEN); - address[i].sa_family = AF_UNIX; - } - /* Copy stats */ - /* In theory, we should disable irqs while copying the stats - * because the rx path might update it in the middle... - * Bah, who care ? - Jean II */ - memcpy(extra + (sizeof(struct sockaddr) * number), - priv->spy_stat, sizeof(struct iw_quality) * number); - } - /* Reset updated flags. 
*/ - for (i = 0; i < number; i++) - priv->spy_stat[i].updated = 0; - - orinoco_unlock(priv, &flags); - - srq->length = number; - - return 0; -} - /* Trigger a scan (look for other cells in the vicinity */ static int orinoco_ioctl_setscan(struct net_device *dev, struct iw_request_info *info, @@ -3995,7 +3910,7 @@ static int orinoco_ioctl_setscan(struct net_device *dev, HERMES_HOSTSCAN_SYMBOL_BCAST); break; case FIRMWARE_TYPE_INTERSIL: { - u16 req[3]; + __le16 req[3]; req[0] = cpu_to_le16(0x3fff); /* All channels */ req[1] = cpu_to_le16(0x0001); /* rate 1 Mbps */ @@ -4069,7 +3984,7 @@ static inline int orinoco_translate_scan(struct net_device *dev, case FIRMWARE_TYPE_INTERSIL: offset = 4; if (priv->has_hostscan) { - atom_len = le16_to_cpup((u16 *)scan); + atom_len = le16_to_cpup((__le16 *)scan); /* Sanity check for atom_len */ if (atom_len < sizeof(struct prism2_scan_apinfo)) { printk(KERN_ERR "%s: Invalid atom_len in scan data: %d\n", @@ -4353,8 +4268,10 @@ static const iw_handler orinoco_handler[] = { [SIOCSIWSENS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setsens, [SIOCGIWSENS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getsens, [SIOCGIWRANGE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getiwrange, - [SIOCSIWSPY -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setspy, - [SIOCGIWSPY -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getspy, + [SIOCSIWSPY -SIOCIWFIRST] = (iw_handler) iw_handler_set_spy, + [SIOCGIWSPY -SIOCIWFIRST] = (iw_handler) iw_handler_get_spy, + [SIOCSIWTHRSPY-SIOCIWFIRST] = (iw_handler) iw_handler_set_thrspy, + [SIOCGIWTHRSPY-SIOCIWFIRST] = (iw_handler) iw_handler_get_thrspy, [SIOCSIWAP -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setwap, [SIOCGIWAP -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getwap, [SIOCSIWSCAN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setscan, @@ -4399,6 +4316,7 @@ static const struct iw_handler_def orinoco_handler_def = { .standard = orinoco_handler, .private = orinoco_private_handler, .private_args = orinoco_privtab, + .get_wireless_stats = orinoco_get_wireless_stats, }; static void orinoco_get_drvinfo(struct net_device *dev, diff --git a/drivers/net/wireless/orinoco.h b/drivers/net/wireless/orinoco.h index 2f213a7103fe..7a17bb31fc89 100644 --- a/drivers/net/wireless/orinoco.h +++ b/drivers/net/wireless/orinoco.h @@ -7,12 +7,11 @@ #ifndef _ORINOCO_H #define _ORINOCO_H -#define DRIVER_VERSION "0.15rc2" +#define DRIVER_VERSION "0.15rc3" -#include -#include #include #include +#include #include #include "hermes.h" @@ -28,7 +27,7 @@ #define ORINOCO_MAX_KEYS 4 struct orinoco_key { - u16 len; /* always stored as little-endian */ + __le16 len; /* always stored as little-endian */ char data[ORINOCO_MAX_KEY_SIZE]; } __attribute__ ((packed)); @@ -36,14 +35,14 @@ struct header_struct { /* 802.3 */ u8 dest[ETH_ALEN]; u8 src[ETH_ALEN]; - u16 len; + __be16 len; /* 802.2 */ u8 dsap; u8 ssap; u8 ctrl; /* SNAP */ u8 oui[3]; - u16 ethertype; + unsigned short ethertype; } __attribute__ ((packed)); typedef enum { @@ -112,9 +111,8 @@ struct orinoco_private { u16 pm_on, pm_mcast, pm_period, pm_timeout; u16 preamble; #ifdef WIRELESS_SPY - int spy_number; - u_char spy_address[IW_MAX_SPY][ETH_ALEN]; - struct iw_quality spy_stat[IW_MAX_SPY]; + struct iw_spy_data spy_data; /* iwspy support */ + struct iw_public_data wireless_data; #endif /* Configuration dependent variables */ diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c index bedd7f9f23e4..dc1128a00971 100644 --- a/drivers/net/wireless/orinoco_cs.c +++ b/drivers/net/wireless/orinoco_cs.c @@ -14,33 +14,16 
@@ #define PFX DRIVER_NAME ": " #include -#ifdef __IN_PCMCIA_PACKAGE__ -#include -#endif /* __IN_PCMCIA_PACKAGE__ */ - #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include - +#include #include #include #include #include #include -#include -#include -#include - #include "orinoco.h" /********************************************************************/ @@ -97,17 +80,8 @@ static dev_link_t *dev_list; /* = NULL */ /* Function prototypes */ /********************************************************************/ -/* device methods */ -static int orinoco_cs_hard_reset(struct orinoco_private *priv); - -/* PCMCIA gumpf */ -static void orinoco_cs_config(dev_link_t * link); -static void orinoco_cs_release(dev_link_t * link); -static int orinoco_cs_event(event_t event, int priority, - event_callback_args_t * args); - -static dev_link_t *orinoco_cs_attach(void); -static void orinoco_cs_detach(dev_link_t *); +static void orinoco_cs_release(dev_link_t *link); +static void orinoco_cs_detach(dev_link_t *link); /********************************************************************/ /* Device methods */ @@ -603,49 +577,85 @@ static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION "Pavel Roskin , et al)"; static struct pcmcia_device_id orinoco_cs_ids[] = { - PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300), - PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), - PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002), - PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a), - PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), - PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0001), - PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0305), - PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), - PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002), - PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), - PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), - PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), - PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), - PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), - PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), - PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002), - PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), - PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), + PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), /* SonicWALL Long Range Wireless Card */ + PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300), /* Sohoware NCP110, Philips 802.11b */ + PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0002), /* AnyPoint(TM) Wireless II PC Card */ + PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), /* 3Com AirConnect PCI 777A */ + PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), /* PROXIM RangeLAN-DS/LAN PC CARD */ + PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), /* Compaq WL100 11 Mbps Wireless Adapter */ + PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002), /* Lucent Orinoco and old Intersil */ + PCMCIA_DEVICE_MANF_CARD(0x016b, 0x0001), /* Ericsson WLAN Card C11 */ + PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a), /* Nortel Networks eMobility 802.11 Wireless Adapter */ + PCMCIA_DEVICE_MANF_CARD(0x01ff, 0x0008), /* Intermec MobileLAN 11Mbps 802.11b WLAN Card */ + PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), /* Samsung SWL2000-N 11Mb/s WLAN Card */ + PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), /* AirWay 802.11 Adapter (PCMCIA) */ + PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0001), /* ARtem Onair */ + PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0305), /* Buffalo WLI-PCM-S11 */ + PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), /* Linksys WPC11 Version 2.5 */ + PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), /* Linksys WPC11 Version 3 */ + PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002), /* Compaq HNW-100 11 Mbps Wireless Adapter */ + PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card 
*/ + PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), /* ASUS SpaceLink WL-100 */ + PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), /* SpeedStream SS1021 Wireless Adapter */ + PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), /* PLANEX RoadLannerWave GW-NS11H */ + PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* Airvast WN-100 */ + PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), /* Adaptec Ultra Wireless ANW-8030 */ + PCMCIA_DEVICE_MANF_CARD(0xc001, 0x0008), /* CONTEC FLEXSCAN/FX-DDS110-PCC */ + PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002), /* Conceptronic CON11Cpro, EMTAC A2424i */ + PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), /* Safeway 802.11b, ZCOMAX AirRunner/XI-300 */ + PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), /* D-Link DCF660, Sandisk Connect SDWCFB-000 */ + PCMCIA_DEVICE_PROD_ID12(" ", "IEEE 802.11 Wireless LAN/PC Card", 0x3b6e20c8, 0xefccafe9), PCMCIA_DEVICE_PROD_ID12("3Com", "3CRWE737A AirConnect Wireless LAN PC Card", 0x41240e5b, 0x56010af3), - PCMCIA_DEVICE_PROD_ID123("Instant Wireless ", " Network PC CARD", "Version 01.02", 0x11d901af, 0x6e9bd926, 0x4b74baa0), PCMCIA_DEVICE_PROD_ID12("ACTIONTEC", "PRISM Wireless LAN PC Card", 0x393089da, 0xa71e69d5), + PCMCIA_DEVICE_PROD_ID12("Addtron", "AWP-100 Wireless PCMCIA", 0xe6ec52ce, 0x08649af2), + PCMCIA_DEVICE_PROD_ID123("AIRVAST", "IEEE 802.11b Wireless PCMCIA Card", "HFA3863", 0xea569531, 0x4bcb9645, 0x355cb092), + PCMCIA_DEVICE_PROD_ID12("Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio", 0x5cd01705, 0x4271660f), + PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11b_PC_CARD_25", 0x78fc06ee, 0xdb9aa842), + PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11B_CF_CARD_25", 0x78fc06ee, 0x45a50c1e), PCMCIA_DEVICE_PROD_ID12("Avaya Communication", "Avaya Wireless PC Card", 0xd8a43b78, 0x0d341169), + PCMCIA_DEVICE_PROD_ID12("BENQ", "AWL100 PCMCIA ADAPTER", 0x35dadc74, 0x01f7fedb), PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-PCM-L11G", 0x2decece3, 0xf57ca4b3), + PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-CF-S11G", 0x2decece3, 0x82067c18), PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90), + PCMCIA_DEVICE_PROD_ID12("Compaq", "WL200_11Mbps_Wireless_PCI_Card", 0x54f7c49c, 0x15a75e5b), + PCMCIA_DEVICE_PROD_ID123("corega", "WL PCCL-11", "ISL37300P", 0x0a21501a, 0x59868926, 0xc9049a39), PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCC-11", 0x5261440f, 0xa6405584), PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCCA-11", 0x5261440f, 0xdf6115f9), PCMCIA_DEVICE_PROD_ID12("corega_K.K.", "Wireless_LAN_PCCB-11", 0x29e33311, 0xee7a27ae), PCMCIA_DEVICE_PROD_ID12("D", "Link DRC-650 11Mbps WLAN Card", 0x71b18589, 0xf144e3ac), PCMCIA_DEVICE_PROD_ID12("D", "Link DWL-650 11Mbps WLAN Card", 0x71b18589, 0xb6f1b0ab), + PCMCIA_DEVICE_PROD_ID12("D-Link Corporation", "D-Link DWL-650H 11Mbps WLAN Adapter", 0xef544d24, 0xcd8ea916), + PCMCIA_DEVICE_PROD_ID12("Digital Data Communications", "WPC-0100", 0xfdd73470, 0xe0b6f146), PCMCIA_DEVICE_PROD_ID12("ELSA", "AirLancer MC-11", 0x4507a33a, 0xef54f0e3), PCMCIA_DEVICE_PROD_ID12("HyperLink", "Wireless PC Card 11Mbps", 0x56cc3f1a, 0x0bcf220c), + PCMCIA_DEVICE_PROD_ID123("Instant Wireless ", " Network PC CARD", "Version 01.02", 0x11d901af, 0x6e9bd926, 0x4b74baa0), + PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless 2011 LAN PC Card", 0x816cc815, 0x07f58077), PCMCIA_DEVICE_PROD_ID12("INTERSIL", "HFA384x/IEEE", 0x74c5e40d, 0xdb472a18), + PCMCIA_DEVICE_PROD_ID12("INTERSIL", "I-GATE 11M PC Card / PC Card plus", 0x74c5e40d, 0x8304ff77), + PCMCIA_DEVICE_PROD_ID12("Intersil", "PRISM 2_5 PCMCIA ADAPTER", 0x4b801a17, 0x6345a0bf), + 
PCMCIA_DEVICE_PROD_ID123("Intersil", "PRISM Freedom PCMCIA Adapter", "ISL37100P", 0x4b801a17, 0xf222ec2d, 0x630d52b2), + PCMCIA_DEVICE_PROD_ID12("LeArtery", "SYNCBYAIR 11Mbps Wireless LAN PC Card", 0x7e3b326a, 0x49893e92), + PCMCIA_DEVICE_PROD_ID12("Linksys", "Wireless CompactFlash Card", 0x0733cc81, 0x0c52f395), PCMCIA_DEVICE_PROD_ID12("Lucent Technologies", "WaveLAN/IEEE", 0x23eb9949, 0xc562e72a), PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11", 0x481e0094, 0x7360e410), PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11G", 0x481e0094, 0xf57ca4b3), PCMCIA_DEVICE_PROD_ID12("Microsoft", "Wireless Notebook Adapter MN-520", 0x5961bf85, 0x6eec8c01), PCMCIA_DEVICE_PROD_ID12("NCR", "WaveLAN/IEEE", 0x24358cd4, 0xc562e72a), + PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401 Wireless PC", "Card", 0xa37434e9, 0x9762e8f1), PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401RA Wireless PC", "Card", 0x0306467f, 0x9762e8f1), + PCMCIA_DEVICE_PROD_ID12("Nortel Networks", "emobility 802.11 Wireless LAN PC Card", 0x2d617ea0, 0x88cd5767), + PCMCIA_DEVICE_PROD_ID12("OEM", "PRISM2 IEEE 802.11 PC-Card", 0xfea54c90, 0x48f2bdd6), + PCMCIA_DEVICE_PROD_ID12("OTC", "Wireless AirEZY 2411-PCC WLAN Card", 0x4ac44287, 0x235a6bed), + PCMCIA_DEVICE_PROD_ID123("PCMCIA", "11M WLAN Card v2.5", "ISL37300P", 0x281f1c5d, 0x6e440487, 0xc9049a39), PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-CF110", 0x209f40ab, 0xd9715264), + PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-NS110", 0x209f40ab, 0x46263178), PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9), PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26), PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b), - PCMCIA_DEVICE_PROD_ID1("Symbol Technologies", 0x3f02b4d6), + PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2632W", 0xc4f8b18b, 0x474a1f2a), + PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e), + PCMCIA_DEVICE_PROD_ID123("The Linksys Group, Inc.", "Instant Wireless Network PC Card", "ISL37300P", 0xa5f472c2, 0x590eb502, 0xc9049a39), + PCMCIA_DEVICE_PROD_ID12("ZoomAir 11Mbps High", "Rate wireless Networking", 0x273fe3db, 0x32a1eaee), PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, orinoco_cs_ids); @@ -656,8 +666,8 @@ static struct pcmcia_driver orinoco_driver = { .name = DRIVER_NAME, }, .attach = orinoco_cs_attach, - .event = orinoco_cs_event, .detach = orinoco_cs_detach, + .event = orinoco_cs_event, .id_table = orinoco_cs_ids, }; diff --git a/drivers/net/wireless/orinoco_nortel.c b/drivers/net/wireless/orinoco_nortel.c index 86fa58e5cfac..d8afd51ff8a5 100644 --- a/drivers/net/wireless/orinoco_nortel.c +++ b/drivers/net/wireless/orinoco_nortel.c @@ -40,29 +40,13 @@ #define PFX DRIVER_NAME ": " #include - #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include #include -#include - #include -#include "hermes.h" #include "orinoco.h" #define COR_OFFSET (0xe0) /* COR attribute offset of Prism2 PC card */ @@ -108,7 +92,7 @@ static int nortel_pci_cor_reset(struct orinoco_private *priv) return 0; } -int nortel_pci_hw_init(struct nortel_pci_card *card) +static int nortel_pci_hw_init(struct nortel_pci_card *card) { int i; u32 reg; diff --git a/drivers/net/wireless/orinoco_pci.c b/drivers/net/wireless/orinoco_pci.c index 42e03438291b..5362c214fc8e 100644 --- a/drivers/net/wireless/orinoco_pci.c +++ b/drivers/net/wireless/orinoco_pci.c @@ -93,28 +93,12 
@@ #define PFX DRIVER_NAME ": " #include - #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include #include -#include -#include -#include -#include - -#include "hermes.h" #include "orinoco.h" /* All the magic there is from wlan-ng */ diff --git a/drivers/net/wireless/orinoco_plx.c b/drivers/net/wireless/orinoco_plx.c index 7ab05b89fb3f..210e73776545 100644 --- a/drivers/net/wireless/orinoco_plx.c +++ b/drivers/net/wireless/orinoco_plx.c @@ -117,29 +117,13 @@ #define PFX DRIVER_NAME ": " #include - #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include #include -#include - #include -#include "hermes.h" #include "orinoco.h" #define COR_OFFSET (0x3e0) /* COR attribute offset of Prism2 PC card */ diff --git a/drivers/net/wireless/orinoco_tmd.c b/drivers/net/wireless/orinoco_tmd.c index 85893f42445b..5e68b7026186 100644 --- a/drivers/net/wireless/orinoco_tmd.c +++ b/drivers/net/wireless/orinoco_tmd.c @@ -53,29 +53,13 @@ #define PFX DRIVER_NAME ": " #include - #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include #include -#include - #include -#include "hermes.h" #include "orinoco.h" #define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */ diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c index 9a8790e3580c..5c1a1adf1ff8 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.c +++ b/drivers/net/wireless/prism54/isl_ioctl.c @@ -462,14 +462,12 @@ prism54_get_range(struct net_device *ndev, struct iw_request_info *info, /* txpower is supported in dBm's */ range->txpower_capa = IW_TXPOW_DBM; -#if WIRELESS_EXT > 16 /* Event capability (kernel + driver) */ range->event_capa[0] = (IW_EVENT_CAPA_K_0 | IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) | IW_EVENT_CAPA_MASK(SIOCGIWAP)); range->event_capa[1] = IW_EVENT_CAPA_K_1; range->event_capa[4] = IW_EVENT_CAPA_MASK(IWEVCUSTOM); -#endif /* WIRELESS_EXT > 16 */ if (islpci_get_state(priv) < PRV_STATE_INIT) return 0; @@ -693,14 +691,13 @@ prism54_get_scan(struct net_device *ndev, struct iw_request_info *info, extra + dwrq->length, &(bsslist->bsslist[i]), noise); -#if WIRELESS_EXT > 16 + /* Check if there is space for one more entry */ if((extra + dwrq->length - current_ev) <= IW_EV_ADDR_LEN) { /* Ask user space to try again with a bigger buffer */ rvalue = -E2BIG; break; } -#endif /* WIRELESS_EXT > 16 */ } kfree(bsslist); @@ -2727,12 +2724,7 @@ const struct iw_handler_def prism54_handler_def = { .standard = (iw_handler *) prism54_handler, .private = (iw_handler *) prism54_private_handler, .private_args = (struct iw_priv_args *) prism54_private_args, -#if WIRELESS_EXT > 16 .get_wireless_stats = prism54_get_wireless_stats, -#endif /* WIRELESS_EXT > 16 */ -#if WIRELESS_EXT == 16 - .spy_offset = offsetof(islpci_private, spy_data), -#endif /* WIRELESS_EXT == 16 */ }; /* For wpa_supplicant */ diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c index 6f13d4a8e2d3..6c9584a9f284 100644 --- a/drivers/net/wireless/prism54/islpci_dev.c +++ b/drivers/net/wireless/prism54/islpci_dev.c @@ -439,8 +439,7 @@ prism54_bring_down(islpci_private *priv) wmb(); /* wait a while for the device to reset */ - set_current_state(TASK_UNINTERRUPTIBLE); - 
schedule_timeout(50*HZ/1000); + schedule_timeout_uninterruptible(msecs_to_jiffies(50)); return 0; } @@ -491,8 +490,7 @@ islpci_reset_if(islpci_private *priv) /* The software reset acknowledge needs about 220 msec here. * Be conservative and wait for up to one second. */ - set_current_state(TASK_UNINTERRUPTIBLE); - remaining = schedule_timeout(HZ); + remaining = schedule_timeout_uninterruptible(HZ); if(remaining > 0) { result = 0; @@ -839,13 +837,9 @@ islpci_setup(struct pci_dev *pdev) priv->ndev->type = (priv->iw_mode == IW_MODE_MONITOR) ? priv->monitor_type : ARPHRD_ETHER; -#if WIRELESS_EXT > 16 /* Add pointers to enable iwspy support. */ priv->wireless_data.spy_data = &priv->spy_data; ndev->wireless_data = &priv->wireless_data; -#else /* WIRELESS_EXT > 16 */ - ndev->get_wireless_stats = &prism54_get_wireless_stats; -#endif /* WIRELESS_EXT > 16 */ /* save the start and end address of the PCI memory area */ ndev->mem_start = (unsigned long) priv->device_base; diff --git a/drivers/net/wireless/prism54/islpci_dev.h b/drivers/net/wireless/prism54/islpci_dev.h index 32a1019f1b36..efbed4397951 100644 --- a/drivers/net/wireless/prism54/islpci_dev.h +++ b/drivers/net/wireless/prism54/islpci_dev.h @@ -100,9 +100,7 @@ typedef struct { struct iw_spy_data spy_data; /* iwspy support */ -#if WIRELESS_EXT > 16 struct iw_public_data wireless_data; -#endif /* WIRELESS_EXT > 16 */ int monitor_type; /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_PRISM */ diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c index b6f2e5a223be..4937a5ad4b2c 100644 --- a/drivers/net/wireless/prism54/islpci_mgt.c +++ b/drivers/net/wireless/prism54/islpci_mgt.c @@ -455,7 +455,7 @@ islpci_mgt_transaction(struct net_device *ndev, struct islpci_mgmtframe **recvframe) { islpci_private *priv = netdev_priv(ndev); - const long wait_cycle_jiffies = (ISL38XX_WAIT_CYCLE * 10 * HZ) / 1000; + const long wait_cycle_jiffies = msecs_to_jiffies(ISL38XX_WAIT_CYCLE * 10); long timeout_left = ISL38XX_MAX_WAIT_CYCLES * wait_cycle_jiffies; int err; DEFINE_WAIT(wait); @@ -475,8 +475,7 @@ islpci_mgt_transaction(struct net_device *ndev, int timeleft; struct islpci_mgmtframe *frame; - set_current_state(TASK_UNINTERRUPTIBLE); - timeleft = schedule_timeout(wait_cycle_jiffies); + timeleft = schedule_timeout_uninterruptible(wait_cycle_jiffies); frame = xchg(&priv->mgmt_received, NULL); if (frame) { if (frame->header->oid == oid) { diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index e9c5ea0f5535..70fd6fd8feb9 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -1649,28 +1649,28 @@ static iw_stats * ray_get_wireless_stats(struct net_device * dev) */ static const iw_handler ray_handler[] = { - [SIOCSIWCOMMIT-SIOCIWFIRST] (iw_handler) ray_commit, - [SIOCGIWNAME -SIOCIWFIRST] (iw_handler) ray_get_name, - [SIOCSIWFREQ -SIOCIWFIRST] (iw_handler) ray_set_freq, - [SIOCGIWFREQ -SIOCIWFIRST] (iw_handler) ray_get_freq, - [SIOCSIWMODE -SIOCIWFIRST] (iw_handler) ray_set_mode, - [SIOCGIWMODE -SIOCIWFIRST] (iw_handler) ray_get_mode, - [SIOCGIWRANGE -SIOCIWFIRST] (iw_handler) ray_get_range, + [SIOCSIWCOMMIT-SIOCIWFIRST] = (iw_handler) ray_commit, + [SIOCGIWNAME -SIOCIWFIRST] = (iw_handler) ray_get_name, + [SIOCSIWFREQ -SIOCIWFIRST] = (iw_handler) ray_set_freq, + [SIOCGIWFREQ -SIOCIWFIRST] = (iw_handler) ray_get_freq, + [SIOCSIWMODE -SIOCIWFIRST] = (iw_handler) ray_set_mode, + [SIOCGIWMODE -SIOCIWFIRST] = (iw_handler) ray_get_mode, + [SIOCGIWRANGE -SIOCIWFIRST] = (iw_handler) 
ray_get_range, #ifdef WIRELESS_SPY - [SIOCSIWSPY -SIOCIWFIRST] (iw_handler) iw_handler_set_spy, - [SIOCGIWSPY -SIOCIWFIRST] (iw_handler) iw_handler_get_spy, - [SIOCSIWTHRSPY-SIOCIWFIRST] (iw_handler) iw_handler_set_thrspy, - [SIOCGIWTHRSPY-SIOCIWFIRST] (iw_handler) iw_handler_get_thrspy, + [SIOCSIWSPY -SIOCIWFIRST] = (iw_handler) iw_handler_set_spy, + [SIOCGIWSPY -SIOCIWFIRST] = (iw_handler) iw_handler_get_spy, + [SIOCSIWTHRSPY-SIOCIWFIRST] = (iw_handler) iw_handler_set_thrspy, + [SIOCGIWTHRSPY-SIOCIWFIRST] = (iw_handler) iw_handler_get_thrspy, #endif /* WIRELESS_SPY */ - [SIOCGIWAP -SIOCIWFIRST] (iw_handler) ray_get_wap, - [SIOCSIWESSID -SIOCIWFIRST] (iw_handler) ray_set_essid, - [SIOCGIWESSID -SIOCIWFIRST] (iw_handler) ray_get_essid, - [SIOCSIWRATE -SIOCIWFIRST] (iw_handler) ray_set_rate, - [SIOCGIWRATE -SIOCIWFIRST] (iw_handler) ray_get_rate, - [SIOCSIWRTS -SIOCIWFIRST] (iw_handler) ray_set_rts, - [SIOCGIWRTS -SIOCIWFIRST] (iw_handler) ray_get_rts, - [SIOCSIWFRAG -SIOCIWFIRST] (iw_handler) ray_set_frag, - [SIOCGIWFRAG -SIOCIWFIRST] (iw_handler) ray_get_frag, + [SIOCGIWAP -SIOCIWFIRST] = (iw_handler) ray_get_wap, + [SIOCSIWESSID -SIOCIWFIRST] = (iw_handler) ray_set_essid, + [SIOCGIWESSID -SIOCIWFIRST] = (iw_handler) ray_get_essid, + [SIOCSIWRATE -SIOCIWFIRST] = (iw_handler) ray_set_rate, + [SIOCGIWRATE -SIOCIWFIRST] = (iw_handler) ray_get_rate, + [SIOCSIWRTS -SIOCIWFIRST] = (iw_handler) ray_set_rts, + [SIOCGIWRTS -SIOCIWFIRST] = (iw_handler) ray_get_rts, + [SIOCSIWFRAG -SIOCIWFIRST] = (iw_handler) ray_set_frag, + [SIOCGIWFRAG -SIOCIWFIRST] = (iw_handler) ray_get_frag, }; #define SIOCSIPFRAMING SIOCIWFIRSTPRIV /* Set framing mode */ @@ -1678,9 +1678,9 @@ static const iw_handler ray_handler[] = { #define SIOCGIPCOUNTRY SIOCIWFIRSTPRIV + 3 /* Get country code */ static const iw_handler ray_private_handler[] = { - [0] (iw_handler) ray_set_framing, - [1] (iw_handler) ray_get_framing, - [3] (iw_handler) ray_get_country, + [0] = (iw_handler) ray_set_framing, + [1] = (iw_handler) ray_get_framing, + [3] = (iw_handler) ray_get_country, }; static const struct iw_priv_args ray_private_args[] = { diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c index 39c6cdf7f3f7..b1bbc8e8e91f 100644 --- a/drivers/net/wireless/spectrum_cs.c +++ b/drivers/net/wireless/spectrum_cs.c @@ -22,58 +22,23 @@ #define PFX DRIVER_NAME ": " #include -#ifdef __IN_PCMCIA_PACKAGE__ -#include -#endif /* __IN_PCMCIA_PACKAGE__ */ - #include #include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include - +#include +#include #include #include #include #include #include -#include -#include -#include - #include "orinoco.h" -/* - * If SPECTRUM_FW_INCLUDED is defined, the firmware is hardcoded into - * the driver. Use get_symbol_fw script to generate spectrum_fw.h and - * copy it to the same directory as spectrum_cs.c. - * - * If SPECTRUM_FW_INCLUDED is not defined, the firmware is loaded at the - * runtime using hotplug. Use the same get_symbol_fw script to generate - * files symbol_sp24t_prim_fw symbol_sp24t_sec_fw, copy them to the - * hotplug firmware directory (typically /usr/lib/hotplug/firmware) and - * make sure that you have hotplug installed and enabled in the kernel. 
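For reference, the runtime path that replaces the compiled-in firmware is the standard request_firmware() pattern. The sketch below is illustrative only: load_symbol_firmware() and its error handling are made up, and only request_firmware(), release_firmware() and the symbol_sp24t_prim_fw file name come from the patch itself.

#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/errno.h>

/* Minimal sketch: ask the hotplug/udev firmware agent for an image by
 * name; 'dev' is assumed to be a valid struct device (for spectrum_cs
 * it would be derived from the PCMCIA link). */
static int load_symbol_firmware(struct device *dev)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, "symbol_sp24t_prim_fw", dev);
	if (err)
		return err;	/* typically -ENOENT when the file is absent */

	/* fw->data and fw->size now describe the image; download it here. */

	release_firmware(fw);	/* drop the reference once the download is done */
	return 0;
}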
- */ -/* #define SPECTRUM_FW_INCLUDED 1 */ - -#ifdef SPECTRUM_FW_INCLUDED -/* Header with the firmware */ -#include "spectrum_fw.h" -#else /* !SPECTRUM_FW_INCLUDED */ -#include static unsigned char *primsym; static unsigned char *secsym; static const char primary_fw_name[] = "symbol_sp24t_prim_fw"; static const char secondary_fw_name[] = "symbol_sp24t_sec_fw"; -#endif /* !SPECTRUM_FW_INCLUDED */ /********************************************************************/ /* Module stuff */ @@ -124,17 +89,8 @@ static dev_link_t *dev_list; /* = NULL */ /* Function prototypes */ /********************************************************************/ -/* device methods */ -static int spectrum_cs_hard_reset(struct orinoco_private *priv); - -/* PCMCIA gumpf */ -static void spectrum_cs_config(dev_link_t * link); -static void spectrum_cs_release(dev_link_t * link); -static int spectrum_cs_event(event_t event, int priority, - event_callback_args_t * args); - -static dev_link_t *spectrum_cs_attach(void); -static void spectrum_cs_detach(dev_link_t *); +static void spectrum_cs_release(dev_link_t *link); +static void spectrum_cs_detach(dev_link_t *link); /********************************************************************/ /* Firmware downloader */ @@ -182,8 +138,8 @@ static void spectrum_cs_detach(dev_link_t *); * Each block has the following structure. */ struct dblock { - u32 _addr; /* adapter address where to write the block */ - u16 _len; /* length of the data only, in bytes */ + __le32 _addr; /* adapter address where to write the block */ + __le16 _len; /* length of the data only, in bytes */ char data[0]; /* data to be written */ } __attribute__ ((packed)); @@ -193,9 +149,9 @@ struct dblock { * items with matching ID should be written. */ struct pdr { - u32 _id; /* record ID */ - u32 _addr; /* adapter address where to write the data */ - u32 _len; /* expected length of the data, in bytes */ + __le32 _id; /* record ID */ + __le32 _addr; /* adapter address where to write the data */ + __le32 _len; /* expected length of the data, in bytes */ char next[0]; /* next PDR starts here */ } __attribute__ ((packed)); @@ -206,8 +162,8 @@ struct pdr { * be plugged into the secondary firmware. 
*/ struct pdi { - u16 _len; /* length of ID and data, in words */ - u16 _id; /* record ID */ + __le16 _len; /* length of ID and data, in words */ + __le16 _id; /* record ID */ char data[0]; /* plug data */ } __attribute__ ((packed));; @@ -414,7 +370,7 @@ spectrum_plug_pdi(hermes_t *hw, struct pdr *first_pdr, struct pdi *pdi) /* Read PDA from the adapter */ static int -spectrum_read_pda(hermes_t *hw, u16 *pda, int pda_len) +spectrum_read_pda(hermes_t *hw, __le16 *pda, int pda_len) { int ret; int pda_size; @@ -445,7 +401,7 @@ spectrum_read_pda(hermes_t *hw, u16 *pda, int pda_len) /* Parse PDA and write the records into the adapter */ static int spectrum_apply_pda(hermes_t *hw, const struct dblock *first_block, - u16 *pda) + __le16 *pda) { int ret; struct pdi *pdi; @@ -511,7 +467,7 @@ spectrum_dl_image(hermes_t *hw, dev_link_t *link, const struct dblock *first_block; /* Plug Data Area (PDA) */ - u16 pda[PDA_WORDS]; + __le16 pda[PDA_WORDS]; /* Binary block begins after the 0x1A marker */ ptr = image; @@ -571,8 +527,6 @@ spectrum_dl_firmware(hermes_t *hw, dev_link_t *link) { int ret; client_handle_t handle = link->handle; - -#ifndef SPECTRUM_FW_INCLUDED const struct firmware *fw_entry; if (request_firmware(&fw_entry, primary_fw_name, @@ -592,7 +546,6 @@ spectrum_dl_firmware(hermes_t *hw, dev_link_t *link) secondary_fw_name); return -ENOENT; } -#endif /* Load primary firmware */ ret = spectrum_dl_image(hw, link, primsym); @@ -1085,7 +1038,7 @@ static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION static struct pcmcia_device_id spectrum_cs_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x026c, 0x0001), /* Symbol Spectrum24 LA4100 */ PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0001), /* Socket Communications CF */ - PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0001), /* Intel PRO/Wireless 2011B */ + PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless LAN PC Card", 0x816cc815, 0x6fbf459a), /* 2011B, not 2011 */ PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, spectrum_cs_ids); @@ -1096,8 +1049,8 @@ static struct pcmcia_driver orinoco_driver = { .name = DRIVER_NAME, }, .attach = spectrum_cs_attach, - .event = spectrum_cs_event, .detach = spectrum_cs_detach, + .event = spectrum_cs_event, .id_table = spectrum_cs_ids, }; diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c index 4b0acae22b0d..7bc7fc823128 100644 --- a/drivers/net/wireless/strip.c +++ b/drivers/net/wireless/strip.c @@ -1352,7 +1352,7 @@ static unsigned char *strip_make_packet(unsigned char *buffer, struct in_device *in_dev; rcu_read_lock(); - in_dev = __in_dev_get(strip_info->dev); + in_dev = __in_dev_get_rcu(strip_info->dev); if (in_dev == NULL) { rcu_read_unlock(); return NULL; @@ -1508,7 +1508,7 @@ static void strip_send(struct strip *strip_info, struct sk_buff *skb) brd = addr = 0; rcu_read_lock(); - in_dev = __in_dev_get(strip_info->dev); + in_dev = __in_dev_get_rcu(strip_info->dev); if (in_dev) { if (in_dev->ifa_list) { brd = in_dev->ifa_list->ifa_broadcast; diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c index 7a5e20a17890..b0d8b5b03152 100644 --- a/drivers/net/wireless/wavelan.c +++ b/drivers/net/wireless/wavelan.c @@ -430,7 +430,6 @@ static void fee_read(unsigned long ioaddr, /* I/O port of the card */ } } -#ifdef WIRELESS_EXT /* if the wireless extension exists in the kernel */ /*------------------------------------------------------------------*/ /* @@ -514,7 +513,6 @@ static void fee_write(unsigned long ioaddr, /* I/O port of the card */ fee_wait(ioaddr, 10, 100); #endif /* EEPROM_IS_PROTECTED */ } 
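The __le16/__le32 annotations on the spectrum_cs download structures above mark fields that are stored little-endian in the firmware image regardless of host byte order, so sparse can flag any access that forgets a conversion. Below is a small sketch of how such a field is consumed; struct le_record and its accessors are made-up stand-ins for dblock/pdr/pdi.

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical image record: wire-order header plus a flexible array
 * for the payload, mirroring the layout style used above. */
struct le_record {
	__le32 addr;	/* little-endian adapter address */
	__le16 len;	/* little-endian payload length, in bytes */
	char data[0];	/* payload follows the header */
} __attribute__ ((packed));

static inline u32 record_addr(const struct le_record *r)
{
	/* Convert at the point of use: a no-op on little-endian CPUs,
	 * a byte swap on big-endian ones. */
	return le32_to_cpu(r->addr);
}

static inline u16 record_len(const struct le_record *r)
{
	return le16_to_cpu(r->len);
}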
-#endif /* WIRELESS_EXT */ /************************ I82586 SUBROUTINES *************************/ /* @@ -973,11 +971,9 @@ static void wv_mmc_show(struct net_device * dev) mmc_read(ioaddr, 0, (u8 *) & m, sizeof(m)); mmc_out(ioaddr, mmwoff(0, mmw_freeze), 0); -#ifdef WIRELESS_EXT /* if wireless extension exists in the kernel */ /* Don't forget to update statistics */ lp->wstats.discard.nwid += (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l; -#endif /* WIRELESS_EXT */ printk(KERN_DEBUG "##### WaveLAN modem status registers: #####\n"); #ifdef DEBUG_SHOW_UNUSED @@ -1499,7 +1495,6 @@ static int wavelan_set_mac_address(struct net_device * dev, void *addr) } #endif /* SET_MAC_ADDRESS */ -#ifdef WIRELESS_EXT /* if wireless extensions exist in the kernel */ /*------------------------------------------------------------------*/ /* @@ -2473,7 +2468,6 @@ static iw_stats *wavelan_get_wireless_stats(struct net_device * dev) #endif return &lp->wstats; } -#endif /* WIRELESS_EXT */ /************************* PACKET RECEPTION *************************/ /* @@ -4194,11 +4188,9 @@ static int __init wavelan_config(struct net_device *dev, unsigned short ioaddr) dev->set_mac_address = &wavelan_set_mac_address; #endif /* SET_MAC_ADDRESS */ -#ifdef WIRELESS_EXT /* if wireless extension exists in the kernel */ dev->wireless_handlers = &wavelan_handler_def; lp->wireless_data.spy_data = &lp->spy_data; dev->wireless_data = &lp->wireless_data; -#endif dev->mtu = WAVELAN_MTU; diff --git a/drivers/net/wireless/wavelan.p.h b/drivers/net/wireless/wavelan.p.h index 509ff22a6caa..166e28b9a4f7 100644 --- a/drivers/net/wireless/wavelan.p.h +++ b/drivers/net/wireless/wavelan.p.h @@ -409,11 +409,9 @@ #define MULTICAST_AVOID /* Avoid extra multicast (I'm sceptical). */ #undef SET_MAC_ADDRESS /* Experimental */ -#ifdef WIRELESS_EXT /* If wireless extensions exist in the kernel */ /* Warning: this stuff will slow down the driver. */ #define WIRELESS_SPY /* Enable spying addresses. */ #undef HISTOGRAM /* Enable histogram of signal level. */ -#endif /****************************** DEBUG ******************************/ @@ -506,12 +504,10 @@ struct net_local u_short tx_first_free; u_short tx_first_in_use; -#ifdef WIRELESS_EXT iw_stats wstats; /* Wireless-specific statistics */ struct iw_spy_data spy_data; struct iw_public_data wireless_data; -#endif #ifdef HISTOGRAM int his_number; /* number of intervals */ diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c index 183c4732ef65..4b3c98f5c564 100644 --- a/drivers/net/wireless/wavelan_cs.c +++ b/drivers/net/wireless/wavelan_cs.c @@ -415,7 +415,6 @@ fee_read(u_long base, /* i/o port of the card */ } } -#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */ /*------------------------------------------------------------------*/ /* @@ -500,7 +499,6 @@ fee_write(u_long base, /* i/o port of the card */ fee_wait(base, 10, 100); #endif /* EEPROM_IS_PROTECTED */ } -#endif /* WIRELESS_EXT */ /******************* WaveLAN Roaming routines... 
********************/ @@ -1161,10 +1159,8 @@ wv_mmc_show(struct net_device * dev) mmc_read(base, 0, (u_char *)&m, sizeof(m)); mmc_out(base, mmwoff(0, mmw_freeze), 0); -#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */ /* Don't forget to update statistics */ lp->wstats.discard.nwid += (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l; -#endif /* WIRELESS_EXT */ spin_unlock_irqrestore(&lp->spinlock, flags); @@ -1550,7 +1546,6 @@ wavelan_set_mac_address(struct net_device * dev, } #endif /* SET_MAC_ADDRESS */ -#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */ /*------------------------------------------------------------------*/ /* @@ -2793,7 +2788,6 @@ wavelan_get_wireless_stats(struct net_device * dev) #endif return &lp->wstats; } -#endif /* WIRELESS_EXT */ /************************* PACKET RECEPTION *************************/ /* @@ -4679,11 +4673,9 @@ wavelan_attach(void) dev->watchdog_timeo = WATCHDOG_JIFFIES; SET_ETHTOOL_OPS(dev, &ops); -#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */ dev->wireless_handlers = &wavelan_handler_def; lp->wireless_data.spy_data = &lp->spy_data; dev->wireless_data = &lp->wireless_data; -#endif /* Other specific data */ dev->mtu = WAVELAN_MTU; diff --git a/drivers/net/wireless/wavelan_cs.p.h b/drivers/net/wireless/wavelan_cs.p.h index 01d882be8790..724a715089c9 100644 --- a/drivers/net/wireless/wavelan_cs.p.h +++ b/drivers/net/wireless/wavelan_cs.p.h @@ -472,11 +472,9 @@ #define MULTICAST_AVOID /* Avoid extra multicast (I'm sceptical) */ #undef SET_MAC_ADDRESS /* Experimental */ -#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */ /* Warning : these stuff will slow down the driver... */ #define WIRELESS_SPY /* Enable spying addresses */ #undef HISTOGRAM /* Enable histogram of sig level... */ -#endif /****************************** DEBUG ******************************/ @@ -624,12 +622,10 @@ struct net_local int rfp; /* Last DMA machine receive pointer */ int overrunning; /* Receiver overrun flag */ -#ifdef WIRELESS_EXT iw_stats wstats; /* Wireless specific stats */ struct iw_spy_data spy_data; struct iw_public_data wireless_data; -#endif #ifdef HISTOGRAM int his_number; /* Number of intervals */ diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h index 7fcbe589c3f2..4303c50c2ab6 100644 --- a/drivers/net/wireless/wl3501.h +++ b/drivers/net/wireless/wl3501.h @@ -548,7 +548,7 @@ struct wl3501_80211_tx_plcp_hdr { struct wl3501_80211_tx_hdr { struct wl3501_80211_tx_plcp_hdr pclp_hdr; - struct ieee80211_hdr mac_hdr; + struct ieee80211_hdr_4addr mac_hdr; } __attribute__ ((packed)); /* diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index 0e98a9d9834c..a3bd91a61827 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -836,7 +836,7 @@ ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size, * This function implements the pci_alloc_consistent function. 
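The int-to-gfp_t conversions in this patch (ccio_alloc_consistent() here, sba_alloc_consistent() and the s390 fsm code further down) give allocation flags their own type, so passing a bare integer or swapping arguments becomes a warning instead of a silent bug. A hedged sketch of a caller that simply forwards the caller's gfp_t; alloc_descriptor_ring() is a made-up name and 'dev' is assumed valid.

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Allocate a coherent DMA buffer, propagating the caller's gfp_t so
 * atomic contexts can pass GFP_ATOMIC instead of GFP_KERNEL. */
static void *alloc_descriptor_ring(struct device *dev, size_t size,
				   dma_addr_t *bus_addr, gfp_t gfp)
{
	/* Pair with dma_free_coherent(dev, size, cpu_addr, *bus_addr). */
	return dma_alloc_coherent(dev, size, bus_addr, gfp);
}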
*/ static void * -ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, int flag) +ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { void *ret; #if 0 diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c index e90fb72a6962..286902298e33 100644 --- a/drivers/parisc/led.c +++ b/drivers/parisc/led.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -358,9 +359,10 @@ static __inline__ int led_get_net_activity(void) /* we are running as tasklet, so locking dev_base * for reading should be OK */ read_lock(&dev_base_lock); + rcu_read_lock(); for (dev = dev_base; dev; dev = dev->next) { struct net_device_stats *stats; - struct in_device *in_dev = __in_dev_get(dev); + struct in_device *in_dev = __in_dev_get_rcu(dev); if (!in_dev || !in_dev->ifa_list) continue; if (LOOPBACK(in_dev->ifa_list->ifa_local)) @@ -371,6 +373,7 @@ static __inline__ int led_get_net_activity(void) rx_total += stats->rx_packets; tx_total += stats->tx_packets; } + rcu_read_unlock(); read_unlock(&dev_base_lock); retval = 0; diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 82ea68b55df4..bd8b3e5a5cd7 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -986,7 +986,7 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, * See Documentation/DMA-mapping.txt */ static void *sba_alloc_consistent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, int gfp) + dma_addr_t *dma_handle, gfp_t gfp) { void *ret; diff --git a/drivers/pci/.gitignore b/drivers/pci/.gitignore new file mode 100644 index 000000000000..f297ca8d313e --- /dev/null +++ b/drivers/pci/.gitignore @@ -0,0 +1,4 @@ +classlist.h +devlist.h +gen-devlist + diff --git a/drivers/pci/hotplug.c b/drivers/pci/hotplug.c index 10444988a10b..e1743be31909 100644 --- a/drivers/pci/hotplug.c +++ b/drivers/pci/hotplug.c @@ -7,7 +7,6 @@ int pci_hotplug (struct device *dev, char **envp, int num_envp, char *buffer, int buffer_size) { struct pci_dev *pdev; - char *scratch; int i = 0; int length = 0; @@ -18,9 +17,6 @@ int pci_hotplug (struct device *dev, char **envp, int num_envp, if (!pdev) return -ENODEV; - scratch = buffer; - - if (add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size, &length, "PCI_CLASS=%04X", pdev->class)) diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c index 752e6513c447..db69be85b458 100644 --- a/drivers/pci/hotplug/rpadlpar_sysfs.c +++ b/drivers/pci/hotplug/rpadlpar_sysfs.c @@ -62,7 +62,7 @@ static ssize_t add_slot_store(struct dlpar_io_attr *dlpar_attr, char drc_name[MAX_DRC_NAME_LEN]; char *end; - if (nbytes > MAX_DRC_NAME_LEN) + if (nbytes >= MAX_DRC_NAME_LEN) return 0; memcpy(drc_name, buf, nbytes); @@ -83,7 +83,7 @@ static ssize_t remove_slot_store(struct dlpar_io_attr *dlpar_attr, char drc_name[MAX_DRC_NAME_LEN]; char *end; - if (nbytes > MAX_DRC_NAME_LEN) + if (nbytes >= MAX_DRC_NAME_LEN) return 0; memcpy(drc_name, buf, nbytes); diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c index b1409441c1cd..a32ae82e5922 100644 --- a/drivers/pci/hotplug/sgi_hotplug.c +++ b/drivers/pci/hotplug/sgi_hotplug.c @@ -159,7 +159,7 @@ static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot, pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus); - slot = kcalloc(1, sizeof(*slot), GFP_KERNEL); + slot = kzalloc(sizeof(*slot), GFP_KERNEL); if (!slot) return -ENOMEM; bss_hotplug_slot->private = slot; @@ -491,7 +491,7 @@ 
static int sn_hotplug_slot_register(struct pci_bus *pci_bus) if (sn_pci_slot_valid(pci_bus, device) != 1) continue; - bss_hotplug_slot = kcalloc(1, sizeof(*bss_hotplug_slot), + bss_hotplug_slot = kzalloc(sizeof(*bss_hotplug_slot), GFP_KERNEL); if (!bss_hotplug_slot) { rc = -ENOMEM; @@ -499,7 +499,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus) } bss_hotplug_slot->info = - kcalloc(1, sizeof(struct hotplug_slot_info), + kzalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL); if (!bss_hotplug_slot->info) { rc = -ENOMEM; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 56a3b397efee..2898830c496f 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -360,7 +360,7 @@ pci_create_resource_files(struct pci_dev *pdev) continue; /* allocate attribute structure, piggyback attribute name */ - res_attr = kcalloc(1, sizeof(*res_attr) + 10, GFP_ATOMIC); + res_attr = kzalloc(sizeof(*res_attr) + 10, GFP_ATOMIC); if (res_attr) { char *res_attr_name = (char *)(res_attr + 1); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 26a55d08b506..005786416bb5 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -165,7 +165,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) if (l == 0xffffffff) l = 0; if ((l & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_MEMORY) { - sz = pci_size(l, sz, PCI_BASE_ADDRESS_MEM_MASK); + sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK); if (!sz) continue; res->start = l & PCI_BASE_ADDRESS_MEM_MASK; @@ -215,7 +215,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) if (l == 0xffffffff) l = 0; if (sz && sz != 0xffffffff) { - sz = pci_size(l, sz, PCI_ROM_ADDRESS_MASK); + sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK); if (sz) { res->flags = (l & IORESOURCE_ROM_ENABLE) | IORESOURCE_MEM | IORESOURCE_PREFETCH | @@ -402,6 +402,12 @@ static void pci_enable_crs(struct pci_dev *dev) static void __devinit pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max) { struct pci_bus *parent = child->parent; + + /* Attempts to fix that up are really dangerous unless + we're going to re-assign all bus numbers. */ + if (!pcibios_assign_all_busses()) + return; + while (parent->parent && parent->subordinate < max) { parent->subordinate = max; pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max); @@ -478,8 +484,18 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max * We need to assign a number to this bus which we always * do in the second pass. */ - if (!pass) + if (!pass) { + if (pcibios_assign_all_busses()) + /* Temporarily disable forwarding of the + configuration cycles on all bridges in + this bus segment to avoid possible + conflicts in the second pass between two + bridges programmed with overlapping + bus ranges. 
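The new early write in pci_scan_bridge() relies on the layout of the dword at PCI_PRIMARY_BUS (offset 0x18 of a PCI-PCI bridge): primary, secondary and subordinate bus numbers occupy the low three bytes and the secondary latency timer the top byte, so buses & ~0xffffff clears the bus numbers and thereby stops the bridge from forwarding configuration cycles downstream. A stand-alone illustration with arbitrary example values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* primary[7:0], secondary[15:8], subordinate[23:16], latency[31:24] */
	uint32_t buses = (0x40u << 24)	/* secondary latency timer */
		       | (0x05u << 16)	/* subordinate bus number  */
		       | (0x03u << 8)	/* secondary bus number    */
		       |  0x02u;	/* primary bus number      */

	uint32_t masked = buses & ~0xffffffu;

	printf("before: %08x\n", buses);	/* 40050302 */
	printf("after:  %08x\n", masked);	/* 40000000 */
	return 0;
}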
*/ + pci_write_config_dword(dev, PCI_PRIMARY_BUS, + buses & ~0xffffff); return max; + } /* Clear errors */ pci_write_config_word(dev, PCI_STATUS, 0xffff); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 11ca44387cb0..7992bc8cc6a4 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -241,7 +241,8 @@ static void __devinit quirk_s3_64M(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M ); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M ); -static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, unsigned size, int nr) +static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, + unsigned size, int nr, const char *name) { region &= ~(size-1); if (region) { @@ -259,6 +260,7 @@ static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, unsi pcibios_bus_to_resource(dev, res, &bus_region); pci_claim_resource(dev, nr); + printk("PCI quirk: region %04x-%04x claimed by %s\n", region, region + size - 1, name); } } @@ -291,25 +293,98 @@ static void __devinit quirk_ali7101_acpi(struct pci_dev *dev) u16 region; pci_read_config_word(dev, 0xE0, ®ion); - quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES); + quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI"); pci_read_config_word(dev, 0xE2, ®ion); - quirk_io_region(dev, region, 32, PCI_BRIDGE_RESOURCES+1); + quirk_io_region(dev, region, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, quirk_ali7101_acpi ); +static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable) +{ + u32 devres; + u32 mask, size, base; + + pci_read_config_dword(dev, port, &devres); + if ((devres & enable) != enable) + return; + mask = (devres >> 16) & 15; + base = devres & 0xffff; + size = 16; + for (;;) { + unsigned bit = size >> 1; + if ((bit & mask) == bit) + break; + size = bit; + } + /* + * For now we only print it out. Eventually we'll want to + * reserve it (at least if it's in the 0x1000+ range), but + * let's get enough confirmation reports first. + */ + base &= -size; + printk("%s PIO at %04x-%04x\n", name, base, base + size - 1); +} + +static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable) +{ + u32 devres; + u32 mask, size, base; + + pci_read_config_dword(dev, port, &devres); + if ((devres & enable) != enable) + return; + base = devres & 0xffff0000; + mask = (devres & 0x3f) << 16; + size = 128 << 16; + for (;;) { + unsigned bit = size >> 1; + if ((bit & mask) == bit) + break; + size = bit; + } + /* + * For now we only print it out. Eventually we'll want to + * reserve it, but let's get enough confirmation reports first. + */ + base &= -size; + printk("%s MMIO at %04x-%04x\n", name, base, base + size - 1); +} + /* * PIIX4 ACPI: Two IO regions pointed to by longwords at * 0x40 (64 bytes of ACPI registers) * 0x90 (32 bytes of SMB registers) + * and a few strange programmable PIIX4 device resources. 
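The size-decoding loop added in piix4_io_quirk()/piix4_mem_quirk() above turns the mask field of a PIIX4 device-resource register into the smallest power-of-two region size strictly greater than the mask, then aligns the base down with base &= -size. The same logic as a stand-alone program; decode_size() and the sample register value are purely illustrative.

#include <stdio.h>

/* Mirror of the loop used by the PIIX4 quirk. */
static unsigned int decode_size(unsigned int mask, unsigned int start)
{
	unsigned int size = start;

	for (;;) {
		unsigned int bit = size >> 1;
		if ((bit & mask) == bit)
			break;
		size = bit;
	}
	return size;
}

int main(void)
{
	unsigned int devres = 0x00a3fe41;		/* arbitrary example register */
	unsigned int mask = (devres >> 16) & 15;	/* I/O flavour: bits 19:16 */
	unsigned int base = devres & 0xffff;
	unsigned int size = decode_size(mask, 16);	/* I/O regions start at 16 ports */

	base &= -size;		/* align the base down to the region size */
	printf("PIO at %04x-%04x\n", base, base + size - 1);	/* fe40-fe43 */
	return 0;
}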
*/ static void __devinit quirk_piix4_acpi(struct pci_dev *dev) { - u32 region; + u32 region, res_a; pci_read_config_dword(dev, 0x40, ®ion); - quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES); + quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI"); pci_read_config_dword(dev, 0x90, ®ion); - quirk_io_region(dev, region, 32, PCI_BRIDGE_RESOURCES+1); + quirk_io_region(dev, region, 32, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB"); + + /* Device resource A has enables for some of the other ones */ + pci_read_config_dword(dev, 0x5c, &res_a); + + piix4_io_quirk(dev, "PIIX4 devres B", 0x60, 3 << 21); + piix4_io_quirk(dev, "PIIX4 devres C", 0x64, 3 << 21); + + /* Device resource D is just bitfields for static resources */ + + /* Device 12 enabled? */ + if (res_a & (1 << 29)) { + piix4_io_quirk(dev, "PIIX4 devres E", 0x68, 1 << 20); + piix4_mem_quirk(dev, "PIIX4 devres F", 0x6c, 1 << 7); + } + /* Device 13 enabled? */ + if (res_a & (1 << 30)) { + piix4_io_quirk(dev, "PIIX4 devres G", 0x70, 1 << 20); + piix4_mem_quirk(dev, "PIIX4 devres H", 0x74, 1 << 7); + } + piix4_io_quirk(dev, "PIIX4 devres I", 0x78, 1 << 20); + piix4_io_quirk(dev, "PIIX4 devres J", 0x7c, 1 << 20); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi ); @@ -323,10 +398,10 @@ static void __devinit quirk_ich4_lpc_acpi(struct pci_dev *dev) u32 region; pci_read_config_dword(dev, 0x40, ®ion); - quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES); + quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH4 ACPI/GPIO/TCO"); pci_read_config_dword(dev, 0x58, ®ion); - quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1); + quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH4 GPIO"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi ); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi ); @@ -352,7 +427,7 @@ static void __devinit quirk_vt82c586_acpi(struct pci_dev *dev) if (rev & 0x10) { pci_read_config_dword(dev, 0x48, ®ion); region &= PCI_BASE_ADDRESS_IO_MASK; - quirk_io_region(dev, region, 256, PCI_BRIDGE_RESOURCES); + quirk_io_region(dev, region, 256, PCI_BRIDGE_RESOURCES, "vt82c586 ACPI"); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_vt82c586_acpi ); @@ -372,11 +447,11 @@ static void __devinit quirk_vt82c686_acpi(struct pci_dev *dev) pci_read_config_word(dev, 0x70, &hm); hm &= PCI_BASE_ADDRESS_IO_MASK; - quirk_io_region(dev, hm, 128, PCI_BRIDGE_RESOURCES + 1); + quirk_io_region(dev, hm, 128, PCI_BRIDGE_RESOURCES + 1, "vt82c868 HW-mon"); pci_read_config_dword(dev, 0x90, &smb); smb &= PCI_BASE_ADDRESS_IO_MASK; - quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 2); + quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 2, "vt82c868 SMB"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt82c686_acpi ); @@ -391,11 +466,11 @@ static void __devinit quirk_vt8235_acpi(struct pci_dev *dev) pci_read_config_word(dev, 0x88, &pm); pm &= PCI_BASE_ADDRESS_IO_MASK; - quirk_io_region(dev, pm, 128, PCI_BRIDGE_RESOURCES); + quirk_io_region(dev, pm, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM"); pci_read_config_word(dev, 0xd0, &smb); smb &= PCI_BASE_ADDRESS_IO_MASK; - quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 1); + quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 1, "vt8235 SMB"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi); @@ -1233,7 +1308,7 @@ static void 
__init quirk_alder_ioapic(struct pci_dev *pdev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic ); #endif -#ifdef CONFIG_SCSI_SATA +#ifdef CONFIG_SCSI_SATA_INTEL_COMBINED static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev) { u8 prog, comb, tmp; @@ -1310,7 +1385,7 @@ static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev) request_region(0x170, 8, "libata"); /* port 1 */ } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_intel_ide_combined ); -#endif /* CONFIG_SCSI_SATA */ +#endif /* CONFIG_SCSI_SATA_INTEL_COMBINED */ int pcie_mch_quirk; diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 657be948baf7..28ce3a7ee434 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -40,7 +40,7 @@ * FIXME: IO should be max 256 bytes. However, since we may * have a P2P bridge below a cardbus bridge, we need 4K. */ -#define CARDBUS_IO_SIZE (4*1024) +#define CARDBUS_IO_SIZE (256) #define CARDBUS_MEM_SIZE (32*1024*1024) static void __devinit diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig index ddc741e6ecbf..36cc9a96a338 100644 --- a/drivers/pcmcia/Kconfig +++ b/drivers/pcmcia/Kconfig @@ -146,7 +146,7 @@ config I82365 config TCIC tristate "Databook TCIC host bridge support" - depends on PCMCIA + depends on PCMCIA && ISA select PCCARD_NONSTATIC help Say Y here to include support for the Databook TCIC family of PCMCIA diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c index 1d755e20880c..3f6d51d11374 100644 --- a/drivers/pcmcia/cardbus.c +++ b/drivers/pcmcia/cardbus.c @@ -228,6 +228,11 @@ int cb_alloc(struct pcmcia_socket * s) pci_bus_size_bridges(bus); pci_bus_assign_resources(bus); cardbus_assign_irqs(bus, s->pci_irq); + + /* socket specific tune function */ + if (s->tune_bridge) + s->tune_bridge(s, bus); + pci_enable_bridges(bus); pci_bus_add_devices(bus); diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c index fabd3529cebc..d5e76423a0ee 100644 --- a/drivers/pcmcia/cs.c +++ b/drivers/pcmcia/cs.c @@ -689,6 +689,9 @@ static int pccardd(void *__skt) schedule(); try_to_freeze(); } + /* make sure we are running before we exit */ + set_current_state(TASK_RUNNING); + remove_wait_queue(&skt->thread_wait, &wait); /* remove from the device core */ diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c index 08d1c9288264..94be9e51654e 100644 --- a/drivers/pcmcia/omap_cf.c +++ b/drivers/pcmcia/omap_cf.c @@ -22,7 +22,6 @@ #include #include -#include #include #include diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c index c42455d20eb6..f9a5c70284b5 100644 --- a/drivers/pcmcia/rsrc_nonstatic.c +++ b/drivers/pcmcia/rsrc_nonstatic.c @@ -691,7 +691,7 @@ static int adjust_memory(struct pcmcia_socket *s, unsigned int action, unsigned unsigned long size = end - start + 1; int ret = 0; - if (end <= start) + if (end < start) return -EINVAL; down(&rsrc_sem); @@ -724,7 +724,7 @@ static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long unsigned long size = end - start + 1; int ret = 0; - if (end <= start) + if (end < start) return -EINVAL; if (end > IO_SPACE_LIMIT) @@ -817,7 +817,7 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s) /* if we got at least one of IO, and one of MEM, we can be glad and * activate the PCMCIA subsystem */ - if (done & (IORESOURCE_MEM | IORESOURCE_IO)) + if (done == (IORESOURCE_MEM | IORESOURCE_IO)) s->resource_setup_done = 1; return 0; @@ -925,7 +925,7 @@ static ssize_t 
store_io_db(struct class_device *class_dev, const char *buf, size return -EINVAL; } } - if (end_addr <= start_addr) + if (end_addr < start_addr) return -EINVAL; ret = adjust_io(s, add, start_addr, end_addr); @@ -977,7 +977,7 @@ static ssize_t store_mem_db(struct class_device *class_dev, const char *buf, siz return -EINVAL; } } - if (end_addr <= start_addr) + if (end_addr < start_addr) return -EINVAL; ret = adjust_memory(s, add, start_addr, end_addr); diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c index 888b70e6a484..9e7ccd8a4321 100644 --- a/drivers/pcmcia/soc_common.c +++ b/drivers/pcmcia/soc_common.c @@ -66,7 +66,7 @@ void soc_pcmcia_debug(struct soc_pcmcia_socket *skt, const char *func, if (pc_debug > lvl) { printk(KERN_DEBUG "skt%u: %s: ", skt->nr, func); va_start(args, fmt); - printk(fmt, args); + vprintk(fmt, args); va_end(args); } } @@ -321,8 +321,6 @@ soc_common_pcmcia_get_socket(struct pcmcia_socket *sock, socket_state_t *state) * less punt all of this work and let the kernel handle the details * of power configuration, reset, &c. We also record the value of * `state' in order to regurgitate it to the PCMCIA core later. - * - * Returns: 0 */ static int soc_common_pcmcia_set_socket(struct pcmcia_socket *sock, socket_state_t *state) @@ -407,7 +405,7 @@ soc_common_pcmcia_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *m * the map speed as requested, but override the address ranges * supplied by Card Services. * - * Returns: 0 on success, -1 on error + * Returns: 0 on success, -ERRNO on error */ static int soc_common_pcmcia_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *map) @@ -655,8 +653,8 @@ static void soc_pcmcia_cpufreq_unregister(void) } #else -#define soc_pcmcia_cpufreq_register() -#define soc_pcmcia_cpufreq_unregister() +static int soc_pcmcia_cpufreq_register(void) { return 0; } +static void soc_pcmcia_cpufreq_unregister(void) {} #endif int soc_common_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, int first, int nr) @@ -738,7 +736,7 @@ int soc_common_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops goto out_err_5; } - if ( list_empty(&soc_pcmcia_sockets) ) + if (list_empty(&soc_pcmcia_sockets)) soc_pcmcia_cpufreq_register(); list_add(&skt->node, &soc_pcmcia_sockets); @@ -839,7 +837,7 @@ int soc_common_drv_pcmcia_remove(struct device *dev) release_resource(&skt->res_io); release_resource(&skt->res_skt); } - if ( list_empty(&soc_pcmcia_sockets) ) + if (list_empty(&soc_pcmcia_sockets)) soc_pcmcia_cpufreq_unregister(); up(&soc_pcmcia_sockets_lock); diff --git a/drivers/pcmcia/ti113x.h b/drivers/pcmcia/ti113x.h index fbe233e19ceb..539b5cd1a598 100644 --- a/drivers/pcmcia/ti113x.h +++ b/drivers/pcmcia/ti113x.h @@ -59,6 +59,7 @@ #define TI122X_SCR_SER_STEP 0xc0000000 #define TI122X_SCR_INTRTIE 0x20000000 +#define TIXX21_SCR_TIEALL 0x10000000 #define TI122X_SCR_CBRSVD 0x00400000 #define TI122X_SCR_MRBURSTDN 0x00008000 #define TI122X_SCR_MRBURSTUP 0x00004000 @@ -153,6 +154,12 @@ /* EnE test register */ #define ENE_TEST_C9 0xc9 /* 8bit */ #define ENE_TEST_C9_TLTENABLE 0x02 +#define ENE_TEST_C9_PFENABLE_F0 0x04 +#define ENE_TEST_C9_PFENABLE_F1 0x08 +#define ENE_TEST_C9_PFENABLE (ENE_TEST_C9_PFENABLE_F0 | ENE_TEST_C9_PFENABLE_F0) +#define ENE_TEST_C9_WPDISALBLE_F0 0x40 +#define ENE_TEST_C9_WPDISALBLE_F1 0x80 +#define ENE_TEST_C9_WPDISALBLE (ENE_TEST_C9_WPDISALBLE_F0 | ENE_TEST_C9_WPDISALBLE_F1) /* * Texas Instruments CardBus controller overrides. 
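The soc_pcmcia_debug() fix above (printk changed to vprintk) addresses the classic varargs pitfall: a va_list has to be handed to the v-variant of a formatting function rather than passed on as if it were the arguments themselves. A user-space analogue, with debug_msg() standing in for the kernel helper:

#include <stdio.h>
#include <stdarg.h>

/* Forward a variable argument list to the formatting routine correctly. */
static void debug_msg(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	/* printf(fmt, args) would interpret the va_list itself as the first
	 * argument and print garbage; vprintf() consumes the list properly. */
	vprintf(fmt, args);
	va_end(args);
}

int main(void)
{
	debug_msg("skt%u: %s: card inserted\n", 0u, "socket_event");
	return 0;
}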
@@ -618,6 +625,7 @@ static int ti12xx_2nd_slot_empty(struct yenta_socket *socket) int devfn; unsigned int state; int ret = 1; + u32 sysctl; /* catch the two-slot controllers */ switch (socket->dev->device) { @@ -640,6 +648,24 @@ static int ti12xx_2nd_slot_empty(struct yenta_socket *socket) */ break; + case PCI_DEVICE_ID_TI_X515: + case PCI_DEVICE_ID_TI_X420: + case PCI_DEVICE_ID_TI_X620: + case PCI_DEVICE_ID_TI_XX21_XX11: + case PCI_DEVICE_ID_TI_7410: + case PCI_DEVICE_ID_TI_7610: + /* + * those are either single or dual slot CB with additional functions + * like 1394, smartcard reader, etc. check the TIEALL flag for them + * the TIEALL flag binds the IRQ of all functions together. + * we catch the single slot variants later. + */ + sysctl = config_readl(socket, TI113X_SYSTEM_CONTROL); + if (sysctl & TIXX21_SCR_TIEALL) + return 0; + + break; + /* single-slot controllers have the 2nd slot empty always :) */ default: return 1; @@ -652,6 +678,15 @@ static int ti12xx_2nd_slot_empty(struct yenta_socket *socket) if (!func) return 1; + /* + * check that the device id of both slots match. this is needed for the + * XX21 and the XX11 controller that share the same device id for single + * and dual slot controllers. return '2nd slot empty'. we already checked + * if the interrupt is tied to another function. + */ + if (socket->dev->device != func->device) + goto out; + slot2 = pci_get_drvdata(func); if (!slot2) goto out; @@ -790,16 +825,6 @@ static int ti12xx_override(struct yenta_socket *socket) if (val_orig != val) config_writel(socket, TI113X_SYSTEM_CONTROL, val); - /* - * for EnE bridges only: clear testbit TLTEnable. this makes the - * RME Hammerfall DSP sound card working. - */ - if (socket->dev->vendor == PCI_VENDOR_ID_ENE) { - u8 test_c9 = config_readb(socket, ENE_TEST_C9); - test_c9 &= ~ENE_TEST_C9_TLTENABLE; - config_writeb(socket, ENE_TEST_C9, test_c9); - } - /* * Yenta expects controllers to use CSCINT to route * CSC interrupts to PCI rather than INTVAL. @@ -841,5 +866,78 @@ static int ti1250_override(struct yenta_socket *socket) return ti12xx_override(socket); } + +/** + * EnE specific part. EnE bridges are register compatible with TI bridges but + * have their own test registers and, more importantly, their own little problems. + * Some fixup code to make everybody happy (TM). + */ + +#ifdef CONFIG_CARDBUS +/** + * set/clear various test bits: + * Defaults to clear the bit. 
+ * - mask (u8) defines what bits to change + * - bits (u8) is the values to change them to + * -> it's + * current = (current & ~mask) | bits + */ +/* pci ids of devices that wants to have the bit set */ +#define DEVID(_vend,_dev,_subvend,_subdev,mask,bits) { \ + .vendor = _vend, \ + .device = _dev, \ + .subvendor = _subvend, \ + .subdevice = _subdev, \ + .driver_data = ((mask) << 8 | (bits)), \ + } +static struct pci_device_id ene_tune_tbl[] = { + /* Echo Audio products based on motorola DSP56301 and DSP56361 */ + DEVID(PCI_VENDOR_ID_MOTOROLA, 0x1801, 0xECC0, PCI_ANY_ID, + ENE_TEST_C9_TLTENABLE | ENE_TEST_C9_PFENABLE, ENE_TEST_C9_TLTENABLE), + DEVID(PCI_VENDOR_ID_MOTOROLA, 0x3410, 0xECC0, PCI_ANY_ID, + ENE_TEST_C9_TLTENABLE | ENE_TEST_C9_PFENABLE, ENE_TEST_C9_TLTENABLE), + + {} +}; + +static void ene_tune_bridge(struct pcmcia_socket *sock, struct pci_bus *bus) +{ + struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket); + struct pci_dev *dev; + struct pci_device_id *id = NULL; + u8 test_c9, old_c9, mask, bits; + + list_for_each_entry(dev, &bus->devices, bus_list) { + id = (struct pci_device_id *) pci_match_id(ene_tune_tbl, dev); + if (id) + break; + } + + test_c9 = old_c9 = config_readb(socket, ENE_TEST_C9); + if (id) { + mask = (id->driver_data >> 8) & 0xFF; + bits = id->driver_data & 0xFF; + + test_c9 = (test_c9 & ~mask) | bits; + } + else + /* default to clear TLTEnable bit, old behaviour */ + test_c9 &= ~ENE_TEST_C9_TLTENABLE; + + printk(KERN_INFO "yenta EnE: chaning testregister 0xC9, %02x -> %02x\n", old_c9, test_c9); + config_writeb(socket, ENE_TEST_C9, test_c9); +} + +static int ene_override(struct yenta_socket *socket) +{ + /* install tune_bridge() function */ + socket->socket.tune_bridge = ene_tune_bridge; + + return ti1250_override(socket); +} +#else +# define ene_override ti1250_override +#endif + #endif /* _LINUX_TI113X_H */ diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index ba4d78e5b121..db9f952f9e3c 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c @@ -559,12 +559,6 @@ static void yenta_interrogate(struct yenta_socket *socket) static int yenta_sock_init(struct pcmcia_socket *sock) { struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket); - u16 bridge; - - bridge = config_readw(socket, CB_BRIDGE_CONTROL) & ~CB_BRIDGE_INTR; - if (!socket->cb_irq) - bridge |= CB_BRIDGE_INTR; - config_writew(socket, CB_BRIDGE_CONTROL, bridge); exca_writeb(socket, I365_GBLCTL, 0x00); exca_writeb(socket, I365_GENCTL, 0x00); @@ -819,6 +813,7 @@ enum { CARDBUS_TYPE_TOPIC95, CARDBUS_TYPE_TOPIC97, CARDBUS_TYPE_O2MICRO, + CARDBUS_TYPE_ENE, }; /* @@ -865,6 +860,12 @@ static struct cardbus_type cardbus_type[] = { .override = o2micro_override, .restore_state = o2micro_restore_state, }, + [CARDBUS_TYPE_ENE] = { + .override = ene_override, + .save_state = ti_save_state, + .restore_state = ti_restore_state, + .sock_init = ti_init, + }, }; @@ -883,16 +884,8 @@ static unsigned int yenta_probe_irq(struct yenta_socket *socket, u32 isa_irq_mas { int i; unsigned long val; - u16 bridge_ctrl; u32 mask; - /* Set up ISA irq routing to probe the ISA irqs.. */ - bridge_ctrl = config_readw(socket, CB_BRIDGE_CONTROL); - if (!(bridge_ctrl & CB_BRIDGE_INTR)) { - bridge_ctrl |= CB_BRIDGE_INTR; - config_writew(socket, CB_BRIDGE_CONTROL, bridge_ctrl); - } - /* * Probe for usable interrupts using the force * register to generate bogus card status events. 
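The EnE tuning table above packs, per PCI ID, which test-register bits may be changed (mask) and their new values (bits) into a single driver_data word; ene_tune_bridge() then applies them with a read-modify-write. A stand-alone illustration of the encode, decode and update steps; the macro name, helper and register contents are arbitrary.

#include <stdio.h>
#include <stdint.h>

#define ENCODE(mask, bits)	(((mask) << 8) | (bits))

static uint8_t apply(uint8_t current, unsigned long driver_data)
{
	uint8_t mask = (driver_data >> 8) & 0xFF;
	uint8_t bits = driver_data & 0xFF;

	/* Touch only the bits named in 'mask'; leave everything else alone. */
	return (current & ~mask) | bits;
}

int main(void)
{
	unsigned long dd = ENCODE(0x06, 0x02);	/* may change 0x06, set only 0x02 */
	uint8_t old_c9 = 0x4d;			/* arbitrary register content */
	uint8_t new_c9 = apply(old_c9, dd);

	printf("0xC9: %02x -> %02x\n", old_c9, new_c9);	/* 4d -> 4b */
	return 0;
}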
@@ -914,9 +907,6 @@ static unsigned int yenta_probe_irq(struct yenta_socket *socket, u32 isa_irq_mas mask = probe_irq_mask(val) & 0xffff; - bridge_ctrl &= ~CB_BRIDGE_INTR; - config_writew(socket, CB_BRIDGE_CONTROL, bridge_ctrl); - return mask; } @@ -944,18 +934,11 @@ static irqreturn_t yenta_probe_handler(int irq, void *dev_id, struct pt_regs *re /* probes the PCI interrupt, use only on override functions */ static int yenta_probe_cb_irq(struct yenta_socket *socket) { - u16 bridge_ctrl; - if (!socket->cb_irq) return -1; socket->probe_status = 0; - /* disable ISA interrupts */ - bridge_ctrl = config_readw(socket, CB_BRIDGE_CONTROL); - bridge_ctrl &= ~CB_BRIDGE_INTR; - config_writew(socket, CB_BRIDGE_CONTROL, bridge_ctrl); - if (request_irq(socket->cb_irq, yenta_probe_handler, SA_SHIRQ, "yenta", socket)) { printk(KERN_WARNING "Yenta: request_irq() in yenta_probe_cb_irq() failed!\n"); return -1; @@ -966,7 +949,7 @@ static int yenta_probe_cb_irq(struct yenta_socket *socket) cb_writel(socket, CB_SOCKET_EVENT, -1); cb_writel(socket, CB_SOCKET_MASK, CB_CSTSMASK); cb_writel(socket, CB_SOCKET_FORCE, CB_FCARDSTS); - + msleep(100); /* disable interrupts */ @@ -1004,11 +987,12 @@ static void yenta_config_init(struct yenta_socket *socket) { u16 bridge; struct pci_dev *dev = socket->dev; + struct pci_bus_region region; - pci_set_power_state(socket->dev, 0); + pcibios_resource_to_bus(socket->dev, ®ion, &dev->resource[0]); config_writel(socket, CB_LEGACY_MODE_BASE, 0); - config_writel(socket, PCI_BASE_ADDRESS_0, dev->resource[0].start); + config_writel(socket, PCI_BASE_ADDRESS_0, region.start); config_writew(socket, PCI_COMMAND, PCI_COMMAND_IO | PCI_COMMAND_MEMORY | @@ -1031,8 +1015,8 @@ static void yenta_config_init(struct yenta_socket *socket) * - PCI interrupts enabled if a PCI interrupt exists.. 
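Programming dev->resource[0].start straight into a BAR is only correct when CPU and PCI bus addresses happen to coincide; the pcibios_resource_to_bus() call added to yenta_config_init() above converts the CPU-visible resource into bus coordinates first, and it is that value which belongs in the device's config space. A minimal sketch of the same pattern, mirroring the call signature used in this patch; program_bar0() is a made-up helper and error handling is omitted.

#include <linux/pci.h>

static void program_bar0(struct pci_dev *dev)
{
	struct pci_bus_region region;

	/* Translate the CPU-visible resource into PCI bus coordinates. */
	pcibios_resource_to_bus(dev, &region, &dev->resource[0]);

	/* Write the bus-relative address, not resource[0].start. */
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, region.start);
}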
*/ bridge = config_readw(socket, CB_BRIDGE_CONTROL); - bridge &= ~(CB_BRIDGE_CRST | CB_BRIDGE_PREFETCH1 | CB_BRIDGE_INTR | CB_BRIDGE_ISAEN | CB_BRIDGE_VGAEN); - bridge |= CB_BRIDGE_PREFETCH0 | CB_BRIDGE_POSTEN | CB_BRIDGE_INTR; + bridge &= ~(CB_BRIDGE_CRST | CB_BRIDGE_PREFETCH1 | CB_BRIDGE_ISAEN | CB_BRIDGE_VGAEN); + bridge |= CB_BRIDGE_PREFETCH0 | CB_BRIDGE_POSTEN; config_writew(socket, CB_BRIDGE_CONTROL, bridge); } @@ -1265,10 +1249,22 @@ static struct pci_device_id yenta_table [] = { CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1250, TI1250), CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1410, TI1250), - CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1211, TI12XX), - CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1225, TI12XX), - CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1410, TI1250), - CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1420, TI12XX), + CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX21_XX11, TI12XX), + CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_X515, TI12XX), + CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_X420, TI12XX), + CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_X620, TI12XX), + CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7410, TI12XX), + CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7510, TI12XX), + CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7610, TI12XX), + + CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_710, TI12XX), + CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_712, TI12XX), + CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_720, TI12XX), + CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_722, TI12XX), + CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1211, ENE), + CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1225, ENE), + CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1410, ENE), + CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1420, ENE), CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C465, RICOH), CB_ID(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C466, RICOH), diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 91ea8e4777f3..dbb3eb0e330b 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -437,7 +437,7 @@ __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev) if (cdev->dev.driver_data) { gdev = (struct ccwgroup_device *)cdev->dev.driver_data; if (get_device(&gdev->dev)) { - if (klist_node_attached(&gdev->dev.knode_bus)) + if (device_is_registered(&gdev->dev)) return gdev; put_device(&gdev->dev); } diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 14c76f5e4177..9adc11e8b8bc 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -544,7 +544,7 @@ get_disc_ccwdev_by_devno(unsigned int devno, struct ccw_device *sibling) .sibling = sibling, }; - dev = bus_find_device(&css_bus_type, NULL, &data, match_devno); + dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno); return dev ? 
to_ccwdev(dev) : NULL; } diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c index fa09440d82e5..38f50b7129a2 100644 --- a/drivers/s390/net/fsm.c +++ b/drivers/s390/net/fsm.c @@ -16,7 +16,7 @@ MODULE_LICENSE("GPL"); fsm_instance * init_fsm(char *name, const char **state_names, const char **event_names, int nr_states, - int nr_events, const fsm_node *tmpl, int tmpl_len, int order) + int nr_events, const fsm_node *tmpl, int tmpl_len, gfp_t order) { int i; fsm_instance *this; diff --git a/drivers/s390/net/fsm.h b/drivers/s390/net/fsm.h index f9a011001eb6..1b8a7e7c34f3 100644 --- a/drivers/s390/net/fsm.h +++ b/drivers/s390/net/fsm.h @@ -110,7 +110,7 @@ extern fsm_instance * init_fsm(char *name, const char **state_names, const char **event_names, int nr_states, int nr_events, const fsm_node *tmpl, - int tmpl_len, int order); + int tmpl_len, gfp_t order); /** * Releases an FSM diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h index 2ad4797ce024..38a2441564d7 100644 --- a/drivers/s390/net/qeth.h +++ b/drivers/s390/net/qeth.h @@ -275,6 +275,10 @@ qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, enum qeth_ipa_funcs func) QETH_IDX_FUNC_LEVEL_IQD_ENA_IPAT, \ QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT, \ QETH_MAX_QUEUES,0x103}, \ + {0x1731,0x06,0x1732,0x06,QETH_CARD_TYPE_OSN,0, \ + QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT, \ + QETH_IDX_FUNC_LEVEL_OSAE_DIS_IPAT, \ + QETH_MAX_QUEUES,0}, \ {0,0,0,0,0,0,0,0,0}} #define QETH_REAL_CARD 1 @@ -363,10 +367,22 @@ struct qeth_hdr_layer2 { __u8 reserved2[16]; } __attribute__ ((packed)); +struct qeth_hdr_osn { + __u8 id; + __u8 reserved; + __u16 seq_no; + __u16 reserved2; + __u16 control_flags; + __u16 pdu_length; + __u8 reserved3[18]; + __u32 ccid; +} __attribute__ ((packed)); + struct qeth_hdr { union { struct qeth_hdr_layer2 l2; struct qeth_hdr_layer3 l3; + struct qeth_hdr_osn osn; } hdr; } __attribute__ ((packed)); @@ -413,6 +429,7 @@ enum qeth_header_ids { QETH_HEADER_TYPE_LAYER3 = 0x01, QETH_HEADER_TYPE_LAYER2 = 0x02, QETH_HEADER_TYPE_TSO = 0x03, + QETH_HEADER_TYPE_OSN = 0x04, }; /* flags for qeth_hdr.ext_flags */ #define QETH_HDR_EXT_VLAN_FRAME 0x01 @@ -582,7 +599,6 @@ enum qeth_card_states { * Protocol versions */ enum qeth_prot_versions { - QETH_PROT_SNA = 0x0001, QETH_PROT_IPV4 = 0x0004, QETH_PROT_IPV6 = 0x0006, }; @@ -686,6 +702,7 @@ struct qeth_seqno { __u32 pdu_hdr; __u32 pdu_hdr_ack; __u16 ipa; + __u32 pkt_seqno; }; struct qeth_reply { @@ -760,6 +777,11 @@ enum qeth_threads { QETH_RECOVER_THREAD = 2, }; +struct qeth_osn_info { + int (*assist_cb)(struct net_device *dev, void *data); + int (*data_cb)(struct sk_buff *skb); +}; + struct qeth_card { struct list_head list; enum qeth_card_states state; @@ -802,6 +824,7 @@ struct qeth_card { int use_hard_stop; int (*orig_hard_header)(struct sk_buff *,struct net_device *, unsigned short,void *,void *,unsigned); + struct qeth_osn_info osn_info; }; struct qeth_card_list_struct { @@ -848,6 +871,7 @@ qeth_realloc_headroom(struct qeth_card *card, struct sk_buff **skb, int size) "on interface %s", QETH_CARD_IFNAME(card)); return -ENOMEM; } + kfree_skb(*skb); *skb = new_skb; } return 0; @@ -914,10 +938,12 @@ qeth_get_hlen(__u8 link_type) static inline unsigned short qeth_get_netdev_flags(struct qeth_card *card) { - if (card->options.layer2) + if (card->options.layer2 && + (card->info.type == QETH_CARD_TYPE_OSAE)) return 0; switch (card->info.type) { case QETH_CARD_TYPE_IQD: + case QETH_CARD_TYPE_OSN: return IFF_NOARP; #ifdef CONFIG_QETH_IPV6 default: @@ -954,9 +980,10 @@ static inline int 
qeth_get_max_mtu_for_card(int cardtype) { switch (cardtype) { + case QETH_CARD_TYPE_UNKNOWN: - return 61440; case QETH_CARD_TYPE_OSAE: + case QETH_CARD_TYPE_OSN: return 61440; case QETH_CARD_TYPE_IQD: return 57344; @@ -1002,6 +1029,7 @@ qeth_mtu_is_valid(struct qeth_card * card, int mtu) case QETH_CARD_TYPE_IQD: return ((mtu >= 576) && (mtu <= card->info.max_mtu + 4096 - 32)); + case QETH_CARD_TYPE_OSN: case QETH_CARD_TYPE_UNKNOWN: default: return 1; @@ -1013,6 +1041,7 @@ qeth_get_arphdr_type(int cardtype, int linktype) { switch (cardtype) { case QETH_CARD_TYPE_OSAE: + case QETH_CARD_TYPE_OSN: switch (linktype) { case QETH_LINK_TYPE_LANE_TR: case QETH_LINK_TYPE_HSTR: @@ -1180,4 +1209,16 @@ qeth_fill_header(struct qeth_card *, struct qeth_hdr *, extern void qeth_flush_buffers(struct qeth_qdio_out_q *, int, int, int); +extern int +qeth_osn_assist(struct net_device *, void *, int); + +extern int +qeth_osn_register(unsigned char *read_dev_no, + struct net_device **, + int (*assist_cb)(struct net_device *, void *), + int (*data_cb)(struct sk_buff *)); + +extern void +qeth_osn_deregister(struct net_device *); + #endif /* __QETH_H__ */ diff --git a/drivers/s390/net/qeth_fs.h b/drivers/s390/net/qeth_fs.h index 5c9a51ce91b6..c0b4c8d82c45 100644 --- a/drivers/s390/net/qeth_fs.h +++ b/drivers/s390/net/qeth_fs.h @@ -12,7 +12,7 @@ #ifndef __QETH_FS_H__ #define __QETH_FS_H__ -#define VERSION_QETH_FS_H "$Revision: 1.9 $" +#define VERSION_QETH_FS_H "$Revision: 1.10 $" extern const char *VERSION_QETH_PROC_C; extern const char *VERSION_QETH_SYS_C; @@ -42,6 +42,12 @@ qeth_create_device_attributes(struct device *dev); extern void qeth_remove_device_attributes(struct device *dev); +extern int +qeth_create_device_attributes_osn(struct device *dev); + +extern void +qeth_remove_device_attributes_osn(struct device *dev); + extern int qeth_create_driver_attributes(void); @@ -108,6 +114,8 @@ qeth_get_cardname(struct qeth_card *card) return " OSD Express"; case QETH_CARD_TYPE_IQD: return " HiperSockets"; + case QETH_CARD_TYPE_OSN: + return " OSN QDIO"; default: return " unknown"; } @@ -153,6 +161,8 @@ qeth_get_cardname_short(struct qeth_card *card) } case QETH_CARD_TYPE_IQD: return "HiperSockets"; + case QETH_CARD_TYPE_OSN: + return "OSN"; default: return "unknown"; } diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index 86582cf1e19e..692003c9f896 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -196,7 +196,6 @@ qeth_notifier_register(struct task_struct *p, int signum) { struct qeth_notify_list_struct *n_entry; - /*check first if entry already exists*/ spin_lock(&qeth_notify_lock); list_for_each_entry(n_entry, &qeth_notify_list, list) { @@ -511,7 +510,7 @@ static int __qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode) { struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data; - int rc = 0; + int rc = 0, rc2 = 0, rc3 = 0; enum qeth_card_states recover_flag; QETH_DBF_TEXT(setup, 3, "setoffl"); @@ -523,11 +522,13 @@ __qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode) CARD_BUS_ID(card)); return -ERESTARTSYS; } - if ((rc = ccw_device_set_offline(CARD_DDEV(card))) || - (rc = ccw_device_set_offline(CARD_WDEV(card))) || - (rc = ccw_device_set_offline(CARD_RDEV(card)))) { + rc = ccw_device_set_offline(CARD_DDEV(card)); + rc2 = ccw_device_set_offline(CARD_WDEV(card)); + rc3 = ccw_device_set_offline(CARD_RDEV(card)); + if (!rc) + rc = (rc2) ? 
rc2 : rc3; + if (rc) QETH_DBF_TEXT_(setup, 2, "1err%d", rc); - } if (recover_flag == CARD_STATE_UP) card->state = CARD_STATE_RECOVER; qeth_notify_processes(); @@ -1022,7 +1023,10 @@ qeth_set_intial_options(struct qeth_card *card) card->options.fake_broadcast = 0; card->options.add_hhlen = DEFAULT_ADD_HHLEN; card->options.fake_ll = 0; - card->options.layer2 = 0; + if (card->info.type == QETH_CARD_TYPE_OSN) + card->options.layer2 = 1; + else + card->options.layer2 = 0; } /** @@ -1046,6 +1050,7 @@ qeth_setup_card(struct qeth_card *card) spin_lock_init(&card->vlanlock); card->vlangrp = NULL; #endif + spin_lock_init(&card->lock); spin_lock_init(&card->ip_lock); spin_lock_init(&card->thread_mask_lock); card->thread_start_mask = 0; @@ -1110,19 +1115,20 @@ qeth_determine_card_type(struct qeth_card *card) QETH_DBF_TEXT(setup, 2, "detcdtyp"); + card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; + card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; while (known_devices[i][4]) { if ((CARD_RDEV(card)->id.dev_type == known_devices[i][2]) && (CARD_RDEV(card)->id.dev_model == known_devices[i][3])) { card->info.type = known_devices[i][4]; + card->qdio.no_out_queues = known_devices[i][8]; + card->info.is_multicast_different = known_devices[i][9]; if (is_1920_device(card)) { PRINT_INFO("Priority Queueing not able " "due to hardware limitations!\n"); card->qdio.no_out_queues = 1; card->qdio.default_out_queue = 0; - } else { - card->qdio.no_out_queues = known_devices[i][8]; - } - card->info.is_multicast_different = known_devices[i][9]; + } return 0; } i++; @@ -1146,6 +1152,8 @@ qeth_probe_device(struct ccwgroup_device *gdev) if (!get_device(dev)) return -ENODEV; + QETH_DBF_TEXT_(setup, 2, "%s", gdev->dev.bus_id); + card = qeth_alloc_card(); if (!card) { put_device(dev); @@ -1155,28 +1163,27 @@ qeth_probe_device(struct ccwgroup_device *gdev) card->read.ccwdev = gdev->cdev[0]; card->write.ccwdev = gdev->cdev[1]; card->data.ccwdev = gdev->cdev[2]; - - if ((rc = qeth_setup_card(card))){ - QETH_DBF_TEXT_(setup, 2, "2err%d", rc); - put_device(dev); - qeth_free_card(card); - return rc; - } gdev->dev.driver_data = card; card->gdev = gdev; gdev->cdev[0]->handler = qeth_irq; gdev->cdev[1]->handler = qeth_irq; gdev->cdev[2]->handler = qeth_irq; - rc = qeth_create_device_attributes(dev); - if (rc) { + if ((rc = qeth_determine_card_type(card))){ + PRINT_WARN("%s: not a valid card type\n", __func__); + QETH_DBF_TEXT_(setup, 2, "3err%d", rc); + put_device(dev); + qeth_free_card(card); + return rc; + } + if ((rc = qeth_setup_card(card))){ + QETH_DBF_TEXT_(setup, 2, "2err%d", rc); put_device(dev); qeth_free_card(card); return rc; } - if ((rc = qeth_determine_card_type(card))){ - PRINT_WARN("%s: not a valid card type\n", __func__); - QETH_DBF_TEXT_(setup, 2, "3err%d", rc); + rc = qeth_create_device_attributes(dev); + if (rc) { put_device(dev); qeth_free_card(card); return rc; @@ -1626,16 +1633,6 @@ qeth_cmd_timeout(unsigned long data) spin_unlock_irqrestore(&reply->card->lock, flags); } -static void -qeth_reset_ip_addresses(struct qeth_card *card) -{ - QETH_DBF_TEXT(trace, 2, "rstipadd"); - - qeth_clear_ip_list(card, 0, 1); - /* this function will also schedule the SET_IP_THREAD */ - qeth_set_multicast_list(card->dev); -} - static struct qeth_ipa_cmd * qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob) { @@ -1664,10 +1661,11 @@ qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob) "IP address reset.\n", QETH_CARD_IFNAME(card), card->info.chpid); - card->lan_online = 1; 
netif_carrier_on(card->dev); - qeth_reset_ip_addresses(card); + qeth_schedule_recovery(card); return NULL; + case IPA_CMD_MODCCID: + return cmd; case IPA_CMD_REGISTER_LOCAL_ADDR: QETH_DBF_TEXT(trace,3, "irla"); break; @@ -1729,6 +1727,14 @@ qeth_send_control_data_cb(struct qeth_channel *channel, cmd = qeth_check_ipa_data(card, iob); if ((cmd == NULL) && (card->state != CARD_STATE_DOWN)) goto out; + /*in case of OSN : check if cmd is set */ + if (card->info.type == QETH_CARD_TYPE_OSN && + cmd && + cmd->hdr.command != IPA_CMD_STARTLAN && + card->osn_info.assist_cb != NULL) { + card->osn_info.assist_cb(card->dev, cmd); + goto out; + } spin_lock_irqsave(&card->lock, flags); list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { @@ -1745,8 +1751,7 @@ qeth_send_control_data_cb(struct qeth_channel *channel, keep_reply = reply->callback(card, reply, (unsigned long)cmd); - } - else + } else keep_reply = reply->callback(card, reply, (unsigned long)iob); @@ -1776,6 +1781,24 @@ out: qeth_release_buffer(channel,iob); } +static inline void +qeth_prepare_control_data(struct qeth_card *card, int len, +struct qeth_cmd_buffer *iob) +{ + qeth_setup_ccw(&card->write,iob->data,len); + iob->callback = qeth_release_buffer; + + memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), + &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH); + card->seqno.trans_hdr++; + memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data), + &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH); + card->seqno.pdu_hdr++; + memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data), + &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH); + QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN); +} + static int qeth_send_control_data(struct qeth_card *card, int len, struct qeth_cmd_buffer *iob, @@ -1786,24 +1809,11 @@ qeth_send_control_data(struct qeth_card *card, int len, { int rc; unsigned long flags; - struct qeth_reply *reply; + struct qeth_reply *reply = NULL; struct timer_list timer; QETH_DBF_TEXT(trace, 2, "sendctl"); - qeth_setup_ccw(&card->write,iob->data,len); - - memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), - &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH); - card->seqno.trans_hdr++; - - memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data), - &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH); - card->seqno.pdu_hdr++; - memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data), - &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH); - iob->callback = qeth_release_buffer; - reply = qeth_alloc_reply(card); if (!reply) { PRINT_WARN("Could no alloc qeth_reply!\n"); @@ -1818,10 +1828,6 @@ qeth_send_control_data(struct qeth_card *card, int len, init_timer(&timer); timer.function = qeth_cmd_timeout; timer.data = (unsigned long) reply; - if (IS_IPA(iob->data)) - timer.expires = jiffies + QETH_IPA_TIMEOUT; - else - timer.expires = jiffies + QETH_TIMEOUT; init_waitqueue_head(&reply->wait_q); spin_lock_irqsave(&card->lock, flags); list_add_tail(&reply->list, &card->cmd_waiter_list); @@ -1829,6 +1835,11 @@ qeth_send_control_data(struct qeth_card *card, int len, QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN); wait_event(card->wait_q, atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0); + qeth_prepare_control_data(card, len, iob); + if (IS_IPA(iob->data)) + timer.expires = jiffies + QETH_IPA_TIMEOUT; + else + timer.expires = jiffies + QETH_TIMEOUT; QETH_DBF_TEXT(trace, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, @@ -1855,6 +1866,62 @@ qeth_send_control_data(struct qeth_card *card, int len, return rc; } +static int 
+qeth_osn_send_control_data(struct qeth_card *card, int len, + struct qeth_cmd_buffer *iob) +{ + unsigned long flags; + int rc = 0; + + QETH_DBF_TEXT(trace, 5, "osndctrd"); + + wait_event(card->wait_q, + atomic_compare_and_swap(0,1,&card->write.irq_pending) == 0); + qeth_prepare_control_data(card, len, iob); + QETH_DBF_TEXT(trace, 6, "osnoirqp"); + spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); + rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, + (addr_t) iob, 0, 0); + spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); + if (rc){ + PRINT_WARN("qeth_osn_send_control_data: " + "ccw_device_start rc = %i\n", rc); + QETH_DBF_TEXT_(trace, 2, " err%d", rc); + qeth_release_buffer(iob->channel, iob); + atomic_set(&card->write.irq_pending, 0); + wake_up(&card->wait_q); + } + return rc; +} + +static inline void +qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, + char prot_type) +{ + memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); + memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data),&prot_type,1); + memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), + &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); +} + +static int +qeth_osn_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, + int data_len) +{ + u16 s1, s2; + +QETH_DBF_TEXT(trace,4,"osndipa"); + + qeth_prepare_ipa_cmd(card, iob, QETH_PROT_OSN2); + s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len); + s2 = (u16)data_len; + memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2); + memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2); + memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2); + memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2); + return qeth_osn_send_control_data(card, s1, iob); +} + static int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, int (*reply_cb) @@ -1866,17 +1933,14 @@ qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, QETH_DBF_TEXT(trace,4,"sendipa"); - memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); - if (card->options.layer2) - prot_type = QETH_PROT_LAYER2; + if (card->info.type == QETH_CARD_TYPE_OSN) + prot_type = QETH_PROT_OSN2; + else + prot_type = QETH_PROT_LAYER2; else prot_type = QETH_PROT_TCPIP; - - memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data),&prot_type,1); - memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), - &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); - + qeth_prepare_ipa_cmd(card,iob,prot_type); rc = qeth_send_control_data(card, IPA_CMD_LENGTH, iob, reply_cb, reply_param); return rc; @@ -2018,7 +2082,10 @@ qeth_ulp_enable(struct qeth_card *card) *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (__u8) card->info.portno; if (card->options.layer2) - prot_type = QETH_PROT_LAYER2; + if (card->info.type == QETH_CARD_TYPE_OSN) + prot_type = QETH_PROT_OSN2; + else + prot_type = QETH_PROT_LAYER2; else prot_type = QETH_PROT_TCPIP; @@ -2108,15 +2175,21 @@ qeth_check_for_inbound_error(struct qeth_qdio_buffer *buf, } static inline struct sk_buff * -qeth_get_skb(unsigned int length) +qeth_get_skb(unsigned int length, struct qeth_hdr *hdr) { struct sk_buff* skb; + int add_len; + + add_len = 0; + if (hdr->hdr.osn.id == QETH_HEADER_TYPE_OSN) + add_len = sizeof(struct qeth_hdr); #ifdef CONFIG_QETH_VLAN - if ((skb = dev_alloc_skb(length + VLAN_HLEN))) - skb_reserve(skb, VLAN_HLEN); -#else - skb = dev_alloc_skb(length); + else + add_len = VLAN_HLEN; #endif + skb = dev_alloc_skb(length + add_len); + if (skb && add_len) + skb_reserve(skb, add_len); return skb; } @@ -2146,7 +2219,10 @@ qeth_get_next_skb(struct qeth_card *card, struct 
qdio_buffer *buffer, offset += sizeof(struct qeth_hdr); if (card->options.layer2) - skb_len = (*hdr)->hdr.l2.pkt_length; + if (card->info.type == QETH_CARD_TYPE_OSN) + skb_len = (*hdr)->hdr.osn.pdu_length; + else + skb_len = (*hdr)->hdr.l2.pkt_length; else skb_len = (*hdr)->hdr.l3.length; @@ -2154,15 +2230,15 @@ qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer, return NULL; if (card->options.fake_ll){ if(card->dev->type == ARPHRD_IEEE802_TR){ - if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_TR))) + if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_TR, *hdr))) goto no_mem; skb_reserve(skb,QETH_FAKE_LL_LEN_TR); } else { - if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_ETH))) + if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_ETH, *hdr))) goto no_mem; skb_reserve(skb,QETH_FAKE_LL_LEN_ETH); } - } else if (!(skb = qeth_get_skb(skb_len))) + } else if (!(skb = qeth_get_skb(skb_len, *hdr))) goto no_mem; data_ptr = element->addr + offset; while (skb_len) { @@ -2387,6 +2463,7 @@ qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, skb_pull(skb, VLAN_HLEN); } #endif + *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; return vlan_id; } @@ -2460,8 +2537,12 @@ qeth_process_inbound_buffer(struct qeth_card *card, skb->dev = card->dev; if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) vlan_tag = qeth_layer2_rebuild_skb(card, skb, hdr); - else + else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3) qeth_rebuild_skb(card, skb, hdr); + else { /*in case of OSN*/ + skb_push(skb, sizeof(struct qeth_hdr)); + memcpy(skb->data, hdr, sizeof(struct qeth_hdr)); + } /* is device UP ? */ if (!(card->dev->flags & IFF_UP)){ dev_kfree_skb_any(skb); @@ -2472,7 +2553,10 @@ qeth_process_inbound_buffer(struct qeth_card *card, vlan_hwaccel_rx(skb, card->vlangrp, vlan_tag); else #endif - rxrc = netif_rx(skb); + if (card->info.type == QETH_CARD_TYPE_OSN) + rxrc = card->osn_info.data_cb(skb); + else + rxrc = netif_rx(skb); card->dev->last_rx = jiffies; card->stats.rx_packets++; card->stats.rx_bytes += skb->len; @@ -3014,7 +3098,7 @@ qeth_alloc_buffer_pool(struct qeth_card *card) return -ENOMEM; } for(j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j){ - ptr = (void *) __get_free_page(GFP_KERNEL); + ptr = (void *) __get_free_page(GFP_KERNEL|GFP_DMA); if (!ptr) { while (j > 0) free_page((unsigned long) @@ -3058,7 +3142,8 @@ qeth_alloc_qdio_buffers(struct qeth_card *card) if (card->qdio.state == QETH_QDIO_ALLOCATED) return 0; - card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), GFP_KERNEL); + card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), + GFP_KERNEL|GFP_DMA); if (!card->qdio.in_q) return - ENOMEM; QETH_DBF_TEXT(setup, 2, "inq"); @@ -3083,7 +3168,7 @@ qeth_alloc_qdio_buffers(struct qeth_card *card) } for (i = 0; i < card->qdio.no_out_queues; ++i){ card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q), - GFP_KERNEL); + GFP_KERNEL|GFP_DMA); if (!card->qdio.out_qs[i]){ while (i > 0) kfree(card->qdio.out_qs[--i]); @@ -3156,8 +3241,6 @@ qeth_init_qdio_info(struct qeth_card *card) INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list); INIT_LIST_HEAD(&card->qdio.init_pool.entry_list); /* outbound */ - card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; - card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; } static int @@ -3472,7 +3555,7 @@ qeth_mpc_initialize(struct qeth_card *card) return 0; out_qdio: - qeth_qdio_clear_card(card, card->info.type==QETH_CARD_TYPE_OSAE); + qeth_qdio_clear_card(card, card->info.type!=QETH_CARD_TYPE_IQD); return rc; } @@ -3497,6 +3580,9 @@ qeth_get_netdevice(enum 
qeth_card_types type, enum qeth_link_types linktype) case QETH_CARD_TYPE_IQD: dev = alloc_netdev(0, "hsi%d", ether_setup); break; + case QETH_CARD_TYPE_OSN: + dev = alloc_netdev(0, "osn%d", ether_setup); + break; default: dev = alloc_etherdev(0); } @@ -3661,7 +3747,8 @@ qeth_open(struct net_device *dev) if (card->state != CARD_STATE_SOFTSETUP) return -ENODEV; - if ( (card->options.layer2) && + if ( (card->info.type != QETH_CARD_TYPE_OSN) && + (card->options.layer2) && (!card->info.layer2_mac_registered)) { QETH_DBF_TEXT(trace,4,"nomacadr"); return -EPERM; @@ -3699,6 +3786,9 @@ qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb) { int cast_type = RTN_UNSPEC; + if (card->info.type == QETH_CARD_TYPE_OSN) + return cast_type; + if (skb->dst && skb->dst->neighbour){ cast_type = skb->dst->neighbour->type; if ((cast_type == RTN_BROADCAST) || @@ -3788,13 +3878,16 @@ static inline int qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb, struct qeth_hdr **hdr, int ipv) { - int rc; + int rc = 0; #ifdef CONFIG_QETH_VLAN u16 *tag; #endif QETH_DBF_TEXT(trace, 6, "prepskb"); - + if (card->info.type == QETH_CARD_TYPE_OSN) { + *hdr = (struct qeth_hdr *)(*skb)->data; + return rc; + } rc = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr)); if (rc) return rc; @@ -4297,8 +4390,14 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb) } } } + if ((card->info.type == QETH_CARD_TYPE_OSN) && + (skb->protocol == htons(ETH_P_IPV6))) { + dev_kfree_skb_any(skb); + return 0; + } cast_type = qeth_get_cast_type(card, skb); - if ((cast_type == RTN_BROADCAST) && (card->info.broadcast_capable == 0)){ + if ((cast_type == RTN_BROADCAST) && + (card->info.broadcast_capable == 0)){ card->stats.tx_dropped++; card->stats.tx_errors++; dev_kfree_skb_any(skb); @@ -4326,7 +4425,8 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb) QETH_DBF_TEXT_(trace, 4, "pskbe%d", rc); return rc; } - qeth_fill_header(card, hdr, skb, ipv, cast_type); + if (card->info.type != QETH_CARD_TYPE_OSN) + qeth_fill_header(card, hdr, skb, ipv, cast_type); } if (large_send == QETH_LARGE_SEND_EDDP) { @@ -4387,6 +4487,7 @@ qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) case MII_BMCR: /* Basic mode control register */ rc = BMCR_FULLDPLX; if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH)&& + (card->info.link_type != QETH_LINK_TYPE_OSN) && (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH)) rc |= BMCR_SPEED100; break; @@ -5010,6 +5111,9 @@ qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) (card->state != CARD_STATE_SOFTSETUP)) return -ENODEV; + if (card->info.type == QETH_CARD_TYPE_OSN) + return -EPERM; + switch (cmd){ case SIOC_QETH_ARP_SET_NO_ENTRIES: if ( !capable(CAP_NET_ADMIN) || @@ -5200,7 +5304,7 @@ qeth_free_vlan_addresses4(struct qeth_card *card, unsigned short vid) if (!card->vlangrp) return; rcu_read_lock(); - in_dev = __in_dev_get(card->vlangrp->vlan_devices[vid]); + in_dev = __in_dev_get_rcu(card->vlangrp->vlan_devices[vid]); if (!in_dev) goto out; for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { @@ -5335,6 +5439,9 @@ qeth_set_multicast_list(struct net_device *dev) { struct qeth_card *card = (struct qeth_card *) dev->priv; + if (card->info.type == QETH_CARD_TYPE_OSN) + return ; + QETH_DBF_TEXT(trace,3,"setmulti"); qeth_delete_mc_addresses(card); qeth_add_multicast_ipv4(card); @@ -5376,6 +5483,94 @@ qeth_get_addr_buffer(enum qeth_prot_versions prot) return addr; } +int +qeth_osn_assist(struct net_device *dev, + void *data, + int data_len) +{ + struct 
qeth_cmd_buffer *iob; + struct qeth_card *card; + int rc; + + QETH_DBF_TEXT(trace, 2, "osnsdmc"); + if (!dev) + return -ENODEV; + card = (struct qeth_card *)dev->priv; + if (!card) + return -ENODEV; + if ((card->state != CARD_STATE_UP) && + (card->state != CARD_STATE_SOFTSETUP)) + return -ENODEV; + iob = qeth_wait_for_buffer(&card->write); + memcpy(iob->data+IPA_PDU_HEADER_SIZE, data, data_len); + rc = qeth_osn_send_ipa_cmd(card, iob, data_len); + return rc; +} + +static struct net_device * +qeth_netdev_by_devno(unsigned char *read_dev_no) +{ + struct qeth_card *card; + struct net_device *ndev; + unsigned char *readno; + __u16 temp_dev_no, card_dev_no; + char *endp; + unsigned long flags; + + ndev = NULL; + memcpy(&temp_dev_no, read_dev_no, 2); + read_lock_irqsave(&qeth_card_list.rwlock, flags); + list_for_each_entry(card, &qeth_card_list.list, list) { + readno = CARD_RDEV_ID(card); + readno += (strlen(readno) - 4); + card_dev_no = simple_strtoul(readno, &endp, 16); + if (card_dev_no == temp_dev_no) { + ndev = card->dev; + break; + } + } + read_unlock_irqrestore(&qeth_card_list.rwlock, flags); + return ndev; +} + +int +qeth_osn_register(unsigned char *read_dev_no, + struct net_device **dev, + int (*assist_cb)(struct net_device *, void *), + int (*data_cb)(struct sk_buff *)) +{ + struct qeth_card * card; + + QETH_DBF_TEXT(trace, 2, "osnreg"); + *dev = qeth_netdev_by_devno(read_dev_no); + if (*dev == NULL) + return -ENODEV; + card = (struct qeth_card *)(*dev)->priv; + if (!card) + return -ENODEV; + if ((assist_cb == NULL) || (data_cb == NULL)) + return -EINVAL; + card->osn_info.assist_cb = assist_cb; + card->osn_info.data_cb = data_cb; + return 0; +} + +void +qeth_osn_deregister(struct net_device * dev) +{ + struct qeth_card *card; + + QETH_DBF_TEXT(trace, 2, "osndereg"); + if (!dev) + return; + card = (struct qeth_card *)dev->priv; + if (!card) + return; + card->osn_info.assist_cb = NULL; + card->osn_info.data_cb = NULL; + return; +} + static void qeth_delete_mc_addresses(struct qeth_card *card) { @@ -5706,6 +5901,12 @@ qeth_layer2_set_mac_address(struct net_device *dev, void *p) QETH_DBF_TEXT(trace, 3, "setmcLY3"); return -EOPNOTSUPP; } + if (card->info.type == QETH_CARD_TYPE_OSN) { + PRINT_WARN("Setting MAC address on %s is not supported.\n", + dev->name); + QETH_DBF_TEXT(trace, 3, "setmcOSN"); + return -EOPNOTSUPP; + } QETH_DBF_TEXT_(trace, 3, "%s", CARD_BUS_ID(card)); QETH_DBF_HEX(trace, 3, addr->sa_data, OSA_ADDR_LEN); rc = qeth_layer2_send_delmac(card, &card->dev->dev_addr[0]); @@ -6082,9 +6283,8 @@ qeth_netdev_init(struct net_device *dev) qeth_get_hlen(card->info.link_type) + card->options.add_hhlen; dev->addr_len = OSA_ADDR_LEN; dev->mtu = card->info.initial_mtu; - - SET_ETHTOOL_OPS(dev, &qeth_ethtool_ops); - + if (card->info.type != QETH_CARD_TYPE_OSN) + SET_ETHTOOL_OPS(dev, &qeth_ethtool_ops); SET_MODULE_OWNER(dev); return 0; } @@ -6101,6 +6301,7 @@ qeth_init_func_level(struct qeth_card *card) QETH_IDX_FUNC_LEVEL_OSAE_ENA_IPAT; } else { if (card->info.type == QETH_CARD_TYPE_IQD) + /*FIXME:why do we have same values for dis and ena for osae??? 
*/ card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD_DIS_IPAT; else @@ -6130,7 +6331,7 @@ retry: ccw_device_set_online(CARD_WDEV(card)); ccw_device_set_online(CARD_DDEV(card)); } - rc = qeth_qdio_clear_card(card,card->info.type==QETH_CARD_TYPE_OSAE); + rc = qeth_qdio_clear_card(card,card->info.type!=QETH_CARD_TYPE_IQD); if (rc == -ERESTARTSYS) { QETH_DBF_TEXT(setup, 2, "break1"); return rc; @@ -6182,8 +6383,8 @@ retry: card->dev = qeth_get_netdevice(card->info.type, card->info.link_type); if (!card->dev){ - qeth_qdio_clear_card(card, card->info.type == - QETH_CARD_TYPE_OSAE); + qeth_qdio_clear_card(card, card->info.type != + QETH_CARD_TYPE_IQD); rc = -ENODEV; QETH_DBF_TEXT_(setup, 2, "6err%d", rc); goto out; @@ -6470,6 +6671,9 @@ qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply, if (cmd->hdr.prot_version == QETH_PROT_IPV4) { card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported; card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; + /* Disable IPV6 support hard coded for Hipersockets */ + if(card->info.type == QETH_CARD_TYPE_IQD) + card->options.ipa4.supported_funcs &= ~IPA_IPV6; } else { #ifdef CONFIG_QETH_IPV6 card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; @@ -7087,6 +7291,8 @@ qeth_softsetup_card(struct qeth_card *card) return rc; } else card->lan_online = 1; + if (card->info.type==QETH_CARD_TYPE_OSN) + goto out; if (card->options.layer2) { card->dev->features |= NETIF_F_HW_VLAN_FILTER | @@ -7258,7 +7464,8 @@ qeth_stop_card(struct qeth_card *card, int recovery_mode) if (card->read.state == CH_STATE_UP && card->write.state == CH_STATE_UP && (card->state == CARD_STATE_UP)) { - if(recovery_mode) { + if (recovery_mode && + card->info.type != QETH_CARD_TYPE_OSN) { qeth_stop(card->dev); } else { rtnl_lock(); @@ -7440,7 +7647,8 @@ qeth_start_again(struct qeth_card *card, int recovery_mode) { QETH_DBF_TEXT(setup ,2, "startag"); - if(recovery_mode) { + if (recovery_mode && + card->info.type != QETH_CARD_TYPE_OSN) { qeth_open(card->dev); } else { rtnl_lock(); @@ -7472,33 +7680,36 @@ qeth_start_again(struct qeth_card *card, int recovery_mode) static void qeth_make_parameters_consistent(struct qeth_card *card) { - if (card->options.layer2) { - if (card->info.type == QETH_CARD_TYPE_IQD) { - PRINT_ERR("Device %s does not support " \ - "layer 2 functionality. " \ - "Ignoring layer2 option.\n",CARD_BUS_ID(card)); - } - IGNORE_PARAM_NEQ(route4.type, NO_ROUTER, NO_ROUTER, - "Routing options are"); + if (card->options.layer2 == 0) + return; + if (card->info.type == QETH_CARD_TYPE_OSN) + return; + if (card->info.type == QETH_CARD_TYPE_IQD) { + PRINT_ERR("Device %s does not support layer 2 functionality." 
\ + " Ignoring layer2 option.\n",CARD_BUS_ID(card)); + card->options.layer2 = 0; + return; + } + IGNORE_PARAM_NEQ(route4.type, NO_ROUTER, NO_ROUTER, + "Routing options are"); #ifdef CONFIG_QETH_IPV6 - IGNORE_PARAM_NEQ(route6.type, NO_ROUTER, NO_ROUTER, - "Routing options are"); + IGNORE_PARAM_NEQ(route6.type, NO_ROUTER, NO_ROUTER, + "Routing options are"); #endif - IGNORE_PARAM_EQ(checksum_type, HW_CHECKSUMMING, - QETH_CHECKSUM_DEFAULT, - "Checksumming options are"); - IGNORE_PARAM_NEQ(broadcast_mode, QETH_TR_BROADCAST_ALLRINGS, - QETH_TR_BROADCAST_ALLRINGS, - "Broadcast mode options are"); - IGNORE_PARAM_NEQ(macaddr_mode, QETH_TR_MACADDR_NONCANONICAL, - QETH_TR_MACADDR_NONCANONICAL, - "Canonical MAC addr options are"); - IGNORE_PARAM_NEQ(fake_broadcast, 0, 0, - "Broadcast faking options are"); - IGNORE_PARAM_NEQ(add_hhlen, DEFAULT_ADD_HHLEN, - DEFAULT_ADD_HHLEN,"Option add_hhlen is"); - IGNORE_PARAM_NEQ(fake_ll, 0, 0,"Option fake_ll is"); - } + IGNORE_PARAM_EQ(checksum_type, HW_CHECKSUMMING, + QETH_CHECKSUM_DEFAULT, + "Checksumming options are"); + IGNORE_PARAM_NEQ(broadcast_mode, QETH_TR_BROADCAST_ALLRINGS, + QETH_TR_BROADCAST_ALLRINGS, + "Broadcast mode options are"); + IGNORE_PARAM_NEQ(macaddr_mode, QETH_TR_MACADDR_NONCANONICAL, + QETH_TR_MACADDR_NONCANONICAL, + "Canonical MAC addr options are"); + IGNORE_PARAM_NEQ(fake_broadcast, 0, 0, + "Broadcast faking options are"); + IGNORE_PARAM_NEQ(add_hhlen, DEFAULT_ADD_HHLEN, + DEFAULT_ADD_HHLEN,"Option add_hhlen is"); + IGNORE_PARAM_NEQ(fake_ll, 0, 0,"Option fake_ll is"); } @@ -7528,8 +7739,7 @@ __qeth_set_online(struct ccwgroup_device *gdev, int recovery_mode) return -EIO; } - if (card->options.layer2) - qeth_make_parameters_consistent(card); + qeth_make_parameters_consistent(card); if ((rc = qeth_hardsetup_card(card))){ QETH_DBF_TEXT_(setup, 2, "2err%d", rc); @@ -7588,6 +7798,7 @@ qeth_set_online(struct ccwgroup_device *gdev) static struct ccw_device_id qeth_ids[] = { {CCW_DEVICE(0x1731, 0x01), driver_info:QETH_CARD_TYPE_OSAE}, {CCW_DEVICE(0x1731, 0x05), driver_info:QETH_CARD_TYPE_IQD}, + {CCW_DEVICE(0x1731, 0x06), driver_info:QETH_CARD_TYPE_OSN}, {}, }; MODULE_DEVICE_TABLE(ccw, qeth_ids); @@ -7725,7 +7936,7 @@ qeth_arp_constructor(struct neighbour *neigh) goto out; rcu_read_lock(); - in_dev = rcu_dereference(__in_dev_get(dev)); + in_dev = __in_dev_get_rcu(dev); if (in_dev == NULL) { rcu_read_unlock(); return -EINVAL; @@ -8332,6 +8543,9 @@ again: printk("qeth: removed\n"); } +EXPORT_SYMBOL(qeth_osn_register); +EXPORT_SYMBOL(qeth_osn_deregister); +EXPORT_SYMBOL(qeth_osn_assist); module_init(qeth_init); module_exit(qeth_exit); MODULE_AUTHOR("Frank Pavlic "); diff --git a/drivers/s390/net/qeth_mpc.c b/drivers/s390/net/qeth_mpc.c index f685ecc7da99..30e053d3cac2 100644 --- a/drivers/s390/net/qeth_mpc.c +++ b/drivers/s390/net/qeth_mpc.c @@ -11,7 +11,7 @@ #include #include "qeth_mpc.h" -const char *VERSION_QETH_MPC_C = "$Revision: 1.11 $"; +const char *VERSION_QETH_MPC_C = "$Revision: 1.12 $"; unsigned char IDX_ACTIVATE_READ[]={ 0x00,0x00,0x80,0x00, 0x00,0x00,0x00,0x00, @@ -138,7 +138,9 @@ unsigned char IPA_PDU_HEADER[]={ sizeof(struct qeth_ipa_cmd)%256, 0x00, sizeof(struct qeth_ipa_cmd)/256, - sizeof(struct qeth_ipa_cmd),0x05, 0x77,0x77,0x77,0x77, + sizeof(struct qeth_ipa_cmd)%256, + 0x05, + 0x77,0x77,0x77,0x77, 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00, 0x01,0x00, sizeof(struct qeth_ipa_cmd)/256, diff --git a/drivers/s390/net/qeth_mpc.h b/drivers/s390/net/qeth_mpc.h index 3d916b5c5d09..7edc5f1fc0d2 100644 --- a/drivers/s390/net/qeth_mpc.h +++ 
b/drivers/s390/net/qeth_mpc.h @@ -46,13 +46,16 @@ extern unsigned char IPA_PDU_HEADER[]; /* IP Assist related definitions */ /*****************************************************************************/ #define IPA_CMD_INITIATOR_HOST 0x00 -#define IPA_CMD_INITIATOR_HYDRA 0x01 +#define IPA_CMD_INITIATOR_OSA 0x01 +#define IPA_CMD_INITIATOR_HOST_REPLY 0x80 +#define IPA_CMD_INITIATOR_OSA_REPLY 0x81 #define IPA_CMD_PRIM_VERSION_NO 0x01 enum qeth_card_types { QETH_CARD_TYPE_UNKNOWN = 0, QETH_CARD_TYPE_OSAE = 10, QETH_CARD_TYPE_IQD = 1234, + QETH_CARD_TYPE_OSN = 11, }; #define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18 @@ -61,6 +64,7 @@ enum qeth_link_types { QETH_LINK_TYPE_FAST_ETH = 0x01, QETH_LINK_TYPE_HSTR = 0x02, QETH_LINK_TYPE_GBIT_ETH = 0x03, + QETH_LINK_TYPE_OSN = 0x04, QETH_LINK_TYPE_10GBIT_ETH = 0x10, QETH_LINK_TYPE_LANE_ETH100 = 0x81, QETH_LINK_TYPE_LANE_TR = 0x82, @@ -111,6 +115,9 @@ enum qeth_ipa_cmds { IPA_CMD_DELGMAC = 0x24, IPA_CMD_SETVLAN = 0x25, IPA_CMD_DELVLAN = 0x26, + IPA_CMD_SETCCID = 0x41, + IPA_CMD_DELCCID = 0x42, + IPA_CMD_MODCCID = 0x43, IPA_CMD_SETIP = 0xb1, IPA_CMD_DELIP = 0xb7, IPA_CMD_QIPASSIST = 0xb2, @@ -437,8 +444,9 @@ enum qeth_ipa_arp_return_codes { #define QETH_ARP_DATA_SIZE 3968 #define QETH_ARP_CMD_LEN (QETH_ARP_DATA_SIZE + 8) /* Helper functions */ -#define IS_IPA_REPLY(cmd) (cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST) - +#define IS_IPA_REPLY(cmd) ((cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST) || \ + (cmd->hdr.initiator == IPA_CMD_INITIATOR_OSA_REPLY)) + /*****************************************************************************/ /* END OF IP Assist related definitions */ /*****************************************************************************/ @@ -483,6 +491,7 @@ extern unsigned char ULP_ENABLE[]; /* Layer 2 defintions */ #define QETH_PROT_LAYER2 0x08 #define QETH_PROT_TCPIP 0x03 +#define QETH_PROT_OSN2 0x0a #define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer+0x50) #define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer+0x19) diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c index dda105b73063..f91a02db5743 100644 --- a/drivers/s390/net/qeth_sys.c +++ b/drivers/s390/net/qeth_sys.c @@ -1,6 +1,6 @@ /* * - * linux/drivers/s390/net/qeth_sys.c ($Revision: 1.54 $) + * linux/drivers/s390/net/qeth_sys.c ($Revision: 1.55 $) * * Linux on zSeries OSA Express and HiperSockets support * This file contains code related to sysfs. 
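The OSN entry points introduced earlier in this patch (qeth_osn_register(), qeth_osn_assist(), qeth_osn_deregister(), exported at the end of qeth_main.c) are meant to be driven by a separate exploiter module. The following rough sketch shows how such a caller would wire itself up, using only the prototypes added to qeth.h; the callback bodies and the osn_attach() helper are hypothetical, not part of the patch.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include "qeth.h"		/* prototypes added by this patch */

static struct net_device *osn_ndev;

/* invoked from qeth_send_control_data_cb() for IPA commands other
 * than STARTLAN when the card type is OSN */
static int osn_assist_cb(struct net_device *dev, void *ipa_cmd)
{
	/* hypothetical: hand the command buffer to the OSN exploiter */
	return 0;
}

/* invoked from qeth_process_inbound_buffer() instead of netif_rx();
 * the qeth header has been pushed back into skb->data for OSN frames */
static int osn_data_cb(struct sk_buff *skb)
{
	/* hypothetical: consume the frame */
	dev_kfree_skb_any(skb);
	return 0;
}

static int osn_attach(unsigned char *read_dev_no)
{
	int rc;

	rc = qeth_osn_register(read_dev_no, &osn_ndev,
			       osn_assist_cb, osn_data_cb);
	if (rc)
		return rc;

	/* later: qeth_osn_assist(osn_ndev, buf, len) sends IPA data on the
	 * write channel; qeth_osn_deregister(osn_ndev) detaches again */
	return 0;
}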
@@ -20,7 +20,7 @@ #include "qeth_mpc.h" #include "qeth_fs.h" -const char *VERSION_QETH_SYS_C = "$Revision: 1.54 $"; +const char *VERSION_QETH_SYS_C = "$Revision: 1.55 $"; /*****************************************************************************/ /* */ @@ -937,6 +937,19 @@ static struct attribute_group qeth_device_attr_group = { .attrs = (struct attribute **)qeth_device_attrs, }; +static struct device_attribute * qeth_osn_device_attrs[] = { + &dev_attr_state, + &dev_attr_chpid, + &dev_attr_if_name, + &dev_attr_card_type, + &dev_attr_buffer_count, + &dev_attr_recover, + NULL, +}; + +static struct attribute_group qeth_osn_device_attr_group = { + .attrs = (struct attribute **)qeth_osn_device_attrs, +}; #define QETH_DEVICE_ATTR(_id,_name,_mode,_show,_store) \ struct device_attribute dev_attr_##_id = { \ @@ -1667,7 +1680,12 @@ int qeth_create_device_attributes(struct device *dev) { int ret; + struct qeth_card *card = dev->driver_data; + if (card->info.type == QETH_CARD_TYPE_OSN) + return sysfs_create_group(&dev->kobj, + &qeth_osn_device_attr_group); + if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_attr_group))) return ret; if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_ipato_group))){ @@ -1693,6 +1711,12 @@ qeth_create_device_attributes(struct device *dev) void qeth_remove_device_attributes(struct device *dev) { + struct qeth_card *card = dev->driver_data; + + if (card->info.type == QETH_CARD_TYPE_OSN) + return sysfs_remove_group(&dev->kobj, + &qeth_osn_device_attr_group); + sysfs_remove_group(&dev->kobj, &qeth_device_attr_group); sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group); sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group); diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 0b5087f7cabc..cab098556b44 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -833,7 +833,7 @@ zfcp_unit_dequeue(struct zfcp_unit *unit) } static void * -zfcp_mempool_alloc(unsigned int __nocast gfp_mask, void *size) +zfcp_mempool_alloc(gfp_t gfp_mask, void *size) { return kmalloc((size_t) size, gfp_mask); } diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index a6ac61611f35..a748fbfb6692 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -60,6 +60,7 @@ Remove un-needed eh_abort handler. Add support for embedded firmware error strings. 2.26.02.003 - Correctly handle single sgl's with use_sg=1. + 2.26.02.004 - Add support for 9550SX controllers. 
*/ #include @@ -82,7 +83,7 @@ #include "3w-9xxx.h" /* Globals */ -#define TW_DRIVER_VERSION "2.26.02.003" +#define TW_DRIVER_VERSION "2.26.02.004" static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT]; static unsigned int twa_device_extension_count; static int twa_major = -1; @@ -892,11 +893,6 @@ static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value) writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev)); } - if (status_reg_value & TW_STATUS_SBUF_WRITE_ERROR) { - TW_PRINTK(tw_dev->host, TW_DRIVER, 0xf, "SBUF Write Error: clearing"); - writel(TW_CONTROL_CLEAR_SBUF_WRITE_ERROR, TW_CONTROL_REG_ADDR(tw_dev)); - } - if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) { if (tw_dev->reset_print == 0) { TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing"); @@ -930,6 +926,36 @@ out: return retval; } /* End twa_empty_response_queue() */ +/* This function will clear the pchip/response queue on 9550SX */ +static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev) +{ + u32 status_reg_value, response_que_value; + int count = 0, retval = 1; + + if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9550SX) { + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + + while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) { + response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev)); + if ((response_que_value & TW_9550SX_DRAIN_COMPLETED) == TW_9550SX_DRAIN_COMPLETED) { + /* P-chip settle time */ + msleep(500); + retval = 0; + goto out; + } + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + count++; + } + if (count == TW_MAX_RESPONSE_DRAIN) + goto out; + + retval = 0; + } else + retval = 0; +out: + return retval; +} /* End twa_empty_response_queue_large() */ + /* This function passes sense keys from firmware to scsi layer */ static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host) { @@ -1613,8 +1639,16 @@ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset) int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset; while (tries < TW_MAX_RESET_TRIES) { - if (do_soft_reset) + if (do_soft_reset) { TW_SOFT_RESET(tw_dev); + /* Clear pchip/response queue on 9550SX */ + if (twa_empty_response_queue_large(tw_dev)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence"); + do_soft_reset = 1; + tries++; + continue; + } + } /* Make sure controller is in a good state */ if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? 
TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) { @@ -2034,7 +2068,10 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id goto out_free_device_extension; } - mem_addr = pci_resource_start(pdev, 1); + if (pdev->device == PCI_DEVICE_ID_3WARE_9000) + mem_addr = pci_resource_start(pdev, 1); + else + mem_addr = pci_resource_start(pdev, 2); /* Save base address */ tw_dev->base_addr = ioremap(mem_addr, PAGE_SIZE); @@ -2148,6 +2185,8 @@ static void twa_remove(struct pci_dev *pdev) static struct pci_device_id twa_pci_tbl[] __devinitdata = { { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { } }; MODULE_DEVICE_TABLE(pci, twa_pci_tbl); diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h index 8c8ecbed3b58..46f22cdc8298 100644 --- a/drivers/scsi/3w-9xxx.h +++ b/drivers/scsi/3w-9xxx.h @@ -267,7 +267,6 @@ static twa_message_type twa_error_table[] = { #define TW_CONTROL_CLEAR_PARITY_ERROR 0x00800000 #define TW_CONTROL_CLEAR_QUEUE_ERROR 0x00400000 #define TW_CONTROL_CLEAR_PCI_ABORT 0x00100000 -#define TW_CONTROL_CLEAR_SBUF_WRITE_ERROR 0x00000008 /* Status register bit definitions */ #define TW_STATUS_MAJOR_VERSION_MASK 0xF0000000 @@ -285,9 +284,8 @@ static twa_message_type twa_error_table[] = { #define TW_STATUS_MICROCONTROLLER_READY 0x00002000 #define TW_STATUS_COMMAND_QUEUE_EMPTY 0x00001000 #define TW_STATUS_EXPECTED_BITS 0x00002000 -#define TW_STATUS_UNEXPECTED_BITS 0x00F00008 -#define TW_STATUS_SBUF_WRITE_ERROR 0x00000008 -#define TW_STATUS_VALID_INTERRUPT 0x00DF0008 +#define TW_STATUS_UNEXPECTED_BITS 0x00F00000 +#define TW_STATUS_VALID_INTERRUPT 0x00DF0000 /* RESPONSE QUEUE BIT DEFINITIONS */ #define TW_RESPONSE_ID_MASK 0x00000FF0 @@ -324,9 +322,9 @@ static twa_message_type twa_error_table[] = { /* Compatibility defines */ #define TW_9000_ARCH_ID 0x5 -#define TW_CURRENT_DRIVER_SRL 28 -#define TW_CURRENT_DRIVER_BUILD 9 -#define TW_CURRENT_DRIVER_BRANCH 4 +#define TW_CURRENT_DRIVER_SRL 30 +#define TW_CURRENT_DRIVER_BUILD 80 +#define TW_CURRENT_DRIVER_BRANCH 0 /* Phase defines */ #define TW_PHASE_INITIAL 0 @@ -334,6 +332,7 @@ static twa_message_type twa_error_table[] = { #define TW_PHASE_SGLIST 2 /* Misc defines */ +#define TW_9550SX_DRAIN_COMPLETED 0xFFFF #define TW_SECTOR_SIZE 512 #define TW_ALIGNMENT_9000 4 /* 4 bytes */ #define TW_ALIGNMENT_9000_SGL 0x3 @@ -417,6 +416,9 @@ static twa_message_type twa_error_table[] = { #ifndef PCI_DEVICE_ID_3WARE_9000 #define PCI_DEVICE_ID_3WARE_9000 0x1002 #endif +#ifndef PCI_DEVICE_ID_3WARE_9550SX +#define PCI_DEVICE_ID_3WARE_9550SX 0x1003 +#endif /* Bitmask macros to eliminate bitfields */ @@ -443,6 +445,7 @@ static twa_message_type twa_error_table[] = { #define TW_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0x4) #define TW_COMMAND_QUEUE_REG_ADDR(x) (sizeof(dma_addr_t) > 4 ? 
((unsigned char __iomem *)x->base_addr + 0x20) : ((unsigned char __iomem *)x->base_addr + 0x8)) #define TW_RESPONSE_QUEUE_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0xC) +#define TW_RESPONSE_QUEUE_REG_ADDR_LARGE(x) ((unsigned char __iomem *)x->base_addr + 0x30) #define TW_CLEAR_ALL_INTERRUPTS(x) (writel(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x))) #define TW_CLEAR_ATTENTION_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x))) #define TW_CLEAR_HOST_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x))) diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 20019b82b4a8..9c9f162bd6ed 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -489,11 +489,11 @@ config SCSI_SATA_NV If unsure, say N. -config SCSI_SATA_PROMISE - tristate "Promise SATA TX2/TX4 support" +config SCSI_PDC_ADMA + tristate "Pacific Digital ADMA support" depends on SCSI_SATA && PCI help - This option enables support for Promise Serial ATA TX2/TX4. + This option enables support for Pacific Digital ADMA controllers If unsure, say N. @@ -505,6 +505,14 @@ config SCSI_SATA_QSTOR If unsure, say N. +config SCSI_SATA_PROMISE + tristate "Promise SATA TX2/TX4 support" + depends on SCSI_SATA && PCI + help + This option enables support for Promise Serial ATA TX2/TX4. + + If unsure, say N. + config SCSI_SATA_SX4 tristate "Promise SATA SX4 support" depends on SCSI_SATA && PCI && EXPERIMENTAL @@ -521,6 +529,14 @@ config SCSI_SATA_SIL If unsure, say N. +config SCSI_SATA_SIL24 + tristate "Silicon Image 3124/3132 SATA support" + depends on SCSI_SATA && PCI && EXPERIMENTAL + help + This option enables support for Silicon Image 3124/3132 Serial ATA. + + If unsure, say N. + config SCSI_SATA_SIS tristate "SiS 964/180 SATA support" depends on SCSI_SATA && PCI && EXPERIMENTAL @@ -553,6 +569,11 @@ config SCSI_SATA_VITESSE If unsure, say N. 
+config SCSI_SATA_INTEL_COMBINED + bool + depends on IDE=y && !BLK_DEV_IDE_SATA && (SCSI_SATA_AHCI || SCSI_ATA_PIIX) + default y + config SCSI_BUSLOGIC tristate "BusLogic SCSI support" depends on (PCI || ISA || MCA) && SCSI && ISA_DMA_API diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 1e4edbdf2730..2d4439826c08 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -99,6 +99,7 @@ obj-$(CONFIG_SCSI_DC395x) += dc395x.o obj-$(CONFIG_SCSI_DC390T) += tmscsim.o obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ +obj-$(CONFIG_MEGARAID_SAS) += megaraid/ obj-$(CONFIG_SCSI_ACARD) += atp870u.o obj-$(CONFIG_SCSI_SUNESP) += esp.o obj-$(CONFIG_SCSI_GDTH) += gdth.o @@ -129,6 +130,7 @@ obj-$(CONFIG_SCSI_ATA_PIIX) += libata.o ata_piix.o obj-$(CONFIG_SCSI_SATA_PROMISE) += libata.o sata_promise.o obj-$(CONFIG_SCSI_SATA_QSTOR) += libata.o sata_qstor.o obj-$(CONFIG_SCSI_SATA_SIL) += libata.o sata_sil.o +obj-$(CONFIG_SCSI_SATA_SIL24) += libata.o sata_sil24.o obj-$(CONFIG_SCSI_SATA_VIA) += libata.o sata_via.o obj-$(CONFIG_SCSI_SATA_VITESSE) += libata.o sata_vsc.o obj-$(CONFIG_SCSI_SATA_SIS) += libata.o sata_sis.o @@ -136,6 +138,7 @@ obj-$(CONFIG_SCSI_SATA_SX4) += libata.o sata_sx4.o obj-$(CONFIG_SCSI_SATA_NV) += libata.o sata_nv.o obj-$(CONFIG_SCSI_SATA_ULI) += libata.o sata_uli.o obj-$(CONFIG_SCSI_SATA_MV) += libata.o sata_mv.o +obj-$(CONFIG_SCSI_PDC_ADMA) += libata.o pdc_adma.o obj-$(CONFIG_ARM) += arm/ diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index d40ba0bd68a3..23392ae7df8b 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -91,7 +91,7 @@ #ifndef NDEBUG #define NDEBUG 0 #endif -#ifndef NDEBUG +#ifndef NDEBUG_ABORT #define NDEBUG_ABORT 0 #endif diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index a8e3dfcd0dc7..93416f760e5a 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -313,18 +313,37 @@ int aac_get_containers(struct aac_dev *dev) } dresp = (struct aac_mount *)fib_data(fibptr); + if ((le32_to_cpu(dresp->status) == ST_OK) && + (le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) { + dinfo->command = cpu_to_le32(VM_NameServe64); + dinfo->count = cpu_to_le32(index); + dinfo->type = cpu_to_le32(FT_FILESYS); + + if (fib_send(ContainerCommand, + fibptr, + sizeof(struct aac_query_mount), + FsaNormal, + 1, 1, + NULL, NULL) < 0) + continue; + } else + dresp->mnt[0].capacityhigh = 0; + dprintk ((KERN_DEBUG - "VM_NameServe cid=%d status=%d vol=%d state=%d cap=%u\n", + "VM_NameServe cid=%d status=%d vol=%d state=%d cap=%llu\n", (int)index, (int)le32_to_cpu(dresp->status), (int)le32_to_cpu(dresp->mnt[0].vol), (int)le32_to_cpu(dresp->mnt[0].state), - (unsigned)le32_to_cpu(dresp->mnt[0].capacity))); + ((u64)le32_to_cpu(dresp->mnt[0].capacity)) + + (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32))); if ((le32_to_cpu(dresp->status) == ST_OK) && (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) && (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) { fsa_dev_ptr[index].valid = 1; fsa_dev_ptr[index].type = le32_to_cpu(dresp->mnt[0].vol); - fsa_dev_ptr[index].size = le32_to_cpu(dresp->mnt[0].capacity); + fsa_dev_ptr[index].size + = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) + + (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32); if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) fsa_dev_ptr[index].ro = 1; } @@ -460,7 +479,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd, int cid) * is updated in the struct fsa_dev_info structure rather than returned. 
*/ -static int probe_container(struct aac_dev *dev, int cid) +int probe_container(struct aac_dev *dev, int cid) { struct fsa_dev_info *fsa_dev_ptr; int status; @@ -496,12 +515,30 @@ static int probe_container(struct aac_dev *dev, int cid) dresp = (struct aac_mount *) fib_data(fibptr); + if ((le32_to_cpu(dresp->status) == ST_OK) && + (le32_to_cpu(dresp->mnt[0].vol) == CT_NONE)) { + dinfo->command = cpu_to_le32(VM_NameServe64); + dinfo->count = cpu_to_le32(cid); + dinfo->type = cpu_to_le32(FT_FILESYS); + + if (fib_send(ContainerCommand, + fibptr, + sizeof(struct aac_query_mount), + FsaNormal, + 1, 1, + NULL, NULL) < 0) + goto error; + } else + dresp->mnt[0].capacityhigh = 0; + if ((le32_to_cpu(dresp->status) == ST_OK) && (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) && (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) { fsa_dev_ptr[cid].valid = 1; fsa_dev_ptr[cid].type = le32_to_cpu(dresp->mnt[0].vol); - fsa_dev_ptr[cid].size = le32_to_cpu(dresp->mnt[0].capacity); + fsa_dev_ptr[cid].size + = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) + + (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32); if (le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) fsa_dev_ptr[cid].ro = 1; } @@ -655,7 +692,7 @@ int aac_get_adapter_info(struct aac_dev* dev) fibptr, sizeof(*info), FsaNormal, - 1, 1, + -1, 1, /* First `interrupt' command uses special wait */ NULL, NULL); @@ -806,8 +843,8 @@ int aac_get_adapter_info(struct aac_dev* dev) if (!(dev->raw_io_interface)) { dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size - sizeof(struct aac_fibhdr) - - sizeof(struct aac_write) + sizeof(struct sgmap)) / - sizeof(struct sgmap); + sizeof(struct aac_write) + sizeof(struct sgentry)) / + sizeof(struct sgentry); if (dev->dac_support) { /* * 38 scatter gather elements @@ -816,8 +853,8 @@ int aac_get_adapter_info(struct aac_dev* dev) (dev->max_fib_size - sizeof(struct aac_fibhdr) - sizeof(struct aac_write64) + - sizeof(struct sgmap64)) / - sizeof(struct sgmap64); + sizeof(struct sgentry64)) / + sizeof(struct sgentry64); } dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT; if(!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) { @@ -854,7 +891,40 @@ static void io_callback(void *context, struct fib * fibptr) dev = (struct aac_dev *)scsicmd->device->host->hostdata; cid = ID_LUN_TO_CONTAINER(scsicmd->device->id, scsicmd->device->lun); - dprintk((KERN_DEBUG "io_callback[cpu %d]: lba = %u, t = %ld.\n", smp_processor_id(), ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3], jiffies)); + if (nblank(dprintk(x))) { + u64 lba; + switch (scsicmd->cmnd[0]) { + case WRITE_6: + case READ_6: + lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | + (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; + break; + case WRITE_16: + case READ_16: + lba = ((u64)scsicmd->cmnd[2] << 56) | + ((u64)scsicmd->cmnd[3] << 48) | + ((u64)scsicmd->cmnd[4] << 40) | + ((u64)scsicmd->cmnd[5] << 32) | + ((u64)scsicmd->cmnd[6] << 24) | + (scsicmd->cmnd[7] << 16) | + (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; + break; + case WRITE_12: + case READ_12: + lba = ((u64)scsicmd->cmnd[2] << 24) | + (scsicmd->cmnd[3] << 16) | + (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; + break; + default: + lba = ((u64)scsicmd->cmnd[2] << 24) | + (scsicmd->cmnd[3] << 16) | + (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; + break; + } + printk(KERN_DEBUG + "io_callback[cpu %d]: lba = %llu, t = %ld.\n", + smp_processor_id(), (unsigned long long)lba, jiffies); + } if (fibptr == NULL) BUG(); @@ -895,7 +965,7 @@ static void io_callback(void *context, struct fib * fibptr) static 
int aac_read(struct scsi_cmnd * scsicmd, int cid) { - u32 lba; + u64 lba; u32 count; int status; @@ -907,23 +977,69 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid) /* * Get block address and transfer length */ - if (scsicmd->cmnd[0] == READ_6) /* 6 byte command */ - { + switch (scsicmd->cmnd[0]) { + case READ_6: dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", cid)); - lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; + lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | + (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; count = scsicmd->cmnd[4]; if (count == 0) count = 256; - } else { + break; + case READ_16: + dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", cid)); + + lba = ((u64)scsicmd->cmnd[2] << 56) | + ((u64)scsicmd->cmnd[3] << 48) | + ((u64)scsicmd->cmnd[4] << 40) | + ((u64)scsicmd->cmnd[5] << 32) | + ((u64)scsicmd->cmnd[6] << 24) | + (scsicmd->cmnd[7] << 16) | + (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; + count = (scsicmd->cmnd[10] << 24) | + (scsicmd->cmnd[11] << 16) | + (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13]; + break; + case READ_12: + dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", cid)); + + lba = ((u64)scsicmd->cmnd[2] << 24) | + (scsicmd->cmnd[3] << 16) | + (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; + count = (scsicmd->cmnd[6] << 24) | + (scsicmd->cmnd[7] << 16) | + (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; + break; + default: dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", cid)); - lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; + lba = ((u64)scsicmd->cmnd[2] << 24) | + (scsicmd->cmnd[3] << 16) | + (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; + break; } - dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %u, t = %ld.\n", + dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n", smp_processor_id(), (unsigned long long)lba, jiffies)); + if ((!(dev->raw_io_interface) || !(dev->raw_io_64)) && + (lba & 0xffffffff00000000LL)) { + dprintk((KERN_DEBUG "aac_read: Illegal lba\n")); + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | + SAM_STAT_CHECK_CONDITION; + set_sense((u8 *) &dev->fsa_dev[cid].sense_data, + HARDWARE_ERROR, + SENCODE_INTERNAL_TARGET_FAILURE, + ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0, + 0, 0); + memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, + (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer)) + ? 
sizeof(scsicmd->sense_buffer) + : sizeof(dev->fsa_dev[cid].sense_data)); + scsicmd->scsi_done(scsicmd); + return 0; + } /* * Alocate and initialize a Fib */ @@ -936,8 +1052,8 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid) if (dev->raw_io_interface) { struct aac_raw_io *readcmd; readcmd = (struct aac_raw_io *) fib_data(cmd_fibcontext); - readcmd->block[0] = cpu_to_le32(lba); - readcmd->block[1] = 0; + readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff)); + readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); readcmd->count = cpu_to_le32(count<<9); readcmd->cid = cpu_to_le16(cid); readcmd->flags = cpu_to_le16(1); @@ -964,7 +1080,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid) readcmd->command = cpu_to_le32(VM_CtHostRead64); readcmd->cid = cpu_to_le16(cid); readcmd->sector_count = cpu_to_le16(count); - readcmd->block = cpu_to_le32(lba); + readcmd->block = cpu_to_le32((u32)(lba&0xffffffff)); readcmd->pad = 0; readcmd->flags = 0; @@ -989,7 +1105,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid) readcmd = (struct aac_read *) fib_data(cmd_fibcontext); readcmd->command = cpu_to_le32(VM_CtBlockRead); readcmd->cid = cpu_to_le32(cid); - readcmd->block = cpu_to_le32(lba); + readcmd->block = cpu_to_le32((u32)(lba&0xffffffff)); readcmd->count = cpu_to_le32(count * 512); aac_build_sg(scsicmd, &readcmd->sg); @@ -1031,7 +1147,7 @@ static int aac_read(struct scsi_cmnd * scsicmd, int cid) static int aac_write(struct scsi_cmnd * scsicmd, int cid) { - u32 lba; + u64 lba; u32 count; int status; u16 fibsize; @@ -1048,13 +1164,48 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid) count = scsicmd->cmnd[4]; if (count == 0) count = 256; + } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */ + dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", cid)); + + lba = ((u64)scsicmd->cmnd[2] << 56) | + ((u64)scsicmd->cmnd[3] << 48) | + ((u64)scsicmd->cmnd[4] << 40) | + ((u64)scsicmd->cmnd[5] << 32) | + ((u64)scsicmd->cmnd[6] << 24) | + (scsicmd->cmnd[7] << 16) | + (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; + count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) | + (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13]; + } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */ + dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", cid)); + + lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) + | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; + count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16) + | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; } else { dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", cid)); - lba = (scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; + lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; } - dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %u, t = %ld.\n", + dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n", smp_processor_id(), (unsigned long long)lba, jiffies)); + if ((!(dev->raw_io_interface) || !(dev->raw_io_64)) + && (lba & 0xffffffff00000000LL)) { + dprintk((KERN_DEBUG "aac_write: Illegal lba\n")); + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; + set_sense((u8 *) &dev->fsa_dev[cid].sense_data, + HARDWARE_ERROR, + SENCODE_INTERNAL_TARGET_FAILURE, + ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0, + 0, 0); + 
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, + (sizeof(dev->fsa_dev[cid].sense_data) > sizeof(scsicmd->sense_buffer)) + ? sizeof(scsicmd->sense_buffer) + : sizeof(dev->fsa_dev[cid].sense_data)); + scsicmd->scsi_done(scsicmd); + return 0; + } /* * Allocate and initialize a Fib then setup a BlockWrite command */ @@ -1068,8 +1219,8 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid) if (dev->raw_io_interface) { struct aac_raw_io *writecmd; writecmd = (struct aac_raw_io *) fib_data(cmd_fibcontext); - writecmd->block[0] = cpu_to_le32(lba); - writecmd->block[1] = 0; + writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff)); + writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); writecmd->count = cpu_to_le32(count<<9); writecmd->cid = cpu_to_le16(cid); writecmd->flags = 0; @@ -1096,7 +1247,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid) writecmd->command = cpu_to_le32(VM_CtHostWrite64); writecmd->cid = cpu_to_le16(cid); writecmd->sector_count = cpu_to_le16(count); - writecmd->block = cpu_to_le32(lba); + writecmd->block = cpu_to_le32((u32)(lba&0xffffffff)); writecmd->pad = 0; writecmd->flags = 0; @@ -1121,7 +1272,7 @@ static int aac_write(struct scsi_cmnd * scsicmd, int cid) writecmd = (struct aac_write *) fib_data(cmd_fibcontext); writecmd->command = cpu_to_le32(VM_CtBlockWrite); writecmd->cid = cpu_to_le32(cid); - writecmd->block = cpu_to_le32(lba); + writecmd->block = cpu_to_le32((u32)(lba&0xffffffff)); writecmd->count = cpu_to_le32(count * 512); writecmd->sg.count = cpu_to_le32(1); /* ->stable is not used - it did mean which type of write */ @@ -1310,11 +1461,18 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) */ if ((fsa_dev_ptr[cid].valid & 1) == 0) { switch (scsicmd->cmnd[0]) { + case SERVICE_ACTION_IN: + if (!(dev->raw_io_interface) || + !(dev->raw_io_64) || + ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) + break; case INQUIRY: case READ_CAPACITY: case TEST_UNIT_READY: spin_unlock_irq(host->host_lock); probe_container(dev, cid); + if ((fsa_dev_ptr[cid].valid & 1) == 0) + fsa_dev_ptr[cid].valid = 0; spin_lock_irq(host->host_lock); if (fsa_dev_ptr[cid].valid == 0) { scsicmd->result = DID_NO_CONNECT << 16; @@ -1375,7 +1533,6 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) memset(&inq_data, 0, sizeof (struct inquiry_data)); inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */ - inq_data.inqd_dtq = 0x80; /* set RMB bit to one indicating that the medium is removable */ inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */ inq_data.inqd_len = 31; /*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */ @@ -1397,13 +1554,55 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data)); return aac_get_container_name(scsicmd, cid); } + case SERVICE_ACTION_IN: + if (!(dev->raw_io_interface) || + !(dev->raw_io_64) || + ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) + break; + { + u64 capacity; + char cp[12]; + unsigned int offset = 0; + + dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n")); + capacity = fsa_dev_ptr[cid].size - 1; + if (scsicmd->cmnd[13] > 12) { + offset = scsicmd->cmnd[13] - 12; + if (offset > sizeof(cp)) + break; + memset(cp, 0, offset); + aac_internal_transfer(scsicmd, cp, 0, offset); + } + cp[0] = (capacity >> 56) & 0xff; + cp[1] = (capacity >> 48) & 0xff; + cp[2] = (capacity >> 40) & 0xff; + cp[3] = (capacity >> 32) & 0xff; + cp[4] = (capacity >> 
24) & 0xff; + cp[5] = (capacity >> 16) & 0xff; + cp[6] = (capacity >> 8) & 0xff; + cp[7] = (capacity >> 0) & 0xff; + cp[8] = 0; + cp[9] = 0; + cp[10] = 2; + cp[11] = 0; + aac_internal_transfer(scsicmd, cp, offset, sizeof(cp)); + + /* Do not cache partition table for arrays */ + scsicmd->device->removable = 1; + + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; + scsicmd->scsi_done(scsicmd); + + return 0; + } + case READ_CAPACITY: { u32 capacity; char cp[8]; dprintk((KERN_DEBUG "READ CAPACITY command.\n")); - if (fsa_dev_ptr[cid].size <= 0x100000000LL) + if (fsa_dev_ptr[cid].size <= 0x100000000ULL) capacity = fsa_dev_ptr[cid].size - 1; else capacity = (u32)-1; @@ -1417,6 +1616,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) cp[6] = 2; cp[7] = 0; aac_internal_transfer(scsicmd, cp, 0, sizeof(cp)); + /* Do not cache partition table for arrays */ + scsicmd->device->removable = 1; scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; scsicmd->scsi_done(scsicmd); @@ -1497,6 +1698,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) { case READ_6: case READ_10: + case READ_12: + case READ_16: /* * Hack to keep track of ordinal number of the device that * corresponds to a container. Needed to convert @@ -1504,17 +1707,19 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) */ spin_unlock_irq(host->host_lock); - if (scsicmd->request->rq_disk) - memcpy(fsa_dev_ptr[cid].devname, - scsicmd->request->rq_disk->disk_name, - 8); - + if (scsicmd->request->rq_disk) + strlcpy(fsa_dev_ptr[cid].devname, + scsicmd->request->rq_disk->disk_name, + min(sizeof(fsa_dev_ptr[cid].devname), + sizeof(scsicmd->request->rq_disk->disk_name) + 1)); ret = aac_read(scsicmd, cid); spin_lock_irq(host->host_lock); return ret; case WRITE_6: case WRITE_10: + case WRITE_12: + case WRITE_16: spin_unlock_irq(host->host_lock); ret = aac_write(scsicmd, cid); spin_lock_irq(host->host_lock); @@ -1745,6 +1950,8 @@ static void aac_srb_callback(void *context, struct fib * fibptr) case WRITE_10: case READ_12: case WRITE_12: + case READ_16: + case WRITE_16: if(le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow ) { printk(KERN_WARNING"aacraid: SCSI CMD underflow\n"); } else { @@ -1850,8 +2057,8 @@ static void aac_srb_callback(void *context, struct fib * fibptr) sizeof(scsicmd->sense_buffer) : le32_to_cpu(srbreply->sense_data_size); #ifdef AAC_DETAILED_STATUS_INFO - dprintk((KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n", - le32_to_cpu(srbreply->status), len)); + printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n", + le32_to_cpu(srbreply->status), len); #endif memcpy(scsicmd->sense_buffer, srbreply->sense_data, len); diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index e40528185d48..d54b1cc88d0d 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -1,6 +1,10 @@ #if (!defined(dprintk)) # define dprintk(x) #endif +/* eg: if (nblank(dprintk(x))) */ +#define _nblank(x) #x +#define nblank(x) _nblank(x)[0] + /*------------------------------------------------------------------------------ * D E F I N E S @@ -15,7 +19,7 @@ #define AAC_MAX_LUN (8) #define AAC_MAX_HOSTPHYSMEMPAGES (0xfffff) -#define AAC_MAX_32BIT_SGBCOUNT ((unsigned short)512) +#define AAC_MAX_32BIT_SGBCOUNT ((unsigned short)256) /* * These macros convert from physical channels to virtual channels @@ -302,7 +306,6 @@ enum aac_queue_types { */ #define FsaNormal 1 -#define FsaHigh 2 /* * Define the FIB. 
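The aac_read()/aac_write() hunks above widen the LBA to a u64 and split it across the raw I/O request's block[0]/block[1] pair, while probe_container() now folds the new capacityhigh word into a 64-bit container size. A minimal standalone sketch of that packing and unpacking, using uint64_t and plain shifts; the kernel's cpu_to_le32/le32_to_cpu conversions are deliberately left out, so this is illustrative only:

#include <stdint.h>

/* Split a 64-bit LBA into the two 32-bit words carried by the raw I/O
 * request, mirroring the readcmd->block[0]/block[1] assignments above
 * (byte-order conversion omitted in this sketch). */
static void split_lba64(uint64_t lba, uint32_t *lo, uint32_t *hi)
{
        *lo = (uint32_t)(lba & 0xffffffffULL);
        *hi = (uint32_t)(lba >> 32);
}

/* Combine the mount entry's capacity and capacityhigh words into a
 * 64-bit sector count, as probe_container() now does. */
static uint64_t container_sectors(uint32_t capacity, uint32_t capacityhigh)
{
        return (uint64_t)capacity | ((uint64_t)capacityhigh << 32);
}

The READ CAPACITY(16) handler above performs the reverse operation, writing fsa_dev[cid].size - 1 big-endian into the first eight bytes of its response.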
The FIB is the where all the requested data and @@ -546,8 +549,6 @@ struct aac_queue { /* This is only valid for adapter to host command queues. */ spinlock_t *lock; /* Spinlock for this queue must take this lock before accessing the lock */ spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */ - unsigned long SavedIrql; /* Previous IRQL when the spin lock is taken */ - u32 padding; /* Padding - FIXME - can remove I believe */ struct list_head cmdq; /* A queue of FIBs which need to be prcessed by the FS thread. This is */ /* only valid for command queues which receive entries from the adapter. */ struct list_head pendingq; /* A queue of outstanding fib's to the adapter. */ @@ -776,7 +777,9 @@ struct fsa_dev_info { u64 last; u64 size; u32 type; + u32 config_waiting_on; u16 queue_depth; + u8 config_needed; u8 valid; u8 ro; u8 locked; @@ -1012,6 +1015,7 @@ struct aac_dev /* macro side-effects BEWARE */ # define raw_io_interface \ init->InitStructRevision==cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4) + u8 raw_io_64; u8 printf_enabled; }; @@ -1362,8 +1366,10 @@ struct aac_srb_reply #define VM_CtBlockVerify64 18 #define VM_CtHostRead64 19 #define VM_CtHostWrite64 20 +#define VM_DrvErrTblLog 21 +#define VM_NameServe64 22 -#define MAX_VMCOMMAND_NUM 21 /* used for sizing stats array - leave last */ +#define MAX_VMCOMMAND_NUM 23 /* used for sizing stats array - leave last */ /* * Descriptive information (eg, vital stats) @@ -1472,6 +1478,7 @@ struct aac_mntent { manager (eg, filesystem) */ __le32 altoid; /* != oid <==> snapshot or broken mirror exists */ + __le32 capacityhigh; }; #define FSCS_NOTCLEAN 0x0001 /* fsck is neccessary before mounting */ @@ -1707,6 +1714,7 @@ extern struct aac_common aac_config; #define AifCmdJobProgress 2 /* Progress report */ #define AifJobCtrZero 101 /* Array Zero progress */ #define AifJobStsSuccess 1 /* Job completes */ +#define AifJobStsRunning 102 /* Job running */ #define AifCmdAPIReport 3 /* Report from other user of API */ #define AifCmdDriverNotify 4 /* Notify host driver of event */ #define AifDenMorphComplete 200 /* A morph operation completed */ @@ -1777,6 +1785,7 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size); struct aac_driver_ident* aac_get_driver_ident(int devtype); int aac_get_adapter_info(struct aac_dev* dev); int aac_send_shutdown(struct aac_dev *dev); +int probe_container(struct aac_dev *dev, int cid); extern int numacb; extern int acbsize; extern char aac_driver_version[]; diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 75abd0453289..59a341b2aedc 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -195,7 +195,7 @@ int aac_send_shutdown(struct aac_dev * dev) fibctx, sizeof(struct aac_close), FsaNormal, - 1, 1, + -2 /* Timeout silently */, 1, NULL, NULL); if (status == 0) @@ -313,8 +313,15 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) dev->max_fib_size = sizeof(struct hw_fib); dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size - sizeof(struct aac_fibhdr) - - sizeof(struct aac_write) + sizeof(struct sgmap)) - / sizeof(struct sgmap); + - sizeof(struct aac_write) + sizeof(struct sgentry)) + / sizeof(struct sgentry); + dev->raw_io_64 = 0; + if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, + 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) && + (status[0] == 0x00000001)) { + if (status[1] & AAC_OPT_NEW_COMM_64) + dev->raw_io_64 = 1; + } if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS, 0, 0, 0, 
0, 0, 0, status+0, status+1, status+2, status+3, status+4)) @@ -342,8 +349,8 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) dev->max_fib_size = 512; dev->sg_tablesize = host->sg_tablesize = (512 - sizeof(struct aac_fibhdr) - - sizeof(struct aac_write) + sizeof(struct sgmap)) - / sizeof(struct sgmap); + - sizeof(struct aac_write) + sizeof(struct sgentry)) + / sizeof(struct sgentry); host->can_queue = AAC_NUM_IO_FIB; } else if (acbsize == 2048) { host->max_sectors = 512; diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index a1d303f03480..e4d543a474ae 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -39,7 +39,9 @@ #include #include #include +#include #include +#include #include "aacraid.h" @@ -269,40 +271,22 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr /* Interrupt Moderation, only interrupt for first two entries */ if (idx != le32_to_cpu(*(q->headers.consumer))) { if (--idx == 0) { - if (qid == AdapHighCmdQueue) - idx = ADAP_HIGH_CMD_ENTRIES; - else if (qid == AdapNormCmdQueue) + if (qid == AdapNormCmdQueue) idx = ADAP_NORM_CMD_ENTRIES; - else if (qid == AdapHighRespQueue) - idx = ADAP_HIGH_RESP_ENTRIES; - else if (qid == AdapNormRespQueue) + else idx = ADAP_NORM_RESP_ENTRIES; } if (idx != le32_to_cpu(*(q->headers.consumer))) *nonotify = 1; } - if (qid == AdapHighCmdQueue) { - if (*index >= ADAP_HIGH_CMD_ENTRIES) - *index = 0; - } else if (qid == AdapNormCmdQueue) { + if (qid == AdapNormCmdQueue) { if (*index >= ADAP_NORM_CMD_ENTRIES) *index = 0; /* Wrap to front of the Producer Queue. */ - } - else if (qid == AdapHighRespQueue) - { - if (*index >= ADAP_HIGH_RESP_ENTRIES) - *index = 0; - } - else if (qid == AdapNormRespQueue) - { + } else { if (*index >= ADAP_NORM_RESP_ENTRIES) *index = 0; /* Wrap to front of the Producer Queue. */ } - else { - printk("aacraid: invalid qid\n"); - BUG(); - } if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */ printk(KERN_WARNING "Queue %d full, %u outstanding.\n", @@ -334,12 +318,8 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f { struct aac_entry * entry = NULL; int map = 0; - struct aac_queue * q = &dev->queues->queue[qid]; - - spin_lock_irqsave(q->lock, q->SavedIrql); - if (qid == AdapHighCmdQueue || qid == AdapNormCmdQueue) - { + if (qid == AdapNormCmdQueue) { /* if no entries wait for some if caller wants to */ while (!aac_get_entry(dev, qid, &entry, index, nonotify)) { @@ -350,9 +330,7 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f */ entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size)); map = 1; - } - else if (qid == AdapHighRespQueue || qid == AdapNormRespQueue) - { + } else { while(!aac_get_entry(dev, qid, &entry, index, nonotify)) { /* if no entries wait for some if caller wants to */ @@ -375,42 +353,6 @@ static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_f return 0; } - -/** - * aac_insert_entry - insert a queue entry - * @dev: Adapter - * @index: Index of entry to insert - * @qid: Queue number - * @nonotify: Suppress adapter notification - * - * Gets the next free QE off the requested priorty adapter command - * queue and associates the Fib with the QE. The QE represented by - * index is ready to insert on the queue when this routine returns - * success. 
- */ - -static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned long nonotify) -{ - struct aac_queue * q = &dev->queues->queue[qid]; - - if(q == NULL) - BUG(); - *(q->headers.producer) = cpu_to_le32(index + 1); - spin_unlock_irqrestore(q->lock, q->SavedIrql); - - if (qid == AdapHighCmdQueue || - qid == AdapNormCmdQueue || - qid == AdapHighRespQueue || - qid == AdapNormRespQueue) - { - if (!nonotify) - aac_adapter_notify(dev, qid); - } - else - printk("Suprise insert!\n"); - return 0; -} - /* * Define the highest level of host to adapter communication routines. * These routines will support host to adapter FS commuication. These @@ -439,12 +381,13 @@ static int aac_insert_entry(struct aac_dev * dev, u32 index, u32 qid, unsigned l int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority, int wait, int reply, fib_callback callback, void * callback_data) { u32 index; - u32 qid; struct aac_dev * dev = fibptr->dev; unsigned long nointr = 0; struct hw_fib * hw_fib = fibptr->hw_fib; struct aac_queue * q; unsigned long flags = 0; + unsigned long qflags; + if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned))) return -EBUSY; /* @@ -497,26 +440,8 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority * Get a queue entry connect the FIB to it and send an notify * the adapter a command is ready. */ - if (priority == FsaHigh) { - hw_fib->header.XferState |= cpu_to_le32(HighPriority); - qid = AdapHighCmdQueue; - } else { - hw_fib->header.XferState |= cpu_to_le32(NormalPriority); - qid = AdapNormCmdQueue; - } - q = &dev->queues->queue[qid]; + hw_fib->header.XferState |= cpu_to_le32(NormalPriority); - if(wait) - spin_lock_irqsave(&fibptr->event_lock, flags); - if(aac_queue_get( dev, &index, qid, hw_fib, 1, fibptr, &nointr)<0) - return -EWOULDBLOCK; - dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index)); - dprintk((KERN_DEBUG "Fib contents:.\n")); - dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command)); - dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState)); - dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib)); - dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa)); - dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr)); /* * Fill in the Callback and CallbackContext if we are not * going to wait. 
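The comment just above describes the single normal-priority submit path that replaces the old FsaHigh/FsaNormal split: claim a queue entry, link the FIB onto the pending list, publish it by advancing the producer index under the queue lock, then notify the adapter unless interrupt moderation allows the notification to be skipped. A rough sketch of that produce-and-publish pattern follows; cmd_ring is a hypothetical stand-in for struct aac_queue, locking and le32 handling are omitted, and the moderation rule is simplified to "notify only when the ring was empty":

#include <stdint.h>

/* Hypothetical, much simplified stand-in for the adapter command ring:
 * a fixed number of slots, a producer index advanced by the host and a
 * consumer index advanced by the adapter. */
struct cmd_ring {
        uint32_t entries;       /* e.g. ADAP_NORM_CMD_ENTRIES */
        uint32_t producer;      /* next slot the host fills */
        uint32_t consumer;      /* next slot the adapter drains */
};

/* Claim a slot, publish it by advancing the producer index, and report
 * whether the adapter should be notified.  Returns the slot index or
 * -1 when the ring is full. */
static int ring_post(struct cmd_ring *q, int *notify)
{
        uint32_t index = q->producer;

        if (((index + 1) % q->entries) == q->consumer)
                return -1;                      /* ring full */

        /* Simplified moderation: only notify when the adapter may be idle. */
        *notify = (index == q->consumer);

        q->producer = (index + 1) % q->entries; /* publish the entry */
        return (int)index;
}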
@@ -525,22 +450,67 @@ int fib_send(u16 command, struct fib * fibptr, unsigned long size, int priority fibptr->callback = callback; fibptr->callback_data = callback_data; } - FIB_COUNTER_INCREMENT(aac_config.FibsSent); - list_add_tail(&fibptr->queue, &q->pendingq); - q->numpending++; fibptr->done = 0; fibptr->flags = 0; - if(aac_insert_entry(dev, index, qid, (nointr & aac_config.irq_mod)) < 0) - return -EWOULDBLOCK; + FIB_COUNTER_INCREMENT(aac_config.FibsSent); + + dprintk((KERN_DEBUG "fib_send: inserting a queue entry at index %d.\n",index)); + dprintk((KERN_DEBUG "Fib contents:.\n")); + dprintk((KERN_DEBUG " Command = %d.\n", hw_fib->header.Command)); + dprintk((KERN_DEBUG " XferState = %x.\n", hw_fib->header.XferState)); + dprintk((KERN_DEBUG " hw_fib va being sent=%p\n",fibptr->hw_fib)); + dprintk((KERN_DEBUG " hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa)); + dprintk((KERN_DEBUG " fib being sent=%p\n",fibptr)); + + q = &dev->queues->queue[AdapNormCmdQueue]; + + if(wait) + spin_lock_irqsave(&fibptr->event_lock, flags); + spin_lock_irqsave(q->lock, qflags); + aac_queue_get( dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr); + + list_add_tail(&fibptr->queue, &q->pendingq); + q->numpending++; + *(q->headers.producer) = cpu_to_le32(index + 1); + spin_unlock_irqrestore(q->lock, qflags); + if (!(nointr & aac_config.irq_mod)) + aac_adapter_notify(dev, AdapNormCmdQueue); /* * If the caller wanted us to wait for response wait now. */ if (wait) { spin_unlock_irqrestore(&fibptr->event_lock, flags); - down(&fibptr->event_wait); + /* Only set for first known interruptible command */ + if (wait < 0) { + /* + * *VERY* Dangerous to time out a command, the + * assumption is made that we have no hope of + * functioning because an interrupt routing or other + * hardware failure has occurred. + */ + unsigned long count = 36000000L; /* 3 minutes */ + unsigned long qflags; + while (down_trylock(&fibptr->event_wait)) { + if (--count == 0) { + spin_lock_irqsave(q->lock, qflags); + q->numpending--; + list_del(&fibptr->queue); + spin_unlock_irqrestore(q->lock, qflags); + if (wait == -1) { + printk(KERN_ERR "aacraid: fib_send: first asynchronous command timed out.\n" + "Usually a result of a PCI interrupt routing problem;\n" + "update motherboard BIOS or consider utilizing one of\n" + "the SAFE mode kernel options (acpi, apic etc)\n"); + } + return -ETIMEDOUT; + } + udelay(5); + } + } else + down(&fibptr->event_wait); if(fibptr->done == 0) BUG(); @@ -622,15 +592,9 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid) case HostNormCmdQueue: notify = HostNormCmdNotFull; break; - case HostHighCmdQueue: - notify = HostHighCmdNotFull; - break; case HostNormRespQueue: notify = HostNormRespNotFull; break; - case HostHighRespQueue: - notify = HostHighRespNotFull; - break; default: BUG(); return; @@ -652,9 +616,13 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size) { struct hw_fib * hw_fib = fibptr->hw_fib; struct aac_dev * dev = fibptr->dev; + struct aac_queue * q; unsigned long nointr = 0; - if (hw_fib->header.XferState == 0) + unsigned long qflags; + + if (hw_fib->header.XferState == 0) { return 0; + } /* * If we plan to do anything check the structure type first. */ @@ -669,37 +637,21 @@ int fib_adapter_complete(struct fib * fibptr, unsigned short size) * send the completed cdb to the adapter.
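The fib_send() change above also replaces the unconditional down() for the first "interrupt probe" command with a bounded poll, so broken interrupt routing cannot hang the driver forever. A standalone sketch of the same idea, with hypothetical callbacks standing in for down_trylock() and udelay(); the real code additionally unlinks the FIB from the pending queue and prints a diagnostic before giving up:

#include <stdbool.h>

/* try_wait() stands in for down_trylock() (true means the completion
 * was signalled) and pause_us() for udelay(); both are hypothetical
 * callbacks in this sketch. */
static int wait_for_event_bounded(bool (*try_wait)(void),
                                  void (*pause_us)(unsigned int))
{
        unsigned long count = 36000000UL;       /* ~3 minutes at 5 us per pass */

        while (!try_wait()) {
                if (--count == 0)
                        return -1;              /* caller maps this to -ETIMEDOUT */
                pause_us(5);
        }
        return 0;
}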
*/ if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) { + u32 index; hw_fib->header.XferState |= cpu_to_le32(HostProcessed); - if (hw_fib->header.XferState & cpu_to_le32(HighPriority)) { - u32 index; - if (size) - { - size += sizeof(struct aac_fibhdr); - if (size > le16_to_cpu(hw_fib->header.SenderSize)) - return -EMSGSIZE; - hw_fib->header.Size = cpu_to_le16(size); - } - if(aac_queue_get(dev, &index, AdapHighRespQueue, hw_fib, 1, NULL, &nointr) < 0) { - return -EWOULDBLOCK; - } - if (aac_insert_entry(dev, index, AdapHighRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) { - } - } else if (hw_fib->header.XferState & - cpu_to_le32(NormalPriority)) { - u32 index; - - if (size) { - size += sizeof(struct aac_fibhdr); - if (size > le16_to_cpu(hw_fib->header.SenderSize)) - return -EMSGSIZE; - hw_fib->header.Size = cpu_to_le16(size); - } - if (aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr) < 0) - return -EWOULDBLOCK; - if (aac_insert_entry(dev, index, AdapNormRespQueue, (nointr & (int)aac_config.irq_mod)) != 0) - { - } + if (size) { + size += sizeof(struct aac_fibhdr); + if (size > le16_to_cpu(hw_fib->header.SenderSize)) + return -EMSGSIZE; + hw_fib->header.Size = cpu_to_le16(size); } + q = &dev->queues->queue[AdapNormRespQueue]; + spin_lock_irqsave(q->lock, qflags); + aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr); + *(q->headers.producer) = cpu_to_le32(index + 1); + spin_unlock_irqrestore(q->lock, qflags); + if (!(nointr & (int)aac_config.irq_mod)) + aac_adapter_notify(dev, AdapNormRespQueue); } else { @@ -791,6 +743,268 @@ void aac_printf(struct aac_dev *dev, u32 val) memset(cp, 0, 256); } + +/** + * aac_handle_aif - Handle a message from the firmware + * @dev: Which adapter this fib is from + * @fibptr: Pointer to fibptr from adapter + * + * This routine handles a driver notify fib from the adapter and + * dispatches it to the appropriate routine for handling. + */ + +static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) +{ + struct hw_fib * hw_fib = fibptr->hw_fib; + struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data; + int busy; + u32 container; + struct scsi_device *device; + enum { + NOTHING, + DELETE, + ADD, + CHANGE + } device_config_needed; + + /* Sniff for container changes */ + + if (!dev) + return; + container = (u32)-1; + + /* + * We have set this up to try and minimize the number of + * re-configures that take place. As a result of this when + * certain AIF's come in we will set a flag waiting for another + * type of AIF before setting the re-config flag. + */ + switch (le32_to_cpu(aifcmd->command)) { + case AifCmdDriverNotify: + switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) { + /* + * Morph or Expand complete + */ + case AifDenMorphComplete: + case AifDenVolumeExtendComplete: + container = le32_to_cpu(((u32 *)aifcmd->data)[1]); + if (container >= dev->maximum_num_containers) + break; + + /* + * Find the Scsi_Device associated with the SCSI + * address. Make sure we have the right array, and if + * so set the flag to initiate a new re-config once we + * see an AifEnConfigChange AIF come through. 
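The comments above describe the two-phase bookkeeping used by aac_handle_aif(): a morph, extend or container-change AIF only records which reconfiguration is wanted (config_needed) and which follow-up AIF to wait for (config_waiting_on); the actual add, delete or rescan is deferred until that second AIF arrives and clears the wait flag. A compressed sketch of that per-container state machine, with a placeholder code instead of the real AifEnConfigChange value:

/* Per-container reconfiguration bookkeeping (simplified). */
enum cfg_action { CFG_NOTHING, CFG_DELETE, CFG_ADD, CFG_CHANGE };

struct container_state {
        unsigned int waiting_on;        /* AIF code still expected, 0 = none */
        enum cfg_action needed;         /* what to do once it arrives */
};

#define AIF_CONFIG_CHANGE 1234          /* placeholder for AifEnConfigChange */

/* Phase one: an event implies a future configuration change, so only
 * remember what will be needed and which AIF confirms it. */
static void note_pending_change(struct container_state *c, enum cfg_action act)
{
        c->needed = act;
        c->waiting_on = AIF_CONFIG_CHANGE;
}

/* Phase two: the awaited AIF arrives; only now is the action due. */
static enum cfg_action collect_due_action(struct container_state *c,
                                          unsigned int aif)
{
        enum cfg_action act = CFG_NOTHING;

        if (c->waiting_on == aif) {
                c->waiting_on = 0;
                act = c->needed;
                c->needed = CFG_NOTHING;
        }
        return act;
}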
+ */ + + if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) { + device = scsi_device_lookup(dev->scsi_host_ptr, + CONTAINER_TO_CHANNEL(container), + CONTAINER_TO_ID(container), + CONTAINER_TO_LUN(container)); + if (device) { + dev->fsa_dev[container].config_needed = CHANGE; + dev->fsa_dev[container].config_waiting_on = AifEnConfigChange; + scsi_device_put(device); + } + } + } + + /* + * If we are waiting on something and this happens to be + * that thing then set the re-configure flag. + */ + if (container != (u32)-1) { + if (container >= dev->maximum_num_containers) + break; + if (dev->fsa_dev[container].config_waiting_on == + le32_to_cpu(*(u32 *)aifcmd->data)) + dev->fsa_dev[container].config_waiting_on = 0; + } else for (container = 0; + container < dev->maximum_num_containers; ++container) { + if (dev->fsa_dev[container].config_waiting_on == + le32_to_cpu(*(u32 *)aifcmd->data)) + dev->fsa_dev[container].config_waiting_on = 0; + } + break; + + case AifCmdEventNotify: + switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) { + /* + * Add an Array. + */ + case AifEnAddContainer: + container = le32_to_cpu(((u32 *)aifcmd->data)[1]); + if (container >= dev->maximum_num_containers) + break; + dev->fsa_dev[container].config_needed = ADD; + dev->fsa_dev[container].config_waiting_on = + AifEnConfigChange; + break; + + /* + * Delete an Array. + */ + case AifEnDeleteContainer: + container = le32_to_cpu(((u32 *)aifcmd->data)[1]); + if (container >= dev->maximum_num_containers) + break; + dev->fsa_dev[container].config_needed = DELETE; + dev->fsa_dev[container].config_waiting_on = + AifEnConfigChange; + break; + + /* + * Container change detected. If we currently are not + * waiting on something else, setup to wait on a Config Change. + */ + case AifEnContainerChange: + container = le32_to_cpu(((u32 *)aifcmd->data)[1]); + if (container >= dev->maximum_num_containers) + break; + if (dev->fsa_dev[container].config_waiting_on) + break; + dev->fsa_dev[container].config_needed = CHANGE; + dev->fsa_dev[container].config_waiting_on = + AifEnConfigChange; + break; + + case AifEnConfigChange: + break; + + } + + /* + * If we are waiting on something and this happens to be + * that thing then set the re-configure flag. + */ + if (container != (u32)-1) { + if (container >= dev->maximum_num_containers) + break; + if (dev->fsa_dev[container].config_waiting_on == + le32_to_cpu(*(u32 *)aifcmd->data)) + dev->fsa_dev[container].config_waiting_on = 0; + } else for (container = 0; + container < dev->maximum_num_containers; ++container) { + if (dev->fsa_dev[container].config_waiting_on == + le32_to_cpu(*(u32 *)aifcmd->data)) + dev->fsa_dev[container].config_waiting_on = 0; + } + break; + + case AifCmdJobProgress: + /* + * These are job progress AIF's. When a Clear is being + * done on a container it is initially created then hidden from + * the OS. When the clear completes we don't get a config + * change so we monitor the job status complete on a clear then + * wait for a container change. + */ + + if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero)) + && ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5]) + || (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) { + for (container = 0; + container < dev->maximum_num_containers; + ++container) { + /* + * Stomp on all config sequencing for all + * containers? 
+ */ + dev->fsa_dev[container].config_waiting_on = + AifEnContainerChange; + dev->fsa_dev[container].config_needed = ADD; + } + } + if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero)) + && (((u32 *)aifcmd->data)[6] == 0) + && (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) { + for (container = 0; + container < dev->maximum_num_containers; + ++container) { + /* + * Stomp on all config sequencing for all + * containers? + */ + dev->fsa_dev[container].config_waiting_on = + AifEnContainerChange; + dev->fsa_dev[container].config_needed = DELETE; + } + } + break; + } + + device_config_needed = NOTHING; + for (container = 0; container < dev->maximum_num_containers; + ++container) { + if ((dev->fsa_dev[container].config_waiting_on == 0) + && (dev->fsa_dev[container].config_needed != NOTHING)) { + device_config_needed = + dev->fsa_dev[container].config_needed; + dev->fsa_dev[container].config_needed = NOTHING; + break; + } + } + if (device_config_needed == NOTHING) + return; + + /* + * If we decided that a re-configuration needs to be done, + * schedule it here on the way out the door, please close the door + * behind you. + */ + + busy = 0; + + + /* + * Find the Scsi_Device associated with the SCSI address, + * and mark it as changed, invalidating the cache. This deals + * with changes to existing device IDs. + */ + + if (!dev || !dev->scsi_host_ptr) + return; + /* + * force reload of disk info via probe_container + */ + if ((device_config_needed == CHANGE) + && (dev->fsa_dev[container].valid == 1)) + dev->fsa_dev[container].valid = 2; + if ((device_config_needed == CHANGE) || + (device_config_needed == ADD)) + probe_container(dev, container); + device = scsi_device_lookup(dev->scsi_host_ptr, + CONTAINER_TO_CHANNEL(container), + CONTAINER_TO_ID(container), + CONTAINER_TO_LUN(container)); + if (device) { + switch (device_config_needed) { + case DELETE: + scsi_remove_device(device); + break; + case CHANGE: + if (!dev->fsa_dev[container].valid) { + scsi_remove_device(device); + break; + } + scsi_rescan_device(&device->sdev_gendev); + + default: + break; + } + scsi_device_put(device); + } + if (device_config_needed == ADD) { + scsi_add_device(dev->scsi_host_ptr, + CONTAINER_TO_CHANNEL(container), + CONTAINER_TO_ID(container), + CONTAINER_TO_LUN(container)); + } + +} + /** * aac_command_thread - command processing thread * @dev: Adapter to monitor @@ -805,7 +1019,6 @@ int aac_command_thread(struct aac_dev * dev) { struct hw_fib *hw_fib, *hw_newfib; struct fib *fib, *newfib; - struct aac_queue_block *queues = dev->queues; struct aac_fib_context *fibctx; unsigned long flags; DECLARE_WAITQUEUE(wait, current); @@ -825,21 +1038,22 @@ int aac_command_thread(struct aac_dev * dev) * Let the DPC know it has a place to send the AIF's to. 
*/ dev->aif_thread = 1; - add_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait); + add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait); set_current_state(TASK_INTERRUPTIBLE); + dprintk ((KERN_INFO "aac_command_thread start\n")); while(1) { - spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags); - while(!list_empty(&(queues->queue[HostNormCmdQueue].cmdq))) { + spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags); + while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) { struct list_head *entry; struct aac_aifcmd * aifcmd; set_current_state(TASK_RUNNING); - - entry = queues->queue[HostNormCmdQueue].cmdq.next; + + entry = dev->queues->queue[HostNormCmdQueue].cmdq.next; list_del(entry); - - spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags); + + spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags); fib = list_entry(entry, struct fib, fiblink); /* * We will process the FIB here or pass it to a @@ -860,6 +1074,7 @@ int aac_command_thread(struct aac_dev * dev) aifcmd = (struct aac_aifcmd *) hw_fib->data; if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) { /* Handle Driver Notify Events */ + aac_handle_aif(dev, fib); *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); fib_adapter_complete(fib, (u16)sizeof(u32)); } else { @@ -869,9 +1084,62 @@ int aac_command_thread(struct aac_dev * dev) u32 time_now, time_last; unsigned long flagv; - + unsigned num; + struct hw_fib ** hw_fib_pool, ** hw_fib_p; + struct fib ** fib_pool, ** fib_p; + + /* Sniff events */ + if ((aifcmd->command == + cpu_to_le32(AifCmdEventNotify)) || + (aifcmd->command == + cpu_to_le32(AifCmdJobProgress))) { + aac_handle_aif(dev, fib); + } + time_now = jiffies/HZ; + /* + * Warning: no sleep allowed while + * holding spinlock. We take the estimate + * and pre-allocate a set of fibs outside the + * lock. + */ + num = le32_to_cpu(dev->init->AdapterFibsSize) + / sizeof(struct hw_fib); /* some extra */ + spin_lock_irqsave(&dev->fib_lock, flagv); + entry = dev->fib_list.next; + while (entry != &dev->fib_list) { + entry = entry->next; + ++num; + } + spin_unlock_irqrestore(&dev->fib_lock, flagv); + hw_fib_pool = NULL; + fib_pool = NULL; + if (num + && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL))) + && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) { + hw_fib_p = hw_fib_pool; + fib_p = fib_pool; + while (hw_fib_p < &hw_fib_pool[num]) { + if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) { + --hw_fib_p; + break; + } + if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) { + kfree(*(--hw_fib_p)); + break; + } + } + if ((num = hw_fib_p - hw_fib_pool) == 0) { + kfree(fib_pool); + fib_pool = NULL; + kfree(hw_fib_pool); + hw_fib_pool = NULL; + } + } else if (hw_fib_pool) { + kfree(hw_fib_pool); + hw_fib_pool = NULL; + } spin_lock_irqsave(&dev->fib_lock, flagv); entry = dev->fib_list.next; /* @@ -880,6 +1148,8 @@ int aac_command_thread(struct aac_dev * dev) * fib, and then set the event to wake up the * thread that is waiting for it. 
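The "no sleep allowed while holding spinlock" comment above explains why the AIF fan-out now estimates the number of registered listeners and kmallocs the whole pool of FIB copies with GFP_KERNEL before dev->fib_lock is taken; inside the lock the thread only hands pool entries out. A generic sketch of that allocate-early, consume-under-lock pattern, using malloc/free and caller-supplied lock callbacks rather than the kernel primitives:

#include <stdlib.h>

/* Allocate up front, consume under the lock, free the leftovers.
 * lock()/unlock() are placeholders for spin_lock_irqsave() and its
 * counterpart; sleeping allocations are only legal outside of them. */
static void broadcast_copies(size_t nlisteners, size_t item_size,
                             void (*lock)(void), void (*unlock)(void),
                             int (*deliver)(void *copy))
{
        void **pool;
        size_t allocated = 0, used = 0, i;

        pool = calloc(nlisteners, sizeof(*pool));
        if (!pool)
                return;
        /* Sleeping allocations happen here, before the lock is taken. */
        while (allocated < nlisteners) {
                pool[allocated] = malloc(item_size);
                if (!pool[allocated])
                        break;
                allocated++;
        }

        lock();                 /* no allocation allowed past this point */
        while (used < allocated && deliver(pool[used]))
                used++;         /* the listener took ownership of the copy */
        unlock();

        for (i = used; i < allocated; i++)      /* free the unused remainder */
                free(pool[i]);
        free(pool);
}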
*/ + hw_fib_p = hw_fib_pool; + fib_p = fib_pool; while (entry != &dev->fib_list) { /* * Extract the fibctx @@ -912,9 +1182,11 @@ int aac_command_thread(struct aac_dev * dev) * Warning: no sleep allowed while * holding spinlock */ - hw_newfib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC); - newfib = kmalloc(sizeof(struct fib), GFP_ATOMIC); - if (newfib && hw_newfib) { + if (hw_fib_p < &hw_fib_pool[num]) { + hw_newfib = *hw_fib_p; + *(hw_fib_p++) = NULL; + newfib = *fib_p; + *(fib_p++) = NULL; /* * Make the copy of the FIB */ @@ -929,15 +1201,11 @@ int aac_command_thread(struct aac_dev * dev) fibctx->count++; /* * Set the event to wake up the - * thread that will waiting. + * thread that is waiting. */ up(&fibctx->wait_sem); } else { printk(KERN_WARNING "aifd: didn't allocate NewFib.\n"); - if(newfib) - kfree(newfib); - if(hw_newfib) - kfree(hw_newfib); } entry = entry->next; } @@ -947,21 +1215,38 @@ int aac_command_thread(struct aac_dev * dev) *(__le32 *)hw_fib->data = cpu_to_le32(ST_OK); fib_adapter_complete(fib, sizeof(u32)); spin_unlock_irqrestore(&dev->fib_lock, flagv); + /* Free up the remaining resources */ + hw_fib_p = hw_fib_pool; + fib_p = fib_pool; + while (hw_fib_p < &hw_fib_pool[num]) { + if (*hw_fib_p) + kfree(*hw_fib_p); + if (*fib_p) + kfree(*fib_p); + ++fib_p; + ++hw_fib_p; + } + if (hw_fib_pool) + kfree(hw_fib_pool); + if (fib_pool) + kfree(fib_pool); } - spin_lock_irqsave(queues->queue[HostNormCmdQueue].lock, flags); kfree(fib); + spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags); } /* * There are no more AIF's */ - spin_unlock_irqrestore(queues->queue[HostNormCmdQueue].lock, flags); + spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags); schedule(); if(signal_pending(current)) break; set_current_state(TASK_INTERRUPTIBLE); } - remove_wait_queue(&queues->queue[HostNormCmdQueue].cmdready, &wait); + if (dev->queues) + remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait); dev->aif_thread = 0; complete_and_exit(&dev->aif_completion, 0); + return 0; } diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 4ff29d7f5825..a1f9ceef0ac9 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -453,9 +453,9 @@ static int aac_eh_reset(struct scsi_cmnd* cmd) /* * We can exit If all the commands are complete */ + spin_unlock_irq(host->host_lock); if (active == 0) return SUCCESS; - spin_unlock_irq(host->host_lock); ssleep(1); spin_lock_irq(host->host_lock); } @@ -748,7 +748,8 @@ static int __devinit aac_probe_one(struct pci_dev *pdev, unique_id++; } - if (pci_enable_device(pdev)) + error = pci_enable_device(pdev); + if (error) goto out; if (pci_set_dma_mask(pdev, 0xFFFFFFFFULL) || @@ -772,6 +773,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev, shost->irq = pdev->irq; shost->base = pci_resource_start(pdev, 0); shost->unique_id = unique_id; + shost->max_cmd_len = 16; aac = (struct aac_dev *)shost->hostdata; aac->scsi_host_ptr = shost; @@ -799,7 +801,9 @@ static int __devinit aac_probe_one(struct pci_dev *pdev, goto out_free_fibs; aac->maximum_num_channels = aac_drivers[index].channels; - aac_get_adapter_info(aac); + error = aac_get_adapter_info(aac); + if (error < 0) + goto out_deinit; /* * Lets override negotiations and drop the maximum SG limit to 34 @@ -927,8 +931,8 @@ static int __init aac_init(void) printk(KERN_INFO "Adaptec %s driver (%s)\n", AAC_DRIVERNAME, aac_driver_version); - error = pci_module_init(&aac_pci_driver); - if (error) + error = 
pci_register_driver(&aac_pci_driver); + if (error < 0) return error; aac_cfg_major = register_chrdev( 0, "aac", &aac_cfg_fops); diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c index c2c8fa828e24..fe8187d6f58b 100644 --- a/drivers/scsi/ahci.c +++ b/drivers/scsi/ahci.c @@ -216,7 +216,7 @@ static Scsi_Host_Template ahci_sht = { .ordered_flush = 1, }; -static struct ata_port_operations ahci_ops = { +static const struct ata_port_operations ahci_ops = { .port_disable = ata_port_disable, .check_status = ahci_check_status, @@ -407,7 +407,7 @@ static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in) return 0xffffffffU; } - return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4)); + return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); } @@ -425,7 +425,7 @@ static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in, return; } - writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4)); + writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); } static void ahci_phy_reset(struct ata_port *ap) @@ -453,14 +453,14 @@ static void ahci_phy_reset(struct ata_port *ap) static u8 ahci_check_status(struct ata_port *ap) { - void *mmio = (void *) ap->ioaddr.cmd_addr; + void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr; return readl(mmio + PORT_TFDATA) & 0xFF; } static u8 ahci_check_err(struct ata_port *ap) { - void *mmio = (void *) ap->ioaddr.cmd_addr; + void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr; return (readl(mmio + PORT_TFDATA) >> 8) & 0xFF; } @@ -672,17 +672,36 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs * for (i = 0; i < host_set->n_ports; i++) { struct ata_port *ap; - u32 tmp; - VPRINTK("port %u\n", i); + if (!(irq_stat & (1 << i))) + continue; + ap = host_set->ports[i]; - tmp = irq_stat & (1 << i); - if (tmp && ap) { + if (ap) { struct ata_queued_cmd *qc; qc = ata_qc_from_tag(ap, ap->active_tag); - if (ahci_host_intr(ap, qc)) - irq_ack |= (1 << i); + if (!ahci_host_intr(ap, qc)) + if (ata_ratelimit()) { + struct pci_dev *pdev = + to_pci_dev(ap->host_set->dev); + printk(KERN_WARNING + "ahci(%s): unhandled interrupt on port %u\n", + pci_name(pdev), i); + } + + VPRINTK("port %u\n", i); + } else { + VPRINTK("port %u (no irq)\n", i); + if (ata_ratelimit()) { + struct pci_dev *pdev = + to_pci_dev(ap->host_set->dev); + printk(KERN_WARNING + "ahci(%s): interrupt on disabled port %u\n", + pci_name(pdev), i); + } } + + irq_ack |= (1 << i); } if (irq_ack) { diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c index 70c5fb59c9ea..d754b3267863 100644 --- a/drivers/scsi/aic7xxx/aic7770_osm.c +++ b/drivers/scsi/aic7xxx/aic7770_osm.c @@ -112,6 +112,9 @@ aic7770_remove(struct device *dev) struct ahc_softc *ahc = dev_get_drvdata(dev); u_long s; + if (ahc->platform_data && ahc->platform_data->host) + scsi_remove_host(ahc->platform_data->host); + ahc_lock(ahc, &s); ahc_intr_enable(ahc, FALSE); ahc_unlock(ahc, &s); diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c index 6b6d4e287793..95c285cc83e4 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm.c @@ -1192,11 +1192,6 @@ ahd_platform_free(struct ahd_softc *ahd) int i, j; if (ahd->platform_data != NULL) { - if (ahd->platform_data->host != NULL) { - scsi_remove_host(ahd->platform_data->host); - scsi_host_put(ahd->platform_data->host); - } - /* destroy all of the device and target objects */ for (i = 0; i < AHD_NUM_TARGETS; i++) { starget = 
ahd->platform_data->starget[i]; @@ -1226,6 +1221,9 @@ ahd_platform_free(struct ahd_softc *ahd) release_mem_region(ahd->platform_data->mem_busaddr, 0x1000); } + if (ahd->platform_data->host) + scsi_host_put(ahd->platform_data->host); + free(ahd->platform_data, M_DEVBUF); } } diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c index 390b53852d4b..bf360ae021ab 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c @@ -95,6 +95,9 @@ ahd_linux_pci_dev_remove(struct pci_dev *pdev) struct ahd_softc *ahd = pci_get_drvdata(pdev); u_long s; + if (ahd->platform_data && ahd->platform_data->host) + scsi_remove_host(ahd->platform_data->host); + ahd_lock(ahd, &s); ahd_intr_enable(ahd, FALSE); ahd_unlock(ahd, &s); diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index 876d1de8480d..6ee1435d37fa 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c @@ -1209,11 +1209,6 @@ ahc_platform_free(struct ahc_softc *ahc) int i, j; if (ahc->platform_data != NULL) { - if (ahc->platform_data->host != NULL) { - scsi_remove_host(ahc->platform_data->host); - scsi_host_put(ahc->platform_data->host); - } - /* destroy all of the device and target objects */ for (i = 0; i < AHC_NUM_TARGETS; i++) { starget = ahc->platform_data->starget[i]; @@ -1242,6 +1237,9 @@ ahc_platform_free(struct ahc_softc *ahc) 0x1000); } + if (ahc->platform_data->host) + scsi_host_put(ahc->platform_data->host); + free(ahc->platform_data, M_DEVBUF); } } diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c index 3ce77ddc889e..cb30d9c1153d 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c @@ -143,6 +143,9 @@ ahc_linux_pci_dev_remove(struct pci_dev *pdev) struct ahc_softc *ahc = pci_get_drvdata(pdev); u_long s; + if (ahc->platform_data && ahc->platform_data->host) + scsi_remove_host(ahc->platform_data->host); + ahc_lock(ahc, &s); ahc_intr_enable(ahc, FALSE); ahc_unlock(ahc, &s); diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c index 87e0c36f1554..be021478f416 100644 --- a/drivers/scsi/ata_piix.c +++ b/drivers/scsi/ata_piix.c @@ -147,7 +147,7 @@ static Scsi_Host_Template piix_sht = { .ordered_flush = 1, }; -static struct ata_port_operations piix_pata_ops = { +static const struct ata_port_operations piix_pata_ops = { .port_disable = ata_port_disable, .set_piomode = piix_set_piomode, .set_dmamode = piix_set_dmamode, @@ -177,7 +177,7 @@ static struct ata_port_operations piix_pata_ops = { .host_stop = ata_host_stop, }; -static struct ata_port_operations piix_sata_ops = { +static const struct ata_port_operations piix_sata_ops = { .port_disable = ata_port_disable, .tf_load = ata_tf_load, @@ -442,7 +442,6 @@ static void piix_sata_phy_reset(struct ata_port *ap) * piix_set_piomode - Initialize host controller PATA PIO timings * @ap: Port whose timings we are configuring * @adev: um - * @pio: PIO mode, 0 - 4 * * Set PIO mode for device, in host controller PCI config space. * diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c index c10e45b94b62..3d13fdee4fc2 100644 --- a/drivers/scsi/eata.c +++ b/drivers/scsi/eata.c @@ -1357,7 +1357,7 @@ static int port_detect(unsigned long port_base, unsigned int j, for (i = 0; i < shost->can_queue; i++) { size_t sz = shost->sg_tablesize *sizeof(struct sg_list); - unsigned int gfp_mask = (shost->unchecked_isa_dma ? 
GFP_DMA : 0) | GFP_ATOMIC; + gfp_t gfp_mask = (shost->unchecked_isa_dma ? GFP_DMA : 0) | GFP_ATOMIC; ha->cp[i].sglist = kmalloc(sz, gfp_mask); if (!ha->cp[i].sglist) { printk diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index f2a72d33132c..f24d84538fd5 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -176,6 +176,7 @@ void scsi_remove_host(struct Scsi_Host *shost) transport_unregister_device(&shost->shost_gendev); class_device_unregister(&shost->shost_classdev); device_del(&shost->shost_gendev); + scsi_proc_hostdir_rm(shost->hostt); } EXPORT_SYMBOL(scsi_remove_host); @@ -262,7 +263,6 @@ static void scsi_host_dev_release(struct device *dev) if (shost->work_q) destroy_workqueue(shost->work_q); - scsi_proc_hostdir_rm(shost->hostt); scsi_destroy_command_freelist(shost); kfree(shost->shost_data); @@ -287,7 +287,8 @@ static void scsi_host_dev_release(struct device *dev) struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) { struct Scsi_Host *shost; - int gfp_mask = GFP_KERNEL, rval; + gfp_t gfp_mask = GFP_KERNEL; + int rval; if (sht->unchecked_isa_dma && privsize) gfp_mask |= __GFP_DMA; diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index d92273cbe0de..f53d7b8ac33f 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c @@ -48,6 +48,7 @@ #include #include #include +#include #include #include "scsi.h" #include "scsi_priv.h" @@ -62,14 +63,15 @@ static unsigned int ata_busy_sleep (struct ata_port *ap, unsigned long tmout_pat, unsigned long tmout); +static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev); +static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev); static void ata_set_mode(struct ata_port *ap); static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev); -static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift); +static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift); static int fgb(u32 bitmap); -static int ata_choose_xfer_mode(struct ata_port *ap, +static int ata_choose_xfer_mode(const struct ata_port *ap, u8 *xfer_mode_out, unsigned int *xfer_shift_out); -static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat); static void __ata_qc_complete(struct ata_queued_cmd *qc); static unsigned int ata_unique_id = 1; @@ -85,7 +87,7 @@ MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); /** - * ata_tf_load - send taskfile registers to host controller + * ata_tf_load_pio - send taskfile registers to host controller * @ap: Port to which output is sent * @tf: ATA taskfile register set * @@ -95,7 +97,7 @@ MODULE_VERSION(DRV_VERSION); * Inherited from caller. */ -static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf) +static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; @@ -153,7 +155,7 @@ static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf) * Inherited from caller. */ -static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf) +static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; @@ -222,7 +224,7 @@ static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf) * LOCKING: * Inherited from caller. 
*/ -void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf) +void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) { if (ap->flags & ATA_FLAG_MMIO) ata_tf_load_mmio(ap, tf); @@ -242,7 +244,7 @@ void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf) * spin_lock_irqsave(host_set lock) */ -static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf) +static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf) { DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); @@ -263,7 +265,7 @@ static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf) * spin_lock_irqsave(host_set lock) */ -static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf) +static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf) { DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); @@ -283,7 +285,7 @@ static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf) * LOCKING: * spin_lock_irqsave(host_set lock) */ -void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf) +void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) { if (ap->flags & ATA_FLAG_MMIO) ata_exec_command_mmio(ap, tf); @@ -303,7 +305,7 @@ void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf) * Obtains host_set lock. */ -static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf) +static inline void ata_exec(struct ata_port *ap, const struct ata_taskfile *tf) { unsigned long flags; @@ -326,7 +328,7 @@ static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf) * Obtains host_set lock. */ -static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf) +static void ata_tf_to_host(struct ata_port *ap, const struct ata_taskfile *tf) { ap->ops->tf_load(ap, tf); @@ -346,7 +348,7 @@ static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf) * spin_lock_irqsave(host_set lock) */ -void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf) +void ata_tf_to_host_nolock(struct ata_port *ap, const struct ata_taskfile *tf) { ap->ops->tf_load(ap, tf); ap->ops->exec_command(ap, tf); @@ -556,7 +558,7 @@ u8 ata_chk_err(struct ata_port *ap) * Inherited from caller. */ -void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp) +void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp) { fis[0] = 0x27; /* Register - Host to Device FIS */ fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number, @@ -597,7 +599,7 @@ void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp) * Inherited from caller. */ -void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf) +void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf) { tf->command = fis[2]; /* status */ tf->feature = fis[3]; /* error */ @@ -615,79 +617,53 @@ void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf) tf->hob_nsect = fis[13]; } -/** - * ata_prot_to_cmd - determine which read/write opcodes to use - * @protocol: ATA_PROT_xxx taskfile protocol - * @lba48: true is lba48 is present - * - * Given necessary input, determine which read/write commands - * to use to transfer data. - * - * LOCKING: - * None. 
- */ -static int ata_prot_to_cmd(int protocol, int lba48) -{ - int rcmd = 0, wcmd = 0; - - switch (protocol) { - case ATA_PROT_PIO: - if (lba48) { - rcmd = ATA_CMD_PIO_READ_EXT; - wcmd = ATA_CMD_PIO_WRITE_EXT; - } else { - rcmd = ATA_CMD_PIO_READ; - wcmd = ATA_CMD_PIO_WRITE; - } - break; - - case ATA_PROT_DMA: - if (lba48) { - rcmd = ATA_CMD_READ_EXT; - wcmd = ATA_CMD_WRITE_EXT; - } else { - rcmd = ATA_CMD_READ; - wcmd = ATA_CMD_WRITE; - } - break; - - default: - return -1; - } - - return rcmd | (wcmd << 8); -} +static const u8 ata_rw_cmds[] = { + /* pio multi */ + ATA_CMD_READ_MULTI, + ATA_CMD_WRITE_MULTI, + ATA_CMD_READ_MULTI_EXT, + ATA_CMD_WRITE_MULTI_EXT, + /* pio */ + ATA_CMD_PIO_READ, + ATA_CMD_PIO_WRITE, + ATA_CMD_PIO_READ_EXT, + ATA_CMD_PIO_WRITE_EXT, + /* dma */ + ATA_CMD_READ, + ATA_CMD_WRITE, + ATA_CMD_READ_EXT, + ATA_CMD_WRITE_EXT +}; /** - * ata_dev_set_protocol - set taskfile protocol and r/w commands - * @dev: device to examine and configure + * ata_rwcmd_protocol - set taskfile r/w commands and protocol + * @qc: command to examine and configure * - * Examine the device configuration, after we have - * read the identify-device page and configured the - * data transfer mode. Set internal state related to - * the ATA taskfile protocol (pio, pio mult, dma, etc.) - * and calculate the proper read/write commands to use. + * Examine the device configuration and tf->flags to calculate + * the proper read/write commands and protocol to use. * * LOCKING: * caller. */ -static void ata_dev_set_protocol(struct ata_device *dev) +void ata_rwcmd_protocol(struct ata_queued_cmd *qc) { - int pio = (dev->flags & ATA_DFLAG_PIO); - int lba48 = (dev->flags & ATA_DFLAG_LBA48); - int proto, cmd; + struct ata_taskfile *tf = &qc->tf; + struct ata_device *dev = qc->dev; - if (pio) - proto = dev->xfer_protocol = ATA_PROT_PIO; - else - proto = dev->xfer_protocol = ATA_PROT_DMA; + int index, lba48, write; + + lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0; + write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0; - cmd = ata_prot_to_cmd(proto, lba48); - if (cmd < 0) - BUG(); + if (dev->flags & ATA_DFLAG_PIO) { + tf->protocol = ATA_PROT_PIO; + index = dev->multi_count ? 0 : 4; + } else { + tf->protocol = ATA_PROT_DMA; + index = 8; + } - dev->read_cmd = cmd & 0xff; - dev->write_cmd = (cmd >> 8) & 0xff; + tf->command = ata_rw_cmds[index + lba48 + write]; } static const char * xfer_mode_str[] = { @@ -869,7 +845,7 @@ static unsigned int ata_devchk(struct ata_port *ap, * the event of failure. */ -unsigned int ata_dev_classify(struct ata_taskfile *tf) +unsigned int ata_dev_classify(const struct ata_taskfile *tf) { /* Apple's open source Darwin code hints that some devices only * put a proper signature into the LBA mid/high registers, @@ -961,7 +937,7 @@ static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device) * caller. */ -void ata_dev_id_string(u16 *id, unsigned char *s, +void ata_dev_id_string(const u16 *id, unsigned char *s, unsigned int ofs, unsigned int len) { unsigned int c; @@ -1078,7 +1054,7 @@ void ata_dev_select(struct ata_port *ap, unsigned int device, * caller. */ -static inline void ata_dump_id(struct ata_device *dev) +static inline void ata_dump_id(const struct ata_device *dev) { DPRINTK("49==0x%04x " "53==0x%04x " @@ -1106,6 +1082,31 @@ static inline void ata_dump_id(struct ata_device *dev) dev->id[93]); } +/* + * Compute the PIO modes available for this device. This is not as + * trivial as it seems if we must consider early devices correctly. 
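ata_rwcmd_protocol() above replaces the old ata_prot_to_cmd() switch with a flat lookup: ata_rw_cmds[] holds three groups of four opcodes (PIO multi, PIO, DMA), each group ordered read, write, read EXT, write EXT, so the command lands at group base + 2 for LBA48 + 1 for a write. A standalone restatement of that indexing, with placeholder opcode values instead of the ATA_CMD_* constants:

#include <stdint.h>
#include <stdbool.h>

/* Placeholder opcodes; the real table holds ATA_CMD_READ_MULTI,
 * ATA_CMD_PIO_READ, ATA_CMD_READ and their write and EXT variants. */
static const uint8_t rw_cmds[12] = {
        /* pio multi: read, write, read ext, write ext */
        0x01, 0x02, 0x03, 0x04,
        /* pio:       read, write, read ext, write ext */
        0x05, 0x06, 0x07, 0x08,
        /* dma:       read, write, read ext, write ext */
        0x09, 0x0a, 0x0b, 0x0c,
};

static uint8_t pick_rw_cmd(bool pio, bool multi, bool lba48, bool write)
{
        unsigned int base = pio ? (multi ? 0 : 4) : 8;

        return rw_cmds[base + (lba48 ? 2 : 0) + (write ? 1 : 0)];
}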
+ * + * FIXME: pre IDE drive timing (do we care ?). + */ + +static unsigned int ata_pio_modes(const struct ata_device *adev) +{ + u16 modes; + + /* Usual case. Word 53 indicates word 88 is valid */ + if (adev->id[ATA_ID_FIELD_VALID] & (1 << 2)) { + modes = adev->id[ATA_ID_PIO_MODES] & 0x03; + modes <<= 3; + modes |= 0x7; + return modes; + } + + /* If word 88 isn't valid then Word 51 holds the PIO timing number + for the maximum. Turn it into a mask and return it */ + modes = (2 << (adev->id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ; + return modes; +} + /** * ata_dev_identify - obtain IDENTIFY x DEVICE page * @ap: port on which device we wish to probe resides @@ -1131,7 +1132,7 @@ static inline void ata_dump_id(struct ata_device *dev) static void ata_dev_identify(struct ata_port *ap, unsigned int device) { struct ata_device *dev = &ap->device[device]; - unsigned int i; + unsigned int major_version; u16 tmp; unsigned long xfer_modes; u8 status; @@ -1229,9 +1230,9 @@ retry: * common ATA, ATAPI feature tests */ - /* we require LBA and DMA support (bits 8 & 9 of word 49) */ - if (!ata_id_has_dma(dev->id) || !ata_id_has_lba(dev->id)) { - printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id); + /* we require DMA support (bits 8 of word 49) */ + if (!ata_id_has_dma(dev->id)) { + printk(KERN_DEBUG "ata%u: no dma\n", ap->id); goto err_out_nosup; } @@ -1239,10 +1240,8 @@ retry: xfer_modes = dev->id[ATA_ID_UDMA_MODES]; if (!xfer_modes) xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA; - if (!xfer_modes) { - xfer_modes = (dev->id[ATA_ID_PIO_MODES]) << (ATA_SHIFT_PIO + 3); - xfer_modes |= (0x7 << ATA_SHIFT_PIO); - } + if (!xfer_modes) + xfer_modes = ata_pio_modes(dev); ata_dump_id(dev); @@ -1251,32 +1250,75 @@ retry: if (!ata_id_is_ata(dev->id)) /* sanity check */ goto err_out_nosup; + /* get major version */ tmp = dev->id[ATA_ID_MAJOR_VER]; - for (i = 14; i >= 1; i--) - if (tmp & (1 << i)) + for (major_version = 14; major_version >= 1; major_version--) + if (tmp & (1 << major_version)) break; - /* we require at least ATA-3 */ - if (i < 3) { - printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id); - goto err_out_nosup; + /* + * The exact sequence expected by certain pre-ATA4 drives is: + * SRST RESET + * IDENTIFY + * INITIALIZE DEVICE PARAMETERS + * anything else.. + * Some drives were very specific about that exact sequence. + */ + if (major_version < 4 || (!ata_id_has_lba(dev->id))) { + ata_dev_init_params(ap, dev); + + /* current CHS translation info (id[53-58]) might be + * changed. reread the identify device info. + */ + ata_dev_reread_id(ap, dev); } - if (ata_id_has_lba48(dev->id)) { - dev->flags |= ATA_DFLAG_LBA48; - dev->n_sectors = ata_id_u64(dev->id, 100); - } else { - dev->n_sectors = ata_id_u32(dev->id, 60); + if (ata_id_has_lba(dev->id)) { + dev->flags |= ATA_DFLAG_LBA; + + if (ata_id_has_lba48(dev->id)) { + dev->flags |= ATA_DFLAG_LBA48; + dev->n_sectors = ata_id_u64(dev->id, 100); + } else { + dev->n_sectors = ata_id_u32(dev->id, 60); + } + + /* print device info to dmesg */ + printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n", + ap->id, device, + major_version, + ata_mode_string(xfer_modes), + (unsigned long long)dev->n_sectors, + dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA"); + } else { + /* CHS */ + + /* Default translation */ + dev->cylinders = dev->id[1]; + dev->heads = dev->id[3]; + dev->sectors = dev->id[6]; + dev->n_sectors = dev->cylinders * dev->heads * dev->sectors; + + if (ata_id_current_chs_valid(dev->id)) { + /* Current CHS translation is valid. 
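ata_pio_modes(), added earlier in this hunk, builds a bit mask of supported PIO modes: PIO 0-2 are always assumed, PIO 3/4 come from the low bits of word 64 when the identify data marks it valid, and for older drives the timing number in word 51 is expanded into a mask of every mode up to that value. A small sketch of the same computation, with raw identify-word indices in place of the ATA_ID_* constants:

#include <stdint.h>

/* id[] is the 256-word IDENTIFY DEVICE buffer; raw word indices are
 * used here instead of the ATA_ID_* constants. */
static uint16_t pio_mode_mask(const uint16_t *id)
{
        uint16_t modes;

        /* Same validity test as the hunk above (bit 2 of word 53). */
        if (id[53] & (1 << 2)) {
                modes = (uint16_t)((id[64] & 0x03) << 3);       /* PIO 3/4 */
                modes |= 0x7;                                   /* PIO 0-2 always */
                return modes;
        }
        /* Older drive: word 51 gives the best classic PIO timing number;
         * turn it into a mask of every mode up to and including it. */
        return (uint16_t)((2 << (id[51] & 0xff)) - 1);
}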
*/ + dev->cylinders = dev->id[54]; + dev->heads = dev->id[55]; + dev->sectors = dev->id[56]; + + dev->n_sectors = ata_id_u32(dev->id, 57); + } + + /* print device info to dmesg */ + printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n", + ap->id, device, + major_version, + ata_mode_string(xfer_modes), + (unsigned long long)dev->n_sectors, + (int)dev->cylinders, (int)dev->heads, (int)dev->sectors); + } ap->host->max_cmd_len = 16; - - /* print device info to dmesg */ - printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors:%s\n", - ap->id, device, - ata_mode_string(xfer_modes), - (unsigned long long)dev->n_sectors, - dev->flags & ATA_DFLAG_LBA48 ? " lba48" : ""); } /* ATAPI-specific feature tests */ @@ -1310,7 +1352,7 @@ err_out: } -static inline u8 ata_dev_knobble(struct ata_port *ap) +static inline u8 ata_dev_knobble(const struct ata_port *ap) { return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id))); } @@ -1496,7 +1538,153 @@ void ata_port_disable(struct ata_port *ap) ap->flags |= ATA_FLAG_PORT_DISABLED; } -static struct { +/* + * This mode timing computation functionality is ported over from + * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik + */ +/* + * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). + * These were taken from ATA/ATAPI-6 standard, rev 0a, except + * for PIO 5, which is a nonstandard extension and UDMA6, which + * is currently supported only by Maxtor drives. + */ + +static const struct ata_timing ata_timing[] = { + + { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 }, + { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 }, + { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 }, + { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 }, + + { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 }, + { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 }, + { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 }, + +/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */ + + { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 }, + { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 }, + { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 }, + + { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 }, + { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 }, + { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 }, + +/* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */ + { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 }, + { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 }, + + { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 }, + { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 }, + { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 }, + +/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */ + + { 0xFF } +}; + +#define ENOUGH(v,unit) (((v)-1)/(unit)+1) +#define EZ(v,unit) ((v)?ENOUGH(v,unit):0) + +static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) +{ + q->setup = EZ(t->setup * 1000, T); + q->act8b = EZ(t->act8b * 1000, T); + q->rec8b = EZ(t->rec8b * 1000, T); + q->cyc8b = EZ(t->cyc8b * 1000, T); + q->active = EZ(t->active * 1000, T); + q->recover = EZ(t->recover * 1000, T); + q->cycle = EZ(t->cycle * 1000, T); + q->udma = EZ(t->udma * 1000, UT); +} + +void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, + struct ata_timing *m, unsigned int what) +{ + if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup); + if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); + if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); + if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); + if (what & ATA_TIMING_ACTIVE ) 
m->active = max(a->active, b->active); + if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); + if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); + if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); +} + +static const struct ata_timing* ata_timing_find_mode(unsigned short speed) +{ + const struct ata_timing *t; + + for (t = ata_timing; t->mode != speed; t++) + if (t->mode == 0xFF) + return NULL; + return t; +} + +int ata_timing_compute(struct ata_device *adev, unsigned short speed, + struct ata_timing *t, int T, int UT) +{ + const struct ata_timing *s; + struct ata_timing p; + + /* + * Find the mode. + */ + + if (!(s = ata_timing_find_mode(speed))) + return -EINVAL; + + /* + * If the drive is an EIDE drive, it can tell us it needs extended + * PIO/MW_DMA cycle timing. + */ + + if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ + memset(&p, 0, sizeof(p)); + if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) { + if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO]; + else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY]; + } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) { + p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN]; + } + ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); + } + + /* + * Convert the timing to bus clock counts. + */ + + ata_timing_quantize(s, t, T, UT); + + /* + * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T + * and some other commands. We have to ensure that the DMA cycle timing is + * slower/equal than the fastest PIO timing. + */ + + if (speed > XFER_PIO_4) { + ata_timing_compute(adev, adev->pio_mode, &p, T, UT); + ata_timing_merge(&p, t, t, ATA_TIMING_ALL); + } + + /* + * Lenghten active & recovery time so that cycle time is correct. 
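The ENOUGH()/EZ() macros used by ata_timing_quantize() above implement "round a required duration up to a whole number of clock periods, but keep a zero requirement at zero". A minimal standalone check of that arithmetic; the units here are arbitrary, whereas in the driver the nanosecond figures are scaled by 1000 and T/UT are the command and UDMA clock periods in matching units.

#include <stdio.h>

/* Smallest clock count whose total length covers v. */
#define ENOUGH(v, unit) (((v) - 1) / (unit) + 1)
/* Same, but a zero requirement stays zero (field unused for this mode). */
#define EZ(v, unit)     ((v) ? ENOUGH((v), (unit)) : 0)

int main(void)
{
	int T = 30;			/* assumed clock period */

	printf("%d\n", EZ(70, T));	/* rounds up to 3 clocks */
	printf("%d\n", EZ(60, T));	/* exactly 2 clocks */
	printf("%d\n", EZ(0, T));	/* unused field stays 0 */
	return 0;
}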
+ */ + + if (t->act8b + t->rec8b < t->cyc8b) { + t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; + t->rec8b = t->cyc8b - t->act8b; + } + + if (t->active + t->recover < t->cycle) { + t->active += (t->cycle - (t->active + t->recover)) / 2; + t->recover = t->cycle - t->active; + } + + return 0; +} + +static const struct { unsigned int shift; u8 base; } xfer_mode_classes[] = { @@ -1603,7 +1791,7 @@ static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode, */ static void ata_set_mode(struct ata_port *ap) { - unsigned int i, xfer_shift; + unsigned int xfer_shift; u8 xfer_mode; int rc; @@ -1632,11 +1820,6 @@ static void ata_set_mode(struct ata_port *ap) if (ap->ops->post_set_mode) ap->ops->post_set_mode(ap); - for (i = 0; i < 2; i++) { - struct ata_device *dev = &ap->device[i]; - ata_dev_set_protocol(dev); - } - return; err_out: @@ -1910,7 +2093,8 @@ err_out: DPRINTK("EXIT\n"); } -static void ata_pr_blacklisted(struct ata_port *ap, struct ata_device *dev) +static void ata_pr_blacklisted(const struct ata_port *ap, + const struct ata_device *dev) { printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n", ap->id, dev->devno); @@ -1948,7 +2132,7 @@ static const char * ata_dma_blacklist [] = { "_NEC DV5800A", }; -static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev) +static int ata_dma_blacklisted(const struct ata_device *dev) { unsigned char model_num[40]; char *s; @@ -1973,9 +2157,9 @@ static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev) return 0; } -static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift) +static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift) { - struct ata_device *master, *slave; + const struct ata_device *master, *slave; unsigned int mask; master = &ap->device[0]; @@ -1987,14 +2171,14 @@ static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift) mask = ap->udma_mask; if (ata_dev_present(master)) { mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff); - if (ata_dma_blacklisted(ap, master)) { + if (ata_dma_blacklisted(master)) { mask = 0; ata_pr_blacklisted(ap, master); } } if (ata_dev_present(slave)) { mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff); - if (ata_dma_blacklisted(ap, slave)) { + if (ata_dma_blacklisted(slave)) { mask = 0; ata_pr_blacklisted(ap, slave); } @@ -2004,14 +2188,14 @@ static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift) mask = ap->mwdma_mask; if (ata_dev_present(master)) { mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07); - if (ata_dma_blacklisted(ap, master)) { + if (ata_dma_blacklisted(master)) { mask = 0; ata_pr_blacklisted(ap, master); } } if (ata_dev_present(slave)) { mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07); - if (ata_dma_blacklisted(ap, slave)) { + if (ata_dma_blacklisted(slave)) { mask = 0; ata_pr_blacklisted(ap, slave); } @@ -2075,7 +2259,7 @@ static int fgb(u32 bitmap) * Zero on success, negative on error. 
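The "lengthen active & recovery" step at the end of ata_timing_compute() above splits any shortfall between the active and recovery phases so that the pair always adds up to the required cycle time. A standalone sketch of that adjustment with made-up numbers:

#include <stdio.h>

static void stretch(int *active, int *recover, int cycle)
{
	if (*active + *recover < cycle) {
		*active += (cycle - (*active + *recover)) / 2;	/* half to active */
		*recover = cycle - *active;			/* rest to recovery */
	}
}

int main(void)
{
	int active = 25, recover = 25, cycle = 120;

	stretch(&active, &recover, cycle);
	/* prints active=60 recover=60 sum=120 */
	printf("active=%d recover=%d sum=%d\n",
	       active, recover, active + recover);
	return 0;
}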
*/ -static int ata_choose_xfer_mode(struct ata_port *ap, +static int ata_choose_xfer_mode(const struct ata_port *ap, u8 *xfer_mode_out, unsigned int *xfer_shift_out) { @@ -2143,6 +2327,110 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev) DPRINTK("EXIT\n"); } +/** + * ata_dev_reread_id - Reread the device identify device info + * @ap: port where the device is + * @dev: device to reread the identify device info + * + * LOCKING: + */ + +static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev) +{ + DECLARE_COMPLETION(wait); + struct ata_queued_cmd *qc; + unsigned long flags; + int rc; + + qc = ata_qc_new_init(ap, dev); + BUG_ON(qc == NULL); + + ata_sg_init_one(qc, dev->id, sizeof(dev->id)); + qc->dma_dir = DMA_FROM_DEVICE; + + if (dev->class == ATA_DEV_ATA) { + qc->tf.command = ATA_CMD_ID_ATA; + DPRINTK("do ATA identify\n"); + } else { + qc->tf.command = ATA_CMD_ID_ATAPI; + DPRINTK("do ATAPI identify\n"); + } + + qc->tf.flags |= ATA_TFLAG_DEVICE; + qc->tf.protocol = ATA_PROT_PIO; + qc->nsect = 1; + + qc->waiting = &wait; + qc->complete_fn = ata_qc_complete_noop; + + spin_lock_irqsave(&ap->host_set->lock, flags); + rc = ata_qc_issue(qc); + spin_unlock_irqrestore(&ap->host_set->lock, flags); + + if (rc) + goto err_out; + + wait_for_completion(&wait); + + swap_buf_le16(dev->id, ATA_ID_WORDS); + + ata_dump_id(dev); + + DPRINTK("EXIT\n"); + + return; +err_out: + ata_port_disable(ap); +} + +/** + * ata_dev_init_params - Issue INIT DEV PARAMS command + * @ap: Port associated with device @dev + * @dev: Device to which command will be sent + * + * LOCKING: + */ + +static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev) +{ + DECLARE_COMPLETION(wait); + struct ata_queued_cmd *qc; + int rc; + unsigned long flags; + u16 sectors = dev->id[6]; + u16 heads = dev->id[3]; + + /* Number of sectors per track 1-255. Number of heads 1-16 */ + if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) + return; + + /* set up init dev params taskfile */ + DPRINTK("init dev params \n"); + + qc = ata_qc_new_init(ap, dev); + BUG_ON(qc == NULL); + + qc->tf.command = ATA_CMD_INIT_DEV_PARAMS; + qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + qc->tf.protocol = ATA_PROT_NODATA; + qc->tf.nsect = sectors; + qc->tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ + + qc->waiting = &wait; + qc->complete_fn = ata_qc_complete_noop; + + spin_lock_irqsave(&ap->host_set->lock, flags); + rc = ata_qc_issue(qc); + spin_unlock_irqrestore(&ap->host_set->lock, flags); + + if (rc) + ata_port_disable(ap); + else + wait_for_completion(&wait); + + DPRINTK("EXIT\n"); +} + /** * ata_sg_clean - Unmap DMA memory associated with command * @qc: Command containing DMA memory to be released @@ -2413,32 +2701,32 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) /** * ata_pio_poll - - * @ap: + * @ap: the target ata_port * * LOCKING: * None. 
(executing in kernel thread context) * * RETURNS: - * + * timeout value to use */ static unsigned long ata_pio_poll(struct ata_port *ap) { u8 status; - unsigned int poll_state = PIO_ST_UNKNOWN; - unsigned int reg_state = PIO_ST_UNKNOWN; - const unsigned int tmout_state = PIO_ST_TMOUT; + unsigned int poll_state = HSM_ST_UNKNOWN; + unsigned int reg_state = HSM_ST_UNKNOWN; + const unsigned int tmout_state = HSM_ST_TMOUT; - switch (ap->pio_task_state) { - case PIO_ST: - case PIO_ST_POLL: - poll_state = PIO_ST_POLL; - reg_state = PIO_ST; + switch (ap->hsm_task_state) { + case HSM_ST: + case HSM_ST_POLL: + poll_state = HSM_ST_POLL; + reg_state = HSM_ST; break; - case PIO_ST_LAST: - case PIO_ST_LAST_POLL: - poll_state = PIO_ST_LAST_POLL; - reg_state = PIO_ST_LAST; + case HSM_ST_LAST: + case HSM_ST_LAST_POLL: + poll_state = HSM_ST_LAST_POLL; + reg_state = HSM_ST_LAST; break; default: BUG(); @@ -2448,20 +2736,20 @@ static unsigned long ata_pio_poll(struct ata_port *ap) status = ata_chk_status(ap); if (status & ATA_BUSY) { if (time_after(jiffies, ap->pio_task_timeout)) { - ap->pio_task_state = tmout_state; + ap->hsm_task_state = tmout_state; return 0; } - ap->pio_task_state = poll_state; + ap->hsm_task_state = poll_state; return ATA_SHORT_PAUSE; } - ap->pio_task_state = reg_state; + ap->hsm_task_state = reg_state; return 0; } /** - * ata_pio_complete - - * @ap: + * ata_pio_complete - check if drive is busy or idle + * @ap: the target ata_port * * LOCKING: * None. (executing in kernel thread context) @@ -2480,14 +2768,14 @@ static int ata_pio_complete (struct ata_port *ap) * we enter, BSY will be cleared in a chk-status or two. If not, * the drive is probably seeking or something. Snooze for a couple * msecs, then chk-status again. If still busy, fall back to - * PIO_ST_POLL state. + * HSM_ST_POLL state. */ drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); if (drv_stat & (ATA_BUSY | ATA_DRQ)) { msleep(2); drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); if (drv_stat & (ATA_BUSY | ATA_DRQ)) { - ap->pio_task_state = PIO_ST_LAST_POLL; + ap->hsm_task_state = HSM_ST_LAST_POLL; ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; return 0; } @@ -2495,14 +2783,14 @@ static int ata_pio_complete (struct ata_port *ap) drv_stat = ata_wait_idle(ap); if (!ata_ok(drv_stat)) { - ap->pio_task_state = PIO_ST_ERR; + ap->hsm_task_state = HSM_ST_ERR; return 0; } qc = ata_qc_from_tag(ap, ap->active_tag); assert(qc != NULL); - ap->pio_task_state = PIO_ST_IDLE; + ap->hsm_task_state = HSM_ST_IDLE; ata_poll_qc_complete(qc, drv_stat); @@ -2513,7 +2801,7 @@ static int ata_pio_complete (struct ata_port *ap) /** - * swap_buf_le16 - + * swap_buf_le16 - swap halves of 16-words in place * @buf: Buffer to swap * @buf_words: Number of 16-bit words in buffer. * @@ -2522,6 +2810,7 @@ static int ata_pio_complete (struct ata_port *ap) * vice-versa. * * LOCKING: + * Inherited from caller. */ void swap_buf_le16(u16 *buf, unsigned int buf_words) { @@ -2544,7 +2833,6 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words) * * LOCKING: * Inherited from caller. - * */ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf, @@ -2590,7 +2878,6 @@ static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf, * * LOCKING: * Inherited from caller. - * */ static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf, @@ -2630,7 +2917,6 @@ static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf, * * LOCKING: * Inherited from caller. 
- * */ static void ata_data_xfer(struct ata_port *ap, unsigned char *buf, @@ -2662,7 +2948,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) unsigned char *buf; if (qc->cursect == (qc->nsect - 1)) - ap->pio_task_state = PIO_ST_LAST; + ap->hsm_task_state = HSM_ST_LAST; page = sg[qc->cursg].page; offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE; @@ -2712,7 +2998,7 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) unsigned int offset, count; if (qc->curbytes + bytes >= qc->nbytes) - ap->pio_task_state = PIO_ST_LAST; + ap->hsm_task_state = HSM_ST_LAST; next_sg: if (unlikely(qc->cursg >= qc->n_elem)) { @@ -2734,7 +3020,7 @@ next_sg: for (i = 0; i < words; i++) ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write); - ap->pio_task_state = PIO_ST_LAST; + ap->hsm_task_state = HSM_ST_LAST; return; } @@ -2783,7 +3069,6 @@ next_sg: * * LOCKING: * Inherited from caller. - * */ static void atapi_pio_bytes(struct ata_queued_cmd *qc) @@ -2815,12 +3100,12 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc) err_out: printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n", ap->id, dev->devno); - ap->pio_task_state = PIO_ST_ERR; + ap->hsm_task_state = HSM_ST_ERR; } /** - * ata_pio_sector - - * @ap: + * ata_pio_block - start PIO on a block + * @ap: the target ata_port * * LOCKING: * None. (executing in kernel thread context) @@ -2832,19 +3117,19 @@ static void ata_pio_block(struct ata_port *ap) u8 status; /* - * This is purely hueristic. This is a fast path. + * This is purely heuristic. This is a fast path. * Sometimes when we enter, BSY will be cleared in * a chk-status or two. If not, the drive is probably seeking * or something. Snooze for a couple msecs, then * chk-status again. If still busy, fall back to - * PIO_ST_POLL state. + * HSM_ST_POLL state. 
*/ status = ata_busy_wait(ap, ATA_BUSY, 5); if (status & ATA_BUSY) { msleep(2); status = ata_busy_wait(ap, ATA_BUSY, 10); if (status & ATA_BUSY) { - ap->pio_task_state = PIO_ST_POLL; + ap->hsm_task_state = HSM_ST_POLL; ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; return; } @@ -2856,7 +3141,7 @@ static void ata_pio_block(struct ata_port *ap) if (is_atapi_taskfile(&qc->tf)) { /* no more data to transfer or unsupported ATAPI command */ if ((status & ATA_DRQ) == 0) { - ap->pio_task_state = PIO_ST_LAST; + ap->hsm_task_state = HSM_ST_LAST; return; } @@ -2864,7 +3149,7 @@ static void ata_pio_block(struct ata_port *ap) } else { /* handle BSY=0, DRQ=0 as error */ if ((status & ATA_DRQ) == 0) { - ap->pio_task_state = PIO_ST_ERR; + ap->hsm_task_state = HSM_ST_ERR; return; } @@ -2884,7 +3169,7 @@ static void ata_pio_error(struct ata_port *ap) printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n", ap->id, drv_stat); - ap->pio_task_state = PIO_ST_IDLE; + ap->hsm_task_state = HSM_ST_IDLE; ata_poll_qc_complete(qc, drv_stat | ATA_ERR); } @@ -2899,25 +3184,25 @@ fsm_start: timeout = 0; qc_completed = 0; - switch (ap->pio_task_state) { - case PIO_ST_IDLE: + switch (ap->hsm_task_state) { + case HSM_ST_IDLE: return; - case PIO_ST: + case HSM_ST: ata_pio_block(ap); break; - case PIO_ST_LAST: + case HSM_ST_LAST: qc_completed = ata_pio_complete(ap); break; - case PIO_ST_POLL: - case PIO_ST_LAST_POLL: + case HSM_ST_POLL: + case HSM_ST_LAST_POLL: timeout = ata_pio_poll(ap); break; - case PIO_ST_TMOUT: - case PIO_ST_ERR: + case HSM_ST_TMOUT: + case HSM_ST_ERR: ata_pio_error(ap); return; } @@ -2928,52 +3213,6 @@ fsm_start: goto fsm_start; } -static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev, - struct scsi_cmnd *cmd) -{ - DECLARE_COMPLETION(wait); - struct ata_queued_cmd *qc; - unsigned long flags; - int rc; - - DPRINTK("ATAPI request sense\n"); - - qc = ata_qc_new_init(ap, dev); - BUG_ON(qc == NULL); - - /* FIXME: is this needed? 
*/ - memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer)); - - ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer)); - qc->dma_dir = DMA_FROM_DEVICE; - - memset(&qc->cdb, 0, ap->cdb_len); - qc->cdb[0] = REQUEST_SENSE; - qc->cdb[4] = SCSI_SENSE_BUFFERSIZE; - - qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; - qc->tf.command = ATA_CMD_PACKET; - - qc->tf.protocol = ATA_PROT_ATAPI; - qc->tf.lbam = (8 * 1024) & 0xff; - qc->tf.lbah = (8 * 1024) >> 8; - qc->nbytes = SCSI_SENSE_BUFFERSIZE; - - qc->waiting = &wait; - qc->complete_fn = ata_qc_complete_noop; - - spin_lock_irqsave(&ap->host_set->lock, flags); - rc = ata_qc_issue(qc); - spin_unlock_irqrestore(&ap->host_set->lock, flags); - - if (rc) - ata_port_disable(ap); - else - wait_for_completion(&wait); - - DPRINTK("EXIT\n"); -} - /** * ata_qc_timeout - Handle timeout of queued command * @qc: Command that timed out @@ -3091,14 +3330,14 @@ void ata_eng_timeout(struct ata_port *ap) DPRINTK("ENTER\n"); qc = ata_qc_from_tag(ap, ap->active_tag); - if (!qc) { + if (qc) + ata_qc_timeout(qc); + else { printk(KERN_ERR "ata%u: BUG: timeout without command\n", ap->id); goto out; } - ata_qc_timeout(qc); - out: DPRINTK("EXIT\n"); } @@ -3155,15 +3394,12 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, qc->nbytes = qc->curbytes = 0; ata_tf_init(ap, &qc->tf, dev->devno); - - if (dev->flags & ATA_DFLAG_LBA48) - qc->tf.flags |= ATA_TFLAG_LBA48; } return qc; } -static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat) +int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat) { return 0; } @@ -3201,7 +3437,6 @@ static void __ata_qc_complete(struct ata_queued_cmd *qc) * * LOCKING: * spin_lock_irqsave(host_set lock) - * */ void ata_qc_free(struct ata_queued_cmd *qc) { @@ -3221,7 +3456,6 @@ void ata_qc_free(struct ata_queued_cmd *qc) * * LOCKING: * spin_lock_irqsave(host_set lock) - * */ void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) @@ -3360,7 +3594,7 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc) case ATA_PROT_PIO: /* load tf registers, initiate polling pio */ ata_qc_set_polling(qc); ata_tf_to_host_nolock(ap, &qc->tf); - ap->pio_task_state = PIO_ST; + ap->hsm_task_state = HSM_ST; queue_work(ata_wq, &ap->pio_task); break; @@ -3586,7 +3820,7 @@ u8 ata_bmdma_status(struct ata_port *ap) void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; host_stat = readb(mmio + ATA_DMA_STATUS); } else - host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); return host_stat; } @@ -3715,7 +3949,6 @@ idle_irq: * * RETURNS: * IRQ_NONE or IRQ_HANDLED. - * */ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs) @@ -3806,7 +4039,7 @@ static void atapi_packet_task(void *_data) ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); /* PIO commands are handled by polling */ - ap->pio_task_state = PIO_ST; + ap->hsm_task_state = HSM_ST; queue_work(ata_wq, &ap->pio_task); } @@ -3827,6 +4060,7 @@ err_out: * May be used as the port_start() entry in ata_port_operations. * * LOCKING: + * Inherited from caller. */ int ata_port_start (struct ata_port *ap) @@ -3852,6 +4086,7 @@ int ata_port_start (struct ata_port *ap) * May be used as the port_stop() entry in ata_port_operations. * * LOCKING: + * Inherited from caller. */ void ata_port_stop (struct ata_port *ap) @@ -3874,6 +4109,7 @@ void ata_host_stop (struct ata_host_set *host_set) * @do_unregister: 1 if we fully unregister, 0 to just stop the port * * LOCKING: + * Inherited from caller. 
*/ static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister) @@ -3901,12 +4137,11 @@ static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister) * * LOCKING: * Inherited from caller. - * */ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host, struct ata_host_set *host_set, - struct ata_probe_ent *ent, unsigned int port_no) + const struct ata_probe_ent *ent, unsigned int port_no) { unsigned int i; @@ -3962,10 +4197,9 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host, * * RETURNS: * New ata_port on success, for NULL on error. - * */ -static struct ata_port * ata_host_add(struct ata_probe_ent *ent, +static struct ata_port * ata_host_add(const struct ata_probe_ent *ent, struct ata_host_set *host_set, unsigned int port_no) { @@ -4010,10 +4244,9 @@ err_out: * * RETURNS: * Number of ports registered. Zero on error (no ports registered). - * */ -int ata_device_add(struct ata_probe_ent *ent) +int ata_device_add(const struct ata_probe_ent *ent) { unsigned int count = 0, i; struct device *dev = ent->dev; @@ -4113,7 +4346,7 @@ int ata_device_add(struct ata_probe_ent *ent) for (i = 0; i < count; i++) { struct ata_port *ap = host_set->ports[i]; - scsi_scan_host(ap->host); + ata_scsi_scan_host(ap); } dev_set_drvdata(dev, host_set); @@ -4131,6 +4364,52 @@ err_out: return 0; } +/** + * ata_host_set_remove - PCI layer callback for device removal + * @host_set: ATA host set that was removed + * + * Unregister all objects associated with this host set. Free those + * objects. + * + * LOCKING: + * Inherited from calling layer (may sleep). + */ + +void ata_host_set_remove(struct ata_host_set *host_set) +{ + struct ata_port *ap; + unsigned int i; + + for (i = 0; i < host_set->n_ports; i++) { + ap = host_set->ports[i]; + scsi_remove_host(ap->host); + } + + free_irq(host_set->irq, host_set); + + for (i = 0; i < host_set->n_ports; i++) { + ap = host_set->ports[i]; + + ata_scsi_release(ap->host); + + if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) { + struct ata_ioports *ioaddr = &ap->ioaddr; + + if (ioaddr->cmd_addr == 0x1f0) + release_region(0x1f0, 8); + else if (ioaddr->cmd_addr == 0x170) + release_region(0x170, 8); + } + + scsi_host_put(ap->host); + } + + if (host_set->ops->host_stop) + host_set->ops->host_stop(host_set); + + kfree(host_set); +} + /** * ata_scsi_release - SCSI layer callback hook for host unload * @host: libata host to be unloaded @@ -4185,7 +4464,7 @@ void ata_std_ports(struct ata_ioports *ioaddr) } static struct ata_probe_ent * -ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port) +ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port) { struct ata_probe_ent *probe_ent; @@ -4226,85 +4505,86 @@ void ata_pci_host_stop (struct ata_host_set *host_set) * ata_pci_init_native_mode - Initialize native-mode driver * @pdev: pci device to be initialized * @port: array[2] of pointers to port info structures. + * @ports: bitmap of ports present * * Utility function which allocates and initializes an * ata_probe_ent structure for a standard dual-port * PIO-based IDE controller. The returned ata_probe_ent * structure can be passed to ata_device_add(). The returned * ata_probe_ent structure should then be freed with kfree(). + * + * The caller need only pass the address of the primary port, the + * secondary will be deduced automatically. If the device has non + * standard secondary port mappings this function can be called twice, + * once for each interface. 
*/ struct ata_probe_ent * -ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port) +ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports) { struct ata_probe_ent *probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); + int p = 0; + if (!probe_ent) return NULL; - probe_ent->n_ports = 2; probe_ent->irq = pdev->irq; probe_ent->irq_flags = SA_SHIRQ; - probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0); - probe_ent->port[0].altstatus_addr = - probe_ent->port[0].ctl_addr = - pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS; - probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4); + if (ports & ATA_PORT_PRIMARY) { + probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0); + probe_ent->port[p].altstatus_addr = + probe_ent->port[p].ctl_addr = + pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS; + probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4); + ata_std_ports(&probe_ent->port[p]); + p++; + } - probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2); - probe_ent->port[1].altstatus_addr = - probe_ent->port[1].ctl_addr = - pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS; - probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8; - - ata_std_ports(&probe_ent->port[0]); - ata_std_ports(&probe_ent->port[1]); + if (ports & ATA_PORT_SECONDARY) { + probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2); + probe_ent->port[p].altstatus_addr = + probe_ent->port[p].ctl_addr = + pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS; + probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8; + ata_std_ports(&probe_ent->port[p]); + p++; + } + probe_ent->n_ports = p; return probe_ent; } -static struct ata_probe_ent * -ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port, - struct ata_probe_ent **ppe2) +static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info **port, int port_num) { - struct ata_probe_ent *probe_ent, *probe_ent2; + struct ata_probe_ent *probe_ent; probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); if (!probe_ent) return NULL; - probe_ent2 = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[1]); - if (!probe_ent2) { - kfree(probe_ent); - return NULL; - } - probe_ent->n_ports = 1; - probe_ent->irq = 14; - - probe_ent->hard_port_no = 0; probe_ent->legacy_mode = 1; + probe_ent->n_ports = 1; + probe_ent->hard_port_no = port_num; - probe_ent2->n_ports = 1; - probe_ent2->irq = 15; - - probe_ent2->hard_port_no = 1; - probe_ent2->legacy_mode = 1; - - probe_ent->port[0].cmd_addr = 0x1f0; - probe_ent->port[0].altstatus_addr = - probe_ent->port[0].ctl_addr = 0x3f6; - probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4); - - probe_ent2->port[0].cmd_addr = 0x170; - probe_ent2->port[0].altstatus_addr = - probe_ent2->port[0].ctl_addr = 0x376; - probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8; - + switch(port_num) + { + case 0: + probe_ent->irq = 14; + probe_ent->port[0].cmd_addr = 0x1f0; + probe_ent->port[0].altstatus_addr = + probe_ent->port[0].ctl_addr = 0x3f6; + break; + case 1: + probe_ent->irq = 15; + probe_ent->port[0].cmd_addr = 0x170; + probe_ent->port[0].altstatus_addr = + probe_ent->port[0].ctl_addr = 0x376; + break; + } + probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8 * port_num; ata_std_ports(&probe_ent->port[0]); - ata_std_ports(&probe_ent2->port[0]); - - *ppe2 = probe_ent2; return probe_ent; } @@ -4327,13 +4607,12 @@ ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port, * * 
RETURNS: * Zero on success, negative on errno-based value on error. - * */ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, unsigned int n_ports) { - struct ata_probe_ent *probe_ent, *probe_ent2 = NULL; + struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL; struct ata_port_info *port[2]; u8 tmp8, mask; unsigned int legacy_mode = 0; @@ -4350,7 +4629,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { - /* TODO: support transitioning to native mode? */ + /* TODO: What if one channel is in native mode ... */ pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); mask = (1 << 2) | (1 << 0); if ((tmp8 & mask) != mask) @@ -4358,11 +4637,20 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, } /* FIXME... */ - if ((!legacy_mode) && (n_ports > 1)) { - printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n"); - return -EINVAL; + if ((!legacy_mode) && (n_ports > 2)) { + printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n"); + n_ports = 2; + /* For now */ } + /* FIXME: Really for ATA it isn't safe because the device may be + multi-purpose and we want to leave it alone if it was already + enabled. Secondly for shared use as Arjan says we want refcounting + + Checking dev->is_enabled is insufficient as this is not set at + boot for the primary video which is BIOS enabled + */ + rc = pci_enable_device(pdev); if (rc) return rc; @@ -4373,6 +4661,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, goto err_out; } + /* FIXME: Should use platform specific mappers for legacy port ranges */ if (legacy_mode) { if (!request_region(0x1f0, 8, "libata")) { struct resource *conflict, res; @@ -4417,10 +4706,17 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, goto err_out_regions; if (legacy_mode) { - probe_ent = ata_pci_init_legacy_mode(pdev, port, &probe_ent2); - } else - probe_ent = ata_pci_init_native_mode(pdev, port); - if (!probe_ent) { + if (legacy_mode & (1 << 0)) + probe_ent = ata_pci_init_legacy_port(pdev, port, 0); + if (legacy_mode & (1 << 1)) + probe_ent2 = ata_pci_init_legacy_port(pdev, port, 1); + } else { + if (n_ports == 2) + probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); + else + probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY); + } + if (!probe_ent && !probe_ent2) { rc = -ENOMEM; goto err_out_regions; } @@ -4458,7 +4754,7 @@ err_out: * @pdev: PCI device that was removed * * PCI layer indicates to libata via this hook that - * hot-unplug or module unload event has occured. + * hot-unplug or module unload event has occurred. * Handle this by unregistering all objects associated * with this PCI device. Free those objects. Then finally * release PCI resources and disable device. 
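The reworked PCI init path above builds its port list from a bitmap (primary/secondary) rather than always assuming two native ports, and sets up each legacy port individually. A minimal sketch of that selection pattern; the flag values and BAR numbers below are stand-ins for the real ATA_PORT_* constants and PCI resource layout:

#include <stdio.h>

#define PORT_PRIMARY	(1 << 0)	/* assumed flag values */
#define PORT_SECONDARY	(1 << 1)

struct port_addr {
	int cmd_bar;
	int ctl_bar;
	int bmdma_off;
};

static int fill_ports(unsigned int ports, struct port_addr out[2])
{
	int p = 0;

	if (ports & PORT_PRIMARY) {
		out[p].cmd_bar = 0;
		out[p].ctl_bar = 1;
		out[p].bmdma_off = 0;
		p++;
	}
	if (ports & PORT_SECONDARY) {
		out[p].cmd_bar = 2;
		out[p].ctl_bar = 3;
		out[p].bmdma_off = 8;
		p++;
	}
	return p;	/* plays the role of probe_ent->n_ports */
}

int main(void)
{
	struct port_addr pa[2];

	printf("ports used: %d\n",
	       fill_ports(PORT_PRIMARY | PORT_SECONDARY, pa));
	return 0;
}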
@@ -4471,46 +4767,15 @@ void ata_pci_remove_one (struct pci_dev *pdev) { struct device *dev = pci_dev_to_dev(pdev); struct ata_host_set *host_set = dev_get_drvdata(dev); - struct ata_port *ap; - unsigned int i; - - for (i = 0; i < host_set->n_ports; i++) { - ap = host_set->ports[i]; - - scsi_remove_host(ap->host); - } - - free_irq(host_set->irq, host_set); - - for (i = 0; i < host_set->n_ports; i++) { - ap = host_set->ports[i]; - - ata_scsi_release(ap->host); - - if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) { - struct ata_ioports *ioaddr = &ap->ioaddr; - - if (ioaddr->cmd_addr == 0x1f0) - release_region(0x1f0, 8); - else if (ioaddr->cmd_addr == 0x170) - release_region(0x170, 8); - } - - scsi_host_put(ap->host); - } - - if (host_set->ops->host_stop) - host_set->ops->host_stop(host_set); - - kfree(host_set); + ata_host_set_remove(host_set); pci_release_regions(pdev); pci_disable_device(pdev); dev_set_drvdata(dev, NULL); } /* move to PCI subsystem */ -int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits) +int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) { unsigned long tmp = 0; @@ -4563,6 +4828,27 @@ static void __exit ata_exit(void) module_init(ata_init); module_exit(ata_exit); +static unsigned long ratelimit_time; +static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED; + +int ata_ratelimit(void) +{ + int rc; + unsigned long flags; + + spin_lock_irqsave(&ata_ratelimit_lock, flags); + + if (time_after(jiffies, ratelimit_time)) { + rc = 1; + ratelimit_time = jiffies + (HZ/5); + } else + rc = 0; + + spin_unlock_irqrestore(&ata_ratelimit_lock, flags); + + return rc; +} + /* * libata is essentially a library of internal helper functions for * low-level ATA host controller drivers. As such, the API/ABI is @@ -4573,6 +4859,7 @@ module_exit(ata_exit); EXPORT_SYMBOL_GPL(ata_std_bios_param); EXPORT_SYMBOL_GPL(ata_std_ports); EXPORT_SYMBOL_GPL(ata_device_add); +EXPORT_SYMBOL_GPL(ata_host_set_remove); EXPORT_SYMBOL_GPL(ata_sg_init); EXPORT_SYMBOL_GPL(ata_sg_init_one); EXPORT_SYMBOL_GPL(ata_qc_complete); @@ -4603,6 +4890,7 @@ EXPORT_SYMBOL_GPL(sata_phy_reset); EXPORT_SYMBOL_GPL(__sata_phy_reset); EXPORT_SYMBOL_GPL(ata_bus_reset); EXPORT_SYMBOL_GPL(ata_port_disable); +EXPORT_SYMBOL_GPL(ata_ratelimit); EXPORT_SYMBOL_GPL(ata_scsi_ioctl); EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); EXPORT_SYMBOL_GPL(ata_scsi_error); @@ -4614,6 +4902,9 @@ EXPORT_SYMBOL_GPL(ata_dev_id_string); EXPORT_SYMBOL_GPL(ata_dev_config); EXPORT_SYMBOL_GPL(ata_scsi_simulate); +EXPORT_SYMBOL_GPL(ata_timing_compute); +EXPORT_SYMBOL_GPL(ata_timing_merge); + #ifdef CONFIG_PCI EXPORT_SYMBOL_GPL(pci_test_config_bits); EXPORT_SYMBOL_GPL(ata_pci_host_stop); diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c index 104fd9a63e73..58858886d751 100644 --- a/drivers/scsi/libata-scsi.c +++ b/drivers/scsi/libata-scsi.c @@ -44,11 +44,19 @@ #include "libata.h" -typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, u8 *scsicmd); +typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, const u8 *scsicmd); static struct ata_device * -ata_scsi_find_dev(struct ata_port *ap, struct scsi_device *scsidev); +ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev); +static void ata_scsi_invalid_field(struct scsi_cmnd *cmd, + void (*done)(struct scsi_cmnd *)) +{ + ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0); + /* "Invalid field in cbd" */ + done(cmd); +} + /** * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd. 
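The new ata_ratelimit() above is a jiffies-based limiter: it answers "yes" at most once every HZ/5 ticks, under a spinlock so concurrent callers agree. The sketch below shows only the shape of that logic in userspace, swapping jiffies and the spinlock for clock_gettime() and a single-threaded static, so it is not the kernel version:

#include <stdio.h>
#include <time.h>

/* Allow at most one event per 200 ms, mirroring jiffies + HZ/5. */
static int ratelimit(void)
{
	static struct timespec next;	/* single caller assumed */
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	if (now.tv_sec > next.tv_sec ||
	    (now.tv_sec == next.tv_sec && now.tv_nsec >= next.tv_nsec)) {
		next = now;
		next.tv_nsec += 200 * 1000 * 1000;
		if (next.tv_nsec >= 1000000000L) {
			next.tv_sec++;
			next.tv_nsec -= 1000000000L;
		}
		return 1;
	}
	return 0;
}

int main(void)
{
	int i;

	for (i = 0; i < 3; i++)
		printf("allowed=%d\n", ratelimit());
	return 0;
}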
* @sdev: SCSI device for which BIOS geometry is to be determined @@ -182,7 +190,6 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat) { struct scsi_cmnd *cmd = qc->scsicmd; u8 err = 0; - unsigned char *sb = cmd->sense_buffer; /* Based on the 3ware driver translation table */ static unsigned char sense_table[][4] = { /* BBD|ECC|ID|MAR */ @@ -225,8 +232,6 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat) }; int i = 0; - cmd->result = SAM_STAT_CHECK_CONDITION; - /* * Is this an error we can process/parse */ @@ -281,11 +286,9 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat) /* Look for best matches first */ if((sense_table[i][0] & err) == sense_table[i][0]) { - sb[0] = 0x70; - sb[2] = sense_table[i][1]; - sb[7] = 0x0a; - sb[12] = sense_table[i][2]; - sb[13] = sense_table[i][3]; + ata_scsi_set_sense(cmd, sense_table[i][1] /* sk */, + sense_table[i][2] /* asc */, + sense_table[i][3] /* ascq */ ); return; } i++; @@ -300,11 +303,9 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat) { if(stat_table[i][0] & drv_stat) { - sb[0] = 0x70; - sb[2] = stat_table[i][1]; - sb[7] = 0x0a; - sb[12] = stat_table[i][2]; - sb[13] = stat_table[i][3]; + ata_scsi_set_sense(cmd, sense_table[i][1] /* sk */, + sense_table[i][2] /* asc */, + sense_table[i][3] /* ascq */ ); return; } i++; @@ -313,15 +314,12 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat) printk(KERN_ERR "ata%u: called with no error (%02X)!\n", qc->ap->id, drv_stat); /* additional-sense-code[-qualifier] */ - sb[0] = 0x70; - sb[2] = MEDIUM_ERROR; - sb[7] = 0x0A; if (cmd->sc_data_direction == DMA_FROM_DEVICE) { - sb[12] = 0x11; /* "unrecovered read error" */ - sb[13] = 0x04; + ata_scsi_set_sense(cmd, MEDIUM_ERROR, 0x11, 0x4); + /* "unrecovered read error" */ } else { - sb[12] = 0x0C; /* "write error - */ - sb[13] = 0x02; /* auto-reallocation failed" */ + ata_scsi_set_sense(cmd, MEDIUM_ERROR, 0xc, 0x2); + /* "write error - auto-reallocation failed" */ } } @@ -420,7 +418,7 @@ int ata_scsi_error(struct Scsi_Host *host) */ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc, - u8 *scsicmd) + const u8 *scsicmd) { struct ata_taskfile *tf = &qc->tf; @@ -430,15 +428,26 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc, ; /* ignore IMMED bit, violates sat-r05 */ } if (scsicmd[4] & 0x2) - return 1; /* LOEJ bit set not supported */ + goto invalid_fld; /* LOEJ bit set not supported */ if (((scsicmd[4] >> 4) & 0xf) != 0) - return 1; /* power conditions not supported */ + goto invalid_fld; /* power conditions not supported */ if (scsicmd[4] & 0x1) { tf->nsect = 1; /* 1 sector, lba=0 */ - tf->lbah = 0x0; - tf->lbam = 0x0; - tf->lbal = 0x0; - tf->device |= ATA_LBA; + + if (qc->dev->flags & ATA_DFLAG_LBA) { + qc->tf.flags |= ATA_TFLAG_LBA; + + tf->lbah = 0x0; + tf->lbam = 0x0; + tf->lbal = 0x0; + tf->device |= ATA_LBA; + } else { + /* CHS */ + tf->lbal = 0x1; /* sect */ + tf->lbam = 0x0; /* cyl low */ + tf->lbah = 0x0; /* cyl high */ + } + tf->command = ATA_CMD_VERIFY; /* READ VERIFY */ } else { tf->nsect = 0; /* time period value (0 implies now) */ @@ -453,6 +462,11 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc, */ return 0; + +invalid_fld: + ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0); + /* "Invalid field in cbd" */ + return 1; } @@ -471,14 +485,14 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc, * Zero on success, non-zero on error. 
*/ -static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) +static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd) { struct ata_taskfile *tf = &qc->tf; tf->flags |= ATA_TFLAG_DEVICE; tf->protocol = ATA_PROT_NODATA; - if ((tf->flags & ATA_TFLAG_LBA48) && + if ((qc->dev->flags & ATA_DFLAG_LBA48) && (ata_id_has_flush_ext(qc->dev->id))) tf->command = ATA_CMD_FLUSH_EXT; else @@ -487,6 +501,99 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) return 0; } +/** + * scsi_6_lba_len - Get LBA and transfer length + * @scsicmd: SCSI command to translate + * + * Calculate LBA and transfer length for 6-byte commands. + * + * RETURNS: + * @plba: the LBA + * @plen: the transfer length + */ + +static void scsi_6_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen) +{ + u64 lba = 0; + u32 len = 0; + + VPRINTK("six-byte command\n"); + + lba |= ((u64)scsicmd[2]) << 8; + lba |= ((u64)scsicmd[3]); + + len |= ((u32)scsicmd[4]); + + *plba = lba; + *plen = len; +} + +/** + * scsi_10_lba_len - Get LBA and transfer length + * @scsicmd: SCSI command to translate + * + * Calculate LBA and transfer length for 10-byte commands. + * + * RETURNS: + * @plba: the LBA + * @plen: the transfer length + */ + +static void scsi_10_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen) +{ + u64 lba = 0; + u32 len = 0; + + VPRINTK("ten-byte command\n"); + + lba |= ((u64)scsicmd[2]) << 24; + lba |= ((u64)scsicmd[3]) << 16; + lba |= ((u64)scsicmd[4]) << 8; + lba |= ((u64)scsicmd[5]); + + len |= ((u32)scsicmd[7]) << 8; + len |= ((u32)scsicmd[8]); + + *plba = lba; + *plen = len; +} + +/** + * scsi_16_lba_len - Get LBA and transfer length + * @scsicmd: SCSI command to translate + * + * Calculate LBA and transfer length for 16-byte commands. + * + * RETURNS: + * @plba: the LBA + * @plen: the transfer length + */ + +static void scsi_16_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen) +{ + u64 lba = 0; + u32 len = 0; + + VPRINTK("sixteen-byte command\n"); + + lba |= ((u64)scsicmd[2]) << 56; + lba |= ((u64)scsicmd[3]) << 48; + lba |= ((u64)scsicmd[4]) << 40; + lba |= ((u64)scsicmd[5]) << 32; + lba |= ((u64)scsicmd[6]) << 24; + lba |= ((u64)scsicmd[7]) << 16; + lba |= ((u64)scsicmd[8]) << 8; + lba |= ((u64)scsicmd[9]); + + len |= ((u32)scsicmd[10]) << 24; + len |= ((u32)scsicmd[11]) << 16; + len |= ((u32)scsicmd[12]) << 8; + len |= ((u32)scsicmd[13]); + + *plba = lba; + *plen = len; +} + /** * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one * @qc: Storage for translated ATA taskfile @@ -501,82 +608,110 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) * Zero on success, non-zero on error. 
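The scsi_6_lba_len(), scsi_10_lba_len() and scsi_16_lba_len() helpers above all extract a big-endian LBA and transfer length from fixed CDB offsets. A compact standalone version of the 10-byte case, using the same byte positions (LBA in bytes 2-5, length in bytes 7-8):

#include <stdint.h>
#include <stdio.h>

/* READ(10)/WRITE(10): LBA in CDB bytes 2-5, length in bytes 7-8. */
static void cdb10_lba_len(const uint8_t *cdb, uint64_t *lba, uint32_t *len)
{
	*lba = ((uint64_t)cdb[2] << 24) | ((uint64_t)cdb[3] << 16) |
	       ((uint64_t)cdb[4] << 8)  |  (uint64_t)cdb[5];
	*len = ((uint32_t)cdb[7] << 8)  |  (uint32_t)cdb[8];
}

int main(void)
{
	const uint8_t cdb[10] = { 0x28, 0, 0x00, 0x12, 0x34, 0x56,
				  0, 0x00, 0x08, 0 };
	uint64_t lba;
	uint32_t len;

	cdb10_lba_len(cdb, &lba, &len);
	/* prints lba=0x123456 len=8 */
	printf("lba=0x%llx len=%u\n", (unsigned long long)lba, len);
	return 0;
}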
*/ -static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) +static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd) { struct ata_taskfile *tf = &qc->tf; - unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48; + struct ata_device *dev = qc->dev; u64 dev_sectors = qc->dev->n_sectors; - u64 sect = 0; - u32 n_sect = 0; + u64 block; + u32 n_block; tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; tf->protocol = ATA_PROT_NODATA; - tf->device |= ATA_LBA; - - if (scsicmd[0] == VERIFY) { - sect |= ((u64)scsicmd[2]) << 24; - sect |= ((u64)scsicmd[3]) << 16; - sect |= ((u64)scsicmd[4]) << 8; - sect |= ((u64)scsicmd[5]); - - n_sect |= ((u32)scsicmd[7]) << 8; - n_sect |= ((u32)scsicmd[8]); - } - - else if (scsicmd[0] == VERIFY_16) { - sect |= ((u64)scsicmd[2]) << 56; - sect |= ((u64)scsicmd[3]) << 48; - sect |= ((u64)scsicmd[4]) << 40; - sect |= ((u64)scsicmd[5]) << 32; - sect |= ((u64)scsicmd[6]) << 24; - sect |= ((u64)scsicmd[7]) << 16; - sect |= ((u64)scsicmd[8]) << 8; - sect |= ((u64)scsicmd[9]); - - n_sect |= ((u32)scsicmd[10]) << 24; - n_sect |= ((u32)scsicmd[11]) << 16; - n_sect |= ((u32)scsicmd[12]) << 8; - n_sect |= ((u32)scsicmd[13]); - } + if (scsicmd[0] == VERIFY) + scsi_10_lba_len(scsicmd, &block, &n_block); + else if (scsicmd[0] == VERIFY_16) + scsi_16_lba_len(scsicmd, &block, &n_block); else - return 1; + goto invalid_fld; - if (!n_sect) - return 1; - if (sect >= dev_sectors) - return 1; - if ((sect + n_sect) > dev_sectors) - return 1; - if (lba48) { - if (n_sect > (64 * 1024)) - return 1; + if (!n_block) + goto nothing_to_do; + if (block >= dev_sectors) + goto out_of_range; + if ((block + n_block) > dev_sectors) + goto out_of_range; + + if (dev->flags & ATA_DFLAG_LBA) { + tf->flags |= ATA_TFLAG_LBA; + + if (dev->flags & ATA_DFLAG_LBA48) { + if (n_block > (64 * 1024)) + goto invalid_fld; + + /* use LBA48 */ + tf->flags |= ATA_TFLAG_LBA48; + tf->command = ATA_CMD_VERIFY_EXT; + + tf->hob_nsect = (n_block >> 8) & 0xff; + + tf->hob_lbah = (block >> 40) & 0xff; + tf->hob_lbam = (block >> 32) & 0xff; + tf->hob_lbal = (block >> 24) & 0xff; + } else { + if (n_block > 256) + goto invalid_fld; + + /* use LBA28 */ + tf->command = ATA_CMD_VERIFY; + + tf->device |= (block >> 24) & 0xf; + } + + tf->nsect = n_block & 0xff; + + tf->lbah = (block >> 16) & 0xff; + tf->lbam = (block >> 8) & 0xff; + tf->lbal = block & 0xff; + + tf->device |= ATA_LBA; } else { - if (n_sect > 256) - return 1; - } + /* CHS */ + u32 sect, head, cyl, track; - if (lba48) { - tf->command = ATA_CMD_VERIFY_EXT; + if (n_block > 256) + goto invalid_fld; - tf->hob_nsect = (n_sect >> 8) & 0xff; + /* Convert LBA to CHS */ + track = (u32)block / dev->sectors; + cyl = track / dev->heads; + head = track % dev->heads; + sect = (u32)block % dev->sectors + 1; - tf->hob_lbah = (sect >> 40) & 0xff; - tf->hob_lbam = (sect >> 32) & 0xff; - tf->hob_lbal = (sect >> 24) & 0xff; - } else { + DPRINTK("block %u track %u cyl %u head %u sect %u\n", + (u32)block, track, cyl, head, sect); + + /* Check whether the converted CHS can fit. 
+ Cylinder: 0-65535 + Head: 0-15 + Sector: 1-255*/ + if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect)) + goto out_of_range; + tf->command = ATA_CMD_VERIFY; - - tf->device |= (sect >> 24) & 0xf; + tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ + tf->lbal = sect; + tf->lbam = cyl; + tf->lbah = cyl >> 8; + tf->device |= head; } - tf->nsect = n_sect & 0xff; - - tf->lbah = (sect >> 16) & 0xff; - tf->lbam = (sect >> 8) & 0xff; - tf->lbal = sect & 0xff; - return 0; + +invalid_fld: + ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0); + /* "Invalid field in cbd" */ + return 1; + +out_of_range: + ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0); + /* "Logical Block Address out of range" */ + return 1; + +nothing_to_do: + qc->scsicmd->result = SAM_STAT_GOOD; + return 1; } /** @@ -599,106 +734,137 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) * Zero on success, non-zero on error. */ -static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) +static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd) { struct ata_taskfile *tf = &qc->tf; - unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48; + struct ata_device *dev = qc->dev; + u64 block; + u32 n_block; tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; - tf->protocol = qc->dev->xfer_protocol; - tf->device |= ATA_LBA; - if (scsicmd[0] == READ_10 || scsicmd[0] == READ_6 || - scsicmd[0] == READ_16) { - tf->command = qc->dev->read_cmd; - } else { - tf->command = qc->dev->write_cmd; + if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 || + scsicmd[0] == WRITE_16) tf->flags |= ATA_TFLAG_WRITE; + + /* Calculate the SCSI LBA and transfer length. */ + switch (scsicmd[0]) { + case READ_10: + case WRITE_10: + scsi_10_lba_len(scsicmd, &block, &n_block); + break; + case READ_6: + case WRITE_6: + scsi_6_lba_len(scsicmd, &block, &n_block); + + /* for 6-byte r/w commands, transfer length 0 + * means 256 blocks of data, not 0 block. + */ + if (!n_block) + n_block = 256; + break; + case READ_16: + case WRITE_16: + scsi_16_lba_len(scsicmd, &block, &n_block); + break; + default: + DPRINTK("no-byte command\n"); + goto invalid_fld; } - if (scsicmd[0] == READ_10 || scsicmd[0] == WRITE_10) { - if (lba48) { - tf->hob_nsect = scsicmd[7]; - tf->hob_lbal = scsicmd[2]; + /* Check and compose ATA command */ + if (!n_block) + /* For 10-byte and 16-byte SCSI R/W commands, transfer + * length 0 means transfer 0 block of data. + * However, for ATA R/W commands, sector count 0 means + * 256 or 65536 sectors, not 0 sectors as in SCSI. + */ + goto nothing_to_do; - qc->nsect = ((unsigned int)scsicmd[7] << 8) | - scsicmd[8]; - } else { - /* if we don't support LBA48 addressing, the request - * -may- be too large. */ - if ((scsicmd[2] & 0xf0) || scsicmd[7]) - return 1; + if (dev->flags & ATA_DFLAG_LBA) { + tf->flags |= ATA_TFLAG_LBA; - /* stores LBA27:24 in lower 4 bits of device reg */ - tf->device |= scsicmd[2]; + if (dev->flags & ATA_DFLAG_LBA48) { + /* The request -may- be too large for LBA48. */ + if ((block >> 48) || (n_block > 65536)) + goto out_of_range; - qc->nsect = scsicmd[8]; + /* use LBA48 */ + tf->flags |= ATA_TFLAG_LBA48; + + tf->hob_nsect = (n_block >> 8) & 0xff; + + tf->hob_lbah = (block >> 40) & 0xff; + tf->hob_lbam = (block >> 32) & 0xff; + tf->hob_lbal = (block >> 24) & 0xff; + } else { + /* use LBA28 */ + + /* The request -may- be too large for LBA28. 
*/ + if ((block >> 28) || (n_block > 256)) + goto out_of_range; + + tf->device |= (block >> 24) & 0xf; } - tf->nsect = scsicmd[8]; - tf->lbal = scsicmd[5]; - tf->lbam = scsicmd[4]; - tf->lbah = scsicmd[3]; + ata_rwcmd_protocol(qc); - VPRINTK("ten-byte command\n"); - if (qc->nsect == 0) /* we don't support length==0 cmds */ - return 1; - return 0; + qc->nsect = n_block; + tf->nsect = n_block & 0xff; + + tf->lbah = (block >> 16) & 0xff; + tf->lbam = (block >> 8) & 0xff; + tf->lbal = block & 0xff; + + tf->device |= ATA_LBA; + } else { + /* CHS */ + u32 sect, head, cyl, track; + + /* The request -may- be too large for CHS addressing. */ + if ((block >> 28) || (n_block > 256)) + goto out_of_range; + + ata_rwcmd_protocol(qc); + + /* Convert LBA to CHS */ + track = (u32)block / dev->sectors; + cyl = track / dev->heads; + head = track % dev->heads; + sect = (u32)block % dev->sectors + 1; + + DPRINTK("block %u track %u cyl %u head %u sect %u\n", + (u32)block, track, cyl, head, sect); + + /* Check whether the converted CHS can fit. + Cylinder: 0-65535 + Head: 0-15 + Sector: 1-255*/ + if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect)) + goto out_of_range; + + qc->nsect = n_block; + tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ + tf->lbal = sect; + tf->lbam = cyl; + tf->lbah = cyl >> 8; + tf->device |= head; } - if (scsicmd[0] == READ_6 || scsicmd[0] == WRITE_6) { - qc->nsect = tf->nsect = scsicmd[4]; - if (!qc->nsect) { - qc->nsect = 256; - if (lba48) - tf->hob_nsect = 1; - } + return 0; - tf->lbal = scsicmd[3]; - tf->lbam = scsicmd[2]; - tf->lbah = scsicmd[1] & 0x1f; /* mask out reserved bits */ +invalid_fld: + ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0); + /* "Invalid field in cbd" */ + return 1; - VPRINTK("six-byte command\n"); - return 0; - } +out_of_range: + ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0); + /* "Logical Block Address out of range" */ + return 1; - if (scsicmd[0] == READ_16 || scsicmd[0] == WRITE_16) { - /* rule out impossible LBAs and sector counts */ - if (scsicmd[2] || scsicmd[3] || scsicmd[10] || scsicmd[11]) - return 1; - - if (lba48) { - tf->hob_nsect = scsicmd[12]; - tf->hob_lbal = scsicmd[6]; - tf->hob_lbam = scsicmd[5]; - tf->hob_lbah = scsicmd[4]; - - qc->nsect = ((unsigned int)scsicmd[12] << 8) | - scsicmd[13]; - } else { - /* once again, filter out impossible non-zero values */ - if (scsicmd[4] || scsicmd[5] || scsicmd[12] || - (scsicmd[6] & 0xf0)) - return 1; - - /* stores LBA27:24 in lower 4 bits of device reg */ - tf->device |= scsicmd[6]; - - qc->nsect = scsicmd[13]; - } - - tf->nsect = scsicmd[13]; - tf->lbal = scsicmd[9]; - tf->lbam = scsicmd[8]; - tf->lbah = scsicmd[7]; - - VPRINTK("sixteen-byte command\n"); - if (qc->nsect == 0) /* we don't support length==0 cmds */ - return 1; - return 0; - } - - DPRINTK("no-byte command\n"); +nothing_to_do: + qc->scsicmd->result = SAM_STAT_GOOD; return 1; } @@ -731,6 +897,12 @@ static int ata_scsi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) * This function sets up an ata_queued_cmd structure for the * SCSI command, and sends that ata_queued_cmd to the hardware. * + * The xlat_func argument (actor) returns 0 if ready to execute + * ATA command, else 1 to finish translation. If 1 is returned + * then cmd->result (and possibly cmd->sense_buffer) are assumed + * to be set reflecting an error condition or clean (early) + * termination. 
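Both the VERIFY and the read/write translation paths above fall back to CHS addressing with the same arithmetic: track = lba / sectors-per-track, cylinder = track / heads, head = track % heads, sector = lba % sectors-per-track + 1, followed by the cylinder 0-65535 / head 0-15 / sector 1-255 range check. A standalone sketch of that conversion; the geometry in main() is just an example:

#include <stdint.h>
#include <stdio.h>

/* Returns 0 and fills c/h/s on success, -1 if the address does not fit. */
static int lba_to_chs(uint32_t lba, uint32_t heads, uint32_t spt,
		      uint32_t *cyl, uint32_t *head, uint32_t *sect)
{
	uint32_t track = lba / spt;

	*cyl  = track / heads;
	*head = track % heads;
	*sect = lba % spt + 1;

	/* Cylinder 0-65535, head 0-15, sector 1-255 */
	if ((*cyl >> 16) || (*head >> 4) || (*sect >> 8) || !*sect)
		return -1;
	return 0;
}

int main(void)
{
	uint32_t c, h, s;

	if (lba_to_chs(5000, 16, 63, &c, &h, &s) == 0)
		printf("C/H/S = %u/%u/%u\n", c, h, s);	/* 4/15/24 */
	return 0;
}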
+ * * LOCKING: * spin_lock_irqsave(host_set lock) */ @@ -747,7 +919,7 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev, qc = ata_scsi_qc_new(ap, dev, cmd, done); if (!qc) - return; + goto err_mem; /* data is present; dma-map it */ if (cmd->sc_data_direction == DMA_FROM_DEVICE || @@ -755,7 +927,7 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev, if (unlikely(cmd->request_bufflen < 1)) { printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n", ap->id, dev->devno); - goto err_out; + goto err_did; } if (cmd->use_sg) @@ -770,19 +942,28 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev, qc->complete_fn = ata_scsi_qc_complete; if (xlat_func(qc, scsicmd)) - goto err_out; + goto early_finish; /* select device, send command to hardware */ if (ata_qc_issue(qc)) - goto err_out; + goto err_did; VPRINTK("EXIT\n"); return; -err_out: +early_finish: + ata_qc_free(qc); + done(cmd); + DPRINTK("EXIT - early finish (good or error)\n"); + return; + +err_did: ata_qc_free(qc); - ata_bad_cdb(cmd, done); - DPRINTK("EXIT - badcmd\n"); +err_mem: + cmd->result = (DID_ERROR << 16); + done(cmd); + DPRINTK("EXIT - internal\n"); + return; } /** @@ -849,7 +1030,8 @@ static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf) * Mapping the response buffer, calling the command's handler, * and handling the handler's return value. This return value * indicates whether the handler wishes the SCSI command to be - * completed successfully, or not. + * completed successfully (0), or not (in which case cmd->result + * and sense buffer are assumed to be set). * * LOCKING: * spin_lock_irqsave(host_set lock) @@ -868,12 +1050,9 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args, rc = actor(args, rbuf, buflen); ata_scsi_rbuf_put(cmd, rbuf); - if (rc) - ata_bad_cdb(cmd, args->done); - else { + if (rc == 0) cmd->result = SAM_STAT_GOOD; - args->done(cmd); - } + args->done(cmd); } /** @@ -1179,8 +1358,16 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, * in the same manner) */ page_control = scsicmd[2] >> 6; - if ((page_control != 0) && (page_control != 3)) - return 1; + switch (page_control) { + case 0: /* current */ + break; /* supported */ + case 3: /* saved */ + goto saving_not_supp; + case 1: /* changeable */ + case 2: /* defaults */ + default: + goto invalid_fld; + } if (six_byte) output_len = 4; @@ -1211,7 +1398,7 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, break; default: /* invalid page code */ - return 1; + goto invalid_fld; } if (six_byte) { @@ -1224,6 +1411,16 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, } return 0; + +invalid_fld: + ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0); + /* "Invalid field in cbd" */ + return 1; + +saving_not_supp: + ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0); + /* "Saving parameters not supported" */ + return 1; } /** @@ -1246,10 +1443,20 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, VPRINTK("ENTER\n"); - if (ata_id_has_lba48(args->id)) - n_sectors = ata_id_u64(args->id, 100); - else - n_sectors = ata_id_u32(args->id, 60); + if (ata_id_has_lba(args->id)) { + if (ata_id_has_lba48(args->id)) + n_sectors = ata_id_u64(args->id, 100); + else + n_sectors = ata_id_u32(args->id, 60); + } else { + /* CHS default translation */ + n_sectors = args->id[1] * args->id[3] * args->id[6]; + + if (ata_id_current_chs_valid(args->id)) + /* CHS current translation */ + 
n_sectors = ata_id_u32(args->id, 57); + } + n_sectors--; /* ATA TotalUserSectors - 1 */ if (args->cmd->cmnd[0] == READ_CAPACITY) { @@ -1312,6 +1519,34 @@ unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf, return 0; } +/** + * ata_scsi_set_sense - Set SCSI sense data and status + * @cmd: SCSI request to be handled + * @sk: SCSI-defined sense key + * @asc: SCSI-defined additional sense code + * @ascq: SCSI-defined additional sense code qualifier + * + * Helper function that builds a valid fixed format, current + * response code and the given sense key (sk), additional sense + * code (asc) and additional sense code qualifier (ascq) with + * a SCSI command status of %SAM_STAT_CHECK_CONDITION and + * DRIVER_SENSE set in the upper bits of scsi_cmnd::result . + * + * LOCKING: + * Not required + */ + +void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq) +{ + cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; + + cmd->sense_buffer[0] = 0x70; /* fixed format, current */ + cmd->sense_buffer[2] = sk; + cmd->sense_buffer[7] = 18 - 8; /* additional sense length */ + cmd->sense_buffer[12] = asc; + cmd->sense_buffer[13] = ascq; +} + /** * ata_scsi_badcmd - End a SCSI request with an error * @cmd: SCSI request to be handled @@ -1330,30 +1565,84 @@ unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf, void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq) { DPRINTK("ENTER\n"); - cmd->result = SAM_STAT_CHECK_CONDITION; - - cmd->sense_buffer[0] = 0x70; - cmd->sense_buffer[2] = ILLEGAL_REQUEST; - cmd->sense_buffer[7] = 14 - 8; /* addnl. sense len. FIXME: correct? */ - cmd->sense_buffer[12] = asc; - cmd->sense_buffer[13] = ascq; + ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, asc, ascq); done(cmd); } +void atapi_request_sense(struct ata_port *ap, struct ata_device *dev, + struct scsi_cmnd *cmd) +{ + DECLARE_COMPLETION(wait); + struct ata_queued_cmd *qc; + unsigned long flags; + int rc; + + DPRINTK("ATAPI request sense\n"); + + qc = ata_qc_new_init(ap, dev); + BUG_ON(qc == NULL); + + /* FIXME: is this needed? */ + memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer)); + + ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer)); + qc->dma_dir = DMA_FROM_DEVICE; + + memset(&qc->cdb, 0, ap->cdb_len); + qc->cdb[0] = REQUEST_SENSE; + qc->cdb[4] = SCSI_SENSE_BUFFERSIZE; + + qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + qc->tf.command = ATA_CMD_PACKET; + + qc->tf.protocol = ATA_PROT_ATAPI; + qc->tf.lbam = (8 * 1024) & 0xff; + qc->tf.lbah = (8 * 1024) >> 8; + qc->nbytes = SCSI_SENSE_BUFFERSIZE; + + qc->waiting = &wait; + qc->complete_fn = ata_qc_complete_noop; + + spin_lock_irqsave(&ap->host_set->lock, flags); + rc = ata_qc_issue(qc); + spin_unlock_irqrestore(&ap->host_set->lock, flags); + + if (rc) + ata_port_disable(ap); + else + wait_for_completion(&wait); + + DPRINTK("EXIT\n"); +} + static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) { struct scsi_cmnd *cmd = qc->scsicmd; - if (unlikely(drv_stat & (ATA_ERR | ATA_BUSY | ATA_DRQ))) { + VPRINTK("ENTER, drv_stat == 0x%x\n", drv_stat); + + if (unlikely(drv_stat & (ATA_BUSY | ATA_DRQ))) + ata_to_sense_error(qc, drv_stat); + + else if (unlikely(drv_stat & ATA_ERR)) { DPRINTK("request check condition\n"); + /* FIXME: command completion with check condition + * but no sense causes the error handler to run, + * which then issues REQUEST SENSE, fills in the sense + * buffer, and completes the command (for the second + * time). 
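ata_scsi_set_sense() above fills a fixed-format sense buffer by hand: byte 0 is 0x70 (current error, fixed format), byte 2 the sense key, byte 7 the additional sense length, and bytes 12/13 the ASC/ASCQ pair. A tiny standalone builder with the same layout (0x05 is the standard ILLEGAL REQUEST sense key):

#include <stdio.h>
#include <string.h>

static void set_sense(unsigned char *sb, unsigned char sk,
		      unsigned char asc, unsigned char ascq)
{
	memset(sb, 0, 18);
	sb[0]  = 0x70;		/* current error, fixed format */
	sb[2]  = sk;		/* sense key */
	sb[7]  = 18 - 8;	/* additional sense length */
	sb[12] = asc;		/* additional sense code */
	sb[13] = ascq;		/* additional sense code qualifier */
}

int main(void)
{
	unsigned char sb[18];

	set_sense(sb, 0x05, 0x24, 0x00);	/* "Invalid field in CDB" */
	printf("key=%#x asc=%#x ascq=%#x\n", sb[2], sb[12], sb[13]);
	return 0;
}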
We need to issue REQUEST SENSE some other + * way, to avoid completing the command twice. + */ cmd->result = SAM_STAT_CHECK_CONDITION; qc->scsidone(cmd); return 1; - } else { + } + + else { u8 *scsicmd = cmd->cmnd; if (scsicmd[0] == INQUIRY) { @@ -1361,15 +1650,30 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) unsigned int buflen; buflen = ata_scsi_rbuf_get(cmd, &buf); - buf[2] = 0x5; - buf[3] = (buf[3] & 0xf0) | 2; + + /* ATAPI devices typically report zero for their SCSI version, + * and sometimes deviate from the spec WRT response data + * format. If SCSI version is reported as zero like normal, + * then we make the following fixups: 1) Fake MMC-5 version, + * to indicate to the Linux scsi midlayer this is a modern + * device. 2) Ensure response data format / ATAPI information + * are always correct. + */ + /* FIXME: do we ever override EVPD pages and the like, with + * this code? + */ + if (buf[2] == 0) { + buf[2] = 0x5; + buf[3] = 0x32; + } + ata_scsi_rbuf_put(cmd, buf); } + cmd->result = SAM_STAT_GOOD; } qc->scsidone(cmd); - return 0; } /** @@ -1384,7 +1688,7 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat) * Zero on success, non-zero on failure. */ -static unsigned int atapi_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) +static unsigned int atapi_xlat(struct ata_queued_cmd *qc, const u8 *scsicmd) { struct scsi_cmnd *cmd = qc->scsicmd; struct ata_device *dev = qc->dev; @@ -1453,7 +1757,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc, u8 *scsicmd) */ static struct ata_device * -ata_scsi_find_dev(struct ata_port *ap, struct scsi_device *scsidev) +ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev) { struct ata_device *dev; @@ -1610,7 +1914,7 @@ void ata_scsi_simulate(u16 *id, void (*done)(struct scsi_cmnd *)) { struct ata_scsi_args args; - u8 *scsicmd = cmd->cmnd; + const u8 *scsicmd = cmd->cmnd; args.id = id; args.cmd = cmd; @@ -1630,7 +1934,7 @@ void ata_scsi_simulate(u16 *id, case INQUIRY: if (scsicmd[1] & 2) /* is CmdDt set? */ - ata_bad_cdb(cmd, done); + ata_scsi_invalid_field(cmd, done); else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? 
*/ ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); else if (scsicmd[2] == 0x00) @@ -1640,7 +1944,7 @@ void ata_scsi_simulate(u16 *id, else if (scsicmd[2] == 0x83) ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83); else - ata_bad_cdb(cmd, done); + ata_scsi_invalid_field(cmd, done); break; case MODE_SENSE: @@ -1650,7 +1954,7 @@ void ata_scsi_simulate(u16 *id, case MODE_SELECT: /* unconditionally return */ case MODE_SELECT_10: /* bad-field-in-cdb */ - ata_bad_cdb(cmd, done); + ata_scsi_invalid_field(cmd, done); break; case READ_CAPACITY: @@ -1661,7 +1965,7 @@ void ata_scsi_simulate(u16 *id, if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); else - ata_bad_cdb(cmd, done); + ata_scsi_invalid_field(cmd, done); break; case REPORT_LUNS: @@ -1673,8 +1977,26 @@ void ata_scsi_simulate(u16 *id, /* all other commands */ default: - ata_bad_scsiop(cmd, done); + ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0); + /* "Invalid command operation code" */ + done(cmd); break; } } +void ata_scsi_scan_host(struct ata_port *ap) +{ + struct ata_device *dev; + unsigned int i; + + if (ap->flags & ATA_FLAG_PORT_DISABLED) + return; + + for (i = 0; i < ATA_MAX_DEVICES; i++) { + dev = &ap->device[i]; + + if (ata_dev_present(dev)) + scsi_scan_target(&ap->host->shost_gendev, 0, i, 0, 0); + } +} + diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h index d608b3a0f6fe..3d60190584ba 100644 --- a/drivers/scsi/libata.h +++ b/drivers/scsi/libata.h @@ -39,18 +39,23 @@ struct ata_scsi_args { /* libata-core.c */ extern int atapi_enabled; +extern int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat); extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, struct ata_device *dev); +extern void ata_rwcmd_protocol(struct ata_queued_cmd *qc); extern void ata_qc_free(struct ata_queued_cmd *qc); extern int ata_qc_issue(struct ata_queued_cmd *qc); extern int ata_check_atapi_dma(struct ata_queued_cmd *qc); extern void ata_dev_select(struct ata_port *ap, unsigned int device, unsigned int wait, unsigned int can_sleep); -extern void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf); +extern void ata_tf_to_host_nolock(struct ata_port *ap, const struct ata_taskfile *tf); extern void swap_buf_le16(u16 *buf, unsigned int buf_words); /* libata-scsi.c */ +extern void atapi_request_sense(struct ata_port *ap, struct ata_device *dev, + struct scsi_cmnd *cmd); +extern void ata_scsi_scan_host(struct ata_port *ap); extern void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat); extern int ata_scsi_error(struct Scsi_Host *host); extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, @@ -76,18 +81,10 @@ extern unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf, extern void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq); +extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, + u8 sk, u8 asc, u8 ascq); extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args, unsigned int (*actor) (struct ata_scsi_args *args, u8 *rbuf, unsigned int buflen)); -static inline void ata_bad_scsiop(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) -{ - ata_scsi_badcmd(cmd, done, 0x20, 0x00); -} - -static inline void ata_bad_cdb(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) -{ - ata_scsi_badcmd(cmd, done, 0x24, 0x00); -} - #endif /* __LIBATA_H__ */ diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 86eaf6d408d5..acae7c48ef7d 100644 --- 
a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -973,10 +973,10 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost) if ((phba->fc_flag & FC_FABRIC) || ((phba->fc_topology == TOPOLOGY_LOOP) && (phba->fc_flag & FC_PUBLIC_LOOP))) - node_name = wwn_to_u64(phba->fc_fabparam.nodeName.wwn); + node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn); else /* fabric is local port if there is no F/FL_Port */ - node_name = wwn_to_u64(phba->fc_nodename.wwn); + node_name = wwn_to_u64(phba->fc_nodename.u.wwn); spin_unlock_irq(shost->host_lock); @@ -1110,7 +1110,7 @@ lpfc_get_starget_node_name(struct scsi_target *starget) /* Search the mapped list for this target ID */ list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) { if (starget->id == ndlp->nlp_sid) { - node_name = wwn_to_u64(ndlp->nlp_nodename.wwn); + node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); break; } } @@ -1131,7 +1131,7 @@ lpfc_get_starget_port_name(struct scsi_target *starget) /* Search the mapped list for this target ID */ list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) { if (starget->id == ndlp->nlp_sid) { - port_name = wwn_to_u64(ndlp->nlp_portname.wwn); + port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); break; } } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 4fb8eb0c84cf..56052f4510c3 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1019,8 +1019,8 @@ lpfc_register_remote_port(struct lpfc_hba * phba, struct fc_rport_identifiers rport_ids; /* Remote port has reappeared. Re-register w/ FC transport */ - rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.wwn); - rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.wwn); + rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); + rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); rport_ids.port_id = ndlp->nlp_DID; rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; if (ndlp->nlp_type & NLP_FCP_TARGET) diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 047a87c26cc0..86c41981188b 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -280,9 +280,9 @@ struct lpfc_name { #define NAME_CCITT_GR_TYPE 0xE uint8_t IEEEextLsb; /* FC Word 0, bit 16:23, IEEE extended Lsb */ uint8_t IEEE[6]; /* FC IEEE address */ - }; + } s; uint8_t wwn[8]; - }; + } u; }; struct csp { diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 454058f655db..0856ff7d3b33 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -285,7 +285,7 @@ lpfc_config_port_post(struct lpfc_hba * phba) if (phba->SerialNumber[0] == 0) { uint8_t *outptr; - outptr = (uint8_t *) & phba->fc_nodename.IEEE[0]; + outptr = &phba->fc_nodename.u.s.IEEE[0]; for (i = 0; i < 12; i++) { status = *outptr++; j = ((status & 0xf0) >> 4); @@ -1523,8 +1523,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) * Must done after lpfc_sli_hba_setup() */ - fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.wwn); - fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.wwn); + fc_host_node_name(host) = wwn_to_u64(phba->fc_nodename.u.wwn); + fc_host_port_name(host) = wwn_to_u64(phba->fc_portname.u.wwn); fc_host_supported_classes(host) = FC_COS_CLASS3; memset(fc_host_supported_fc4s(host), 0, diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 0aba13ceaacf..352df47bcaca 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -39,7 +39,7 @@ #define 
LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */ static void * -lpfc_pool_kmalloc(unsigned int gfp_flags, void *data) +lpfc_pool_kmalloc(gfp_t gfp_flags, void *data) { return kmalloc((unsigned long)data, gfp_flags); } diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 6f308ebe3e79..61a6fd810bb4 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -621,8 +621,6 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy) if(islogical) { switch (cmd->cmnd[0]) { case TEST_UNIT_READY: - memset(cmd->request_buffer, 0, cmd->request_bufflen); - #if MEGA_HAVE_CLUSTERING /* * Do we support clustering and is the support enabled @@ -652,11 +650,28 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy) return NULL; #endif - case MODE_SENSE: + case MODE_SENSE: { + char *buf; + + if (cmd->use_sg) { + struct scatterlist *sg; + + sg = (struct scatterlist *)cmd->request_buffer; + buf = kmap_atomic(sg->page, KM_IRQ0) + + sg->offset; + } else + buf = cmd->request_buffer; memset(cmd->request_buffer, 0, cmd->cmnd[4]); + if (cmd->use_sg) { + struct scatterlist *sg; + + sg = (struct scatterlist *)cmd->request_buffer; + kunmap_atomic(buf - sg->offset, KM_IRQ0); + } cmd->result = (DID_OK << 16); cmd->scsi_done(cmd); return NULL; + } case READ_CAPACITY: case INQUIRY: @@ -1685,14 +1700,23 @@ mega_rundoneq (adapter_t *adapter) static void mega_free_scb(adapter_t *adapter, scb_t *scb) { + unsigned long length; + switch( scb->dma_type ) { case MEGA_DMA_TYPE_NONE: break; case MEGA_BULK_DATA: + if (scb->cmd->use_sg == 0) + length = scb->cmd->request_bufflen; + else { + struct scatterlist *sgl = + (struct scatterlist *)scb->cmd->request_buffer; + length = sgl->length; + } pci_unmap_page(adapter->dev, scb->dma_h_bulkdata, - scb->cmd->request_bufflen, scb->dma_direction); + length, scb->dma_direction); break; case MEGA_SGLIST: @@ -1741,6 +1765,7 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len) struct scatterlist *sgl; struct page *page; unsigned long offset; + unsigned int length; Scsi_Cmnd *cmd; int sgcnt; int idx; @@ -1748,14 +1773,23 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len) cmd = scb->cmd; /* Scatter-gather not used */ - if( !cmd->use_sg ) { + if( cmd->use_sg == 0 || (cmd->use_sg == 1 && + !adapter->has_64bit_addr)) { - page = virt_to_page(cmd->request_buffer); - offset = offset_in_page(cmd->request_buffer); + if (cmd->use_sg == 0) { + page = virt_to_page(cmd->request_buffer); + offset = offset_in_page(cmd->request_buffer); + length = cmd->request_bufflen; + } else { + sgl = (struct scatterlist *)cmd->request_buffer; + page = sgl->page; + offset = sgl->offset; + length = sgl->length; + } scb->dma_h_bulkdata = pci_map_page(adapter->dev, page, offset, - cmd->request_bufflen, + length, scb->dma_direction); scb->dma_type = MEGA_BULK_DATA; @@ -1765,14 +1799,14 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len) */ if( adapter->has_64bit_addr ) { scb->sgl64[0].address = scb->dma_h_bulkdata; - scb->sgl64[0].length = cmd->request_bufflen; + scb->sgl64[0].length = length; *buf = (u32)scb->sgl_dma_addr; - *len = (u32)cmd->request_bufflen; + *len = (u32)length; return 1; } else { *buf = (u32)scb->dma_h_bulkdata; - *len = (u32)cmd->request_bufflen; + *len = (u32)length; } return 0; } @@ -1791,27 +1825,23 @@ mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len) if( sgcnt > adapter->sglen ) BUG(); + *len = 0; + for( idx = 0; idx < sgcnt; idx++, sgl++ ) { if( adapter->has_64bit_addr ) 
{ scb->sgl64[idx].address = sg_dma_address(sgl); - scb->sgl64[idx].length = sg_dma_len(sgl); + *len += scb->sgl64[idx].length = sg_dma_len(sgl); } else { scb->sgl[idx].address = sg_dma_address(sgl); - scb->sgl[idx].length = sg_dma_len(sgl); + *len += scb->sgl[idx].length = sg_dma_len(sgl); } } /* Reset pointer and length fields */ *buf = scb->sgl_dma_addr; - /* - * For passthru command, dataxferlen must be set, even for commands - * with a sg list - */ - *len = (u32)cmd->request_bufflen; - /* Return count of SG requests */ return sgcnt; } diff --git a/drivers/scsi/megaraid/Kconfig.megaraid b/drivers/scsi/megaraid/Kconfig.megaraid index 917d591d90b2..7363e12663ac 100644 --- a/drivers/scsi/megaraid/Kconfig.megaraid +++ b/drivers/scsi/megaraid/Kconfig.megaraid @@ -76,3 +76,12 @@ config MEGARAID_LEGACY To compile this driver as a module, choose M here: the module will be called megaraid endif + +config MEGARAID_SAS + tristate "LSI Logic MegaRAID SAS RAID Module" + depends on PCI && SCSI + help + Module for LSI Logic's SAS based RAID controllers. + To compile this driver as a module, choose 'm' here. + Module will be called megaraid_sas + diff --git a/drivers/scsi/megaraid/Makefile b/drivers/scsi/megaraid/Makefile index 6dd99f275722..f469915b97c3 100644 --- a/drivers/scsi/megaraid/Makefile +++ b/drivers/scsi/megaraid/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_MEGARAID_MM) += megaraid_mm.o obj-$(CONFIG_MEGARAID_MAILBOX) += megaraid_mbox.o +obj-$(CONFIG_MEGARAID_SAS) += megaraid_sas.o diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c new file mode 100644 index 000000000000..c3f637395734 --- /dev/null +++ b/drivers/scsi/megaraid/megaraid_sas.c @@ -0,0 +1,2806 @@ +/* + * + * Linux MegaRAID driver for SAS based RAID controllers + * + * Copyright (c) 2003-2005 LSI Logic Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
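The mega_build_sglist() change above replaces the request_bufflen shortcut with a running total of the DMA-mapped segment lengths. A user-space model of that accumulation, with a plain array standing in for the mapped scatterlist (all names here are illustrative):

#include <stdio.h>

/* Stand-in for one DMA-mapped scatterlist entry. */
struct sg_entry {
        unsigned int dma_len;
};

/* Model of the new bookkeeping: the transfer length reported to the
 * firmware is the sum of the mapped segment lengths, not the original
 * request_bufflen. */
static unsigned int total_sgl_len(const struct sg_entry *sg, int count)
{
        unsigned int len = 0;
        int i;

        for (i = 0; i < count; i++)
                len += sg[i].dma_len;
        return len;
}

int main(void)
{
        struct sg_entry sgl[] = { { 4096 }, { 4096 }, { 512 } };

        printf("xfer len = %u\n", total_sgl_len(sgl, 3));   /* 8704 */
        return 0;
}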
+ * + * FILE : megaraid_sas.c + * Version : v00.00.02.00-rc4 + * + * Authors: + * Sreenivas Bagalkote + * Sumant Patro + * + * List of supported controllers + * + * OEM Product Name VID DID SSVID SSID + * --- ------------ --- --- ---- ---- + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include "megaraid_sas.h" + +MODULE_LICENSE("GPL"); +MODULE_VERSION(MEGASAS_VERSION); +MODULE_AUTHOR("sreenivas.bagalkote@lsil.com"); +MODULE_DESCRIPTION("LSI Logic MegaRAID SAS Driver"); + +/* + * PCI ID table for all supported controllers + */ +static struct pci_device_id megasas_pci_table[] = { + + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_LSI_SAS1064R, + PCI_ANY_ID, + PCI_ANY_ID, + }, + { + PCI_VENDOR_ID_DELL, + PCI_DEVICE_ID_DELL_PERC5, + PCI_ANY_ID, + PCI_ANY_ID, + }, + {0} /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(pci, megasas_pci_table); + +static int megasas_mgmt_majorno; +static struct megasas_mgmt_info megasas_mgmt_info; +static struct fasync_struct *megasas_async_queue; +static DECLARE_MUTEX(megasas_async_queue_mutex); + +/** + * megasas_get_cmd - Get a command from the free pool + * @instance: Adapter soft state + * + * Returns a free command from the pool + */ +static inline struct megasas_cmd *megasas_get_cmd(struct megasas_instance + *instance) +{ + unsigned long flags; + struct megasas_cmd *cmd = NULL; + + spin_lock_irqsave(&instance->cmd_pool_lock, flags); + + if (!list_empty(&instance->cmd_pool)) { + cmd = list_entry((&instance->cmd_pool)->next, + struct megasas_cmd, list); + list_del_init(&cmd->list); + } else { + printk(KERN_ERR "megasas: Command pool empty!\n"); + } + + spin_unlock_irqrestore(&instance->cmd_pool_lock, flags); + return cmd; +} + +/** + * megasas_return_cmd - Return a cmd to free command pool + * @instance: Adapter soft state + * @cmd: Command packet to be returned to free command pool + */ +static inline void +megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) +{ + unsigned long flags; + + spin_lock_irqsave(&instance->cmd_pool_lock, flags); + + cmd->scmd = NULL; + list_add_tail(&cmd->list, &instance->cmd_pool); + + spin_unlock_irqrestore(&instance->cmd_pool_lock, flags); +} + +/** + * megasas_enable_intr - Enables interrupts + * @regs: MFI register set + */ +static inline void +megasas_enable_intr(struct megasas_register_set __iomem * regs) +{ + writel(1, &(regs)->outbound_intr_mask); + + /* Dummy readl to force pci flush */ + readl(®s->outbound_intr_mask); +} + +/** + * megasas_disable_intr - Disables interrupts + * @regs: MFI register set + */ +static inline void +megasas_disable_intr(struct megasas_register_set __iomem * regs) +{ + u32 mask = readl(®s->outbound_intr_mask) & (~0x00000001); + writel(mask, ®s->outbound_intr_mask); + + /* Dummy readl to force pci flush */ + readl(®s->outbound_intr_mask); +} + +/** + * megasas_issue_polled - Issues a polling command + * @instance: Adapter soft state + * @cmd: Command packet to be issued + * + * For polling, MFI requires the cmd_status to be set to 0xFF before posting. 
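megasas_get_cmd() and megasas_return_cmd() above implement a lock-protected free pool of preallocated commands. A rough user-space model of that discipline, using a LIFO linked list and a pthread mutex where the driver uses a kernel FIFO list and a spinlock; the lock-around-the-list pattern is the point, and all types and names are illustrative:

#include <pthread.h>
#include <stdio.h>

struct cmd {
        int index;
        struct cmd *next;
};

static struct cmd *pool_head;                         /* models cmd_pool */
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Pop one command from the free pool, or NULL if the pool is empty. */
static struct cmd *pool_get(void)
{
        struct cmd *c;

        pthread_mutex_lock(&pool_lock);
        c = pool_head;
        if (c)
                pool_head = c->next;
        pthread_mutex_unlock(&pool_lock);
        return c;
}

/* Return a command to the free pool. */
static void pool_put(struct cmd *c)
{
        pthread_mutex_lock(&pool_lock);
        c->next = pool_head;
        pool_head = c;
        pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
        struct cmd a = { 0, NULL }, b = { 1, NULL };
        struct cmd *c;

        pool_put(&a);
        pool_put(&b);
        c = pool_get();
        printf("got index %d\n", c ? c->index : -1);   /* 1: LIFO in this model */
        return 0;
}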
+ */ +static int +megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) +{ + int i; + u32 msecs = MFI_POLL_TIMEOUT_SECS * 1000; + + struct megasas_header *frame_hdr = &cmd->frame->hdr; + + frame_hdr->cmd_status = 0xFF; + frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; + + /* + * Issue the frame using inbound queue port + */ + writel(cmd->frame_phys_addr >> 3, + &instance->reg_set->inbound_queue_port); + + /* + * Wait for cmd_status to change + */ + for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i++) { + rmb(); + msleep(1); + } + + if (frame_hdr->cmd_status == 0xff) + return -ETIME; + + return 0; +} + +/** + * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds + * @instance: Adapter soft state + * @cmd: Command to be issued + * + * This function waits on an event for the command to be returned from ISR. + * Used to issue ioctl commands. + */ +static int +megasas_issue_blocked_cmd(struct megasas_instance *instance, + struct megasas_cmd *cmd) +{ + cmd->cmd_status = ENODATA; + + writel(cmd->frame_phys_addr >> 3, + &instance->reg_set->inbound_queue_port); + + wait_event(instance->int_cmd_wait_q, (cmd->cmd_status != ENODATA)); + + return 0; +} + +/** + * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd + * @instance: Adapter soft state + * @cmd_to_abort: Previously issued cmd to be aborted + * + * MFI firmware can abort previously issued AEN comamnd (automatic event + * notification). The megasas_issue_blocked_abort_cmd() issues such abort + * cmd and blocks till it is completed. + */ +static int +megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, + struct megasas_cmd *cmd_to_abort) +{ + struct megasas_cmd *cmd; + struct megasas_abort_frame *abort_fr; + + cmd = megasas_get_cmd(instance); + + if (!cmd) + return -1; + + abort_fr = &cmd->frame->abort; + + /* + * Prepare and issue the abort frame + */ + abort_fr->cmd = MFI_CMD_ABORT; + abort_fr->cmd_status = 0xFF; + abort_fr->flags = 0; + abort_fr->abort_context = cmd_to_abort->index; + abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr; + abort_fr->abort_mfi_phys_addr_hi = 0; + + cmd->sync_cmd = 1; + cmd->cmd_status = 0xFF; + + writel(cmd->frame_phys_addr >> 3, + &instance->reg_set->inbound_queue_port); + + /* + * Wait for this cmd to complete + */ + wait_event(instance->abort_cmd_wait_q, (cmd->cmd_status != 0xFF)); + + megasas_return_cmd(instance, cmd); + return 0; +} + +/** + * megasas_make_sgl32 - Prepares 32-bit SGL + * @instance: Adapter soft state + * @scp: SCSI command from the mid-layer + * @mfi_sgl: SGL to be filled in + * + * If successful, this function returns the number of SG elements. Otherwise, + * it returnes -1. 
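megasas_issue_polled() above turns a posted frame into a synchronous call by priming cmd_status to 0xFF and polling once per millisecond until the firmware overwrites it or a timeout expires. The same wait shape as a standalone sketch; MFI_POLL_TIMEOUT_SECS is kept from the driver, everything else is illustrative:

#include <stdio.h>
#include <time.h>

#define MFI_POLL_TIMEOUT_SECS 10   /* kept from the driver */

/* Model of the polled completion wait: poll the status byte once per
 * millisecond until it is no longer 0xFF or the timeout expires. */
static int poll_for_completion(volatile unsigned char *cmd_status)
{
        long msecs = MFI_POLL_TIMEOUT_SECS * 1000L;
        struct timespec one_ms = { 0, 1000000L };
        long i;

        for (i = 0; i < msecs && *cmd_status == 0xff; i++)
                nanosleep(&one_ms, NULL);

        return (*cmd_status == 0xff) ? -1 : 0;   /* -1 stands in for -ETIME */
}

int main(void)
{
        volatile unsigned char status = 0xff;

        /* Pretend the "firmware" already completed the frame. */
        status = 0x00;
        printf("poll result: %d\n", poll_for_completion(&status));
        return 0;
}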
+ */ +static inline int +megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp, + union megasas_sgl *mfi_sgl) +{ + int i; + int sge_count; + struct scatterlist *os_sgl; + + /* + * Return 0 if there is no data transfer + */ + if (!scp->request_buffer || !scp->request_bufflen) + return 0; + + if (!scp->use_sg) { + mfi_sgl->sge32[0].phys_addr = pci_map_single(instance->pdev, + scp-> + request_buffer, + scp-> + request_bufflen, + scp-> + sc_data_direction); + mfi_sgl->sge32[0].length = scp->request_bufflen; + + return 1; + } + + os_sgl = (struct scatterlist *)scp->request_buffer; + sge_count = pci_map_sg(instance->pdev, os_sgl, scp->use_sg, + scp->sc_data_direction); + + for (i = 0; i < sge_count; i++, os_sgl++) { + mfi_sgl->sge32[i].length = sg_dma_len(os_sgl); + mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl); + } + + return sge_count; +} + +/** + * megasas_make_sgl64 - Prepares 64-bit SGL + * @instance: Adapter soft state + * @scp: SCSI command from the mid-layer + * @mfi_sgl: SGL to be filled in + * + * If successful, this function returns the number of SG elements. Otherwise, + * it returnes -1. + */ +static inline int +megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp, + union megasas_sgl *mfi_sgl) +{ + int i; + int sge_count; + struct scatterlist *os_sgl; + + /* + * Return 0 if there is no data transfer + */ + if (!scp->request_buffer || !scp->request_bufflen) + return 0; + + if (!scp->use_sg) { + mfi_sgl->sge64[0].phys_addr = pci_map_single(instance->pdev, + scp-> + request_buffer, + scp-> + request_bufflen, + scp-> + sc_data_direction); + + mfi_sgl->sge64[0].length = scp->request_bufflen; + + return 1; + } + + os_sgl = (struct scatterlist *)scp->request_buffer; + sge_count = pci_map_sg(instance->pdev, os_sgl, scp->use_sg, + scp->sc_data_direction); + + for (i = 0; i < sge_count; i++, os_sgl++) { + mfi_sgl->sge64[i].length = sg_dma_len(os_sgl); + mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl); + } + + return sge_count; +} + +/** + * megasas_build_dcdb - Prepares a direct cdb (DCDB) command + * @instance: Adapter soft state + * @scp: SCSI command + * @cmd: Command to be prepared in + * + * This function prepares CDB commands. These are typcially pass-through + * commands to the devices. + */ +static inline int +megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, + struct megasas_cmd *cmd) +{ + u32 sge_sz; + int sge_bytes; + u32 is_logical; + u32 device_id; + u16 flags = 0; + struct megasas_pthru_frame *pthru; + + is_logical = MEGASAS_IS_LOGICAL(scp); + device_id = MEGASAS_DEV_INDEX(instance, scp); + pthru = (struct megasas_pthru_frame *)cmd->frame; + + if (scp->sc_data_direction == PCI_DMA_TODEVICE) + flags = MFI_FRAME_DIR_WRITE; + else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) + flags = MFI_FRAME_DIR_READ; + else if (scp->sc_data_direction == PCI_DMA_NONE) + flags = MFI_FRAME_DIR_NONE; + + /* + * Prepare the DCDB frame + */ + pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO; + pthru->cmd_status = 0x0; + pthru->scsi_status = 0x0; + pthru->target_id = device_id; + pthru->lun = scp->device->lun; + pthru->cdb_len = scp->cmd_len; + pthru->timeout = 0; + pthru->flags = flags; + pthru->data_xfer_len = scp->request_bufflen; + + memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); + + /* + * Construct SGL + */ + sge_sz = (IS_DMA64) ? 
sizeof(struct megasas_sge64) : + sizeof(struct megasas_sge32); + + if (IS_DMA64) { + pthru->flags |= MFI_FRAME_SGL64; + pthru->sge_count = megasas_make_sgl64(instance, scp, + &pthru->sgl); + } else + pthru->sge_count = megasas_make_sgl32(instance, scp, + &pthru->sgl); + + /* + * Sense info specific + */ + pthru->sense_len = SCSI_SENSE_BUFFERSIZE; + pthru->sense_buf_phys_addr_hi = 0; + pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; + + sge_bytes = sge_sz * pthru->sge_count; + + /* + * Compute the total number of frames this command consumes. FW uses + * this number to pull sufficient number of frames from host memory. + */ + cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) + + ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1; + + if (cmd->frame_count > 7) + cmd->frame_count = 8; + + return cmd->frame_count; +} + +/** + * megasas_build_ldio - Prepares IOs to logical devices + * @instance: Adapter soft state + * @scp: SCSI command + * @cmd: Command to to be prepared + * + * Frames (and accompanying SGLs) for regular SCSI IOs use this function. + */ +static inline int +megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, + struct megasas_cmd *cmd) +{ + u32 sge_sz; + int sge_bytes; + u32 device_id; + u8 sc = scp->cmnd[0]; + u16 flags = 0; + struct megasas_io_frame *ldio; + + device_id = MEGASAS_DEV_INDEX(instance, scp); + ldio = (struct megasas_io_frame *)cmd->frame; + + if (scp->sc_data_direction == PCI_DMA_TODEVICE) + flags = MFI_FRAME_DIR_WRITE; + else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) + flags = MFI_FRAME_DIR_READ; + + /* + * Preare the Logical IO frame: 2nd bit is zero for all read cmds + */ + ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ; + ldio->cmd_status = 0x0; + ldio->scsi_status = 0x0; + ldio->target_id = device_id; + ldio->timeout = 0; + ldio->reserved_0 = 0; + ldio->pad_0 = 0; + ldio->flags = flags; + ldio->start_lba_hi = 0; + ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0; + + /* + * 6-byte READ(0x08) or WRITE(0x0A) cdb + */ + if (scp->cmd_len == 6) { + ldio->lba_count = (u32) scp->cmnd[4]; + ldio->start_lba_lo = ((u32) scp->cmnd[1] << 16) | + ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3]; + + ldio->start_lba_lo &= 0x1FFFFF; + } + + /* + * 10-byte READ(0x28) or WRITE(0x2A) cdb + */ + else if (scp->cmd_len == 10) { + ldio->lba_count = (u32) scp->cmnd[8] | + ((u32) scp->cmnd[7] << 8); + ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) | + ((u32) scp->cmnd[3] << 16) | + ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; + } + + /* + * 12-byte READ(0xA8) or WRITE(0xAA) cdb + */ + else if (scp->cmd_len == 12) { + ldio->lba_count = ((u32) scp->cmnd[6] << 24) | + ((u32) scp->cmnd[7] << 16) | + ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; + + ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) | + ((u32) scp->cmnd[3] << 16) | + ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; + } + + /* + * 16-byte READ(0x88) or WRITE(0x8A) cdb + */ + else if (scp->cmd_len == 16) { + ldio->lba_count = ((u32) scp->cmnd[10] << 24) | + ((u32) scp->cmnd[11] << 16) | + ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13]; + + ldio->start_lba_lo = ((u32) scp->cmnd[6] << 24) | + ((u32) scp->cmnd[7] << 16) | + ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; + + ldio->start_lba_hi = ((u32) scp->cmnd[2] << 24) | + ((u32) scp->cmnd[3] << 16) | + ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; + + } + + /* + * Construct SGL + */ + sge_sz = (IS_DMA64) ? 
sizeof(struct megasas_sge64) : + sizeof(struct megasas_sge32); + + if (IS_DMA64) { + ldio->flags |= MFI_FRAME_SGL64; + ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl); + } else + ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl); + + /* + * Sense info specific + */ + ldio->sense_len = SCSI_SENSE_BUFFERSIZE; + ldio->sense_buf_phys_addr_hi = 0; + ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr; + + sge_bytes = sge_sz * ldio->sge_count; + + cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) + + ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1; + + if (cmd->frame_count > 7) + cmd->frame_count = 8; + + return cmd->frame_count; +} + +/** + * megasas_build_cmd - Prepares a command packet + * @instance: Adapter soft state + * @scp: SCSI command + * @frame_count: [OUT] Number of frames used to prepare this command + */ +static inline struct megasas_cmd *megasas_build_cmd(struct megasas_instance + *instance, + struct scsi_cmnd *scp, + int *frame_count) +{ + u32 logical_cmd; + struct megasas_cmd *cmd; + + /* + * Find out if this is logical or physical drive command. + */ + logical_cmd = MEGASAS_IS_LOGICAL(scp); + + /* + * Logical drive command + */ + if (logical_cmd) { + + if (scp->device->id >= MEGASAS_MAX_LD) { + scp->result = DID_BAD_TARGET << 16; + return NULL; + } + + switch (scp->cmnd[0]) { + + case READ_10: + case WRITE_10: + case READ_12: + case WRITE_12: + case READ_6: + case WRITE_6: + case READ_16: + case WRITE_16: + /* + * Fail for LUN > 0 + */ + if (scp->device->lun) { + scp->result = DID_BAD_TARGET << 16; + return NULL; + } + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + scp->result = DID_IMM_RETRY << 16; + return NULL; + } + + *frame_count = megasas_build_ldio(instance, scp, cmd); + + if (!(*frame_count)) { + megasas_return_cmd(instance, cmd); + return NULL; + } + + return cmd; + + default: + /* + * Fail for LUN > 0 + */ + if (scp->device->lun) { + scp->result = DID_BAD_TARGET << 16; + return NULL; + } + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + scp->result = DID_IMM_RETRY << 16; + return NULL; + } + + *frame_count = megasas_build_dcdb(instance, scp, cmd); + + if (!(*frame_count)) { + megasas_return_cmd(instance, cmd); + return NULL; + } + + return cmd; + } + } else { + cmd = megasas_get_cmd(instance); + + if (!cmd) { + scp->result = DID_IMM_RETRY << 16; + return NULL; + } + + *frame_count = megasas_build_dcdb(instance, scp, cmd); + + if (!(*frame_count)) { + megasas_return_cmd(instance, cmd); + return NULL; + } + + return cmd; + } + + return NULL; +} + +/** + * megasas_queue_command - Queue entry point + * @scmd: SCSI command to be queued + * @done: Callback entry point + */ +static int +megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *)) +{ + u32 frame_count; + unsigned long flags; + struct megasas_cmd *cmd; + struct megasas_instance *instance; + + instance = (struct megasas_instance *) + scmd->device->host->hostdata; + scmd->scsi_done = done; + scmd->result = 0; + + cmd = megasas_build_cmd(instance, scmd, &frame_count); + + if (!cmd) { + done(scmd); + return 0; + } + + cmd->scmd = scmd; + scmd->SCp.ptr = (char *)cmd; + scmd->SCp.sent_command = jiffies; + + /* + * Issue the command to the FW + */ + spin_lock_irqsave(&instance->instance_lock, flags); + instance->fw_outstanding++; + spin_unlock_irqrestore(&instance->instance_lock, flags); + + writel(((cmd->frame_phys_addr >> 3) | (cmd->frame_count - 1)), + &instance->reg_set->inbound_queue_port); + + return 0; +} + +/** + * megasas_wait_for_outstanding - 
Wait for all outstanding cmds + * @instance: Adapter soft state + * + * This function waits for upto MEGASAS_RESET_WAIT_TIME seconds for FW to + * complete all its outstanding commands. Returns error if one or more IOs + * are pending after this time period. It also marks the controller dead. + */ +static int megasas_wait_for_outstanding(struct megasas_instance *instance) +{ + int i; + u32 wait_time = MEGASAS_RESET_WAIT_TIME; + + for (i = 0; i < wait_time; i++) { + + if (!instance->fw_outstanding) + break; + + if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { + printk(KERN_NOTICE "megasas: [%2d]waiting for %d " + "commands to complete\n", i, + instance->fw_outstanding); + } + + msleep(1000); + } + + if (instance->fw_outstanding) { + instance->hw_crit_error = 1; + return FAILED; + } + + return SUCCESS; +} + +/** + * megasas_generic_reset - Generic reset routine + * @scmd: Mid-layer SCSI command + * + * This routine implements a generic reset handler for device, bus and host + * reset requests. Device, bus and host specific reset handlers can use this + * function after they do their specific tasks. + */ +static int megasas_generic_reset(struct scsi_cmnd *scmd) +{ + int ret_val; + struct megasas_instance *instance; + + instance = (struct megasas_instance *)scmd->device->host->hostdata; + + printk(KERN_NOTICE "megasas: RESET -%ld cmd=%x \n", + scmd->serial_number, scmd->cmnd[0], scmd->device->channel, + scmd->device->id, scmd->device->lun); + + if (instance->hw_crit_error) { + printk(KERN_ERR "megasas: cannot recover from previous reset " + "failures\n"); + return FAILED; + } + + spin_unlock(scmd->device->host->host_lock); + + ret_val = megasas_wait_for_outstanding(instance); + + if (ret_val == SUCCESS) + printk(KERN_NOTICE "megasas: reset successful \n"); + else + printk(KERN_ERR "megasas: failed to do reset\n"); + + spin_lock(scmd->device->host->host_lock); + + return ret_val; +} + +static enum scsi_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) +{ + unsigned long seconds; + + if (scmd->SCp.ptr) { + seconds = (jiffies - scmd->SCp.sent_command) / HZ; + + if (seconds < 90) { + return EH_RESET_TIMER; + } else { + return EH_NOT_HANDLED; + } + } + + return EH_HANDLED; +} + +/** + * megasas_reset_device - Device reset handler entry point + */ +static int megasas_reset_device(struct scsi_cmnd *scmd) +{ + int ret; + + /* + * First wait for all commands to complete + */ + ret = megasas_generic_reset(scmd); + + return ret; +} + +/** + * megasas_reset_bus_host - Bus & host reset handler entry point + */ +static int megasas_reset_bus_host(struct scsi_cmnd *scmd) +{ + int ret; + + /* + * Frist wait for all commands to complete + */ + ret = megasas_generic_reset(scmd); + + return ret; +} + +/** + * megasas_service_aen - Processes an event notification + * @instance: Adapter soft state + * @cmd: AEN command completed by the ISR + * + * For AEN, driver sends a command down to FW that is held by the FW till an + * event occurs. When an event of interest occurs, FW completes the command + * that it was previously holding. + * + * This routines sends SIGIO signal to processes that have registered with the + * driver for AEN. 
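megasas_reset_timer() above decides what to do with a timed-out command purely from how long it has been outstanding. Its decision reduces to a small pure function; the enum mirrors the midlayer's return codes and the 90 second cut-off comes from the code above, while the function and parameter names are illustrative:

#include <stdio.h>

enum eh_timer_return { EH_HANDLED, EH_RESET_TIMER, EH_NOT_HANDLED };

/* Model of megasas_reset_timer(): a command still owned by the
 * firmware gets its timer restarted for up to 90 seconds, after which
 * the midlayer's error handling takes over; a command the driver no
 * longer owns is reported as already handled. */
static enum eh_timer_return timed_out(int fw_owns_cmd, long seconds_outstanding)
{
        if (!fw_owns_cmd)
                return EH_HANDLED;
        return seconds_outstanding < 90 ? EH_RESET_TIMER : EH_NOT_HANDLED;
}

int main(void)
{
        printf("%d %d %d\n",
               timed_out(1, 30),    /* EH_RESET_TIMER */
               timed_out(1, 120),   /* EH_NOT_HANDLED */
               timed_out(0, 5));    /* EH_HANDLED */
        return 0;
}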
+ */ +static void +megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd) +{ + /* + * Don't signal app if it is just an aborted previously registered aen + */ + if (!cmd->abort_aen) + kill_fasync(&megasas_async_queue, SIGIO, POLL_IN); + else + cmd->abort_aen = 0; + + instance->aen_cmd = NULL; + megasas_return_cmd(instance, cmd); +} + +/* + * Scsi host template for megaraid_sas driver + */ +static struct scsi_host_template megasas_template = { + + .module = THIS_MODULE, + .name = "LSI Logic SAS based MegaRAID driver", + .proc_name = "megaraid_sas", + .queuecommand = megasas_queue_command, + .eh_device_reset_handler = megasas_reset_device, + .eh_bus_reset_handler = megasas_reset_bus_host, + .eh_host_reset_handler = megasas_reset_bus_host, + .eh_timed_out = megasas_reset_timer, + .use_clustering = ENABLE_CLUSTERING, +}; + +/** + * megasas_complete_int_cmd - Completes an internal command + * @instance: Adapter soft state + * @cmd: Command to be completed + * + * The megasas_issue_blocked_cmd() function waits for a command to complete + * after it issues a command. This function wakes up that waiting routine by + * calling wake_up() on the wait queue. + */ +static void +megasas_complete_int_cmd(struct megasas_instance *instance, + struct megasas_cmd *cmd) +{ + cmd->cmd_status = cmd->frame->io.cmd_status; + + if (cmd->cmd_status == ENODATA) { + cmd->cmd_status = 0; + } + wake_up(&instance->int_cmd_wait_q); +} + +/** + * megasas_complete_abort - Completes aborting a command + * @instance: Adapter soft state + * @cmd: Cmd that was issued to abort another cmd + * + * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q + * after it issues an abort on a previously issued command. This function + * wakes up all functions waiting on the same wait queue. + */ +static void +megasas_complete_abort(struct megasas_instance *instance, + struct megasas_cmd *cmd) +{ + if (cmd->sync_cmd) { + cmd->sync_cmd = 0; + cmd->cmd_status = 0; + wake_up(&instance->abort_cmd_wait_q); + } + + return; +} + +/** + * megasas_unmap_sgbuf - Unmap SG buffers + * @instance: Adapter soft state + * @cmd: Completed command + */ +static inline void +megasas_unmap_sgbuf(struct megasas_instance *instance, struct megasas_cmd *cmd) +{ + dma_addr_t buf_h; + u8 opcode; + + if (cmd->scmd->use_sg) { + pci_unmap_sg(instance->pdev, cmd->scmd->request_buffer, + cmd->scmd->use_sg, cmd->scmd->sc_data_direction); + return; + } + + if (!cmd->scmd->request_bufflen) + return; + + opcode = cmd->frame->hdr.cmd; + + if ((opcode == MFI_CMD_LD_READ) || (opcode == MFI_CMD_LD_WRITE)) { + if (IS_DMA64) + buf_h = cmd->frame->io.sgl.sge64[0].phys_addr; + else + buf_h = cmd->frame->io.sgl.sge32[0].phys_addr; + } else { + if (IS_DMA64) + buf_h = cmd->frame->pthru.sgl.sge64[0].phys_addr; + else + buf_h = cmd->frame->pthru.sgl.sge32[0].phys_addr; + } + + pci_unmap_single(instance->pdev, buf_h, cmd->scmd->request_bufflen, + cmd->scmd->sc_data_direction); + return; +} + +/** + * megasas_complete_cmd - Completes a command + * @instance: Adapter soft state + * @cmd: Command to be completed + * @alt_status: If non-zero, use this value as status to + * SCSI mid-layer instead of the value returned + * by the FW. 
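The libata changes earlier in this patch and the completion handling below both assemble scmd->result the same way: SCSI status in the low byte, host byte (DID_*) at bit 16, driver byte at bit 24. A small sketch of that packing; the constant values reflect a reading of scsi.h from this era and should be treated as illustrative rather than authoritative:

#include <stdio.h>

#define DID_OK                    0x00
#define DID_ERROR                 0x07
#define DRIVER_SENSE              0x08
#define SAM_STAT_CHECK_CONDITION  0x02

/* scmd->result layout: driver byte | host byte | msg byte | status byte. */
static unsigned int pack_result(unsigned int driver_byte,
                                unsigned int host_byte,
                                unsigned int status_byte)
{
        return (driver_byte << 24) | (host_byte << 16) | status_byte;
}

int main(void)
{
        /* CHECK CONDITION with valid sense, as ata_scsi_set_sense() builds it */
        printf("%#x\n", pack_result(DRIVER_SENSE, DID_OK, SAM_STAT_CHECK_CONDITION));
        /* plain host-level failure, as the err_mem path above reports it */
        printf("%#x\n", pack_result(0, DID_ERROR, 0));
        return 0;
}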
This should be used if caller wants + * an alternate status (as in the case of aborted + * commands) + */ +static inline void +megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, + u8 alt_status) +{ + int exception = 0; + struct megasas_header *hdr = &cmd->frame->hdr; + unsigned long flags; + + if (cmd->scmd) { + cmd->scmd->SCp.ptr = (char *)0; + } + + switch (hdr->cmd) { + + case MFI_CMD_PD_SCSI_IO: + case MFI_CMD_LD_SCSI_IO: + + /* + * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been + * issued either through an IO path or an IOCTL path. If it + * was via IOCTL, we will send it to internal completion. + */ + if (cmd->sync_cmd) { + cmd->sync_cmd = 0; + megasas_complete_int_cmd(instance, cmd); + break; + } + + /* + * Don't export physical disk devices to mid-layer. + */ + if (!MEGASAS_IS_LOGICAL(cmd->scmd) && + (hdr->cmd_status == MFI_STAT_OK) && + (cmd->scmd->cmnd[0] == INQUIRY)) { + + if (((*(u8 *) cmd->scmd->request_buffer) & 0x1F) == + TYPE_DISK) { + cmd->scmd->result = DID_BAD_TARGET << 16; + exception = 1; + } + } + + case MFI_CMD_LD_READ: + case MFI_CMD_LD_WRITE: + + if (alt_status) { + cmd->scmd->result = alt_status << 16; + exception = 1; + } + + if (exception) { + + spin_lock_irqsave(&instance->instance_lock, flags); + instance->fw_outstanding--; + spin_unlock_irqrestore(&instance->instance_lock, flags); + + megasas_unmap_sgbuf(instance, cmd); + cmd->scmd->scsi_done(cmd->scmd); + megasas_return_cmd(instance, cmd); + + break; + } + + switch (hdr->cmd_status) { + + case MFI_STAT_OK: + cmd->scmd->result = DID_OK << 16; + break; + + case MFI_STAT_SCSI_IO_FAILED: + case MFI_STAT_LD_INIT_IN_PROGRESS: + cmd->scmd->result = + (DID_ERROR << 16) | hdr->scsi_status; + break; + + case MFI_STAT_SCSI_DONE_WITH_ERROR: + + cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status; + + if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) { + memset(cmd->scmd->sense_buffer, 0, + SCSI_SENSE_BUFFERSIZE); + memcpy(cmd->scmd->sense_buffer, cmd->sense, + hdr->sense_len); + + cmd->scmd->result |= DRIVER_SENSE << 24; + } + + break; + + case MFI_STAT_LD_OFFLINE: + case MFI_STAT_DEVICE_NOT_FOUND: + cmd->scmd->result = DID_BAD_TARGET << 16; + break; + + default: + printk(KERN_DEBUG "megasas: MFI FW status %#x\n", + hdr->cmd_status); + cmd->scmd->result = DID_ERROR << 16; + break; + } + + spin_lock_irqsave(&instance->instance_lock, flags); + instance->fw_outstanding--; + spin_unlock_irqrestore(&instance->instance_lock, flags); + + megasas_unmap_sgbuf(instance, cmd); + cmd->scmd->scsi_done(cmd->scmd); + megasas_return_cmd(instance, cmd); + + break; + + case MFI_CMD_SMP: + case MFI_CMD_STP: + case MFI_CMD_DCMD: + + /* + * See if got an event notification + */ + if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) + megasas_service_aen(instance, cmd); + else + megasas_complete_int_cmd(instance, cmd); + + break; + + case MFI_CMD_ABORT: + /* + * Cmd issued to abort another cmd returned + */ + megasas_complete_abort(instance, cmd); + break; + + default: + printk("megasas: Unknown command completed! 
[0x%X]\n", + hdr->cmd); + break; + } +} + +/** + * megasas_deplete_reply_queue - Processes all completed commands + * @instance: Adapter soft state + * @alt_status: Alternate status to be returned to + * SCSI mid-layer instead of the status + * returned by the FW + */ +static inline int +megasas_deplete_reply_queue(struct megasas_instance *instance, u8 alt_status) +{ + u32 status; + u32 producer; + u32 consumer; + u32 context; + struct megasas_cmd *cmd; + + /* + * Check if it is our interrupt + */ + status = readl(&instance->reg_set->outbound_intr_status); + + if (!(status & MFI_OB_INTR_STATUS_MASK)) { + return IRQ_NONE; + } + + /* + * Clear the interrupt by writing back the same value + */ + writel(status, &instance->reg_set->outbound_intr_status); + + producer = *instance->producer; + consumer = *instance->consumer; + + while (consumer != producer) { + context = instance->reply_queue[consumer]; + + cmd = instance->cmd_list[context]; + + megasas_complete_cmd(instance, cmd, alt_status); + + consumer++; + if (consumer == (instance->max_fw_cmds + 1)) { + consumer = 0; + } + } + + *instance->consumer = producer; + + return IRQ_HANDLED; +} + +/** + * megasas_isr - isr entry point + */ +static irqreturn_t megasas_isr(int irq, void *devp, struct pt_regs *regs) +{ + return megasas_deplete_reply_queue((struct megasas_instance *)devp, + DID_OK); +} + +/** + * megasas_transition_to_ready - Move the FW to READY state + * @reg_set: MFI register set + * + * During the initialization, FW passes can potentially be in any one of + * several possible states. If the FW in operational, waiting-for-handshake + * states, driver must take steps to bring it to ready state. Otherwise, it + * has to wait for the ready state. + */ +static int +megasas_transition_to_ready(struct megasas_register_set __iomem * reg_set) +{ + int i; + u8 max_wait; + u32 fw_state; + u32 cur_state; + + fw_state = readl(®_set->outbound_msg_0) & MFI_STATE_MASK; + + while (fw_state != MFI_STATE_READY) { + + printk(KERN_INFO "megasas: Waiting for FW to come to ready" + " state\n"); + switch (fw_state) { + + case MFI_STATE_FAULT: + + printk(KERN_DEBUG "megasas: FW in FAULT state!!\n"); + return -ENODEV; + + case MFI_STATE_WAIT_HANDSHAKE: + /* + * Set the CLR bit in inbound doorbell + */ + writel(MFI_INIT_CLEAR_HANDSHAKE, + ®_set->inbound_doorbell); + + max_wait = 2; + cur_state = MFI_STATE_WAIT_HANDSHAKE; + break; + + case MFI_STATE_OPERATIONAL: + /* + * Bring it to READY state; assuming max wait 2 secs + */ + megasas_disable_intr(reg_set); + writel(MFI_INIT_READY, ®_set->inbound_doorbell); + + max_wait = 10; + cur_state = MFI_STATE_OPERATIONAL; + break; + + case MFI_STATE_UNDEFINED: + /* + * This state should not last for more than 2 seconds + */ + max_wait = 2; + cur_state = MFI_STATE_UNDEFINED; + break; + + case MFI_STATE_BB_INIT: + max_wait = 2; + cur_state = MFI_STATE_BB_INIT; + break; + + case MFI_STATE_FW_INIT: + max_wait = 20; + cur_state = MFI_STATE_FW_INIT; + break; + + case MFI_STATE_FW_INIT_2: + max_wait = 20; + cur_state = MFI_STATE_FW_INIT_2; + break; + + case MFI_STATE_DEVICE_SCAN: + max_wait = 20; + cur_state = MFI_STATE_DEVICE_SCAN; + break; + + case MFI_STATE_FLUSH_CACHE: + max_wait = 20; + cur_state = MFI_STATE_FLUSH_CACHE; + break; + + default: + printk(KERN_DEBUG "megasas: Unknown state 0x%x\n", + fw_state); + return -ENODEV; + } + + /* + * The cur_state should not last for more than max_wait secs + */ + for (i = 0; i < (max_wait * 1000); i++) { + fw_state = MFI_STATE_MASK & + readl(®_set->outbound_msg_0); + + if 
(fw_state == cur_state) { + msleep(1); + } else + break; + } + + /* + * Return error if fw_state hasn't changed after max_wait + */ + if (fw_state == cur_state) { + printk(KERN_DEBUG "FW state [%d] hasn't changed " + "in %d secs\n", fw_state, max_wait); + return -ENODEV; + } + }; + + return 0; +} + +/** + * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool + * @instance: Adapter soft state + */ +static void megasas_teardown_frame_pool(struct megasas_instance *instance) +{ + int i; + u32 max_cmd = instance->max_fw_cmds; + struct megasas_cmd *cmd; + + if (!instance->frame_dma_pool) + return; + + /* + * Return all frames to pool + */ + for (i = 0; i < max_cmd; i++) { + + cmd = instance->cmd_list[i]; + + if (cmd->frame) + pci_pool_free(instance->frame_dma_pool, cmd->frame, + cmd->frame_phys_addr); + + if (cmd->sense) + pci_pool_free(instance->sense_dma_pool, cmd->frame, + cmd->sense_phys_addr); + } + + /* + * Now destroy the pool itself + */ + pci_pool_destroy(instance->frame_dma_pool); + pci_pool_destroy(instance->sense_dma_pool); + + instance->frame_dma_pool = NULL; + instance->sense_dma_pool = NULL; +} + +/** + * megasas_create_frame_pool - Creates DMA pool for cmd frames + * @instance: Adapter soft state + * + * Each command packet has an embedded DMA memory buffer that is used for + * filling MFI frame and the SG list that immediately follows the frame. This + * function creates those DMA memory buffers for each command packet by using + * PCI pool facility. + */ +static int megasas_create_frame_pool(struct megasas_instance *instance) +{ + int i; + u32 max_cmd; + u32 sge_sz; + u32 sgl_sz; + u32 total_sz; + u32 frame_count; + struct megasas_cmd *cmd; + + max_cmd = instance->max_fw_cmds; + + /* + * Size of our frame is 64 bytes for MFI frame, followed by max SG + * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer + */ + sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : + sizeof(struct megasas_sge32); + + /* + * Calculated the number of 64byte frames required for SGL + */ + sgl_sz = sge_sz * instance->max_num_sge; + frame_count = (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE; + + /* + * We need one extra frame for the MFI command + */ + frame_count++; + + total_sz = MEGAMFI_FRAME_SIZE * frame_count; + /* + * Use DMA pool facility provided by PCI layer + */ + instance->frame_dma_pool = pci_pool_create("megasas frame pool", + instance->pdev, total_sz, 64, + 0); + + if (!instance->frame_dma_pool) { + printk(KERN_DEBUG "megasas: failed to setup frame pool\n"); + return -ENOMEM; + } + + instance->sense_dma_pool = pci_pool_create("megasas sense pool", + instance->pdev, 128, 4, 0); + + if (!instance->sense_dma_pool) { + printk(KERN_DEBUG "megasas: failed to setup sense pool\n"); + + pci_pool_destroy(instance->frame_dma_pool); + instance->frame_dma_pool = NULL; + + return -ENOMEM; + } + + /* + * Allocate and attach a frame to each of the commands in cmd_list. 
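megasas_create_frame_pool() above sizes each per-command DMA buffer as one 64-byte MFI frame plus however many extra 64-byte frames the worst-case SGL needs. The arithmetic in isolation; MEGAMFI_FRAME_SIZE of 64 comes from the comment above, and the SGE size and count in main are only example values:

#include <stdio.h>

#define MEGAMFI_FRAME_SIZE 64

/* Model of the sizing in megasas_create_frame_pool(): the SGL that
 * follows the MFI header is rounded up to whole 64-byte frames, plus
 * one frame for the MFI command itself. */
static unsigned int pool_buf_size(unsigned int sge_sz, unsigned int max_num_sge)
{
        unsigned int sgl_sz = sge_sz * max_num_sge;
        unsigned int frame_count =
                (sgl_sz + MEGAMFI_FRAME_SIZE - 1) / MEGAMFI_FRAME_SIZE;

        frame_count++;                          /* the MFI frame itself */
        return MEGAMFI_FRAME_SIZE * frame_count;
}

int main(void)
{
        /* e.g. 8-byte 32-bit SGEs, 26 SGEs per command (example values) */
        printf("buffer size = %u bytes\n", pool_buf_size(8, 26));   /* 320 */
        return 0;
}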
+ * By making cmd->index as the context instead of the &cmd, we can + * always use 32bit context regardless of the architecture + */ + for (i = 0; i < max_cmd; i++) { + + cmd = instance->cmd_list[i]; + + cmd->frame = pci_pool_alloc(instance->frame_dma_pool, + GFP_KERNEL, &cmd->frame_phys_addr); + + cmd->sense = pci_pool_alloc(instance->sense_dma_pool, + GFP_KERNEL, &cmd->sense_phys_addr); + + /* + * megasas_teardown_frame_pool() takes care of freeing + * whatever has been allocated + */ + if (!cmd->frame || !cmd->sense) { + printk(KERN_DEBUG "megasas: pci_pool_alloc failed \n"); + megasas_teardown_frame_pool(instance); + return -ENOMEM; + } + + cmd->frame->io.context = cmd->index; + } + + return 0; +} + +/** + * megasas_free_cmds - Free all the cmds in the free cmd pool + * @instance: Adapter soft state + */ +static void megasas_free_cmds(struct megasas_instance *instance) +{ + int i; + /* First free the MFI frame pool */ + megasas_teardown_frame_pool(instance); + + /* Free all the commands in the cmd_list */ + for (i = 0; i < instance->max_fw_cmds; i++) + kfree(instance->cmd_list[i]); + + /* Free the cmd_list buffer itself */ + kfree(instance->cmd_list); + instance->cmd_list = NULL; + + INIT_LIST_HEAD(&instance->cmd_pool); +} + +/** + * megasas_alloc_cmds - Allocates the command packets + * @instance: Adapter soft state + * + * Each command that is issued to the FW, whether IO commands from the OS or + * internal commands like IOCTLs, are wrapped in local data structure called + * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to + * the FW. + * + * Each frame has a 32-bit field called context (tag). This context is used + * to get back the megasas_cmd from the frame when a frame gets completed in + * the ISR. Typically the address of the megasas_cmd itself would be used as + * the context. But we wanted to keep the differences between 32 and 64 bit + * systems to the mininum. We always use 32 bit integers for the context. In + * this driver, the 32 bit values are the indices into an array cmd_list. + * This array is used only to look up the megasas_cmd given the context. The + * free commands themselves are maintained in a linked list called cmd_pool. + */ +static int megasas_alloc_cmds(struct megasas_instance *instance) +{ + int i; + int j; + u32 max_cmd; + struct megasas_cmd *cmd; + + max_cmd = instance->max_fw_cmds; + + /* + * instance->cmd_list is an array of struct megasas_cmd pointers. + * Allocate the dynamic array first and then allocate individual + * commands. 
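The comment above explains why the firmware context is a 32-bit array index rather than a pointer. A sketch of that lookup scheme, with the array size and names chosen only for illustration:

#include <stdio.h>
#include <stdlib.h>

struct cmd {
        unsigned int index;   /* doubles as the 32-bit firmware context */
        int in_use;
};

#define MAX_CMDS 4

static struct cmd *cmd_list[MAX_CMDS];

/* Model of the context scheme: the value written into the frame and
 * echoed back by the firmware is just an index into cmd_list[], so it
 * fits in 32 bits on both 32- and 64-bit hosts. */
static struct cmd *cmd_from_context(unsigned int context)
{
        return (context < MAX_CMDS) ? cmd_list[context] : NULL;
}

int main(void)
{
        unsigned int i;

        for (i = 0; i < MAX_CMDS; i++) {
                cmd_list[i] = calloc(1, sizeof(struct cmd));
                cmd_list[i]->index = i;
        }
        printf("context 2 -> cmd %u\n", cmd_from_context(2)->index);
        return 0;
}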
+ */ + instance->cmd_list = kmalloc(sizeof(struct megasas_cmd *) * max_cmd, + GFP_KERNEL); + + if (!instance->cmd_list) { + printk(KERN_DEBUG "megasas: out of memory\n"); + return -ENOMEM; + } + + memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) * max_cmd); + + for (i = 0; i < max_cmd; i++) { + instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd), + GFP_KERNEL); + + if (!instance->cmd_list[i]) { + + for (j = 0; j < i; j++) + kfree(instance->cmd_list[j]); + + kfree(instance->cmd_list); + instance->cmd_list = NULL; + + return -ENOMEM; + } + } + + /* + * Add all the commands to command pool (instance->cmd_pool) + */ + for (i = 0; i < max_cmd; i++) { + cmd = instance->cmd_list[i]; + memset(cmd, 0, sizeof(struct megasas_cmd)); + cmd->index = i; + cmd->instance = instance; + + list_add_tail(&cmd->list, &instance->cmd_pool); + } + + /* + * Create a frame pool and assign one frame to each cmd + */ + if (megasas_create_frame_pool(instance)) { + printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n"); + megasas_free_cmds(instance); + } + + return 0; +} + +/** + * megasas_get_controller_info - Returns FW's controller structure + * @instance: Adapter soft state + * @ctrl_info: Controller information structure + * + * Issues an internal command (DCMD) to get the FW's controller structure. + * This information is mainly used to find out the maximum IO transfer per + * command supported by the FW. + */ +static int +megasas_get_ctrl_info(struct megasas_instance *instance, + struct megasas_ctrl_info *ctrl_info) +{ + int ret = 0; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + struct megasas_ctrl_info *ci; + dma_addr_t ci_h = 0; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + printk(KERN_DEBUG "megasas: Failed to get a free cmd\n"); + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + + ci = pci_alloc_consistent(instance->pdev, + sizeof(struct megasas_ctrl_info), &ci_h); + + if (!ci) { + printk(KERN_DEBUG "Failed to alloc mem for ctrl info\n"); + megasas_return_cmd(instance, cmd); + return -ENOMEM; + } + + memset(ci, 0, sizeof(*ci)); + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0xFF; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->timeout = 0; + dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info); + dcmd->opcode = MR_DCMD_CTRL_GET_INFO; + dcmd->sgl.sge32[0].phys_addr = ci_h; + dcmd->sgl.sge32[0].length = sizeof(struct megasas_ctrl_info); + + if (!megasas_issue_polled(instance, cmd)) { + ret = 0; + memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info)); + } else { + ret = -1; + } + + pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info), + ci, ci_h); + + megasas_return_cmd(instance, cmd); + return ret; +} + +/** + * megasas_init_mfi - Initializes the FW + * @instance: Adapter soft state + * + * This is the main function for initializing MFI firmware. 
+ */ +static int megasas_init_mfi(struct megasas_instance *instance) +{ + u32 context_sz; + u32 reply_q_sz; + u32 max_sectors_1; + u32 max_sectors_2; + struct megasas_register_set __iomem *reg_set; + + struct megasas_cmd *cmd; + struct megasas_ctrl_info *ctrl_info; + + struct megasas_init_frame *init_frame; + struct megasas_init_queue_info *initq_info; + dma_addr_t init_frame_h; + dma_addr_t initq_info_h; + + /* + * Map the message registers + */ + instance->base_addr = pci_resource_start(instance->pdev, 0); + + if (pci_request_regions(instance->pdev, "megasas: LSI Logic")) { + printk(KERN_DEBUG "megasas: IO memory region busy!\n"); + return -EBUSY; + } + + instance->reg_set = ioremap_nocache(instance->base_addr, 8192); + + if (!instance->reg_set) { + printk(KERN_DEBUG "megasas: Failed to map IO mem\n"); + goto fail_ioremap; + } + + reg_set = instance->reg_set; + + /* + * We expect the FW state to be READY + */ + if (megasas_transition_to_ready(instance->reg_set)) + goto fail_ready_state; + + /* + * Get various operational parameters from status register + */ + instance->max_fw_cmds = readl(®_set->outbound_msg_0) & 0x00FFFF; + instance->max_num_sge = (readl(®_set->outbound_msg_0) & 0xFF0000) >> + 0x10; + /* + * Create a pool of commands + */ + if (megasas_alloc_cmds(instance)) + goto fail_alloc_cmds; + + /* + * Allocate memory for reply queue. Length of reply queue should + * be _one_ more than the maximum commands handled by the firmware. + * + * Note: When FW completes commands, it places corresponding contex + * values in this circular reply queue. This circular queue is a fairly + * typical producer-consumer queue. FW is the producer (of completed + * commands) and the driver is the consumer. + */ + context_sz = sizeof(u32); + reply_q_sz = context_sz * (instance->max_fw_cmds + 1); + + instance->reply_queue = pci_alloc_consistent(instance->pdev, + reply_q_sz, + &instance->reply_queue_h); + + if (!instance->reply_queue) { + printk(KERN_DEBUG "megasas: Out of DMA mem for reply queue\n"); + goto fail_reply_queue; + } + + /* + * Prepare a init frame. Note the init frame points to queue info + * structure. Each frame has SGL allocated after first 64 bytes. For + * this frame - since we don't need any SGL - we use SGL's space as + * queue info structure + * + * We will not get a NULL command below. We just created the pool. 
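The reply queue allocated below is a producer/consumer ring of max_fw_cmds + 1 context slots, drained by megasas_deplete_reply_queue() earlier in this file. The wrap-around walk in isolation; the queue depth and ring contents here are example values:

#include <stdio.h>

#define MAX_FW_CMDS 4
#define QUEUE_ENTRIES (MAX_FW_CMDS + 1)   /* one more than outstanding cmds */

/* Model of the reply-queue drain: consume contexts from the ring until
 * the consumer index catches up with the producer index, wrapping at
 * QUEUE_ENTRIES. */
static unsigned int drain(const unsigned int *reply_queue,
                          unsigned int producer, unsigned int consumer)
{
        while (consumer != producer) {
                printf("completed context %u\n", reply_queue[consumer]);
                if (++consumer == QUEUE_ENTRIES)
                        consumer = 0;
        }
        return consumer;
}

int main(void)
{
        unsigned int ring[QUEUE_ENTRIES] = { 7, 3, 0, 0, 2 };

        /* the producer has wrapped past the end and written slot 0 again */
        drain(ring, 1, 4);   /* completes contexts 2, then 7 */
        return 0;
}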
+ */ + cmd = megasas_get_cmd(instance); + + init_frame = (struct megasas_init_frame *)cmd->frame; + initq_info = (struct megasas_init_queue_info *) + ((unsigned long)init_frame + 64); + + init_frame_h = cmd->frame_phys_addr; + initq_info_h = init_frame_h + 64; + + memset(init_frame, 0, MEGAMFI_FRAME_SIZE); + memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); + + initq_info->reply_queue_entries = instance->max_fw_cmds + 1; + initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h; + + initq_info->producer_index_phys_addr_lo = instance->producer_h; + initq_info->consumer_index_phys_addr_lo = instance->consumer_h; + + init_frame->cmd = MFI_CMD_INIT; + init_frame->cmd_status = 0xFF; + init_frame->queue_info_new_phys_addr_lo = initq_info_h; + + init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info); + + /* + * Issue the init frame in polled mode + */ + if (megasas_issue_polled(instance, cmd)) { + printk(KERN_DEBUG "megasas: Failed to init firmware\n"); + goto fail_fw_init; + } + + megasas_return_cmd(instance, cmd); + + ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL); + + /* + * Compute the max allowed sectors per IO: The controller info has two + * limits on max sectors. Driver should use the minimum of these two. + * + * 1 << stripe_sz_ops.min = max sectors per strip + * + * Note that older firmwares ( < FW ver 30) didn't report information + * to calculate max_sectors_1. So the number ended up as zero always. + */ + if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) { + + max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * + ctrl_info->max_strips_per_io; + max_sectors_2 = ctrl_info->max_request_size; + + instance->max_sectors_per_req = (max_sectors_1 < max_sectors_2) + ? max_sectors_1 : max_sectors_2; + } else + instance->max_sectors_per_req = instance->max_num_sge * + PAGE_SIZE / 512; + + kfree(ctrl_info); + + return 0; + + fail_fw_init: + megasas_return_cmd(instance, cmd); + + pci_free_consistent(instance->pdev, reply_q_sz, + instance->reply_queue, instance->reply_queue_h); + fail_reply_queue: + megasas_free_cmds(instance); + + fail_alloc_cmds: + fail_ready_state: + iounmap(instance->reg_set); + + fail_ioremap: + pci_release_regions(instance->pdev); + + return -EINVAL; +} + +/** + * megasas_release_mfi - Reverses the FW initialization + * @intance: Adapter soft state + */ +static void megasas_release_mfi(struct megasas_instance *instance) +{ + u32 reply_q_sz = sizeof(u32) * (instance->max_fw_cmds + 1); + + pci_free_consistent(instance->pdev, reply_q_sz, + instance->reply_queue, instance->reply_queue_h); + + megasas_free_cmds(instance); + + iounmap(instance->reg_set); + + pci_release_regions(instance->pdev); +} + +/** + * megasas_get_seq_num - Gets latest event sequence numbers + * @instance: Adapter soft state + * @eli: FW event log sequence numbers information + * + * FW maintains a log of all events in a non-volatile area. Upper layers would + * usually find out the latest sequence number of the events, the seq number at + * the boot etc. They would "read" all the events below the latest seq number + * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq + * number), they would subsribe to AEN (asynchronous event notification) and + * wait for the events to happen. 
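The max_sectors_per_req selection above takes the smaller of the stripe-based limit and the firmware's max_request_size, and falls back to an SGE-based bound when the controller info is unavailable. The same arithmetic as a pure function; PAGE_SIZE of 4096 and the sample inputs are illustrative only:

#include <stdio.h>

#define PAGE_SIZE 4096   /* example; architecture dependent in the kernel */

/* Model of the max_sectors_per_req selection in megasas_init_mfi(). */
static unsigned int max_sectors_per_req(int have_ctrl_info,
                                        unsigned int stripe_sz_min,
                                        unsigned int max_strips_per_io,
                                        unsigned int max_request_size,
                                        unsigned int max_num_sge)
{
        if (have_ctrl_info) {
                unsigned int s1 = (1U << stripe_sz_min) * max_strips_per_io;
                unsigned int s2 = max_request_size;

                return s1 < s2 ? s1 : s2;
        }
        return max_num_sge * PAGE_SIZE / 512;
}

int main(void)
{
        printf("%u\n", max_sectors_per_req(1, 7, 8, 1280, 26));   /* min(1024, 1280) */
        printf("%u\n", max_sectors_per_req(0, 0, 0, 0, 26));      /* 26 * 4096 / 512 */
        return 0;
}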
+ */ +static int +megasas_get_seq_num(struct megasas_instance *instance, + struct megasas_evt_log_info *eli) +{ + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + struct megasas_evt_log_info *el_info; + dma_addr_t el_info_h = 0; + + cmd = megasas_get_cmd(instance); + + if (!cmd) { + return -ENOMEM; + } + + dcmd = &cmd->frame->dcmd; + el_info = pci_alloc_consistent(instance->pdev, + sizeof(struct megasas_evt_log_info), + &el_info_h); + + if (!el_info) { + megasas_return_cmd(instance, cmd); + return -ENOMEM; + } + + memset(el_info, 0, sizeof(*el_info)); + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0x0; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->timeout = 0; + dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info); + dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; + dcmd->sgl.sge32[0].phys_addr = el_info_h; + dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_log_info); + + megasas_issue_blocked_cmd(instance, cmd); + + /* + * Copy the data back into callers buffer + */ + memcpy(eli, el_info, sizeof(struct megasas_evt_log_info)); + + pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), + el_info, el_info_h); + + megasas_return_cmd(instance, cmd); + + return 0; +} + +/** + * megasas_register_aen - Registers for asynchronous event notification + * @instance: Adapter soft state + * @seq_num: The starting sequence number + * @class_locale: Class of the event + * + * This function subscribes for AEN for events beyond the @seq_num. It requests + * to be notified if and only if the event is of type @class_locale + */ +static int +megasas_register_aen(struct megasas_instance *instance, u32 seq_num, + u32 class_locale_word) +{ + int ret_val; + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + union megasas_evt_class_locale curr_aen; + union megasas_evt_class_locale prev_aen; + + /* + * If there an AEN pending already (aen_cmd), check if the + * class_locale of that pending AEN is inclusive of the new + * AEN request we currently have. If it is, then we don't have + * to do anything. In other words, whichever events the current + * AEN request is subscribing to, have already been subscribed + * to. + * + * If the old_cmd is _not_ inclusive, then we have to abort + * that command, form a class_locale that is superset of both + * old and current and re-issue to the FW + */ + + curr_aen.word = class_locale_word; + + if (instance->aen_cmd) { + + prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1]; + + /* + * A class whose enum value is smaller is inclusive of all + * higher values. If a PROGRESS (= -1) was previously + * registered, then a new registration requests for higher + * classes need not be sent to FW. They are automatically + * included. + * + * Locale numbers don't have such hierarchy. They are bitmap + * values + */ + if ((prev_aen.members.class <= curr_aen.members.class) && + !((prev_aen.members.locale & curr_aen.members.locale) ^ + curr_aen.members.locale)) { + /* + * Previously issued event registration includes + * current request. Nothing to do. 
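+ *
+ * Worked example: if the pending AEN was registered with class
+ * PROGRESS (-1) and locale LD|PD, a new request for class CRITICAL (2)
+ * and locale PD is already covered (-1 <= 2 and PD is a subset of
+ * LD|PD), so no re-registration is needed.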
+ */ + return 0; + } else { + curr_aen.members.locale |= prev_aen.members.locale; + + if (prev_aen.members.class < curr_aen.members.class) + curr_aen.members.class = prev_aen.members.class; + + instance->aen_cmd->abort_aen = 1; + ret_val = megasas_issue_blocked_abort_cmd(instance, + instance-> + aen_cmd); + + if (ret_val) { + printk(KERN_DEBUG "megasas: Failed to abort " + "previous AEN command\n"); + return ret_val; + } + } + } + + cmd = megasas_get_cmd(instance); + + if (!cmd) + return -ENOMEM; + + dcmd = &cmd->frame->dcmd; + + memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail)); + + /* + * Prepare DCMD for aen registration + */ + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0x0; + dcmd->sge_count = 1; + dcmd->flags = MFI_FRAME_DIR_READ; + dcmd->timeout = 0; + dcmd->data_xfer_len = sizeof(struct megasas_evt_detail); + dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; + dcmd->mbox.w[0] = seq_num; + dcmd->mbox.w[1] = curr_aen.word; + dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h; + dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail); + + /* + * Store reference to the cmd used to register for AEN. When an + * application wants us to register for AEN, we have to abort this + * cmd and re-register with a new EVENT LOCALE supplied by that app + */ + instance->aen_cmd = cmd; + + /* + * Issue the aen registration frame + */ + writel(cmd->frame_phys_addr >> 3, + &instance->reg_set->inbound_queue_port); + + return 0; +} + +/** + * megasas_start_aen - Subscribes to AEN during driver load time + * @instance: Adapter soft state + */ +static int megasas_start_aen(struct megasas_instance *instance) +{ + struct megasas_evt_log_info eli; + union megasas_evt_class_locale class_locale; + + /* + * Get the latest sequence number from FW + */ + memset(&eli, 0, sizeof(eli)); + + if (megasas_get_seq_num(instance, &eli)) + return -1; + + /* + * Register AEN with FW for latest sequence number plus 1 + */ + class_locale.members.reserved = 0; + class_locale.members.locale = MR_EVT_LOCALE_ALL; + class_locale.members.class = MR_EVT_CLASS_DEBUG; + + return megasas_register_aen(instance, eli.newest_seq_num + 1, + class_locale.word); +} + +/** + * megasas_io_attach - Attaches this driver to SCSI mid-layer + * @instance: Adapter soft state + */ +static int megasas_io_attach(struct megasas_instance *instance) +{ + struct Scsi_Host *host = instance->host; + + /* + * Export parameters required by SCSI mid-layer + */ + host->irq = instance->pdev->irq; + host->unique_id = instance->unique_id; + host->can_queue = instance->max_fw_cmds - MEGASAS_INT_CMDS; + host->this_id = instance->init_id; + host->sg_tablesize = instance->max_num_sge; + host->max_sectors = instance->max_sectors_per_req; + host->cmd_per_lun = 128; + host->max_channel = MEGASAS_MAX_CHANNELS - 1; + host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL; + host->max_lun = MEGASAS_MAX_LUN; + + /* + * Notify the mid-layer about the new controller + */ + if (scsi_add_host(host, &instance->pdev->dev)) { + printk(KERN_DEBUG "megasas: scsi_add_host failed\n"); + return -ENODEV; + } + + /* + * Trigger SCSI to scan our drives + */ + scsi_scan_host(host); + return 0; +} + +/** + * megasas_probe_one - PCI hotplug entry point + * @pdev: PCI device structure + * @id: PCI ids of supported hotplugged adapter + */ +static int __devinit +megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int rval; + struct Scsi_Host *host; + struct megasas_instance *instance; + + /* + * Announce PCI 
information + */ + printk(KERN_INFO "megasas: %#4.04x:%#4.04x:%#4.04x:%#4.04x: ", + pdev->vendor, pdev->device, pdev->subsystem_vendor, + pdev->subsystem_device); + + printk("bus %d:slot %d:func %d\n", + pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + + /* + * PCI prepping: enable device set bus mastering and dma mask + */ + rval = pci_enable_device(pdev); + + if (rval) { + return rval; + } + + pci_set_master(pdev); + + /* + * All our contollers are capable of performing 64-bit DMA + */ + if (IS_DMA64) { + if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) != 0) { + + if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) + goto fail_set_dma_mask; + } + } else { + if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) + goto fail_set_dma_mask; + } + + host = scsi_host_alloc(&megasas_template, + sizeof(struct megasas_instance)); + + if (!host) { + printk(KERN_DEBUG "megasas: scsi_host_alloc failed\n"); + goto fail_alloc_instance; + } + + instance = (struct megasas_instance *)host->hostdata; + memset(instance, 0, sizeof(*instance)); + + instance->producer = pci_alloc_consistent(pdev, sizeof(u32), + &instance->producer_h); + instance->consumer = pci_alloc_consistent(pdev, sizeof(u32), + &instance->consumer_h); + + if (!instance->producer || !instance->consumer) { + printk(KERN_DEBUG "megasas: Failed to allocate memory for " + "producer, consumer\n"); + goto fail_alloc_dma_buf; + } + + *instance->producer = 0; + *instance->consumer = 0; + + instance->evt_detail = pci_alloc_consistent(pdev, + sizeof(struct + megasas_evt_detail), + &instance->evt_detail_h); + + if (!instance->evt_detail) { + printk(KERN_DEBUG "megasas: Failed to allocate memory for " + "event detail structure\n"); + goto fail_alloc_dma_buf; + } + + /* + * Initialize locks and queues + */ + INIT_LIST_HEAD(&instance->cmd_pool); + + init_waitqueue_head(&instance->int_cmd_wait_q); + init_waitqueue_head(&instance->abort_cmd_wait_q); + + spin_lock_init(&instance->cmd_pool_lock); + spin_lock_init(&instance->instance_lock); + + sema_init(&instance->aen_mutex, 1); + sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS); + + /* + * Initialize PCI related and misc parameters + */ + instance->pdev = pdev; + instance->host = host; + instance->unique_id = pdev->bus->number << 8 | pdev->devfn; + instance->init_id = MEGASAS_DEFAULT_INIT_ID; + + /* + * Initialize MFI Firmware + */ + if (megasas_init_mfi(instance)) + goto fail_init_mfi; + + /* + * Register IRQ + */ + if (request_irq(pdev->irq, megasas_isr, SA_SHIRQ, "megasas", instance)) { + printk(KERN_DEBUG "megasas: Failed to register IRQ\n"); + goto fail_irq; + } + + megasas_enable_intr(instance->reg_set); + + /* + * Store instance in PCI softstate + */ + pci_set_drvdata(pdev, instance); + + /* + * Add this controller to megasas_mgmt_info structure so that it + * can be exported to management applications + */ + megasas_mgmt_info.count++; + megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance; + megasas_mgmt_info.max_index++; + + /* + * Initiate AEN (Asynchronous Event Notification) + */ + if (megasas_start_aen(instance)) { + printk(KERN_DEBUG "megasas: start aen failed\n"); + goto fail_start_aen; + } + + /* + * Register with SCSI mid-layer + */ + if (megasas_io_attach(instance)) + goto fail_io_attach; + + return 0; + + fail_start_aen: + fail_io_attach: + megasas_mgmt_info.count--; + megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; + megasas_mgmt_info.max_index--; + + pci_set_drvdata(pdev, NULL); + megasas_disable_intr(instance->reg_set); + 
free_irq(instance->pdev->irq, instance); + + megasas_release_mfi(instance); + + fail_irq: + fail_init_mfi: + fail_alloc_dma_buf: + if (instance->evt_detail) + pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), + instance->evt_detail, + instance->evt_detail_h); + + if (instance->producer) + pci_free_consistent(pdev, sizeof(u32), instance->producer, + instance->producer_h); + if (instance->consumer) + pci_free_consistent(pdev, sizeof(u32), instance->consumer, + instance->consumer_h); + scsi_host_put(host); + + fail_alloc_instance: + fail_set_dma_mask: + pci_disable_device(pdev); + + return -ENODEV; +} + +/** + * megasas_flush_cache - Requests FW to flush all its caches + * @instance: Adapter soft state + */ +static void megasas_flush_cache(struct megasas_instance *instance) +{ + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + + cmd = megasas_get_cmd(instance); + + if (!cmd) + return; + + dcmd = &cmd->frame->dcmd; + + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0x0; + dcmd->sge_count = 0; + dcmd->flags = MFI_FRAME_DIR_NONE; + dcmd->timeout = 0; + dcmd->data_xfer_len = 0; + dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; + dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; + + megasas_issue_blocked_cmd(instance, cmd); + + megasas_return_cmd(instance, cmd); + + return; +} + +/** + * megasas_shutdown_controller - Instructs FW to shutdown the controller + * @instance: Adapter soft state + */ +static void megasas_shutdown_controller(struct megasas_instance *instance) +{ + struct megasas_cmd *cmd; + struct megasas_dcmd_frame *dcmd; + + cmd = megasas_get_cmd(instance); + + if (!cmd) + return; + + if (instance->aen_cmd) + megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd); + + dcmd = &cmd->frame->dcmd; + + memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + + dcmd->cmd = MFI_CMD_DCMD; + dcmd->cmd_status = 0x0; + dcmd->sge_count = 0; + dcmd->flags = MFI_FRAME_DIR_NONE; + dcmd->timeout = 0; + dcmd->data_xfer_len = 0; + dcmd->opcode = MR_DCMD_CTRL_SHUTDOWN; + + megasas_issue_blocked_cmd(instance, cmd); + + megasas_return_cmd(instance, cmd); + + return; +} + +/** + * megasas_detach_one - PCI hot"un"plug entry point + * @pdev: PCI device structure + */ +static void megasas_detach_one(struct pci_dev *pdev) +{ + int i; + struct Scsi_Host *host; + struct megasas_instance *instance; + + instance = pci_get_drvdata(pdev); + host = instance->host; + + scsi_remove_host(instance->host); + megasas_flush_cache(instance); + megasas_shutdown_controller(instance); + + /* + * Take the instance off the instance array. Note that we will not + * decrement the max_index. 
We let this array be sparse array + */ + for (i = 0; i < megasas_mgmt_info.max_index; i++) { + if (megasas_mgmt_info.instance[i] == instance) { + megasas_mgmt_info.count--; + megasas_mgmt_info.instance[i] = NULL; + + break; + } + } + + pci_set_drvdata(instance->pdev, NULL); + + megasas_disable_intr(instance->reg_set); + + free_irq(instance->pdev->irq, instance); + + megasas_release_mfi(instance); + + pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), + instance->evt_detail, instance->evt_detail_h); + + pci_free_consistent(pdev, sizeof(u32), instance->producer, + instance->producer_h); + + pci_free_consistent(pdev, sizeof(u32), instance->consumer, + instance->consumer_h); + + scsi_host_put(host); + + pci_set_drvdata(pdev, NULL); + + pci_disable_device(pdev); + + return; +} + +/** + * megasas_shutdown - Shutdown entry point + * @device: Generic device structure + */ +static void megasas_shutdown(struct pci_dev *pdev) +{ + struct megasas_instance *instance = pci_get_drvdata(pdev); + megasas_flush_cache(instance); +} + +/** + * megasas_mgmt_open - char node "open" entry point + */ +static int megasas_mgmt_open(struct inode *inode, struct file *filep) +{ + /* + * Allow only those users with admin rights + */ + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + return 0; +} + +/** + * megasas_mgmt_release - char node "release" entry point + */ +static int megasas_mgmt_release(struct inode *inode, struct file *filep) +{ + filep->private_data = NULL; + fasync_helper(-1, filep, 0, &megasas_async_queue); + + return 0; +} + +/** + * megasas_mgmt_fasync - Async notifier registration from applications + * + * This function adds the calling process to a driver global queue. When an + * event occurs, SIGIO will be sent to all processes in this queue. + */ +static int megasas_mgmt_fasync(int fd, struct file *filep, int mode) +{ + int rc; + + down(&megasas_async_queue_mutex); + + rc = fasync_helper(fd, filep, mode, &megasas_async_queue); + + up(&megasas_async_queue_mutex); + + if (rc >= 0) { + /* For sanity check when we get ioctl */ + filep->private_data = filep; + return 0; + } + + printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc); + + return rc; +} + +/** + * megasas_mgmt_fw_ioctl - Issues management ioctls to FW + * @instance: Adapter soft state + * @argp: User's ioctl packet + */ +static int +megasas_mgmt_fw_ioctl(struct megasas_instance *instance, + struct megasas_iocpacket __user * user_ioc, + struct megasas_iocpacket *ioc) +{ + struct megasas_sge32 *kern_sge32; + struct megasas_cmd *cmd; + void *kbuff_arr[MAX_IOCTL_SGE]; + dma_addr_t buf_handle = 0; + int error = 0, i; + void *sense = NULL; + dma_addr_t sense_handle; + u32 *sense_ptr; + + memset(kbuff_arr, 0, sizeof(kbuff_arr)); + + if (ioc->sge_count > MAX_IOCTL_SGE) { + printk(KERN_DEBUG "megasas: SGE count [%d] > max limit [%d]\n", + ioc->sge_count, MAX_IOCTL_SGE); + return -EINVAL; + } + + cmd = megasas_get_cmd(instance); + if (!cmd) { + printk(KERN_DEBUG "megasas: Failed to get a cmd packet\n"); + return -ENOMEM; + } + + /* + * User's IOCTL packet has 2 frames (maximum). Copy those two + * frames into our cmd's frames. cmd->frame's context will get + * overwritten when we copy from user's frames. So set that value + * alone separately + */ + memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); + cmd->frame->hdr.context = cmd->index; + + /* + * The management interface between applications and the fw uses + * MFI frames. 
E.g, RAID configuration changes, LD property changes + * etc are accomplishes through different kinds of MFI frames. The + * driver needs to care only about substituting user buffers with + * kernel buffers in SGLs. The location of SGL is embedded in the + * struct iocpacket itself. + */ + kern_sge32 = (struct megasas_sge32 *) + ((unsigned long)cmd->frame + ioc->sgl_off); + + /* + * For each user buffer, create a mirror buffer and copy in + */ + for (i = 0; i < ioc->sge_count; i++) { + kbuff_arr[i] = pci_alloc_consistent(instance->pdev, + ioc->sgl[i].iov_len, + &buf_handle); + if (!kbuff_arr[i]) { + printk(KERN_DEBUG "megasas: Failed to alloc " + "kernel SGL buffer for IOCTL \n"); + error = -ENOMEM; + goto out; + } + + /* + * We don't change the dma_coherent_mask, so + * pci_alloc_consistent only returns 32bit addresses + */ + kern_sge32[i].phys_addr = (u32) buf_handle; + kern_sge32[i].length = ioc->sgl[i].iov_len; + + /* + * We created a kernel buffer corresponding to the + * user buffer. Now copy in from the user buffer + */ + if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base, + (u32) (ioc->sgl[i].iov_len))) { + error = -EFAULT; + goto out; + } + } + + if (ioc->sense_len) { + sense = pci_alloc_consistent(instance->pdev, ioc->sense_len, + &sense_handle); + if (!sense) { + error = -ENOMEM; + goto out; + } + + sense_ptr = + (u32 *) ((unsigned long)cmd->frame + ioc->sense_off); + *sense_ptr = sense_handle; + } + + /* + * Set the sync_cmd flag so that the ISR knows not to complete this + * cmd to the SCSI mid-layer + */ + cmd->sync_cmd = 1; + megasas_issue_blocked_cmd(instance, cmd); + cmd->sync_cmd = 0; + + /* + * copy out the kernel buffers to user buffers + */ + for (i = 0; i < ioc->sge_count; i++) { + if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i], + ioc->sgl[i].iov_len)) { + error = -EFAULT; + goto out; + } + } + + /* + * copy out the sense + */ + if (ioc->sense_len) { + /* + * sense_ptr points to the location that has the user + * sense buffer address + */ + sense_ptr = (u32 *) ((unsigned long)ioc->frame.raw + + ioc->sense_off); + + if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)), + sense, ioc->sense_len)) { + error = -EFAULT; + goto out; + } + } + + /* + * copy the status codes returned by the fw + */ + if (copy_to_user(&user_ioc->frame.hdr.cmd_status, + &cmd->frame->hdr.cmd_status, sizeof(u8))) { + printk(KERN_DEBUG "megasas: Error copying out cmd_status\n"); + error = -EFAULT; + } + + out: + if (sense) { + pci_free_consistent(instance->pdev, ioc->sense_len, + sense, sense_handle); + } + + for (i = 0; i < ioc->sge_count && kbuff_arr[i]; i++) { + pci_free_consistent(instance->pdev, + kern_sge32[i].length, + kbuff_arr[i], kern_sge32[i].phys_addr); + } + + megasas_return_cmd(instance, cmd); + return error; +} + +static struct megasas_instance *megasas_lookup_instance(u16 host_no) +{ + int i; + + for (i = 0; i < megasas_mgmt_info.max_index; i++) { + + if ((megasas_mgmt_info.instance[i]) && + (megasas_mgmt_info.instance[i]->host->host_no == host_no)) + return megasas_mgmt_info.instance[i]; + } + + return NULL; +} + +static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) +{ + struct megasas_iocpacket __user *user_ioc = + (struct megasas_iocpacket __user *)arg; + struct megasas_iocpacket *ioc; + struct megasas_instance *instance; + int error; + + ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); + if (!ioc) + return -ENOMEM; + + if (copy_from_user(ioc, user_ioc, sizeof(*ioc))) { + error = -EFAULT; + goto out_kfree_ioc; + } + + instance = 
megasas_lookup_instance(ioc->host_no); + if (!instance) { + error = -ENODEV; + goto out_kfree_ioc; + } + + /* + * We will allow only MEGASAS_INT_CMDS number of parallel ioctl cmds + */ + if (down_interruptible(&instance->ioctl_sem)) { + error = -ERESTARTSYS; + goto out_kfree_ioc; + } + error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); + up(&instance->ioctl_sem); + + out_kfree_ioc: + kfree(ioc); + return error; +} + +static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg) +{ + struct megasas_instance *instance; + struct megasas_aen aen; + int error; + + if (file->private_data != file) { + printk(KERN_DEBUG "megasas: fasync_helper was not " + "called first\n"); + return -EINVAL; + } + + if (copy_from_user(&aen, (void __user *)arg, sizeof(aen))) + return -EFAULT; + + instance = megasas_lookup_instance(aen.host_no); + + if (!instance) + return -ENODEV; + + down(&instance->aen_mutex); + error = megasas_register_aen(instance, aen.seq_num, + aen.class_locale_word); + up(&instance->aen_mutex); + return error; +} + +/** + * megasas_mgmt_ioctl - char node ioctl entry point + */ +static long +megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case MEGASAS_IOC_FIRMWARE: + return megasas_mgmt_ioctl_fw(file, arg); + + case MEGASAS_IOC_GET_AEN: + return megasas_mgmt_ioctl_aen(file, arg); + } + + return -ENOTTY; +} + +#ifdef CONFIG_COMPAT +static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg) +{ + struct compat_megasas_iocpacket __user *cioc = + (struct compat_megasas_iocpacket __user *)arg; + struct megasas_iocpacket __user *ioc = + compat_alloc_user_space(sizeof(struct megasas_iocpacket)); + int i; + int error = 0; + + clear_user(ioc, sizeof(*ioc)); + + if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) || + copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) || + copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) || + copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) || + copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) || + copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32))) + return -EFAULT; + + for (i = 0; i < MAX_IOCTL_SGE; i++) { + compat_uptr_t ptr; + + if (get_user(ptr, &cioc->sgl[i].iov_base) || + put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) || + copy_in_user(&ioc->sgl[i].iov_len, + &cioc->sgl[i].iov_len, sizeof(compat_size_t))) + return -EFAULT; + } + + error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc); + + if (copy_in_user(&cioc->frame.hdr.cmd_status, + &ioc->frame.hdr.cmd_status, sizeof(u8))) { + printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n"); + return -EFAULT; + } + return error; +} + +static long +megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + switch (cmd) { + case MEGASAS_IOC_FIRMWARE:{ + return megasas_mgmt_compat_ioctl_fw(file, arg); + } + case MEGASAS_IOC_GET_AEN: + return megasas_mgmt_ioctl_aen(file, arg); + } + + return -ENOTTY; +} +#endif + +/* + * File operations structure for management interface + */ +static struct file_operations megasas_mgmt_fops = { + .owner = THIS_MODULE, + .open = megasas_mgmt_open, + .release = megasas_mgmt_release, + .fasync = megasas_mgmt_fasync, + .unlocked_ioctl = megasas_mgmt_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = megasas_mgmt_compat_ioctl, +#endif +}; + +/* + * PCI hotplug support registration structure + */ +static struct pci_driver megasas_pci_driver = { + + .name = "megaraid_sas", + .id_table = megasas_pci_table, + .probe = 
megasas_probe_one, + .remove = __devexit_p(megasas_detach_one), + .shutdown = megasas_shutdown, +}; + +/* + * Sysfs driver attributes + */ +static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf) +{ + return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n", + MEGASAS_VERSION); +} + +static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL); + +static ssize_t +megasas_sysfs_show_release_date(struct device_driver *dd, char *buf) +{ + return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n", + MEGASAS_RELDATE); +} + +static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date, + NULL); + +/** + * megasas_init - Driver load entry point + */ +static int __init megasas_init(void) +{ + int rval; + + /* + * Announce driver version and other information + */ + printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION, + MEGASAS_EXT_VERSION); + + memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); + + /* + * Register character device node + */ + rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops); + + if (rval < 0) { + printk(KERN_DEBUG "megasas: failed to open device node\n"); + return rval; + } + + megasas_mgmt_majorno = rval; + + /* + * Register ourselves as PCI hotplug module + */ + rval = pci_module_init(&megasas_pci_driver); + + if (rval) { + printk(KERN_DEBUG "megasas: PCI hotplug regisration failed \n"); + unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); + } + + driver_create_file(&megasas_pci_driver.driver, &driver_attr_version); + driver_create_file(&megasas_pci_driver.driver, + &driver_attr_release_date); + + return rval; +} + +/** + * megasas_exit - Driver unload entry point + */ +static void __exit megasas_exit(void) +{ + driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); + driver_remove_file(&megasas_pci_driver.driver, + &driver_attr_release_date); + + pci_unregister_driver(&megasas_pci_driver); + unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl"); +} + +module_init(megasas_init); +module_exit(megasas_exit); diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h new file mode 100644 index 000000000000..eaec9d531424 --- /dev/null +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -0,0 +1,1142 @@ +/* + * + * Linux MegaRAID driver for SAS based RAID controllers + * + * Copyright (c) 2003-2005 LSI Logic Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * FILE : megaraid_sas.h + */ + +#ifndef LSI_MEGARAID_SAS_H +#define LSI_MEGARAID_SAS_H + +/** + * MegaRAID SAS Driver meta data + */ +#define MEGASAS_VERSION "00.00.02.00-rc4" +#define MEGASAS_RELDATE "Sep 16, 2005" +#define MEGASAS_EXT_VERSION "Fri Sep 16 12:37:08 EDT 2005" + +/* + * ===================================== + * MegaRAID SAS MFI firmware definitions + * ===================================== + */ + +/* + * MFI stands for MegaRAID SAS FW Interface. This is just a moniker for + * protocol between the software and firmware. 
Commands are issued using + * "message frames" + */ + +/** + * FW posts its state in upper 4 bits of outbound_msg_0 register + */ +#define MFI_STATE_MASK 0xF0000000 +#define MFI_STATE_UNDEFINED 0x00000000 +#define MFI_STATE_BB_INIT 0x10000000 +#define MFI_STATE_FW_INIT 0x40000000 +#define MFI_STATE_WAIT_HANDSHAKE 0x60000000 +#define MFI_STATE_FW_INIT_2 0x70000000 +#define MFI_STATE_DEVICE_SCAN 0x80000000 +#define MFI_STATE_FLUSH_CACHE 0xA0000000 +#define MFI_STATE_READY 0xB0000000 +#define MFI_STATE_OPERATIONAL 0xC0000000 +#define MFI_STATE_FAULT 0xF0000000 + +#define MEGAMFI_FRAME_SIZE 64 + +/** + * During FW init, clear pending cmds & reset state using inbound_msg_0 + * + * ABORT : Abort all pending cmds + * READY : Move from OPERATIONAL to READY state; discard queue info + * MFIMODE : Discard (possible) low MFA posted in 64-bit mode (??) + * CLR_HANDSHAKE: FW is waiting for HANDSHAKE from BIOS or Driver + */ +#define MFI_INIT_ABORT 0x00000000 +#define MFI_INIT_READY 0x00000002 +#define MFI_INIT_MFIMODE 0x00000004 +#define MFI_INIT_CLEAR_HANDSHAKE 0x00000008 +#define MFI_RESET_FLAGS MFI_INIT_READY|MFI_INIT_MFIMODE + +/** + * MFI frame flags + */ +#define MFI_FRAME_POST_IN_REPLY_QUEUE 0x0000 +#define MFI_FRAME_DONT_POST_IN_REPLY_QUEUE 0x0001 +#define MFI_FRAME_SGL32 0x0000 +#define MFI_FRAME_SGL64 0x0002 +#define MFI_FRAME_SENSE32 0x0000 +#define MFI_FRAME_SENSE64 0x0004 +#define MFI_FRAME_DIR_NONE 0x0000 +#define MFI_FRAME_DIR_WRITE 0x0008 +#define MFI_FRAME_DIR_READ 0x0010 +#define MFI_FRAME_DIR_BOTH 0x0018 + +/** + * Definition for cmd_status + */ +#define MFI_CMD_STATUS_POLL_MODE 0xFF + +/** + * MFI command opcodes + */ +#define MFI_CMD_INIT 0x00 +#define MFI_CMD_LD_READ 0x01 +#define MFI_CMD_LD_WRITE 0x02 +#define MFI_CMD_LD_SCSI_IO 0x03 +#define MFI_CMD_PD_SCSI_IO 0x04 +#define MFI_CMD_DCMD 0x05 +#define MFI_CMD_ABORT 0x06 +#define MFI_CMD_SMP 0x07 +#define MFI_CMD_STP 0x08 + +#define MR_DCMD_CTRL_GET_INFO 0x01010000 + +#define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000 +#define MR_FLUSH_CTRL_CACHE 0x01 +#define MR_FLUSH_DISK_CACHE 0x02 + +#define MR_DCMD_CTRL_SHUTDOWN 0x01050000 +#define MR_ENABLE_DRIVE_SPINDOWN 0x01 + +#define MR_DCMD_CTRL_EVENT_GET_INFO 0x01040100 +#define MR_DCMD_CTRL_EVENT_GET 0x01040300 +#define MR_DCMD_CTRL_EVENT_WAIT 0x01040500 +#define MR_DCMD_LD_GET_PROPERTIES 0x03030000 + +#define MR_DCMD_CLUSTER 0x08000000 +#define MR_DCMD_CLUSTER_RESET_ALL 0x08010100 +#define MR_DCMD_CLUSTER_RESET_LD 0x08010200 + +/** + * MFI command completion codes + */ +enum MFI_STAT { + MFI_STAT_OK = 0x00, + MFI_STAT_INVALID_CMD = 0x01, + MFI_STAT_INVALID_DCMD = 0x02, + MFI_STAT_INVALID_PARAMETER = 0x03, + MFI_STAT_INVALID_SEQUENCE_NUMBER = 0x04, + MFI_STAT_ABORT_NOT_POSSIBLE = 0x05, + MFI_STAT_APP_HOST_CODE_NOT_FOUND = 0x06, + MFI_STAT_APP_IN_USE = 0x07, + MFI_STAT_APP_NOT_INITIALIZED = 0x08, + MFI_STAT_ARRAY_INDEX_INVALID = 0x09, + MFI_STAT_ARRAY_ROW_NOT_EMPTY = 0x0a, + MFI_STAT_CONFIG_RESOURCE_CONFLICT = 0x0b, + MFI_STAT_DEVICE_NOT_FOUND = 0x0c, + MFI_STAT_DRIVE_TOO_SMALL = 0x0d, + MFI_STAT_FLASH_ALLOC_FAIL = 0x0e, + MFI_STAT_FLASH_BUSY = 0x0f, + MFI_STAT_FLASH_ERROR = 0x10, + MFI_STAT_FLASH_IMAGE_BAD = 0x11, + MFI_STAT_FLASH_IMAGE_INCOMPLETE = 0x12, + MFI_STAT_FLASH_NOT_OPEN = 0x13, + MFI_STAT_FLASH_NOT_STARTED = 0x14, + MFI_STAT_FLUSH_FAILED = 0x15, + MFI_STAT_HOST_CODE_NOT_FOUNT = 0x16, + MFI_STAT_LD_CC_IN_PROGRESS = 0x17, + MFI_STAT_LD_INIT_IN_PROGRESS = 0x18, + MFI_STAT_LD_LBA_OUT_OF_RANGE = 0x19, + MFI_STAT_LD_MAX_CONFIGURED = 0x1a, + MFI_STAT_LD_NOT_OPTIMAL = 0x1b, + 
MFI_STAT_LD_RBLD_IN_PROGRESS = 0x1c, + MFI_STAT_LD_RECON_IN_PROGRESS = 0x1d, + MFI_STAT_LD_WRONG_RAID_LEVEL = 0x1e, + MFI_STAT_MAX_SPARES_EXCEEDED = 0x1f, + MFI_STAT_MEMORY_NOT_AVAILABLE = 0x20, + MFI_STAT_MFC_HW_ERROR = 0x21, + MFI_STAT_NO_HW_PRESENT = 0x22, + MFI_STAT_NOT_FOUND = 0x23, + MFI_STAT_NOT_IN_ENCL = 0x24, + MFI_STAT_PD_CLEAR_IN_PROGRESS = 0x25, + MFI_STAT_PD_TYPE_WRONG = 0x26, + MFI_STAT_PR_DISABLED = 0x27, + MFI_STAT_ROW_INDEX_INVALID = 0x28, + MFI_STAT_SAS_CONFIG_INVALID_ACTION = 0x29, + MFI_STAT_SAS_CONFIG_INVALID_DATA = 0x2a, + MFI_STAT_SAS_CONFIG_INVALID_PAGE = 0x2b, + MFI_STAT_SAS_CONFIG_INVALID_TYPE = 0x2c, + MFI_STAT_SCSI_DONE_WITH_ERROR = 0x2d, + MFI_STAT_SCSI_IO_FAILED = 0x2e, + MFI_STAT_SCSI_RESERVATION_CONFLICT = 0x2f, + MFI_STAT_SHUTDOWN_FAILED = 0x30, + MFI_STAT_TIME_NOT_SET = 0x31, + MFI_STAT_WRONG_STATE = 0x32, + MFI_STAT_LD_OFFLINE = 0x33, + MFI_STAT_PEER_NOTIFICATION_REJECTED = 0x34, + MFI_STAT_PEER_NOTIFICATION_FAILED = 0x35, + MFI_STAT_RESERVATION_IN_PROGRESS = 0x36, + MFI_STAT_I2C_ERRORS_DETECTED = 0x37, + MFI_STAT_PCI_ERRORS_DETECTED = 0x38, + + MFI_STAT_INVALID_STATUS = 0xFF +}; + +/* + * Number of mailbox bytes in DCMD message frame + */ +#define MFI_MBOX_SIZE 12 + +enum MR_EVT_CLASS { + + MR_EVT_CLASS_DEBUG = -2, + MR_EVT_CLASS_PROGRESS = -1, + MR_EVT_CLASS_INFO = 0, + MR_EVT_CLASS_WARNING = 1, + MR_EVT_CLASS_CRITICAL = 2, + MR_EVT_CLASS_FATAL = 3, + MR_EVT_CLASS_DEAD = 4, + +}; + +enum MR_EVT_LOCALE { + + MR_EVT_LOCALE_LD = 0x0001, + MR_EVT_LOCALE_PD = 0x0002, + MR_EVT_LOCALE_ENCL = 0x0004, + MR_EVT_LOCALE_BBU = 0x0008, + MR_EVT_LOCALE_SAS = 0x0010, + MR_EVT_LOCALE_CTRL = 0x0020, + MR_EVT_LOCALE_CONFIG = 0x0040, + MR_EVT_LOCALE_CLUSTER = 0x0080, + MR_EVT_LOCALE_ALL = 0xffff, + +}; + +enum MR_EVT_ARGS { + + MR_EVT_ARGS_NONE, + MR_EVT_ARGS_CDB_SENSE, + MR_EVT_ARGS_LD, + MR_EVT_ARGS_LD_COUNT, + MR_EVT_ARGS_LD_LBA, + MR_EVT_ARGS_LD_OWNER, + MR_EVT_ARGS_LD_LBA_PD_LBA, + MR_EVT_ARGS_LD_PROG, + MR_EVT_ARGS_LD_STATE, + MR_EVT_ARGS_LD_STRIP, + MR_EVT_ARGS_PD, + MR_EVT_ARGS_PD_ERR, + MR_EVT_ARGS_PD_LBA, + MR_EVT_ARGS_PD_LBA_LD, + MR_EVT_ARGS_PD_PROG, + MR_EVT_ARGS_PD_STATE, + MR_EVT_ARGS_PCI, + MR_EVT_ARGS_RATE, + MR_EVT_ARGS_STR, + MR_EVT_ARGS_TIME, + MR_EVT_ARGS_ECC, + +}; + +/* + * SAS controller properties + */ +struct megasas_ctrl_prop { + + u16 seq_num; + u16 pred_fail_poll_interval; + u16 intr_throttle_count; + u16 intr_throttle_timeouts; + u8 rebuild_rate; + u8 patrol_read_rate; + u8 bgi_rate; + u8 cc_rate; + u8 recon_rate; + u8 cache_flush_interval; + u8 spinup_drv_count; + u8 spinup_delay; + u8 cluster_enable; + u8 coercion_mode; + u8 alarm_enable; + u8 disable_auto_rebuild; + u8 disable_battery_warn; + u8 ecc_bucket_size; + u16 ecc_bucket_leak_rate; + u8 restore_hotspare_on_insertion; + u8 expose_encl_devices; + u8 reserved[38]; + +} __attribute__ ((packed)); + +/* + * SAS controller information + */ +struct megasas_ctrl_info { + + /* + * PCI device information + */ + struct { + + u16 vendor_id; + u16 device_id; + u16 sub_vendor_id; + u16 sub_device_id; + u8 reserved[24]; + + } __attribute__ ((packed)) pci; + + /* + * Host interface information + */ + struct { + + u8 PCIX:1; + u8 PCIE:1; + u8 iSCSI:1; + u8 SAS_3G:1; + u8 reserved_0:4; + u8 reserved_1[6]; + u8 port_count; + u64 port_addr[8]; + + } __attribute__ ((packed)) host_interface; + + /* + * Device (backend) interface information + */ + struct { + + u8 SPI:1; + u8 SAS_3G:1; + u8 SATA_1_5G:1; + u8 SATA_3G:1; + u8 reserved_0:4; + u8 reserved_1[6]; + u8 port_count; + u64 port_addr[8]; + + } 
__attribute__ ((packed)) device_interface; + + /* + * List of components residing in flash. All str are null terminated + */ + u32 image_check_word; + u32 image_component_count; + + struct { + + char name[8]; + char version[32]; + char build_date[16]; + char built_time[16]; + + } __attribute__ ((packed)) image_component[8]; + + /* + * List of flash components that have been flashed on the card, but + * are not in use, pending reset of the adapter. This list will be + * empty if a flash operation has not occurred. All stings are null + * terminated + */ + u32 pending_image_component_count; + + struct { + + char name[8]; + char version[32]; + char build_date[16]; + char build_time[16]; + + } __attribute__ ((packed)) pending_image_component[8]; + + u8 max_arms; + u8 max_spans; + u8 max_arrays; + u8 max_lds; + + char product_name[80]; + char serial_no[32]; + + /* + * Other physical/controller/operation information. Indicates the + * presence of the hardware + */ + struct { + + u32 bbu:1; + u32 alarm:1; + u32 nvram:1; + u32 uart:1; + u32 reserved:28; + + } __attribute__ ((packed)) hw_present; + + u32 current_fw_time; + + /* + * Maximum data transfer sizes + */ + u16 max_concurrent_cmds; + u16 max_sge_count; + u32 max_request_size; + + /* + * Logical and physical device counts + */ + u16 ld_present_count; + u16 ld_degraded_count; + u16 ld_offline_count; + + u16 pd_present_count; + u16 pd_disk_present_count; + u16 pd_disk_pred_failure_count; + u16 pd_disk_failed_count; + + /* + * Memory size information + */ + u16 nvram_size; + u16 memory_size; + u16 flash_size; + + /* + * Error counters + */ + u16 mem_correctable_error_count; + u16 mem_uncorrectable_error_count; + + /* + * Cluster information + */ + u8 cluster_permitted; + u8 cluster_active; + + /* + * Additional max data transfer sizes + */ + u16 max_strips_per_io; + + /* + * Controller capabilities structures + */ + struct { + + u32 raid_level_0:1; + u32 raid_level_1:1; + u32 raid_level_5:1; + u32 raid_level_1E:1; + u32 raid_level_6:1; + u32 reserved:27; + + } __attribute__ ((packed)) raid_levels; + + struct { + + u32 rbld_rate:1; + u32 cc_rate:1; + u32 bgi_rate:1; + u32 recon_rate:1; + u32 patrol_rate:1; + u32 alarm_control:1; + u32 cluster_supported:1; + u32 bbu:1; + u32 spanning_allowed:1; + u32 dedicated_hotspares:1; + u32 revertible_hotspares:1; + u32 foreign_config_import:1; + u32 self_diagnostic:1; + u32 mixed_redundancy_arr:1; + u32 global_hot_spares:1; + u32 reserved:17; + + } __attribute__ ((packed)) adapter_operations; + + struct { + + u32 read_policy:1; + u32 write_policy:1; + u32 io_policy:1; + u32 access_policy:1; + u32 disk_cache_policy:1; + u32 reserved:27; + + } __attribute__ ((packed)) ld_operations; + + struct { + + u8 min; + u8 max; + u8 reserved[2]; + + } __attribute__ ((packed)) stripe_sz_ops; + + struct { + + u32 force_online:1; + u32 force_offline:1; + u32 force_rebuild:1; + u32 reserved:29; + + } __attribute__ ((packed)) pd_operations; + + struct { + + u32 ctrl_supports_sas:1; + u32 ctrl_supports_sata:1; + u32 allow_mix_in_encl:1; + u32 allow_mix_in_ld:1; + u32 allow_sata_in_cluster:1; + u32 reserved:27; + + } __attribute__ ((packed)) pd_mix_support; + + /* + * Define ECC single-bit-error bucket information + */ + u8 ecc_bucket_count; + u8 reserved_2[11]; + + /* + * Include the controller properties (changeable items) + */ + struct megasas_ctrl_prop properties; + + /* + * Define FW pkg version (set in envt v'bles on OEM basis) + */ + char package_version[0x60]; + + u8 pad[0x800 - 0x6a0]; + +} __attribute__ ((packed)); 
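/*
 * Illustrative sketch only, not part of the patch: this shows how the two
 * limits carried in struct megasas_ctrl_info above are folded into
 * max_sectors_per_req by megasas_init_mfi(). The helper name is
 * hypothetical; min() and PAGE_SIZE are assumed to come from the usual
 * kernel headers.
 */
static inline u32 megasas_sketch_max_sectors(struct megasas_ctrl_info *info,
					     u16 max_num_sge)
{
	u32 max_sectors_1, max_sectors_2;

	if (!info)
		/* fallback used when the controller info cannot be read */
		return max_num_sge * PAGE_SIZE / 512;

	/* (1 << stripe_sz_ops.min) sectors per strip, times strips per IO */
	max_sectors_1 = (1 << info->stripe_sz_ops.min) *
			info->max_strips_per_io;

	/* absolute per-request limit reported by the firmware */
	max_sectors_2 = info->max_request_size;

	return min(max_sectors_1, max_sectors_2);
}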
+ +/* + * =============================== + * MegaRAID SAS driver definitions + * =============================== + */ +#define MEGASAS_MAX_PD_CHANNELS 2 +#define MEGASAS_MAX_LD_CHANNELS 2 +#define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \ + MEGASAS_MAX_LD_CHANNELS) +#define MEGASAS_MAX_DEV_PER_CHANNEL 128 +#define MEGASAS_DEFAULT_INIT_ID -1 +#define MEGASAS_MAX_LUN 8 +#define MEGASAS_MAX_LD 64 + +/* + * When SCSI mid-layer calls driver's reset routine, driver waits for + * MEGASAS_RESET_WAIT_TIME seconds for all outstanding IO to complete. Note + * that the driver cannot _actually_ abort or reset pending commands. While + * it is waiting for the commands to complete, it prints a diagnostic message + * every MEGASAS_RESET_NOTICE_INTERVAL seconds + */ +#define MEGASAS_RESET_WAIT_TIME 180 +#define MEGASAS_RESET_NOTICE_INTERVAL 5 + +#define MEGASAS_IOCTL_CMD 0 + +/* + * FW reports the maximum of number of commands that it can accept (maximum + * commands that can be outstanding) at any time. The driver must report a + * lower number to the mid layer because it can issue a few internal commands + * itself (E.g, AEN, abort cmd, IOCTLs etc). The number of commands it needs + * is shown below + */ +#define MEGASAS_INT_CMDS 32 + +/* + * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit + * SGLs based on the size of dma_addr_t + */ +#define IS_DMA64 (sizeof(dma_addr_t) == 8) + +#define MFI_OB_INTR_STATUS_MASK 0x00000002 +#define MFI_POLL_TIMEOUT_SECS 10 + +struct megasas_register_set { + + u32 reserved_0[4]; /*0000h */ + + u32 inbound_msg_0; /*0010h */ + u32 inbound_msg_1; /*0014h */ + u32 outbound_msg_0; /*0018h */ + u32 outbound_msg_1; /*001Ch */ + + u32 inbound_doorbell; /*0020h */ + u32 inbound_intr_status; /*0024h */ + u32 inbound_intr_mask; /*0028h */ + + u32 outbound_doorbell; /*002Ch */ + u32 outbound_intr_status; /*0030h */ + u32 outbound_intr_mask; /*0034h */ + + u32 reserved_1[2]; /*0038h */ + + u32 inbound_queue_port; /*0040h */ + u32 outbound_queue_port; /*0044h */ + + u32 reserved_2; /*004Ch */ + + u32 index_registers[1004]; /*0050h */ + +} __attribute__ ((packed)); + +struct megasas_sge32 { + + u32 phys_addr; + u32 length; + +} __attribute__ ((packed)); + +struct megasas_sge64 { + + u64 phys_addr; + u32 length; + +} __attribute__ ((packed)); + +union megasas_sgl { + + struct megasas_sge32 sge32[1]; + struct megasas_sge64 sge64[1]; + +} __attribute__ ((packed)); + +struct megasas_header { + + u8 cmd; /*00h */ + u8 sense_len; /*01h */ + u8 cmd_status; /*02h */ + u8 scsi_status; /*03h */ + + u8 target_id; /*04h */ + u8 lun; /*05h */ + u8 cdb_len; /*06h */ + u8 sge_count; /*07h */ + + u32 context; /*08h */ + u32 pad_0; /*0Ch */ + + u16 flags; /*10h */ + u16 timeout; /*12h */ + u32 data_xferlen; /*14h */ + +} __attribute__ ((packed)); + +union megasas_sgl_frame { + + struct megasas_sge32 sge32[8]; + struct megasas_sge64 sge64[5]; + +} __attribute__ ((packed)); + +struct megasas_init_frame { + + u8 cmd; /*00h */ + u8 reserved_0; /*01h */ + u8 cmd_status; /*02h */ + + u8 reserved_1; /*03h */ + u32 reserved_2; /*04h */ + + u32 context; /*08h */ + u32 pad_0; /*0Ch */ + + u16 flags; /*10h */ + u16 reserved_3; /*12h */ + u32 data_xfer_len; /*14h */ + + u32 queue_info_new_phys_addr_lo; /*18h */ + u32 queue_info_new_phys_addr_hi; /*1Ch */ + u32 queue_info_old_phys_addr_lo; /*20h */ + u32 queue_info_old_phys_addr_hi; /*24h */ + + u32 reserved_4[6]; /*28h */ + +} __attribute__ ((packed)); + +struct megasas_init_queue_info { + + u32 init_flags; /*00h */ + u32 
reply_queue_entries; /*04h */ + + u32 reply_queue_start_phys_addr_lo; /*08h */ + u32 reply_queue_start_phys_addr_hi; /*0Ch */ + u32 producer_index_phys_addr_lo; /*10h */ + u32 producer_index_phys_addr_hi; /*14h */ + u32 consumer_index_phys_addr_lo; /*18h */ + u32 consumer_index_phys_addr_hi; /*1Ch */ + +} __attribute__ ((packed)); + +struct megasas_io_frame { + + u8 cmd; /*00h */ + u8 sense_len; /*01h */ + u8 cmd_status; /*02h */ + u8 scsi_status; /*03h */ + + u8 target_id; /*04h */ + u8 access_byte; /*05h */ + u8 reserved_0; /*06h */ + u8 sge_count; /*07h */ + + u32 context; /*08h */ + u32 pad_0; /*0Ch */ + + u16 flags; /*10h */ + u16 timeout; /*12h */ + u32 lba_count; /*14h */ + + u32 sense_buf_phys_addr_lo; /*18h */ + u32 sense_buf_phys_addr_hi; /*1Ch */ + + u32 start_lba_lo; /*20h */ + u32 start_lba_hi; /*24h */ + + union megasas_sgl sgl; /*28h */ + +} __attribute__ ((packed)); + +struct megasas_pthru_frame { + + u8 cmd; /*00h */ + u8 sense_len; /*01h */ + u8 cmd_status; /*02h */ + u8 scsi_status; /*03h */ + + u8 target_id; /*04h */ + u8 lun; /*05h */ + u8 cdb_len; /*06h */ + u8 sge_count; /*07h */ + + u32 context; /*08h */ + u32 pad_0; /*0Ch */ + + u16 flags; /*10h */ + u16 timeout; /*12h */ + u32 data_xfer_len; /*14h */ + + u32 sense_buf_phys_addr_lo; /*18h */ + u32 sense_buf_phys_addr_hi; /*1Ch */ + + u8 cdb[16]; /*20h */ + union megasas_sgl sgl; /*30h */ + +} __attribute__ ((packed)); + +struct megasas_dcmd_frame { + + u8 cmd; /*00h */ + u8 reserved_0; /*01h */ + u8 cmd_status; /*02h */ + u8 reserved_1[4]; /*03h */ + u8 sge_count; /*07h */ + + u32 context; /*08h */ + u32 pad_0; /*0Ch */ + + u16 flags; /*10h */ + u16 timeout; /*12h */ + + u32 data_xfer_len; /*14h */ + u32 opcode; /*18h */ + + union { /*1Ch */ + u8 b[12]; + u16 s[6]; + u32 w[3]; + } mbox; + + union megasas_sgl sgl; /*28h */ + +} __attribute__ ((packed)); + +struct megasas_abort_frame { + + u8 cmd; /*00h */ + u8 reserved_0; /*01h */ + u8 cmd_status; /*02h */ + + u8 reserved_1; /*03h */ + u32 reserved_2; /*04h */ + + u32 context; /*08h */ + u32 pad_0; /*0Ch */ + + u16 flags; /*10h */ + u16 reserved_3; /*12h */ + u32 reserved_4; /*14h */ + + u32 abort_context; /*18h */ + u32 pad_1; /*1Ch */ + + u32 abort_mfi_phys_addr_lo; /*20h */ + u32 abort_mfi_phys_addr_hi; /*24h */ + + u32 reserved_5[6]; /*28h */ + +} __attribute__ ((packed)); + +struct megasas_smp_frame { + + u8 cmd; /*00h */ + u8 reserved_1; /*01h */ + u8 cmd_status; /*02h */ + u8 connection_status; /*03h */ + + u8 reserved_2[3]; /*04h */ + u8 sge_count; /*07h */ + + u32 context; /*08h */ + u32 pad_0; /*0Ch */ + + u16 flags; /*10h */ + u16 timeout; /*12h */ + + u32 data_xfer_len; /*14h */ + u64 sas_addr; /*18h */ + + union { + struct megasas_sge32 sge32[2]; /* [0]: resp [1]: req */ + struct megasas_sge64 sge64[2]; /* [0]: resp [1]: req */ + } sgl; + +} __attribute__ ((packed)); + +struct megasas_stp_frame { + + u8 cmd; /*00h */ + u8 reserved_1; /*01h */ + u8 cmd_status; /*02h */ + u8 reserved_2; /*03h */ + + u8 target_id; /*04h */ + u8 reserved_3[2]; /*05h */ + u8 sge_count; /*07h */ + + u32 context; /*08h */ + u32 pad_0; /*0Ch */ + + u16 flags; /*10h */ + u16 timeout; /*12h */ + + u32 data_xfer_len; /*14h */ + + u16 fis[10]; /*18h */ + u32 stp_flags; + + union { + struct megasas_sge32 sge32[2]; /* [0]: resp [1]: data */ + struct megasas_sge64 sge64[2]; /* [0]: resp [1]: data */ + } sgl; + +} __attribute__ ((packed)); + +union megasas_frame { + + struct megasas_header hdr; + struct megasas_init_frame init; + struct megasas_io_frame io; + struct megasas_pthru_frame 
pthru; + struct megasas_dcmd_frame dcmd; + struct megasas_abort_frame abort; + struct megasas_smp_frame smp; + struct megasas_stp_frame stp; + + u8 raw_bytes[64]; +}; + +struct megasas_cmd; + +union megasas_evt_class_locale { + + struct { + u16 locale; + u8 reserved; + s8 class; + } __attribute__ ((packed)) members; + + u32 word; + +} __attribute__ ((packed)); + +struct megasas_evt_log_info { + u32 newest_seq_num; + u32 oldest_seq_num; + u32 clear_seq_num; + u32 shutdown_seq_num; + u32 boot_seq_num; + +} __attribute__ ((packed)); + +struct megasas_progress { + + u16 progress; + u16 elapsed_seconds; + +} __attribute__ ((packed)); + +struct megasas_evtarg_ld { + + u16 target_id; + u8 ld_index; + u8 reserved; + +} __attribute__ ((packed)); + +struct megasas_evtarg_pd { + u16 device_id; + u8 encl_index; + u8 slot_number; + +} __attribute__ ((packed)); + +struct megasas_evt_detail { + + u32 seq_num; + u32 time_stamp; + u32 code; + union megasas_evt_class_locale cl; + u8 arg_type; + u8 reserved1[15]; + + union { + struct { + struct megasas_evtarg_pd pd; + u8 cdb_length; + u8 sense_length; + u8 reserved[2]; + u8 cdb[16]; + u8 sense[64]; + } __attribute__ ((packed)) cdbSense; + + struct megasas_evtarg_ld ld; + + struct { + struct megasas_evtarg_ld ld; + u64 count; + } __attribute__ ((packed)) ld_count; + + struct { + u64 lba; + struct megasas_evtarg_ld ld; + } __attribute__ ((packed)) ld_lba; + + struct { + struct megasas_evtarg_ld ld; + u32 prevOwner; + u32 newOwner; + } __attribute__ ((packed)) ld_owner; + + struct { + u64 ld_lba; + u64 pd_lba; + struct megasas_evtarg_ld ld; + struct megasas_evtarg_pd pd; + } __attribute__ ((packed)) ld_lba_pd_lba; + + struct { + struct megasas_evtarg_ld ld; + struct megasas_progress prog; + } __attribute__ ((packed)) ld_prog; + + struct { + struct megasas_evtarg_ld ld; + u32 prev_state; + u32 new_state; + } __attribute__ ((packed)) ld_state; + + struct { + u64 strip; + struct megasas_evtarg_ld ld; + } __attribute__ ((packed)) ld_strip; + + struct megasas_evtarg_pd pd; + + struct { + struct megasas_evtarg_pd pd; + u32 err; + } __attribute__ ((packed)) pd_err; + + struct { + u64 lba; + struct megasas_evtarg_pd pd; + } __attribute__ ((packed)) pd_lba; + + struct { + u64 lba; + struct megasas_evtarg_pd pd; + struct megasas_evtarg_ld ld; + } __attribute__ ((packed)) pd_lba_ld; + + struct { + struct megasas_evtarg_pd pd; + struct megasas_progress prog; + } __attribute__ ((packed)) pd_prog; + + struct { + struct megasas_evtarg_pd pd; + u32 prevState; + u32 newState; + } __attribute__ ((packed)) pd_state; + + struct { + u16 vendorId; + u16 deviceId; + u16 subVendorId; + u16 subDeviceId; + } __attribute__ ((packed)) pci; + + u32 rate; + char str[96]; + + struct { + u32 rtc; + u32 elapsedSeconds; + } __attribute__ ((packed)) time; + + struct { + u32 ecar; + u32 elog; + char str[64]; + } __attribute__ ((packed)) ecc; + + u8 b[96]; + u16 s[48]; + u32 w[24]; + u64 d[12]; + } args; + + char description[128]; + +} __attribute__ ((packed)); + +struct megasas_instance { + + u32 *producer; + dma_addr_t producer_h; + u32 *consumer; + dma_addr_t consumer_h; + + u32 *reply_queue; + dma_addr_t reply_queue_h; + + unsigned long base_addr; + struct megasas_register_set __iomem *reg_set; + + s8 init_id; + u8 reserved[3]; + + u16 max_num_sge; + u16 max_fw_cmds; + u32 max_sectors_per_req; + + struct megasas_cmd **cmd_list; + struct list_head cmd_pool; + spinlock_t cmd_pool_lock; + struct dma_pool *frame_dma_pool; + struct dma_pool *sense_dma_pool; + + struct megasas_evt_detail 
*evt_detail; + dma_addr_t evt_detail_h; + struct megasas_cmd *aen_cmd; + struct semaphore aen_mutex; + struct semaphore ioctl_sem; + + struct Scsi_Host *host; + + wait_queue_head_t int_cmd_wait_q; + wait_queue_head_t abort_cmd_wait_q; + + struct pci_dev *pdev; + u32 unique_id; + + u32 fw_outstanding; + u32 hw_crit_error; + spinlock_t instance_lock; +}; + +#define MEGASAS_IS_LOGICAL(scp) \ + (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1 + +#define MEGASAS_DEV_INDEX(inst, scp) \ + ((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + \ + scp->device->id + +struct megasas_cmd { + + union megasas_frame *frame; + dma_addr_t frame_phys_addr; + u8 *sense; + dma_addr_t sense_phys_addr; + + u32 index; + u8 sync_cmd; + u8 cmd_status; + u16 abort_aen; + + struct list_head list; + struct scsi_cmnd *scmd; + struct megasas_instance *instance; + u32 frame_count; +}; + +#define MAX_MGMT_ADAPTERS 1024 +#define MAX_IOCTL_SGE 16 + +struct megasas_iocpacket { + + u16 host_no; + u16 __pad1; + u32 sgl_off; + u32 sge_count; + u32 sense_off; + u32 sense_len; + union { + u8 raw[128]; + struct megasas_header hdr; + } frame; + + struct iovec sgl[MAX_IOCTL_SGE]; + +} __attribute__ ((packed)); + +struct megasas_aen { + u16 host_no; + u16 __pad1; + u32 seq_num; + u32 class_locale_word; +} __attribute__ ((packed)); + +#ifdef CONFIG_COMPAT +struct compat_megasas_iocpacket { + u16 host_no; + u16 __pad1; + u32 sgl_off; + u32 sge_count; + u32 sense_off; + u32 sense_len; + union { + u8 raw[128]; + struct megasas_header hdr; + } frame; + struct compat_iovec sgl[MAX_IOCTL_SGE]; +} __attribute__ ((packed)); + +#define MEGASAS_IOC_FIRMWARE _IOWR('M', 1, struct compat_megasas_iocpacket) +#else +#define MEGASAS_IOC_FIRMWARE _IOWR('M', 1, struct megasas_iocpacket) +#endif + +#define MEGASAS_IOC_GET_AEN _IOW('M', 3, struct megasas_aen) + +struct megasas_mgmt_info { + + u16 count; + struct megasas_instance *instance[MAX_MGMT_ADAPTERS]; + int max_index; +}; + +#endif /*LSI_MEGARAID_SAS_H */ diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c index a4857db4f9b8..b235556b7b65 100644 --- a/drivers/scsi/mesh.c +++ b/drivers/scsi/mesh.c @@ -1959,22 +1959,35 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match) /* Set it up */ mesh_init(ms); - /* XXX FIXME: error should be fatal */ - if (request_irq(ms->meshintr, do_mesh_interrupt, 0, "MESH", ms)) + /* Request interrupt */ + if (request_irq(ms->meshintr, do_mesh_interrupt, 0, "MESH", ms)) { printk(KERN_ERR "MESH: can't get irq %d\n", ms->meshintr); + goto out_shutdown; + } - /* XXX FIXME: handle failure */ - scsi_add_host(mesh_host, &mdev->ofdev.dev); + /* Add scsi host & scan */ + if (scsi_add_host(mesh_host, &mdev->ofdev.dev)) + goto out_release_irq; scsi_scan_host(mesh_host); return 0; -out_unmap: + out_release_irq: + free_irq(ms->meshintr, ms); + out_shutdown: + /* shutdown & reset bus in case of error or macos can be confused + * at reboot if the bus was set to synchronous mode already + */ + mesh_shutdown(mdev); + set_mesh_power(ms, 0); + pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size, + ms->dma_cmd_space, ms->dma_cmd_bus); + out_unmap: iounmap(ms->dma); iounmap(ms->mesh); -out_free: + out_free: scsi_host_put(mesh_host); -out_release: + out_release: macio_release_resources(mdev); return -ENODEV; @@ -2001,7 +2014,7 @@ static int mesh_remove(struct macio_dev *mdev) /* Free DMA commands memory */ pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size, - ms->dma_cmd_space, ms->dma_cmd_bus); + ms->dma_cmd_space, 
ms->dma_cmd_bus); /* Release memory resources */ macio_release_resources(mdev); diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index 3f2f2464fa63..af1133104b3f 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c @@ -5146,7 +5146,8 @@ static long osst_compat_ioctl(struct file * file, unsigned int cmd_in, unsigned /* Try to allocate a new tape buffer skeleton. Caller must not hold os_scsi_tapes_lock */ static struct osst_buffer * new_tape_buffer( int from_initialization, int need_dma, int max_sg ) { - int i, priority; + int i; + gfp_t priority; struct osst_buffer *tb; if (from_initialization) @@ -5178,7 +5179,8 @@ static struct osst_buffer * new_tape_buffer( int from_initialization, int need_d /* Try to allocate a temporary (while a user has the device open) enlarged tape buffer */ static int enlarge_buffer(struct osst_buffer *STbuffer, int need_dma) { - int segs, nbr, max_segs, b_size, priority, order, got; + int segs, nbr, max_segs, b_size, order, got; + gfp_t priority; if (STbuffer->buffer_size >= OS_FRAME_SIZE) return 1; diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c new file mode 100644 index 000000000000..9820f272f889 --- /dev/null +++ b/drivers/scsi/pdc_adma.c @@ -0,0 +1,739 @@ +/* + * pdc_adma.c - Pacific Digital Corporation ADMA + * + * Maintained by: Mark Lord + * + * Copyright 2005 Mark Lord + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + * + * + * libata documentation is available via 'make {ps|pdf}docs', + * as Documentation/DocBook/libata.* + * + * + * Supports ATA disks in single-packet ADMA mode. + * Uses PIO for everything else. + * + * TODO: Use ADMA transfers for ATAPI devices, when possible. + * This requires careful attention to a number of quirks of the chip. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "scsi.h" +#include +#include +#include + +#define DRV_NAME "pdc_adma" +#define DRV_VERSION "0.01" + +/* macro to calculate base address for ATA regs */ +#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40)) + +/* macro to calculate base address for ADMA regs */ +#define ADMA_REGS(base,port_no) ((base) + 0x80 + ((port_no) * 0x20)) + +enum { + ADMA_PORTS = 2, + ADMA_CPB_BYTES = 40, + ADMA_PRD_BYTES = LIBATA_MAX_PRD * 16, + ADMA_PKT_BYTES = ADMA_CPB_BYTES + ADMA_PRD_BYTES, + + ADMA_DMA_BOUNDARY = 0xffffffff, + + /* global register offsets */ + ADMA_MODE_LOCK = 0x00c7, + + /* per-channel register offsets */ + ADMA_CONTROL = 0x0000, /* ADMA control */ + ADMA_STATUS = 0x0002, /* ADMA status */ + ADMA_CPB_COUNT = 0x0004, /* CPB count */ + ADMA_CPB_CURRENT = 0x000c, /* current CPB address */ + ADMA_CPB_NEXT = 0x000c, /* next CPB address */ + ADMA_CPB_LOOKUP = 0x0010, /* CPB lookup table */ + ADMA_FIFO_IN = 0x0014, /* input FIFO threshold */ + ADMA_FIFO_OUT = 0x0016, /* output FIFO threshold */ + + /* ADMA_CONTROL register bits */ + aNIEN = (1 << 8), /* irq mask: 1==masked */ + aGO = (1 << 7), /* packet trigger ("Go!") */ + aRSTADM = (1 << 5), /* ADMA logic reset */ + aRSTA = (1 << 2), /* ATA hard reset */ + aPIOMD4 = 0x0003, /* PIO mode 4 */ + + /* ADMA_STATUS register bits */ + aPSD = (1 << 6), + aUIRQ = (1 << 4), + aPERR = (1 << 0), + + /* CPB bits */ + cDONE = (1 << 0), + cVLD = (1 << 0), + cDAT = (1 << 2), + cIEN = (1 << 3), + + /* PRD bits */ + pORD = (1 << 4), + pDIRO = (1 << 5), + pEND = (1 << 7), + + /* ATA register flags */ + rIGN = (1 << 5), + rEND = (1 << 7), + + /* ATA register addresses */ + ADMA_REGS_CONTROL = 0x0e, + ADMA_REGS_SECTOR_COUNT = 0x12, + ADMA_REGS_LBA_LOW = 0x13, + ADMA_REGS_LBA_MID = 0x14, + ADMA_REGS_LBA_HIGH = 0x15, + ADMA_REGS_DEVICE = 0x16, + ADMA_REGS_COMMAND = 0x17, + + /* PCI device IDs */ + board_1841_idx = 0, /* ADMA 2-port controller */ +}; + +typedef enum { adma_state_idle, adma_state_pkt, adma_state_mmio } adma_state_t; + +struct adma_port_priv { + u8 *pkt; + dma_addr_t pkt_dma; + adma_state_t state; +}; + +static int adma_ata_init_one (struct pci_dev *pdev, + const struct pci_device_id *ent); +static irqreturn_t adma_intr (int irq, void *dev_instance, + struct pt_regs *regs); +static int adma_port_start(struct ata_port *ap); +static void adma_host_stop(struct ata_host_set *host_set); +static void adma_port_stop(struct ata_port *ap); +static void adma_phy_reset(struct ata_port *ap); +static void adma_qc_prep(struct ata_queued_cmd *qc); +static int adma_qc_issue(struct ata_queued_cmd *qc); +static int adma_check_atapi_dma(struct ata_queued_cmd *qc); +static void adma_bmdma_stop(struct ata_queued_cmd *qc); +static u8 adma_bmdma_status(struct ata_port *ap); +static void adma_irq_clear(struct ata_port *ap); +static void adma_eng_timeout(struct ata_port *ap); + +static Scsi_Host_Template adma_ata_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .ioctl = ata_scsi_ioctl, + .queuecommand = ata_scsi_queuecmd, + .eh_strategy_handler = ata_scsi_error, + .can_queue = ATA_DEF_QUEUE, + .this_id = ATA_SHT_THIS_ID, + .sg_tablesize = LIBATA_MAX_PRD, + .max_sectors = ATA_MAX_SECTORS, + .cmd_per_lun = ATA_SHT_CMD_PER_LUN, + .emulated = ATA_SHT_EMULATED, + .use_clustering = ENABLE_CLUSTERING, + .proc_name = DRV_NAME, + .dma_boundary = ADMA_DMA_BOUNDARY, + .slave_configure = ata_scsi_slave_config, + .bios_param = ata_std_bios_param, +}; + +static const struct 
ata_port_operations adma_ata_ops = { + .port_disable = ata_port_disable, + .tf_load = ata_tf_load, + .tf_read = ata_tf_read, + .check_status = ata_check_status, + .check_atapi_dma = adma_check_atapi_dma, + .exec_command = ata_exec_command, + .dev_select = ata_std_dev_select, + .phy_reset = adma_phy_reset, + .qc_prep = adma_qc_prep, + .qc_issue = adma_qc_issue, + .eng_timeout = adma_eng_timeout, + .irq_handler = adma_intr, + .irq_clear = adma_irq_clear, + .port_start = adma_port_start, + .port_stop = adma_port_stop, + .host_stop = adma_host_stop, + .bmdma_stop = adma_bmdma_stop, + .bmdma_status = adma_bmdma_status, +}; + +static struct ata_port_info adma_port_info[] = { + /* board_1841_idx */ + { + .sht = &adma_ata_sht, + .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | + ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO, + .pio_mask = 0x10, /* pio4 */ + .udma_mask = 0x1f, /* udma0-4 */ + .port_ops = &adma_ata_ops, + }, +}; + +static struct pci_device_id adma_ata_pci_tbl[] = { + { PCI_VENDOR_ID_PDC, 0x1841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + board_1841_idx }, + + { } /* terminate list */ +}; + +static struct pci_driver adma_ata_pci_driver = { + .name = DRV_NAME, + .id_table = adma_ata_pci_tbl, + .probe = adma_ata_init_one, + .remove = ata_pci_remove_one, +}; + +static int adma_check_atapi_dma(struct ata_queued_cmd *qc) +{ + return 1; /* ATAPI DMA not yet supported */ +} + +static void adma_bmdma_stop(struct ata_queued_cmd *qc) +{ + /* nothing */ +} + +static u8 adma_bmdma_status(struct ata_port *ap) +{ + return 0; +} + +static void adma_irq_clear(struct ata_port *ap) +{ + /* nothing */ +} + +static void adma_reset_engine(void __iomem *chan) +{ + /* reset ADMA to idle state */ + writew(aPIOMD4 | aNIEN | aRSTADM, chan + ADMA_CONTROL); + udelay(2); + writew(aPIOMD4, chan + ADMA_CONTROL); + udelay(2); +} + +static void adma_reinit_engine(struct ata_port *ap) +{ + struct adma_port_priv *pp = ap->private_data; + void __iomem *mmio_base = ap->host_set->mmio_base; + void __iomem *chan = ADMA_REGS(mmio_base, ap->port_no); + + /* mask/clear ATA interrupts */ + writeb(ATA_NIEN, (void __iomem *)ap->ioaddr.ctl_addr); + ata_check_status(ap); + + /* reset the ADMA engine */ + adma_reset_engine(chan); + + /* set in-FIFO threshold to 0x100 */ + writew(0x100, chan + ADMA_FIFO_IN); + + /* set CPB pointer */ + writel((u32)pp->pkt_dma, chan + ADMA_CPB_NEXT); + + /* set out-FIFO threshold to 0x100 */ + writew(0x100, chan + ADMA_FIFO_OUT); + + /* set CPB count */ + writew(1, chan + ADMA_CPB_COUNT); + + /* read/discard ADMA status */ + readb(chan + ADMA_STATUS); +} + +static inline void adma_enter_reg_mode(struct ata_port *ap) +{ + void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no); + + writew(aPIOMD4, chan + ADMA_CONTROL); + readb(chan + ADMA_STATUS); /* flush */ +} + +static void adma_phy_reset(struct ata_port *ap) +{ + struct adma_port_priv *pp = ap->private_data; + + pp->state = adma_state_idle; + adma_reinit_engine(ap); + ata_port_probe(ap); + ata_bus_reset(ap); +} + +static void adma_eng_timeout(struct ata_port *ap) +{ + struct adma_port_priv *pp = ap->private_data; + + if (pp->state != adma_state_idle) /* healthy paranoia */ + pp->state = adma_state_mmio; + adma_reinit_engine(ap); + ata_eng_timeout(ap); +} + +static int adma_fill_sg(struct ata_queued_cmd *qc) +{ + struct scatterlist *sg = qc->sg; + struct ata_port *ap = qc->ap; + struct adma_port_priv *pp = ap->private_data; + u8 *buf = pp->pkt; + int nelem, i = (2 + buf[3]) * 8; + u8 pFLAGS = pORD | ((qc->tf.flags & ATA_TFLAG_WRITE) ? 
pDIRO : 0); + + for (nelem = 0; nelem < qc->n_elem; nelem++,sg++) { + u32 addr; + u32 len; + + addr = (u32)sg_dma_address(sg); + *(__le32 *)(buf + i) = cpu_to_le32(addr); + i += 4; + + len = sg_dma_len(sg) >> 3; + *(__le32 *)(buf + i) = cpu_to_le32(len); + i += 4; + + if ((nelem + 1) == qc->n_elem) + pFLAGS |= pEND; + buf[i++] = pFLAGS; + buf[i++] = qc->dev->dma_mode & 0xf; + buf[i++] = 0; /* pPKLW */ + buf[i++] = 0; /* reserved */ + + *(__le32 *)(buf + i) + = (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4); + i += 4; + + VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", nelem, + (unsigned long)addr, len); + } + return i; +} + +static void adma_qc_prep(struct ata_queued_cmd *qc) +{ + struct adma_port_priv *pp = qc->ap->private_data; + u8 *buf = pp->pkt; + u32 pkt_dma = (u32)pp->pkt_dma; + int i = 0; + + VPRINTK("ENTER\n"); + + adma_enter_reg_mode(qc->ap); + if (qc->tf.protocol != ATA_PROT_DMA) { + ata_qc_prep(qc); + return; + } + + buf[i++] = 0; /* Response flags */ + buf[i++] = 0; /* reserved */ + buf[i++] = cVLD | cDAT | cIEN; + i++; /* cLEN, gets filled in below */ + + *(__le32 *)(buf+i) = cpu_to_le32(pkt_dma); /* cNCPB */ + i += 4; /* cNCPB */ + i += 4; /* cPRD, gets filled in below */ + + buf[i++] = 0; /* reserved */ + buf[i++] = 0; /* reserved */ + buf[i++] = 0; /* reserved */ + buf[i++] = 0; /* reserved */ + + /* ATA registers; must be a multiple of 4 */ + buf[i++] = qc->tf.device; + buf[i++] = ADMA_REGS_DEVICE; + if ((qc->tf.flags & ATA_TFLAG_LBA48)) { + buf[i++] = qc->tf.hob_nsect; + buf[i++] = ADMA_REGS_SECTOR_COUNT; + buf[i++] = qc->tf.hob_lbal; + buf[i++] = ADMA_REGS_LBA_LOW; + buf[i++] = qc->tf.hob_lbam; + buf[i++] = ADMA_REGS_LBA_MID; + buf[i++] = qc->tf.hob_lbah; + buf[i++] = ADMA_REGS_LBA_HIGH; + } + buf[i++] = qc->tf.nsect; + buf[i++] = ADMA_REGS_SECTOR_COUNT; + buf[i++] = qc->tf.lbal; + buf[i++] = ADMA_REGS_LBA_LOW; + buf[i++] = qc->tf.lbam; + buf[i++] = ADMA_REGS_LBA_MID; + buf[i++] = qc->tf.lbah; + buf[i++] = ADMA_REGS_LBA_HIGH; + buf[i++] = 0; + buf[i++] = ADMA_REGS_CONTROL; + buf[i++] = rIGN; + buf[i++] = 0; + buf[i++] = qc->tf.command; + buf[i++] = ADMA_REGS_COMMAND | rEND; + + buf[3] = (i >> 3) - 2; /* cLEN */ + *(__le32 *)(buf+8) = cpu_to_le32(pkt_dma + i); /* cPRD */ + + i = adma_fill_sg(qc); + wmb(); /* flush PRDs and pkt to memory */ +#if 0 + /* dump out CPB + PRDs for debug */ + { + int j, len = 0; + static char obuf[2048]; + for (j = 0; j < i; ++j) { + len += sprintf(obuf+len, "%02x ", buf[j]); + if ((j & 7) == 7) { + printk("%s\n", obuf); + len = 0; + } + } + if (len) + printk("%s\n", obuf); + } +#endif +} + +static inline void adma_packet_start(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no); + + VPRINTK("ENTER, ap %p\n", ap); + + /* fire up the ADMA engine */ + writew(aPIOMD4 | aGO, chan + ADMA_CONTROL); +} + +static int adma_qc_issue(struct ata_queued_cmd *qc) +{ + struct adma_port_priv *pp = qc->ap->private_data; + + switch (qc->tf.protocol) { + case ATA_PROT_DMA: + pp->state = adma_state_pkt; + adma_packet_start(qc); + return 0; + + case ATA_PROT_ATAPI_DMA: + BUG(); + break; + + default: + break; + } + + pp->state = adma_state_mmio; + return ata_qc_issue_prot(qc); +} + +static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set) +{ + unsigned int handled = 0, port_no; + u8 __iomem *mmio_base = host_set->mmio_base; + + for (port_no = 0; port_no < host_set->n_ports; ++port_no) { + struct ata_port *ap = host_set->ports[port_no]; + struct adma_port_priv *pp; + struct 
ata_queued_cmd *qc; + void __iomem *chan = ADMA_REGS(mmio_base, port_no); + u8 drv_stat, status = readb(chan + ADMA_STATUS); + + if (status == 0) + continue; + handled = 1; + adma_enter_reg_mode(ap); + if ((ap->flags & ATA_FLAG_PORT_DISABLED)) + continue; + pp = ap->private_data; + if (!pp || pp->state != adma_state_pkt) + continue; + qc = ata_qc_from_tag(ap, ap->active_tag); + drv_stat = 0; + if ((status & (aPERR | aPSD | aUIRQ))) + drv_stat = ATA_ERR; + else if (pp->pkt[0] != cDONE) + drv_stat = ATA_ERR; + ata_qc_complete(qc, drv_stat); + } + return handled; +} + +static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set) +{ + unsigned int handled = 0, port_no; + + for (port_no = 0; port_no < host_set->n_ports; ++port_no) { + struct ata_port *ap; + ap = host_set->ports[port_no]; + if (ap && (!(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR)))) { + struct ata_queued_cmd *qc; + struct adma_port_priv *pp = ap->private_data; + if (!pp || pp->state != adma_state_mmio) + continue; + qc = ata_qc_from_tag(ap, ap->active_tag); + if (qc && (!(qc->tf.ctl & ATA_NIEN))) { + + /* check main status, clearing INTRQ */ + u8 status = ata_chk_status(ap); + if ((status & ATA_BUSY)) + continue; + DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", + ap->id, qc->tf.protocol, status); + + /* complete taskfile transaction */ + pp->state = adma_state_idle; + ata_qc_complete(qc, status); + handled = 1; + } + } + } + return handled; +} + +static irqreturn_t adma_intr(int irq, void *dev_instance, struct pt_regs *regs) +{ + struct ata_host_set *host_set = dev_instance; + unsigned int handled = 0; + + VPRINTK("ENTER\n"); + + spin_lock(&host_set->lock); + handled = adma_intr_pkt(host_set) | adma_intr_mmio(host_set); + spin_unlock(&host_set->lock); + + VPRINTK("EXIT\n"); + + return IRQ_RETVAL(handled); +} + +static void adma_ata_setup_port(struct ata_ioports *port, unsigned long base) +{ + port->cmd_addr = + port->data_addr = base + 0x000; + port->error_addr = + port->feature_addr = base + 0x004; + port->nsect_addr = base + 0x008; + port->lbal_addr = base + 0x00c; + port->lbam_addr = base + 0x010; + port->lbah_addr = base + 0x014; + port->device_addr = base + 0x018; + port->status_addr = + port->command_addr = base + 0x01c; + port->altstatus_addr = + port->ctl_addr = base + 0x038; +} + +static int adma_port_start(struct ata_port *ap) +{ + struct device *dev = ap->host_set->dev; + struct adma_port_priv *pp; + int rc; + + rc = ata_port_start(ap); + if (rc) + return rc; + adma_enter_reg_mode(ap); + rc = -ENOMEM; + pp = kcalloc(1, sizeof(*pp), GFP_KERNEL); + if (!pp) + goto err_out; + pp->pkt = dma_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma, + GFP_KERNEL); + if (!pp->pkt) + goto err_out_kfree; + /* paranoia? 
*/ + if ((pp->pkt_dma & 7) != 0) { + printk("bad alignment for pp->pkt_dma: %08x\n", + (u32)pp->pkt_dma); + goto err_out_kfree2; + } + memset(pp->pkt, 0, ADMA_PKT_BYTES); + ap->private_data = pp; + adma_reinit_engine(ap); + return 0; + +err_out_kfree2: + kfree(pp); +err_out_kfree: + kfree(pp); +err_out: + ata_port_stop(ap); + return rc; +} + +static void adma_port_stop(struct ata_port *ap) +{ + struct device *dev = ap->host_set->dev; + struct adma_port_priv *pp = ap->private_data; + + adma_reset_engine(ADMA_REGS(ap->host_set->mmio_base, ap->port_no)); + if (pp != NULL) { + ap->private_data = NULL; + if (pp->pkt != NULL) + dma_free_coherent(dev, ADMA_PKT_BYTES, + pp->pkt, pp->pkt_dma); + kfree(pp); + } + ata_port_stop(ap); +} + +static void adma_host_stop(struct ata_host_set *host_set) +{ + unsigned int port_no; + + for (port_no = 0; port_no < ADMA_PORTS; ++port_no) + adma_reset_engine(ADMA_REGS(host_set->mmio_base, port_no)); + + ata_pci_host_stop(host_set); +} + +static void adma_host_init(unsigned int chip_id, + struct ata_probe_ent *probe_ent) +{ + unsigned int port_no; + void __iomem *mmio_base = probe_ent->mmio_base; + + /* enable/lock aGO operation */ + writeb(7, mmio_base + ADMA_MODE_LOCK); + + /* reset the ADMA logic */ + for (port_no = 0; port_no < ADMA_PORTS; ++port_no) + adma_reset_engine(ADMA_REGS(mmio_base, port_no)); +} + +static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base) +{ + int rc; + + rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (rc) { + printk(KERN_ERR DRV_NAME + "(%s): 32-bit DMA enable failed\n", + pci_name(pdev)); + return rc; + } + rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + if (rc) { + printk(KERN_ERR DRV_NAME + "(%s): 32-bit consistent DMA enable failed\n", + pci_name(pdev)); + return rc; + } + return 0; +} + +static int adma_ata_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + static int printed_version; + struct ata_probe_ent *probe_ent = NULL; + void __iomem *mmio_base; + unsigned int board_idx = (unsigned int) ent->driver_data; + int rc, port_no; + + if (!printed_version++) + printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); + + rc = pci_enable_device(pdev); + if (rc) + return rc; + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out; + + if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0) { + rc = -ENODEV; + goto err_out_regions; + } + + mmio_base = pci_iomap(pdev, 4, 0); + if (mmio_base == NULL) { + rc = -ENOMEM; + goto err_out_regions; + } + + rc = adma_set_dma_masks(pdev, mmio_base); + if (rc) + goto err_out_iounmap; + + probe_ent = kcalloc(1, sizeof(*probe_ent), GFP_KERNEL); + if (probe_ent == NULL) { + rc = -ENOMEM; + goto err_out_iounmap; + } + + probe_ent->dev = pci_dev_to_dev(pdev); + INIT_LIST_HEAD(&probe_ent->node); + + probe_ent->sht = adma_port_info[board_idx].sht; + probe_ent->host_flags = adma_port_info[board_idx].host_flags; + probe_ent->pio_mask = adma_port_info[board_idx].pio_mask; + probe_ent->mwdma_mask = adma_port_info[board_idx].mwdma_mask; + probe_ent->udma_mask = adma_port_info[board_idx].udma_mask; + probe_ent->port_ops = adma_port_info[board_idx].port_ops; + + probe_ent->irq = pdev->irq; + probe_ent->irq_flags = SA_SHIRQ; + probe_ent->mmio_base = mmio_base; + probe_ent->n_ports = ADMA_PORTS; + + for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) { + adma_ata_setup_port(&probe_ent->port[port_no], + ADMA_ATA_REGS((unsigned long)mmio_base, port_no)); + } + + pci_set_master(pdev); + + /* initialize adapter */ + 
adma_host_init(board_idx, probe_ent); + + rc = ata_device_add(probe_ent); + kfree(probe_ent); + if (rc != ADMA_PORTS) + goto err_out_iounmap; + return 0; + +err_out_iounmap: + pci_iounmap(pdev, mmio_base); +err_out_regions: + pci_release_regions(pdev); +err_out: + pci_disable_device(pdev); + return rc; +} + +static int __init adma_ata_init(void) +{ + return pci_module_init(&adma_ata_pci_driver); +} + +static void __exit adma_ata_exit(void) +{ + pci_unregister_driver(&adma_ata_pci_driver); +} + +MODULE_AUTHOR("Mark Lord"); +MODULE_DESCRIPTION("Pacific Digital Corporation ADMA low-level driver"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, adma_ata_pci_tbl); +MODULE_VERSION(DRV_VERSION); + +module_init(adma_ata_init); +module_exit(adma_ata_exit); diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 1ed32e7b5472..e451941ad81d 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -52,7 +52,7 @@ extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *); extern int qla24xx_load_risc_flash(scsi_qla_host_t *, uint32_t *); extern int qla24xx_load_risc_hotplug(scsi_qla_host_t *, uint32_t *); -extern fc_port_t *qla2x00_alloc_fcport(scsi_qla_host_t *, int); +extern fc_port_t *qla2x00_alloc_fcport(scsi_qla_host_t *, gfp_t); extern int qla2x00_loop_resync(scsi_qla_host_t *); @@ -277,7 +277,7 @@ extern int qla2x00_fdmi_register(scsi_qla_host_t *); /* * Global Function Prototypes in qla_rscn.c source file. */ -extern fc_port_t *qla2x00_alloc_rscn_fcport(scsi_qla_host_t *, int); +extern fc_port_t *qla2x00_alloc_rscn_fcport(scsi_qla_host_t *, gfp_t); extern int qla2x00_handle_port_rscn(scsi_qla_host_t *, uint32_t, fc_port_t *, int); extern void qla2x00_process_iodesc(scsi_qla_host_t *, struct mbx_entry *); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 23d095d3817b..fbb6feee40cf 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1685,7 +1685,7 @@ qla2x00_nvram_config(scsi_qla_host_t *ha) * Returns a pointer to the allocated fcport, or NULL, if none available. 
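 * The flags argument is a gfp_t allocation mask (for example GFP_KERNEL
 * from process context, or GFP_ATOMIC where sleeping is not allowed),
 * matching the prototype in qla_gbl.h.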
*/ fc_port_t * -qla2x00_alloc_fcport(scsi_qla_host_t *ha, int flags) +qla2x00_alloc_fcport(scsi_qla_host_t *ha, gfp_t flags) { fc_port_t *fcport; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 8982978c42fd..7aec93f9d423 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1325,6 +1325,8 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info) ha->brd_info = brd_info; sprintf(ha->host_str, "%s_%ld", ha->brd_info->drv_name, ha->host_no); + ha->dpc_pid = -1; + /* Configure PCI I/O space */ ret = qla2x00_iospace_config(ha); if (ret) @@ -1448,7 +1450,6 @@ int qla2x00_probe_one(struct pci_dev *pdev, struct qla_board_info *brd_info) */ spin_lock_init(&ha->mbx_reg_lock); - ha->dpc_pid = -1; init_completion(&ha->dpc_inited); init_completion(&ha->dpc_exited); diff --git a/drivers/scsi/qla2xxx/qla_rscn.c b/drivers/scsi/qla2xxx/qla_rscn.c index bdc3bc74bbe1..7534efcc8918 100644 --- a/drivers/scsi/qla2xxx/qla_rscn.c +++ b/drivers/scsi/qla2xxx/qla_rscn.c @@ -330,6 +330,8 @@ qla2x00_update_login_fcport(scsi_qla_host_t *ha, struct mbx_entry *mbxstat, fcport->flags &= ~FCF_FAILOVER_NEEDED; fcport->iodesc_idx_sent = IODESC_INVALID_INDEX; atomic_set(&fcport->state, FCS_ONLINE); + if (fcport->rport) + fc_remote_port_unblock(fcport->rport); } @@ -1064,7 +1066,7 @@ qla2x00_send_login_iocb_cb(scsi_qla_host_t *ha, struct io_descriptor *iodesc, * Returns a pointer to the allocated RSCN fcport, or NULL, if none available. */ fc_port_t * -qla2x00_alloc_rscn_fcport(scsi_qla_host_t *ha, int flags) +qla2x00_alloc_rscn_fcport(scsi_qla_host_t *ha, gfp_t flags) { fc_port_t *fcport; diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index a917ab7475ac..1fd5fc6d0fe3 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c @@ -1119,6 +1119,36 @@ static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int host->sg_tablesize = QLOGICPTI_MAX_SG(num_free); } +static unsigned int scsi_rbuf_get(struct scsi_cmnd *cmd, unsigned char **buf_out) +{ + unsigned char *buf; + unsigned int buflen; + + if (cmd->use_sg) { + struct scatterlist *sg; + + sg = (struct scatterlist *) cmd->request_buffer; + buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset; + buflen = sg->length; + } else { + buf = cmd->request_buffer; + buflen = cmd->request_bufflen; + } + + *buf_out = buf; + return buflen; +} + +static void scsi_rbuf_put(struct scsi_cmnd *cmd, unsigned char *buf) +{ + if (cmd->use_sg) { + struct scatterlist *sg; + + sg = (struct scatterlist *) cmd->request_buffer; + kunmap_atomic(buf - sg->offset, KM_IRQ0); + } +} + /* * Until we scan the entire bus with inquiries, go throught this fella... */ @@ -1145,11 +1175,9 @@ static void ourdone(struct scsi_cmnd *Cmnd) int ok = host_byte(Cmnd->result) == DID_OK; if (Cmnd->cmnd[0] == 0x12 && ok) { unsigned char *iqd; + unsigned int iqd_len; - if (Cmnd->use_sg != 0) - BUG(); - - iqd = ((unsigned char *)Cmnd->buffer); + iqd_len = scsi_rbuf_get(Cmnd, &iqd); /* tags handled in midlayer */ /* enable sync mode? 
*/ @@ -1163,6 +1191,9 @@ static void ourdone(struct scsi_cmnd *Cmnd) if (iqd[7] & 0x20) { qpti->dev_param[tgt].device_flags |= 0x20; } + + scsi_rbuf_put(Cmnd, iqd); + qpti->sbits |= (1 << tgt); } else if (!ok) { qpti->sbits |= (1 << tgt); diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c index ea76fe44585e..422e0b6f603a 100644 --- a/drivers/scsi/sata_mv.c +++ b/drivers/scsi/sata_mv.c @@ -35,7 +35,7 @@ #include #define DRV_NAME "sata_mv" -#define DRV_VERSION "0.12" +#define DRV_VERSION "0.25" enum { /* BAR's are enumerated in terms of pci_resource_start() terms */ @@ -55,31 +55,61 @@ enum { MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */ MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ, - MV_Q_CT = 32, - MV_CRQB_SZ = 32, - MV_CRPB_SZ = 8, + MV_USE_Q_DEPTH = ATA_DEF_QUEUE, - MV_DMA_BOUNDARY = 0xffffffffU, - SATAHC_MASK = (~(MV_SATAHC_REG_SZ - 1)), + MV_MAX_Q_DEPTH = 32, + MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1, + + /* CRQB needs alignment on a 1KB boundary. Size == 1KB + * CRPB needs alignment on a 256B boundary. Size == 256B + * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB + * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B + */ + MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH), + MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH), + MV_MAX_SG_CT = 176, + MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT), + MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ), + + /* Our DMA boundary is determined by an ePRD being unable to handle + * anything larger than 64KB + */ + MV_DMA_BOUNDARY = 0xffffU, MV_PORTS_PER_HC = 4, /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */ MV_PORT_HC_SHIFT = 2, - /* == (port % MV_PORTS_PER_HC) to determine port from 0-7 port */ + /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */ MV_PORT_MASK = 3, /* Host Flags */ MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ - MV_FLAG_BDMA = (1 << 28), /* Basic DMA */ + MV_FLAG_GLBL_SFT_RST = (1 << 28), /* Global Soft Reset support */ + MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | + ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO), + MV_6XXX_FLAGS = (MV_FLAG_IRQ_COALESCE | + MV_FLAG_GLBL_SFT_RST), chip_504x = 0, chip_508x = 1, chip_604x = 2, chip_608x = 3, + CRQB_FLAG_READ = (1 << 0), + CRQB_TAG_SHIFT = 1, + CRQB_CMD_ADDR_SHIFT = 8, + CRQB_CMD_CS = (0x2 << 11), + CRQB_CMD_LAST = (1 << 15), + + CRPB_FLAG_STATUS_SHIFT = 8, + + EPRD_FLAG_END_OF_TBL = (1 << 31), + /* PCI interface registers */ + PCI_COMMAND_OFS = 0xc00, + PCI_MAIN_CMD_STS_OFS = 0xd30, STOP_PCI_MASTER = (1 << 2), PCI_MASTER_EMPTY = (1 << 3), @@ -111,20 +141,13 @@ enum { HC_CFG_OFS = 0, HC_IRQ_CAUSE_OFS = 0x14, - CRBP_DMA_DONE = (1 << 0), /* shift by port # */ + CRPB_DMA_DONE = (1 << 0), /* shift by port # */ HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */ DEV_IRQ = (1 << 8), /* shift by port # */ /* Shadow block registers */ - SHD_PIO_DATA_OFS = 0x100, - SHD_FEA_ERR_OFS = 0x104, - SHD_SECT_CNT_OFS = 0x108, - SHD_LBA_L_OFS = 0x10C, - SHD_LBA_M_OFS = 0x110, - SHD_LBA_H_OFS = 0x114, - SHD_DEV_HD_OFS = 0x118, - SHD_CMD_STA_OFS = 0x11C, - SHD_CTL_AST_OFS = 0x120, + SHD_BLK_OFS = 0x100, + SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */ /* SATA registers */ SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */ @@ -132,6 +155,11 @@ enum { /* Port registers */ EDMA_CFG_OFS = 0, + EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */ + EDMA_CFG_NCQ = (1 << 5), + EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ + EDMA_CFG_RD_BRST_EXT = (1 << 11), /* 
read burst 512B */ + EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ EDMA_ERR_IRQ_CAUSE_OFS = 0x8, EDMA_ERR_IRQ_MASK_OFS = 0xc, @@ -161,33 +189,85 @@ enum { EDMA_ERR_LNK_DATA_TX | EDMA_ERR_TRANS_PROTO), + EDMA_REQ_Q_BASE_HI_OFS = 0x10, + EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */ + EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U, + + EDMA_REQ_Q_OUT_PTR_OFS = 0x18, + EDMA_REQ_Q_PTR_SHIFT = 5, + + EDMA_RSP_Q_BASE_HI_OFS = 0x1c, + EDMA_RSP_Q_IN_PTR_OFS = 0x20, + EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */ + EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U, + EDMA_RSP_Q_PTR_SHIFT = 3, + EDMA_CMD_OFS = 0x28, EDMA_EN = (1 << 0), EDMA_DS = (1 << 1), ATA_RST = (1 << 2), - /* BDMA is 6xxx part only */ - BDMA_CMD_OFS = 0x224, - BDMA_START = (1 << 0), + /* Host private flags (hp_flags) */ + MV_HP_FLAG_MSI = (1 << 0), - MV_UNDEF = 0, + /* Port private flags (pp_flags) */ + MV_PP_FLAG_EDMA_EN = (1 << 0), + MV_PP_FLAG_EDMA_DS_ACT = (1 << 1), +}; + +/* Command ReQuest Block: 32B */ +struct mv_crqb { + u32 sg_addr; + u32 sg_addr_hi; + u16 ctrl_flags; + u16 ata_cmd[11]; +}; + +/* Command ResPonse Block: 8B */ +struct mv_crpb { + u16 id; + u16 flags; + u32 tmstmp; +}; + +/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */ +struct mv_sg { + u32 addr; + u32 flags_size; + u32 addr_hi; + u32 reserved; }; struct mv_port_priv { + struct mv_crqb *crqb; + dma_addr_t crqb_dma; + struct mv_crpb *crpb; + dma_addr_t crpb_dma; + struct mv_sg *sg_tbl; + dma_addr_t sg_tbl_dma; + unsigned req_producer; /* cp of req_in_ptr */ + unsigned rsp_consumer; /* cp of rsp_out_ptr */ + u32 pp_flags; }; struct mv_host_priv { - + u32 hp_flags; }; static void mv_irq_clear(struct ata_port *ap); static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in); static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); +static u8 mv_check_err(struct ata_port *ap); static void mv_phy_reset(struct ata_port *ap); -static int mv_master_reset(void __iomem *mmio_base); +static void mv_host_stop(struct ata_host_set *host_set); +static int mv_port_start(struct ata_port *ap); +static void mv_port_stop(struct ata_port *ap); +static void mv_qc_prep(struct ata_queued_cmd *qc); +static int mv_qc_issue(struct ata_queued_cmd *qc); static irqreturn_t mv_interrupt(int irq, void *dev_instance, struct pt_regs *regs); +static void mv_eng_timeout(struct ata_port *ap); static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); static Scsi_Host_Template mv_sht = { @@ -196,13 +276,13 @@ static Scsi_Host_Template mv_sht = { .ioctl = ata_scsi_ioctl, .queuecommand = ata_scsi_queuecmd, .eh_strategy_handler = ata_scsi_error, - .can_queue = ATA_DEF_QUEUE, + .can_queue = MV_USE_Q_DEPTH, .this_id = ATA_SHT_THIS_ID, - .sg_tablesize = MV_UNDEF, + .sg_tablesize = MV_MAX_SG_CT, .max_sectors = ATA_MAX_SECTORS, .cmd_per_lun = ATA_SHT_CMD_PER_LUN, .emulated = ATA_SHT_EMULATED, - .use_clustering = MV_UNDEF, + .use_clustering = ATA_SHT_USE_CLUSTERING, .proc_name = DRV_NAME, .dma_boundary = MV_DMA_BOUNDARY, .slave_configure = ata_scsi_slave_config, @@ -210,21 +290,22 @@ static Scsi_Host_Template mv_sht = { .ordered_flush = 1, }; -static struct ata_port_operations mv_ops = { +static const struct ata_port_operations mv_ops = { .port_disable = ata_port_disable, .tf_load = ata_tf_load, .tf_read = ata_tf_read, .check_status = ata_check_status, + .check_err = mv_check_err, .exec_command = ata_exec_command, .dev_select = ata_std_dev_select, .phy_reset = mv_phy_reset, - .qc_prep = ata_qc_prep, - .qc_issue = 
ata_qc_issue_prot, + .qc_prep = mv_qc_prep, + .qc_issue = mv_qc_issue, - .eng_timeout = ata_eng_timeout, + .eng_timeout = mv_eng_timeout, .irq_handler = mv_interrupt, .irq_clear = mv_irq_clear, @@ -232,46 +313,39 @@ static struct ata_port_operations mv_ops = { .scr_read = mv_scr_read, .scr_write = mv_scr_write, - .port_start = ata_port_start, - .port_stop = ata_port_stop, - .host_stop = ata_host_stop, + .port_start = mv_port_start, + .port_stop = mv_port_stop, + .host_stop = mv_host_stop, }; static struct ata_port_info mv_port_info[] = { { /* chip_504x */ .sht = &mv_sht, - .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | - ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO), - .pio_mask = 0x1f, /* pio4-0 */ - .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */ + .host_flags = MV_COMMON_FLAGS, + .pio_mask = 0x1f, /* pio0-4 */ + .udma_mask = 0, /* 0x7f (udma0-6 disabled for now) */ .port_ops = &mv_ops, }, { /* chip_508x */ .sht = &mv_sht, - .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | - ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | - MV_FLAG_DUAL_HC), - .pio_mask = 0x1f, /* pio4-0 */ - .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */ + .host_flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC), + .pio_mask = 0x1f, /* pio0-4 */ + .udma_mask = 0, /* 0x7f (udma0-6 disabled for now) */ .port_ops = &mv_ops, }, { /* chip_604x */ .sht = &mv_sht, - .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | - ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | - MV_FLAG_IRQ_COALESCE | MV_FLAG_BDMA), - .pio_mask = 0x1f, /* pio4-0 */ - .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */ + .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS), + .pio_mask = 0x1f, /* pio0-4 */ + .udma_mask = 0x7f, /* udma0-6 */ .port_ops = &mv_ops, }, { /* chip_608x */ .sht = &mv_sht, - .host_flags = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | - ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | - MV_FLAG_IRQ_COALESCE | MV_FLAG_DUAL_HC | - MV_FLAG_BDMA), - .pio_mask = 0x1f, /* pio4-0 */ - .udma_mask = 0, /* 0x7f (udma6-0 disabled for now) */ + .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS | + MV_FLAG_DUAL_HC), + .pio_mask = 0x1f, /* pio0-4 */ + .udma_mask = 0x7f, /* udma0-6 */ .port_ops = &mv_ops, }, }; @@ -306,12 +380,6 @@ static inline void writelfl(unsigned long data, void __iomem *addr) (void) readl(addr); /* flush to avoid PCI posted write */ } -static inline void __iomem *mv_port_addr_to_hc_base(void __iomem *port_mmio) -{ - return ((void __iomem *)((unsigned long)port_mmio & - (unsigned long)SATAHC_MASK)); -} - static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc) { return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ)); @@ -329,26 +397,152 @@ static inline void __iomem *mv_ap_base(struct ata_port *ap) return mv_port_base(ap->host_set->mmio_base, ap->port_no); } -static inline int mv_get_hc_count(unsigned long flags) +static inline int mv_get_hc_count(unsigned long hp_flags) { - return ((flags & MV_FLAG_DUAL_HC) ? 2 : 1); -} - -static inline int mv_is_edma_active(struct ata_port *ap) -{ - void __iomem *port_mmio = mv_ap_base(ap); - return (EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)); -} - -static inline int mv_port_bdma_capable(struct ata_port *ap) -{ - return (ap->flags & MV_FLAG_BDMA); + return ((hp_flags & MV_FLAG_DUAL_HC) ? 2 : 1); } static void mv_irq_clear(struct ata_port *ap) { } +/** + * mv_start_dma - Enable eDMA engine + * @base: port base address + * @pp: port private data + * + * Verify the local cache of the eDMA state is accurate with an + * assert. + * + * LOCKING: + * Inherited from caller. 
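+ *
+ * (The cached state is MV_PP_FLAG_EDMA_EN in pp->pp_flags: it is set
+ * here and cleared in mv_stop_dma() and mv_err_intr(), and is expected
+ * to mirror the EDMA_EN bit of the EDMA_CMD register; the assert below
+ * checks that the hardware bit really is set once we believe EDMA is
+ * running.)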
+ */ +static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp) +{ + if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) { + writelfl(EDMA_EN, base + EDMA_CMD_OFS); + pp->pp_flags |= MV_PP_FLAG_EDMA_EN; + } + assert(EDMA_EN & readl(base + EDMA_CMD_OFS)); +} + +/** + * mv_stop_dma - Disable eDMA engine + * @ap: ATA channel to manipulate + * + * Verify the local cache of the eDMA state is accurate with an + * assert. + * + * LOCKING: + * Inherited from caller. + */ +static void mv_stop_dma(struct ata_port *ap) +{ + void __iomem *port_mmio = mv_ap_base(ap); + struct mv_port_priv *pp = ap->private_data; + u32 reg; + int i; + + if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) { + /* Disable EDMA if active. The disable bit auto clears. + */ + writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); + pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; + } else { + assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS))); + } + + /* now properly wait for the eDMA to stop */ + for (i = 1000; i > 0; i--) { + reg = readl(port_mmio + EDMA_CMD_OFS); + if (!(EDMA_EN & reg)) { + break; + } + udelay(100); + } + + if (EDMA_EN & reg) { + printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id); + /* FIXME: Consider doing a reset here to recover */ + } +} + +#ifdef ATA_DEBUG +static void mv_dump_mem(void __iomem *start, unsigned bytes) +{ + int b, w; + for (b = 0; b < bytes; ) { + DPRINTK("%p: ", start + b); + for (w = 0; b < bytes && w < 4; w++) { + printk("%08x ",readl(start + b)); + b += sizeof(u32); + } + printk("\n"); + } +} +#endif + +static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes) +{ +#ifdef ATA_DEBUG + int b, w; + u32 dw; + for (b = 0; b < bytes; ) { + DPRINTK("%02x: ", b); + for (w = 0; b < bytes && w < 4; w++) { + (void) pci_read_config_dword(pdev,b,&dw); + printk("%08x ",dw); + b += sizeof(u32); + } + printk("\n"); + } +#endif +} +static void mv_dump_all_regs(void __iomem *mmio_base, int port, + struct pci_dev *pdev) +{ +#ifdef ATA_DEBUG + void __iomem *hc_base = mv_hc_base(mmio_base, + port >> MV_PORT_HC_SHIFT); + void __iomem *port_base; + int start_port, num_ports, p, start_hc, num_hcs, hc; + + if (0 > port) { + start_hc = start_port = 0; + num_ports = 8; /* shld be benign for 4 port devs */ + num_hcs = 2; + } else { + start_hc = port >> MV_PORT_HC_SHIFT; + start_port = port; + num_ports = num_hcs = 1; + } + DPRINTK("All registers for port(s) %u-%u:\n", start_port, + num_ports > 1 ? num_ports - 1 : start_port); + + if (NULL != pdev) { + DPRINTK("PCI config space regs:\n"); + mv_dump_pci_cfg(pdev, 0x68); + } + DPRINTK("PCI regs:\n"); + mv_dump_mem(mmio_base+0xc00, 0x3c); + mv_dump_mem(mmio_base+0xd00, 0x34); + mv_dump_mem(mmio_base+0xf00, 0x4); + mv_dump_mem(mmio_base+0x1d00, 0x6c); + for (hc = start_hc; hc < start_hc + num_hcs; hc++) { + hc_base = mv_hc_base(mmio_base, port >> MV_PORT_HC_SHIFT); + DPRINTK("HC regs (HC %i):\n", hc); + mv_dump_mem(hc_base, 0x1c); + } + for (p = start_port; p < start_port + num_ports; p++) { + port_base = mv_port_base(mmio_base, p); + DPRINTK("EDMA regs (port %i):\n",p); + mv_dump_mem(port_base, 0x54); + DPRINTK("SATA regs (port %i):\n",p); + mv_dump_mem(port_base+0x300, 0x60); + } +#endif +} + static unsigned int mv_scr_offset(unsigned int sc_reg_in) { unsigned int ofs; @@ -389,30 +583,37 @@ static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) } } -static int mv_master_reset(void __iomem *mmio_base) +/** + * mv_global_soft_reset - Perform the 6xxx global soft reset + * @mmio_base: base address of the HBA + * + * This routine only applies to 6xxx parts. 
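+ * The sequence follows the "main command and status register" table:
+ * stop the PCI master, poll for PCI_MASTER_EMPTY (up to 1000 x
+ * udelay(1), roughly 1 ms), set GLOB_SFT_RST, then clear the reset
+ * while re-enabling the PCI master.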
+ * + * LOCKING: + * Inherited from caller. + */ +static int mv_global_soft_reset(void __iomem *mmio_base) { void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS; int i, rc = 0; u32 t; - VPRINTK("ENTER\n"); - /* Following procedure defined in PCI "main command and status * register" table. */ t = readl(reg); writel(t | STOP_PCI_MASTER, reg); - for (i = 0; i < 100; i++) { - msleep(10); + for (i = 0; i < 1000; i++) { + udelay(1); t = readl(reg); if (PCI_MASTER_EMPTY & t) { break; } } if (!(PCI_MASTER_EMPTY & t)) { - printk(KERN_ERR DRV_NAME "PCI master won't flush\n"); - rc = 1; /* broken HW? */ + printk(KERN_ERR DRV_NAME ": PCI master won't flush\n"); + rc = 1; goto done; } @@ -425,48 +626,412 @@ static int mv_master_reset(void __iomem *mmio_base) } while (!(GLOB_SFT_RST & t) && (i-- > 0)); if (!(GLOB_SFT_RST & t)) { - printk(KERN_ERR DRV_NAME "can't set global reset\n"); - rc = 1; /* broken HW? */ + printk(KERN_ERR DRV_NAME ": can't set global reset\n"); + rc = 1; goto done; } - /* clear reset */ + /* clear reset and *reenable the PCI master* (not mentioned in spec) */ i = 5; do { - writel(t & ~GLOB_SFT_RST, reg); + writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg); t = readl(reg); udelay(1); } while ((GLOB_SFT_RST & t) && (i-- > 0)); if (GLOB_SFT_RST & t) { - printk(KERN_ERR DRV_NAME "can't clear global reset\n"); - rc = 1; /* broken HW? */ + printk(KERN_ERR DRV_NAME ": can't clear global reset\n"); + rc = 1; } - - done: - VPRINTK("EXIT, rc = %i\n", rc); +done: return rc; } +/** + * mv_host_stop - Host specific cleanup/stop routine. + * @host_set: host data structure + * + * Disable ints, cleanup host memory, call general purpose + * host_stop. + * + * LOCKING: + * Inherited from caller. + */ +static void mv_host_stop(struct ata_host_set *host_set) +{ + struct mv_host_priv *hpriv = host_set->private_data; + struct pci_dev *pdev = to_pci_dev(host_set->dev); + + if (hpriv->hp_flags & MV_HP_FLAG_MSI) { + pci_disable_msi(pdev); + } else { + pci_intx(pdev, 0); + } + kfree(hpriv); + ata_host_stop(host_set); +} + +/** + * mv_port_start - Port specific init/start routine. + * @ap: ATA channel to manipulate + * + * Allocate and point to DMA memory, init port private memory, + * zero indices. + * + * LOCKING: + * Inherited from caller. 
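+ *
+ * (Illustrative arithmetic, derived from the constants above rather
+ * than the datasheet: the single MV_PORT_PRIV_DMA_SZ chunk carved up
+ * below is 32 CRQBs * 32 B = 1 KB, plus 32 CRPBs * 8 B = 256 B, plus
+ * 176 ePRDs * 16 B = 2816 B, 4 KB in total, and the offsets 0, 1 KB
+ * and 1280 B also satisfy the 1 KB / 256 B / 16 B alignment rules
+ * given the page-aligned dma_alloc_coherent() buffer.  The queue base
+ * registers are split into hi/lo halves; "(dma >> 16) >> 16" is used
+ * rather than ">> 32" so the shift stays well defined when dma_addr_t
+ * is only 32 bits wide.)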
+ */ +static int mv_port_start(struct ata_port *ap) +{ + struct device *dev = ap->host_set->dev; + struct mv_port_priv *pp; + void __iomem *port_mmio = mv_ap_base(ap); + void *mem; + dma_addr_t mem_dma; + + pp = kmalloc(sizeof(*pp), GFP_KERNEL); + if (!pp) { + return -ENOMEM; + } + memset(pp, 0, sizeof(*pp)); + + mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma, + GFP_KERNEL); + if (!mem) { + kfree(pp); + return -ENOMEM; + } + memset(mem, 0, MV_PORT_PRIV_DMA_SZ); + + /* First item in chunk of DMA memory: + * 32-slot command request table (CRQB), 32 bytes each in size + */ + pp->crqb = mem; + pp->crqb_dma = mem_dma; + mem += MV_CRQB_Q_SZ; + mem_dma += MV_CRQB_Q_SZ; + + /* Second item: + * 32-slot command response table (CRPB), 8 bytes each in size + */ + pp->crpb = mem; + pp->crpb_dma = mem_dma; + mem += MV_CRPB_Q_SZ; + mem_dma += MV_CRPB_Q_SZ; + + /* Third item: + * Table of scatter-gather descriptors (ePRD), 16 bytes each + */ + pp->sg_tbl = mem; + pp->sg_tbl_dma = mem_dma; + + writelfl(EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT | + EDMA_CFG_WR_BUFF_LEN, port_mmio + EDMA_CFG_OFS); + + writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); + writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK, + port_mmio + EDMA_REQ_Q_IN_PTR_OFS); + + writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); + writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); + + writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); + writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, + port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); + + pp->req_producer = pp->rsp_consumer = 0; + + /* Don't turn on EDMA here...do it before DMA commands only. Else + * we'll be unable to send non-data, PIO, etc due to restricted access + * to shadow regs. + */ + ap->private_data = pp; + return 0; +} + +/** + * mv_port_stop - Port specific cleanup/stop routine. + * @ap: ATA channel to manipulate + * + * Stop DMA, cleanup port memory. + * + * LOCKING: + * This routine uses the host_set lock to protect the DMA stop. + */ +static void mv_port_stop(struct ata_port *ap) +{ + struct device *dev = ap->host_set->dev; + struct mv_port_priv *pp = ap->private_data; + unsigned long flags; + + spin_lock_irqsave(&ap->host_set->lock, flags); + mv_stop_dma(ap); + spin_unlock_irqrestore(&ap->host_set->lock, flags); + + ap->private_data = NULL; + dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma); + kfree(pp); +} + +/** + * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries + * @qc: queued command whose SG list to source from + * + * Populate the SG list and mark the last entry. + * + * LOCKING: + * Inherited from caller. + */ +static void mv_fill_sg(struct ata_queued_cmd *qc) +{ + struct mv_port_priv *pp = qc->ap->private_data; + unsigned int i; + + for (i = 0; i < qc->n_elem; i++) { + u32 sg_len; + dma_addr_t addr; + + addr = sg_dma_address(&qc->sg[i]); + sg_len = sg_dma_len(&qc->sg[i]); + + pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff); + pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16); + assert(0 == (sg_len & ~MV_DMA_BOUNDARY)); + pp->sg_tbl[i].flags_size = cpu_to_le32(sg_len); + } + if (0 < qc->n_elem) { + pp->sg_tbl[qc->n_elem - 1].flags_size |= + cpu_to_le32(EPRD_FLAG_END_OF_TBL); + } +} + +static inline unsigned mv_inc_q_index(unsigned *index) +{ + *index = (*index + 1) & MV_MAX_Q_DEPTH_MASK; + return *index; +} + +static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last) +{ + *cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | + (last ? 
CRQB_CMD_LAST : 0); +} + +/** + * mv_qc_prep - Host specific command preparation. + * @qc: queued command to prepare + * + * This routine simply redirects to the general purpose routine + * if command is not DMA. Else, it handles prep of the CRQB + * (command request block), does some sanity checking, and calls + * the SG load routine. + * + * LOCKING: + * Inherited from caller. + */ +static void mv_qc_prep(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct mv_port_priv *pp = ap->private_data; + u16 *cw; + struct ata_taskfile *tf; + u16 flags = 0; + + if (ATA_PROT_DMA != qc->tf.protocol) { + return; + } + + /* the req producer index should be the same as we remember it */ + assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >> + EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == + pp->req_producer); + + /* Fill in command request block + */ + if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { + flags |= CRQB_FLAG_READ; + } + assert(MV_MAX_Q_DEPTH > qc->tag); + flags |= qc->tag << CRQB_TAG_SHIFT; + + pp->crqb[pp->req_producer].sg_addr = + cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); + pp->crqb[pp->req_producer].sg_addr_hi = + cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); + pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags); + + cw = &pp->crqb[pp->req_producer].ata_cmd[0]; + tf = &qc->tf; + + /* Sadly, the CRQB cannot accomodate all registers--there are + * only 11 bytes...so we must pick and choose required + * registers based on the command. So, we drop feature and + * hob_feature for [RW] DMA commands, but they are needed for + * NCQ. NCQ will drop hob_nsect. + */ + switch (tf->command) { + case ATA_CMD_READ: + case ATA_CMD_READ_EXT: + case ATA_CMD_WRITE: + case ATA_CMD_WRITE_EXT: + mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0); + break; +#ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */ + case ATA_CMD_FPDMA_READ: + case ATA_CMD_FPDMA_WRITE: + mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0); + mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0); + break; +#endif /* FIXME: remove this line when NCQ added */ + default: + /* The only other commands EDMA supports in non-queued and + * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none + * of which are defined/used by Linux. If we get here, this + * driver needs work. + * + * FIXME: modify libata to give qc_prep a return value and + * return error here. + */ + BUG_ON(tf->command); + break; + } + mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0); + mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0); + mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0); + mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0); + mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0); + mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0); + mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0); + mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0); + mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ + + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) { + return; + } + mv_fill_sg(qc); +} + +/** + * mv_qc_issue - Initiate a command to the host + * @qc: queued command to start + * + * This routine simply redirects to the general purpose routine + * if command is not DMA. Else, it sanity checks our local + * caches of the request producer/consumer indices then enables + * DMA and bumps the request producer index. + * + * LOCKING: + * Inherited from caller. 
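+ *
+ * (Worked example of the pointer encoding used below, derived from the
+ * constants above: CRQBs are 32 B each, so a request index in 0..31
+ * shifted by EDMA_REQ_Q_PTR_SHIFT (5) is exactly its byte offset in
+ * the 1 KB-aligned request ring, occupying bits 9:5 of
+ * EDMA_REQ_Q_IN_PTR, while EDMA_REQ_Q_BASE_LO_MASK (0xfffffc00) keeps
+ * the ring base in bits 31:10.  E.g. req_producer == 3 encodes as 0x60
+ * in the low bits.  The response ring works the same way with 8 B
+ * CRPBs, a shift of 3 and a 256 B-aligned base.)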
+ */ +static int mv_qc_issue(struct ata_queued_cmd *qc) +{ + void __iomem *port_mmio = mv_ap_base(qc->ap); + struct mv_port_priv *pp = qc->ap->private_data; + u32 in_ptr; + + if (ATA_PROT_DMA != qc->tf.protocol) { + /* We're about to send a non-EDMA capable command to the + * port. Turn off EDMA so there won't be problems accessing + * shadow block, etc registers. + */ + mv_stop_dma(qc->ap); + return ata_qc_issue_prot(qc); + } + + in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); + + /* the req producer index should be the same as we remember it */ + assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == + pp->req_producer); + /* until we do queuing, the queue should be empty at this point */ + assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == + ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >> + EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); + + mv_inc_q_index(&pp->req_producer); /* now incr producer index */ + + mv_start_dma(port_mmio, pp); + + /* and write the request in pointer to kick the EDMA to life */ + in_ptr &= EDMA_REQ_Q_BASE_LO_MASK; + in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT; + writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS); + + return 0; +} + +/** + * mv_get_crpb_status - get status from most recently completed cmd + * @ap: ATA channel to manipulate + * + * This routine is for use when the port is in DMA mode, when it + * will be using the CRPB (command response block) method of + * returning command completion information. We assert indices + * are good, grab status, and bump the response consumer index to + * prove that we're up to date. + * + * LOCKING: + * Inherited from caller. + */ +static u8 mv_get_crpb_status(struct ata_port *ap) +{ + void __iomem *port_mmio = mv_ap_base(ap); + struct mv_port_priv *pp = ap->private_data; + u32 out_ptr; + + out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); + + /* the response consumer index should be the same as we remember it */ + assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == + pp->rsp_consumer); + + /* increment our consumer index... */ + pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer); + + /* and, until we do NCQ, there should only be 1 CRPB waiting */ + assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >> + EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) == + pp->rsp_consumer); + + /* write out our inc'd consumer index so EDMA knows we're caught up */ + out_ptr &= EDMA_RSP_Q_BASE_LO_MASK; + out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT; + writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); + + /* Return ATA status register for completed CRPB */ + return (pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT); +} + +/** + * mv_err_intr - Handle error interrupts on the port + * @ap: ATA channel to manipulate + * + * In most cases, just clear the interrupt and move on. However, + * some cases require an eDMA reset, which is done right before + * the COMRESET in mv_phy_reset(). The SERR case requires a + * clear of pending errors in the SATA SERROR register. Finally, + * if the port disabled DMA, update our cached copy to match. + * + * LOCKING: + * Inherited from caller. 
+ */ static void mv_err_intr(struct ata_port *ap) { - void __iomem *port_mmio; + void __iomem *port_mmio = mv_ap_base(ap); u32 edma_err_cause, serr = 0; - /* bug here b/c we got an err int on a port we don't know about, - * so there's no way to clear it - */ - BUG_ON(NULL == ap); - port_mmio = mv_ap_base(ap); - edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); if (EDMA_ERR_SERR & edma_err_cause) { serr = scr_read(ap, SCR_ERROR); scr_write_flush(ap, SCR_ERROR, serr); } - DPRINTK("port %u error; EDMA err cause: 0x%08x SERR: 0x%08x\n", - ap->port_no, edma_err_cause, serr); + if (EDMA_ERR_SELF_DIS & edma_err_cause) { + struct mv_port_priv *pp = ap->private_data; + pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; + } + DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x " + "SERR: 0x%08x\n", ap->id, edma_err_cause, serr); /* Clear EDMA now that SERR cleanup done */ writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); @@ -477,7 +1042,21 @@ static void mv_err_intr(struct ata_port *ap) } } -/* Handle any outstanding interrupts in a single SATAHC +/** + * mv_host_intr - Handle all interrupts on the given host controller + * @host_set: host specific structure + * @relevant: port error bits relevant to this host controller + * @hc: which host controller we're to look at + * + * Read then write clear the HC interrupt status then walk each + * port connected to the HC and see if it needs servicing. Port + * success ints are reported in the HC interrupt status reg, the + * port error ints are reported in the higher level main + * interrupt status register and thus are passed in via the + * 'relevant' argument. + * + * LOCKING: + * Inherited from caller. */ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, unsigned int hc) @@ -487,8 +1066,8 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, struct ata_port *ap; struct ata_queued_cmd *qc; u32 hc_irq_cause; - int shift, port, port0, hard_port; - u8 ata_status; + int shift, port, port0, hard_port, handled; + u8 ata_status = 0; if (hc == 0) { port0 = 0; @@ -499,7 +1078,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, /* we'll need the HC success int register in most cases */ hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS); if (hc_irq_cause) { - writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS); + writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); } VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", @@ -508,35 +1087,38 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { ap = host_set->ports[port]; hard_port = port & MV_PORT_MASK; /* range 0-3 */ - ata_status = 0xffU; + handled = 0; /* ensure ata_status is set if handled++ */ - if (((CRBP_DMA_DONE | DEV_IRQ) << hard_port) & hc_irq_cause) { - BUG_ON(NULL == ap); - /* rcv'd new resp, basic DMA complete, or ATA IRQ */ - /* This is needed to clear the ATA INTRQ. - * FIXME: don't read the status reg in EDMA mode! 
+ if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) { + /* new CRPB on the queue; just one at a time until NCQ + */ + ata_status = mv_get_crpb_status(ap); + handled++; + } else if ((DEV_IRQ << hard_port) & hc_irq_cause) { + /* received ATA IRQ; read the status reg to clear INTRQ */ ata_status = readb((void __iomem *) ap->ioaddr.status_addr); + handled++; } - shift = port * 2; + shift = port << 1; /* (port * 2) */ if (port >= MV_PORTS_PER_HC) { shift++; /* skip bit 8 in the HC Main IRQ reg */ } if ((PORT0_ERR << shift) & relevant) { mv_err_intr(ap); - /* FIXME: smart to OR in ATA_ERR? */ + /* OR in ATA_ERR to ensure libata knows we took one */ ata_status = readb((void __iomem *) ap->ioaddr.status_addr) | ATA_ERR; + handled++; } - if (ap) { + if (handled && ap) { qc = ata_qc_from_tag(ap, ap->active_tag); if (NULL != qc) { VPRINTK("port %u IRQ found for qc, " "ata_status 0x%x\n", port,ata_status); - BUG_ON(0xffU == ata_status); /* mark qc status appropriately */ ata_qc_complete(qc, ata_status); } @@ -545,17 +1127,30 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, VPRINTK("EXIT\n"); } +/** + * mv_interrupt - + * @irq: unused + * @dev_instance: private data; in this case the host structure + * @regs: unused + * + * Read the read only register to determine if any host + * controllers have pending interrupts. If so, call lower level + * routine to handle. Also check for PCI errors which are only + * reported here. + * + * LOCKING: + * This routine holds the host_set lock while processing pending + * interrupts. + */ static irqreturn_t mv_interrupt(int irq, void *dev_instance, struct pt_regs *regs) { struct ata_host_set *host_set = dev_instance; unsigned int hc, handled = 0, n_hcs; - void __iomem *mmio; + void __iomem *mmio = host_set->mmio_base; u32 irq_stat; - mmio = host_set->mmio_base; irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); - n_hcs = mv_get_hc_count(host_set->ports[0]->flags); /* check the cases where we either have nothing pending or have read * a bogus register value which can indicate HW removal or PCI fault @@ -564,64 +1159,105 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance, return IRQ_NONE; } + n_hcs = mv_get_hc_count(host_set->ports[0]->flags); spin_lock(&host_set->lock); for (hc = 0; hc < n_hcs; hc++) { u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT)); if (relevant) { mv_host_intr(host_set, relevant, hc); - handled = 1; + handled++; } } if (PCI_ERR & irq_stat) { - /* FIXME: these are all masked by default, but still need - * to recover from them properly. - */ - } + printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n", + readl(mmio + PCI_IRQ_CAUSE_OFS)); + DPRINTK("All regs @ PCI error\n"); + mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev)); + + writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); + handled++; + } spin_unlock(&host_set->lock); return IRQ_RETVAL(handled); } +/** + * mv_check_err - Return the error shadow register to caller. + * @ap: ATA channel to manipulate + * + * Marvell requires DMA to be stopped before accessing shadow + * registers. So we do that, then return the needed register. + * + * LOCKING: + * Inherited from caller. FIXME: protect mv_stop_dma with lock? 
+ */ +static u8 mv_check_err(struct ata_port *ap) +{ + mv_stop_dma(ap); /* can't read shadow regs if DMA on */ + return readb((void __iomem *) ap->ioaddr.error_addr); +} + +/** + * mv_phy_reset - Perform eDMA reset followed by COMRESET + * @ap: ATA channel to manipulate + * + * Part of this is taken from __sata_phy_reset and modified to + * not sleep since this routine gets called from interrupt level. + * + * LOCKING: + * Inherited from caller. This is coded to safe to call at + * interrupt level, i.e. it does not sleep. + */ static void mv_phy_reset(struct ata_port *ap) { void __iomem *port_mmio = mv_ap_base(ap); struct ata_taskfile tf; struct ata_device *dev = &ap->device[0]; - u32 edma = 0, bdma; + unsigned long timeout; VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio); - edma = readl(port_mmio + EDMA_CMD_OFS); - if (EDMA_EN & edma) { - /* disable EDMA if active */ - edma &= ~EDMA_EN; - writelfl(edma | EDMA_DS, port_mmio + EDMA_CMD_OFS); - udelay(1); - } else if (mv_port_bdma_capable(ap) && - (bdma = readl(port_mmio + BDMA_CMD_OFS)) & BDMA_START) { - /* disable BDMA if active */ - writelfl(bdma & ~BDMA_START, port_mmio + BDMA_CMD_OFS); - } + mv_stop_dma(ap); - writelfl(edma | ATA_RST, port_mmio + EDMA_CMD_OFS); + writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS); udelay(25); /* allow reset propagation */ /* Spec never mentions clearing the bit. Marvell's driver does * clear the bit, however. */ - writelfl(edma & ~ATA_RST, port_mmio + EDMA_CMD_OFS); + writelfl(0, port_mmio + EDMA_CMD_OFS); - VPRINTK("Done. Now calling __sata_phy_reset()\n"); + VPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x " + "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), + mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); /* proceed to init communications via the scr_control reg */ - __sata_phy_reset(ap); + scr_write_flush(ap, SCR_CONTROL, 0x301); + mdelay(1); + scr_write_flush(ap, SCR_CONTROL, 0x300); + timeout = jiffies + (HZ * 1); + do { + mdelay(10); + if ((scr_read(ap, SCR_STATUS) & 0xf) != 1) + break; + } while (time_before(jiffies, timeout)); - if (ap->flags & ATA_FLAG_PORT_DISABLED) { - VPRINTK("Port disabled pre-sig. Exiting.\n"); + VPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x " + "SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS), + mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL)); + + if (sata_dev_present(ap)) { + ata_port_probe(ap); + } else { + printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n", + ap->id, scr_read(ap, SCR_STATUS)); + ata_port_disable(ap); return; } + ap->cbl = ATA_CBL_SATA; tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr); tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr); @@ -636,37 +1272,118 @@ static void mv_phy_reset(struct ata_port *ap) VPRINTK("EXIT\n"); } -static void mv_port_init(struct ata_ioports *port, unsigned long base) +/** + * mv_eng_timeout - Routine called by libata when SCSI times out I/O + * @ap: ATA channel to manipulate + * + * Intent is to clear all pending error conditions, reset the + * chip/bus, fail the command, and move on. + * + * LOCKING: + * This routine holds the host_set lock while failing the command. 
+ */ +static void mv_eng_timeout(struct ata_port *ap) { - /* PIO related setup */ - port->data_addr = base + SHD_PIO_DATA_OFS; - port->error_addr = port->feature_addr = base + SHD_FEA_ERR_OFS; - port->nsect_addr = base + SHD_SECT_CNT_OFS; - port->lbal_addr = base + SHD_LBA_L_OFS; - port->lbam_addr = base + SHD_LBA_M_OFS; - port->lbah_addr = base + SHD_LBA_H_OFS; - port->device_addr = base + SHD_DEV_HD_OFS; - port->status_addr = port->command_addr = base + SHD_CMD_STA_OFS; - port->altstatus_addr = port->ctl_addr = base + SHD_CTL_AST_OFS; - /* unused */ - port->cmd_addr = port->bmdma_addr = port->scr_addr = 0; + struct ata_queued_cmd *qc; + unsigned long flags; - /* unmask all EDMA error interrupts */ - writel(~0, (void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS); + printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id); + DPRINTK("All regs @ start of eng_timeout\n"); + mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no, + to_pci_dev(ap->host_set->dev)); - VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", - readl((void __iomem *)base + EDMA_CFG_OFS), - readl((void __iomem *)base + EDMA_ERR_IRQ_CAUSE_OFS), - readl((void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS)); + qc = ata_qc_from_tag(ap, ap->active_tag); + printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n", + ap->host_set->mmio_base, ap, qc, qc->scsicmd, + &qc->scsicmd->cmnd); + + mv_err_intr(ap); + mv_phy_reset(ap); + + if (!qc) { + printk(KERN_ERR "ata%u: BUG: timeout without command\n", + ap->id); + } else { + /* hack alert! We cannot use the supplied completion + * function from inside the ->eh_strategy_handler() thread. + * libata is the only user of ->eh_strategy_handler() in + * any kernel, so the default scsi_done() assumes it is + * not being called from the SCSI EH. + */ + spin_lock_irqsave(&ap->host_set->lock, flags); + qc->scsidone = scsi_finish_command; + ata_qc_complete(qc, ATA_ERR); + spin_unlock_irqrestore(&ap->host_set->lock, flags); + } } +/** + * mv_port_init - Perform some early initialization on a single port. + * @port: libata data structure storing shadow register addresses + * @port_mmio: base address of the port + * + * Initialize shadow register mmio addresses, clear outstanding + * interrupts on the port, and unmask interrupts for the future + * start of the port. + * + * LOCKING: + * Inherited from caller. 
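+ *
+ * (The shadow block is a window of 32-bit registers starting at
+ * SHD_BLK_OFS (0x100), one u32 per taskfile register, so
+ * shd_base + 4 * ATA_REG_x gives data at 0x100, error/feature at
+ * 0x104, ..., status/command at 0x11c, and control/altstatus at
+ * 0x100 + SHD_CTL_AST_OFS = 0x120, i.e. the same offsets the old
+ * SHD_*_OFS constants spelled out individually.)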
+ */ +static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) +{ + unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS; + unsigned serr_ofs; + + /* PIO related setup + */ + port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA); + port->error_addr = + port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR); + port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT); + port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL); + port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM); + port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH); + port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE); + port->status_addr = + port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS); + /* special case: control/altstatus doesn't have ATA_REG_ address */ + port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS; + + /* unused: */ + port->cmd_addr = port->bmdma_addr = port->scr_addr = 0; + + /* Clear any currently outstanding port interrupt conditions */ + serr_ofs = mv_scr_offset(SCR_ERROR); + writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs); + writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); + + /* unmask all EDMA error interrupts */ + writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS); + + VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", + readl(port_mmio + EDMA_CFG_OFS), + readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS), + readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS)); +} + +/** + * mv_host_init - Perform some early initialization of the host. + * @probe_ent: early data struct representing the host + * + * If possible, do an early global reset of the host. Then do + * our port init and clear/unmask all/relevant host interrupts. + * + * LOCKING: + * Inherited from caller. 
+ */ static int mv_host_init(struct ata_probe_ent *probe_ent) { int rc = 0, n_hc, port, hc; void __iomem *mmio = probe_ent->mmio_base; void __iomem *port_mmio; - if (mv_master_reset(probe_ent->mmio_base)) { + if ((MV_FLAG_GLBL_SFT_RST & probe_ent->host_flags) && + mv_global_soft_reset(probe_ent->mmio_base)) { rc = 1; goto done; } @@ -676,17 +1393,27 @@ static int mv_host_init(struct ata_probe_ent *probe_ent) for (port = 0; port < probe_ent->n_ports; port++) { port_mmio = mv_port_base(mmio, port); - mv_port_init(&probe_ent->port[port], (unsigned long)port_mmio); + mv_port_init(&probe_ent->port[port], port_mmio); } for (hc = 0; hc < n_hc; hc++) { - VPRINTK("HC%i: HC config=0x%08x HC IRQ cause=0x%08x\n", hc, - readl(mv_hc_base(mmio, hc) + HC_CFG_OFS), - readl(mv_hc_base(mmio, hc) + HC_IRQ_CAUSE_OFS)); + void __iomem *hc_mmio = mv_hc_base(mmio, hc); + + VPRINTK("HC%i: HC config=0x%08x HC IRQ cause " + "(before clear)=0x%08x\n", hc, + readl(hc_mmio + HC_CFG_OFS), + readl(hc_mmio + HC_IRQ_CAUSE_OFS)); + + /* Clear any currently outstanding hc interrupt conditions */ + writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS); } - writel(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS); - writel(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS); + /* Clear any currently outstanding host interrupt conditions */ + writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); + + /* and unmask interrupt generation for host regs */ + writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS); + writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS); VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x " "PCI int cause/mask=0x%08x/0x%08x\n", @@ -694,11 +1421,53 @@ static int mv_host_init(struct ata_probe_ent *probe_ent) readl(mmio + HC_MAIN_IRQ_MASK_OFS), readl(mmio + PCI_IRQ_CAUSE_OFS), readl(mmio + PCI_IRQ_MASK_OFS)); - - done: +done: return rc; } +/** + * mv_print_info - Dump key info to kernel log for perusal. + * @probe_ent: early data struct representing the host + * + * FIXME: complete this. + * + * LOCKING: + * Inherited from caller. + */ +static void mv_print_info(struct ata_probe_ent *probe_ent) +{ + struct pci_dev *pdev = to_pci_dev(probe_ent->dev); + struct mv_host_priv *hpriv = probe_ent->private_data; + u8 rev_id, scc; + const char *scc_s; + + /* Use this to determine the HW stepping of the chip so we know + * what errata to workaround + */ + pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); + + pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc); + if (scc == 0) + scc_s = "SCSI"; + else if (scc == 0x01) + scc_s = "RAID"; + else + scc_s = "unknown"; + + printk(KERN_INFO DRV_NAME + "(%s) %u slots %u ports %s mode IRQ via %s\n", + pci_name(pdev), (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports, + scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx"); +} + +/** + * mv_init_one - handle a positive probe of a Marvell host + * @pdev: PCI device found + * @ent: PCI device ID entry for the matched host + * + * LOCKING: + * Inherited from caller. 
+ */ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int printed_version = 0; @@ -706,16 +1475,12 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) struct mv_host_priv *hpriv; unsigned int board_idx = (unsigned int)ent->driver_data; void __iomem *mmio_base; - int pci_dev_busy = 0; - int rc; + int pci_dev_busy = 0, rc; if (!printed_version++) { - printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); + printk(KERN_INFO DRV_NAME " version " DRV_VERSION "\n"); } - VPRINTK("ENTER for PCI Bus:Slot.Func=%u:%u.%u\n", pdev->bus->number, - PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); - rc = pci_enable_device(pdev); if (rc) { return rc; @@ -727,8 +1492,6 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out; } - pci_intx(pdev, 1); - probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); if (probe_ent == NULL) { rc = -ENOMEM; @@ -739,8 +1502,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) probe_ent->dev = pci_dev_to_dev(pdev); INIT_LIST_HEAD(&probe_ent->node); - mmio_base = ioremap_nocache(pci_resource_start(pdev, MV_PRIMARY_BAR), - pci_resource_len(pdev, MV_PRIMARY_BAR)); + mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0); if (mmio_base == NULL) { rc = -ENOMEM; goto err_out_free_ent; @@ -769,37 +1531,40 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) { goto err_out_hpriv; } -/* mv_print_info(probe_ent); */ - { - int b, w; - u32 dw[4]; /* hold a line of 16b */ - VPRINTK("PCI config space:\n"); - for (b = 0; b < 0x40; ) { - for (w = 0; w < 4; w++) { - (void) pci_read_config_dword(pdev,b,&dw[w]); - b += sizeof(*dw); - } - VPRINTK("%08x %08x %08x %08x\n", - dw[0],dw[1],dw[2],dw[3]); - } + /* Enable interrupts */ + if (pci_enable_msi(pdev) == 0) { + hpriv->hp_flags |= MV_HP_FLAG_MSI; + } else { + pci_intx(pdev, 1); } - /* FIXME: check ata_device_add return value */ - ata_device_add(probe_ent); - kfree(probe_ent); + mv_dump_pci_cfg(pdev, 0x68); + mv_print_info(probe_ent); + if (ata_device_add(probe_ent) == 0) { + rc = -ENODEV; /* No devices discovered */ + goto err_out_dev_add; + } + + kfree(probe_ent); return 0; - err_out_hpriv: +err_out_dev_add: + if (MV_HP_FLAG_MSI & hpriv->hp_flags) { + pci_disable_msi(pdev); + } else { + pci_intx(pdev, 0); + } +err_out_hpriv: kfree(hpriv); - err_out_iounmap: - iounmap(mmio_base); - err_out_free_ent: +err_out_iounmap: + pci_iounmap(pdev, mmio_base); +err_out_free_ent: kfree(probe_ent); - err_out_regions: +err_out_regions: pci_release_regions(pdev); - err_out: +err_out: if (!pci_dev_busy) { pci_disable_device(pdev); } diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c index a1d62dee3be6..1a56d6c79ddd 100644 --- a/drivers/scsi/sata_nv.c +++ b/drivers/scsi/sata_nv.c @@ -29,6 +29,8 @@ * NV-specific details such as register offsets, SATA phy location, * hotplug info, etc. * + * 0.09 + * - Fixed bug introduced by 0.08's MCP51 and MCP55 support. * * 0.08 * - Added support for MCP51 and MCP55. 
@@ -132,9 +134,7 @@ enum nv_host_type GENERIC, NFORCE2, NFORCE3, - CK804, - MCP51, - MCP55 + CK804 }; static struct pci_device_id nv_pci_tbl[] = { @@ -153,11 +153,13 @@ static struct pci_device_id nv_pci_tbl[] = { { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 }, { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, + { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC }, { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC }, @@ -236,7 +238,7 @@ static Scsi_Host_Template nv_sht = { .ordered_flush = 1, }; -static struct ata_port_operations nv_ops = { +static const struct ata_port_operations nv_ops = { .port_disable = ata_port_disable, .tf_load = ata_tf_load, .tf_read = ata_tf_read, @@ -329,7 +331,7 @@ static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg) return 0xffffffffU; if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) - return readl((void*)ap->ioaddr.scr_addr + (sc_reg * 4)); + return readl((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4)); else return inl(ap->ioaddr.scr_addr + (sc_reg * 4)); } @@ -343,7 +345,7 @@ static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) return; if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) - writel(val, (void*)ap->ioaddr.scr_addr + (sc_reg * 4)); + writel(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4)); else outl(val, ap->ioaddr.scr_addr + (sc_reg * 4)); } @@ -403,7 +405,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) rc = -ENOMEM; ppi = &nv_port_info; - probe_ent = ata_pci_init_native_mode(pdev, &ppi); + probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); if (!probe_ent) goto err_out_regions; diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c index 538ad727bd2e..eee93b0016df 100644 --- a/drivers/scsi/sata_promise.c +++ b/drivers/scsi/sata_promise.c @@ -87,8 +87,8 @@ static void pdc_port_stop(struct ata_port *ap); static void pdc_pata_phy_reset(struct ata_port *ap); static void pdc_sata_phy_reset(struct ata_port *ap); static void pdc_qc_prep(struct ata_queued_cmd *qc); -static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf); -static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf); +static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); +static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); static void pdc_irq_clear(struct ata_port *ap); static int pdc_qc_issue_prot(struct ata_queued_cmd *qc); @@ -113,7 +113,7 @@ static Scsi_Host_Template pdc_ata_sht = { .ordered_flush = 1, }; -static struct ata_port_operations pdc_sata_ops = { +static const struct ata_port_operations pdc_sata_ops = { .port_disable = ata_port_disable, .tf_load = pdc_tf_load_mmio, .tf_read = ata_tf_read, @@ -136,7 +136,7 @@ static struct ata_port_operations pdc_sata_ops = { .host_stop = ata_pci_host_stop, }; -static struct ata_port_operations pdc_pata_ops = { +static const struct ata_port_operations pdc_pata_ops = 
{ .port_disable = ata_port_disable, .tf_load = pdc_tf_load_mmio, .tf_read = ata_tf_read, @@ -324,7 +324,7 @@ static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) { if (sc_reg > SCR_CONTROL) return 0xffffffffU; - return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4)); + return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); } @@ -333,7 +333,7 @@ static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, { if (sc_reg > SCR_CONTROL) return; - writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4)); + writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); } static void pdc_qc_prep(struct ata_queued_cmd *qc) @@ -438,11 +438,11 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap, break; default: - ap->stats.idle_irq++; - break; + ap->stats.idle_irq++; + break; } - return handled; + return handled; } static void pdc_irq_clear(struct ata_port *ap) @@ -523,8 +523,8 @@ static inline void pdc_packet_start(struct ata_queued_cmd *qc) pp->pkt[2] = seq; wmb(); /* flush PRD, pkt writes */ - writel(pp->pkt_dma, (void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); - readl((void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */ + writel(pp->pkt_dma, (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); + readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */ } static int pdc_qc_issue_prot(struct ata_queued_cmd *qc) @@ -546,7 +546,7 @@ static int pdc_qc_issue_prot(struct ata_queued_cmd *qc) return ata_qc_issue_prot(qc); } -static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf) +static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) { WARN_ON (tf->protocol == ATA_PROT_DMA || tf->protocol == ATA_PROT_NODATA); @@ -554,7 +554,7 @@ static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf) } -static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf) +static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf) { WARN_ON (tf->protocol == ATA_PROT_DMA || tf->protocol == ATA_PROT_NODATA); diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c index ffcdeb68641c..250dafa6bc36 100644 --- a/drivers/scsi/sata_qstor.c +++ b/drivers/scsi/sata_qstor.c @@ -51,8 +51,6 @@ enum { QS_PRD_BYTES = QS_MAX_PRD * 16, QS_PKT_BYTES = QS_CPB_BYTES + QS_PRD_BYTES, - QS_DMA_BOUNDARY = ~0UL, - /* global register offsets */ QS_HCF_CNFG3 = 0x0003, /* host configuration offset */ QS_HID_HPHY = 0x0004, /* host physical interface info */ @@ -101,6 +99,10 @@ enum { board_2068_idx = 0, /* QStor 4-port SATA/RAID */ }; +enum { + QS_DMA_BOUNDARY = ~0UL +}; + typedef enum { qs_state_idle, qs_state_pkt, qs_state_mmio } qs_state_t; struct qs_port_priv { @@ -145,7 +147,7 @@ static Scsi_Host_Template qs_ata_sht = { .bios_param = ata_std_bios_param, }; -static struct ata_port_operations qs_ata_ops = { +static const struct ata_port_operations qs_ata_ops = { .port_disable = ata_port_disable, .tf_load = ata_tf_load, .tf_read = ata_tf_read, diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c index ba98a175ee3a..3a056173fb95 100644 --- a/drivers/scsi/sata_sil.c +++ b/drivers/scsi/sata_sil.c @@ -150,7 +150,7 @@ static Scsi_Host_Template sil_sht = { .ordered_flush = 1, }; -static struct ata_port_operations sil_ops = { +static const struct ata_port_operations sil_ops = { .port_disable = ata_port_disable, .dev_config = sil_dev_config, .tf_load = ata_tf_load, @@ -289,7 +289,7 @@ static inline unsigned long sil_scr_addr(struct ata_port *ap, 
unsigned int sc_re static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg) { - void *mmio = (void *) sil_scr_addr(ap, sc_reg); + void __iomem *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg); if (mmio) return readl(mmio); return 0xffffffffU; @@ -297,7 +297,7 @@ static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg) static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) { - void *mmio = (void *) sil_scr_addr(ap, sc_reg); + void *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg); if (mmio) writel(val, mmio); } diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c new file mode 100644 index 000000000000..32d730bd5bb6 --- /dev/null +++ b/drivers/scsi/sata_sil24.c @@ -0,0 +1,875 @@ +/* + * sata_sil24.c - Driver for Silicon Image 3124/3132 SATA-2 controllers + * + * Copyright 2005 Tejun Heo + * + * Based on preview driver from Silicon Image. + * + * NOTE: No NCQ/ATAPI support yet. The preview driver didn't support + * NCQ nor ATAPI, and, unfortunately, I couldn't find out how to make + * those work. Enabling those shouldn't be difficult. Basic + * structure is all there (in libata-dev tree). If you have any + * information about this hardware, please contact me or linux-ide. + * Info is needed on... + * + * - How to issue tagged commands and turn on sactive on issue accordingly. + * - Where to put an ATAPI command and how to tell the device to send it. + * - How to enable/use 64bit. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <scsi/scsi_host.h>
+#include "scsi.h"
+#include <linux/libata.h>
+#include <asm/io.h>
+
+#define DRV_NAME "sata_sil24"
+#define DRV_VERSION "0.22" /* Silicon Image's preview driver was 0.10 */
+
+/*
+ * Port request block (PRB) 32 bytes
+ */
+struct sil24_prb {
+ u16 ctrl;
+ u16 prot;
+ u32 rx_cnt;
+ u8 fis[6 * 4];
+};
+
+/*
+ * Scatter gather entry (SGE) 16 bytes
+ */
+struct sil24_sge {
+ u64 addr;
+ u32 cnt;
+ u32 flags;
+};
+
+/*
+ * Port multiplier
+ */
+struct sil24_port_multiplier {
+ u32 diag;
+ u32 sactive;
+};
+
+enum {
+ /*
+ * Global controller registers (128 bytes @ BAR0)
+ */
+ /* 32 bit regs */
+ HOST_SLOT_STAT = 0x00, /* 32 bit slot stat * 4 */
+ HOST_CTRL = 0x40,
+ HOST_IRQ_STAT = 0x44,
+ HOST_PHY_CFG = 0x48,
+ HOST_BIST_CTRL = 0x50,
+ HOST_BIST_PTRN = 0x54,
+ HOST_BIST_STAT = 0x58,
+ HOST_MEM_BIST_STAT = 0x5c,
+ HOST_FLASH_CMD = 0x70,
+ /* 8 bit regs */
+ HOST_FLASH_DATA = 0x74,
+ HOST_TRANSITION_DETECT = 0x75,
+ HOST_GPIO_CTRL = 0x76,
+ HOST_I2C_ADDR = 0x78, /* 32 bit */
+ HOST_I2C_DATA = 0x7c,
+ HOST_I2C_XFER_CNT = 0x7e,
+ HOST_I2C_CTRL = 0x7f,
+
+ /* HOST_SLOT_STAT bits */
+ HOST_SSTAT_ATTN = (1 << 31),
+
+ /*
+ * Port registers
+ * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
+ */
+ PORT_REGS_SIZE = 0x2000,
+ PORT_PRB = 0x0000, /* (32 bytes PRB + 16 bytes SGEs * 6) * 31 (3968 bytes) */
+
+ PORT_PM = 0x0f80, /* 8 bytes PM * 16 (128 bytes) */
+ /* 32 bit regs */
+ PORT_CTRL_STAT = 0x1000, /* write: ctrl-set, read: stat */
+ PORT_CTRL_CLR = 0x1004, /* write: ctrl-clear */
+ PORT_IRQ_STAT = 0x1008, /* high: status, low: interrupt */
+ PORT_IRQ_ENABLE_SET = 0x1010, /* write: enable-set */
+ PORT_IRQ_ENABLE_CLR = 0x1014, /* write: enable-clear */
+ PORT_ACTIVATE_UPPER_ADDR= 0x101c,
+ PORT_EXEC_FIFO = 0x1020, /* command execution fifo */
+ PORT_CMD_ERR = 0x1024, /* command error number */
+ PORT_FIS_CFG = 0x1028,
+ PORT_FIFO_THRES = 0x102c,
+ /* 16 bit regs */
+ PORT_DECODE_ERR_CNT = 0x1040,
+ PORT_DECODE_ERR_THRESH = 0x1042,
+ PORT_CRC_ERR_CNT = 0x1044,
+ PORT_CRC_ERR_THRESH = 0x1046,
+ PORT_HSHK_ERR_CNT = 0x1048,
+ PORT_HSHK_ERR_THRESH = 0x104a,
+ /* 32 bit regs */
+ PORT_PHY_CFG = 0x1050,
+ PORT_SLOT_STAT = 0x1800,
+ PORT_CMD_ACTIVATE = 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
+ PORT_EXEC_DIAG = 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
+ PORT_PSD_DIAG = 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
+ PORT_SCONTROL = 0x1f00,
+ PORT_SSTATUS = 0x1f04,
+ PORT_SERROR = 0x1f08,
+ PORT_SACTIVE = 0x1f0c,
+
+ /* PORT_CTRL_STAT bits */
+ PORT_CS_PORT_RST = (1 << 0), /* port reset */
+ PORT_CS_DEV_RST = (1 << 1), /* device reset */
+ PORT_CS_INIT = (1 << 2), /* port initialize */
+ PORT_CS_IRQ_WOC = (1 << 3), /* interrupt write one to clear */
+ PORT_CS_RESUME = (1 << 6), /* port resume */
+ PORT_CS_32BIT_ACTV = (1 << 10), /* 32-bit activation */
+ PORT_CS_PM_EN = (1 << 13), /* port multiplier enable */
+ PORT_CS_RDY = (1 << 31), /* port ready to accept commands */
+
+ /* PORT_IRQ_STAT/ENABLE_SET/CLR */
+ /* bits[11:0] are masked */
+ PORT_IRQ_COMPLETE = (1 << 0), /* command(s) completed */
+ PORT_IRQ_ERROR = (1 << 1), /* command execution error */
+ PORT_IRQ_PORTRDY_CHG = (1 << 2), /* port ready change */
+ PORT_IRQ_PWR_CHG = (1 << 3), /* power management change */
+ PORT_IRQ_PHYRDY_CHG = (1 << 4), /* PHY ready change */
+ PORT_IRQ_COMWAKE = (1 << 5), /* COMWAKE received */
+ PORT_IRQ_UNK_FIS = (1 << 6), /* Unknown FIS received */
+ PORT_IRQ_SDB_FIS = (1 << 11), /* SDB FIS received */
+
+ /* bits[27:16] are unmasked (raw) */
+ PORT_IRQ_RAW_SHIFT = 16,
+ PORT_IRQ_MASKED_MASK = 0x7ff,
+ PORT_IRQ_RAW_MASK = (0x7ff << PORT_IRQ_RAW_SHIFT),
+
+ /* ENABLE_SET/CLR specific, intr steering - 2 bit field */
+ PORT_IRQ_STEER_SHIFT = 30,
+ PORT_IRQ_STEER_MASK = (3 << PORT_IRQ_STEER_SHIFT),
+
+ /* PORT_CMD_ERR constants */
+ PORT_CERR_DEV = 1, /* Error bit in D2H Register FIS */
+ PORT_CERR_SDB = 2, /* Error bit in SDB FIS */
+ PORT_CERR_DATA = 3, /* Error in data FIS not detected by dev */
+ PORT_CERR_SEND = 4, /* Initial cmd FIS transmission failure */
+ PORT_CERR_INCONSISTENT = 5, /* Protocol mismatch */
+ PORT_CERR_DIRECTION = 6, /* Data direction mismatch */
+ PORT_CERR_UNDERRUN = 7, /* Ran out of SGEs while writing */
+ PORT_CERR_OVERRUN = 8, /* Ran out of SGEs while reading */
+ PORT_CERR_PKT_PROT = 11, /* DIR invalid in 1st PIO setup of ATAPI */
+ PORT_CERR_SGT_BOUNDARY = 16, /* PLD ecode 00 - SGT not on qword boundary */
+ PORT_CERR_SGT_TGTABRT = 17, /* PLD ecode 01 - target abort */
+ PORT_CERR_SGT_MSTABRT = 18, /* PLD ecode 10 - master abort */
+ PORT_CERR_SGT_PCIPERR = 19, /* PLD ecode 11 - PCI parity err while fetching SGT */
+ PORT_CERR_CMD_BOUNDARY = 24, /* ctrl[15:13] 001 - PRB not on qword boundary */
+ PORT_CERR_CMD_TGTABRT = 25, /* ctrl[15:13] 010 - target abort */
+ PORT_CERR_CMD_MSTABRT = 26, /* ctrl[15:13] 100 - master abort */
+ PORT_CERR_CMD_PCIPERR = 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
+ PORT_CERR_XFR_UNDEF = 32, /* PSD ecode 00 - undefined */
+ PORT_CERR_XFR_TGTABRT = 33, /* PSD ecode 01 - target abort */
+ PORT_CERR_XFR_MSGABRT = 34, /* PSD ecode 10 - master abort */
+ PORT_CERR_XFR_PCIPERR = 35, /* PSD ecode 11 - PCI parity err during transfer */
+ PORT_CERR_SENDSERVICE = 36, /* FIS received while sending service */
+
+ /*
+ * Other constants
+ */
+ SGE_TRM = (1 << 31), /* Last SGE in chain */
+ PRB_SOFT_RST = (1 << 7), /* Soft reset request (ign BSY?) */
+
+ /* board id */
+ BID_SIL3124 = 0,
+ BID_SIL3132 = 1,
+ BID_SIL3131 = 2,
+
+ IRQ_STAT_4PORTS = 0xf,
+};
+
+struct sil24_cmd_block {
+ struct sil24_prb prb;
+ struct sil24_sge sge[LIBATA_MAX_PRD];
+};
+
+/*
+ * ap->private_data
+ *
+ * The preview driver always returned 0 for status. We emulate it
+ * here from the previous interrupt.
+ */
+struct sil24_port_priv {
+ struct sil24_cmd_block *cmd_block; /* 32 cmd blocks */
+ dma_addr_t cmd_block_dma; /* DMA base addr for them */
+ struct ata_taskfile tf; /* Cached taskfile registers */
+};
+
+/* ap->host_set->private_data */
+struct sil24_host_priv {
+ void *host_base; /* global controller control (128 bytes @BAR0) */
+ void *port_base; /* port registers (4 * 8192 bytes @BAR2) */
+};
+
+static u8 sil24_check_status(struct ata_port *ap);
+static u8 sil24_check_err(struct ata_port *ap);
+static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
+static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
+static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
+static void sil24_phy_reset(struct ata_port *ap);
+static void sil24_qc_prep(struct ata_queued_cmd *qc);
+static int sil24_qc_issue(struct ata_queued_cmd *qc);
+static void sil24_irq_clear(struct ata_port *ap);
+static void sil24_eng_timeout(struct ata_port *ap);
+static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int sil24_port_start(struct ata_port *ap);
+static void sil24_port_stop(struct ata_port *ap);
+static void sil24_host_stop(struct ata_host_set *host_set);
+static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+
+static struct pci_device_id sil24_pci_tbl[] = {
+ { 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
+ { 0x1095, 0x3132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3132 },
+ { 0x1095, 0x3131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
+ { 0x1095, 0x3531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
+ { } /* terminate list */
+};
+
+static struct pci_driver sil24_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = sil24_pci_tbl,
+ .probe = sil24_init_one,
+ .remove = ata_pci_remove_one, /* safe? */
+};
+
+static Scsi_Host_Template sil24_sht = {
+ .module = THIS_MODULE,
+ .name = DRV_NAME,
+ .ioctl = ata_scsi_ioctl,
+ .queuecommand = ata_scsi_queuecmd,
+ .eh_strategy_handler = ata_scsi_error,
+ .can_queue = ATA_DEF_QUEUE,
+ .this_id = ATA_SHT_THIS_ID,
+ .sg_tablesize = LIBATA_MAX_PRD,
+ .max_sectors = ATA_MAX_SECTORS,
+ .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
+ .emulated = ATA_SHT_EMULATED,
+ .use_clustering = ATA_SHT_USE_CLUSTERING,
+ .proc_name = DRV_NAME,
+ .dma_boundary = ATA_DMA_BOUNDARY,
+ .slave_configure = ata_scsi_slave_config,
+ .bios_param = ata_std_bios_param,
+ .ordered_flush = 1, /* NCQ not supported yet */
+};
+
+static const struct ata_port_operations sil24_ops = {
+ .port_disable = ata_port_disable,
+
+ .check_status = sil24_check_status,
+ .check_altstatus = sil24_check_status,
+ .check_err = sil24_check_err,
+ .dev_select = ata_noop_dev_select,
+
+ .tf_read = sil24_tf_read,
+
+ .phy_reset = sil24_phy_reset,
+
+ .qc_prep = sil24_qc_prep,
+ .qc_issue = sil24_qc_issue,
+
+ .eng_timeout = sil24_eng_timeout,
+
+ .irq_handler = sil24_interrupt,
+ .irq_clear = sil24_irq_clear,
+
+ .scr_read = sil24_scr_read,
+ .scr_write = sil24_scr_write,
+
+ .port_start = sil24_port_start,
+ .port_stop = sil24_port_stop,
+ .host_stop = sil24_host_stop,
+};
+
+/*
+ * Use bits 30-31 of host_flags to encode available port numbers.
+ * Current maximum is 4.
+ */ +#define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30) +#define SIL24_FLAG2NPORTS(flag) ((((flag) >> 30) & 0x3) + 1) + +static struct ata_port_info sil24_port_info[] = { + /* sil_3124 */ + { + .sht = &sil24_sht, + .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | + ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | + ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(4), + .pio_mask = 0x1f, /* pio0-4 */ + .mwdma_mask = 0x07, /* mwdma0-2 */ + .udma_mask = 0x3f, /* udma0-5 */ + .port_ops = &sil24_ops, + }, + /* sil_3132 */ + { + .sht = &sil24_sht, + .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | + ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | + ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(2), + .pio_mask = 0x1f, /* pio0-4 */ + .mwdma_mask = 0x07, /* mwdma0-2 */ + .udma_mask = 0x3f, /* udma0-5 */ + .port_ops = &sil24_ops, + }, + /* sil_3131/sil_3531 */ + { + .sht = &sil24_sht, + .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | + ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO | + ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(1), + .pio_mask = 0x1f, /* pio0-4 */ + .mwdma_mask = 0x07, /* mwdma0-2 */ + .udma_mask = 0x3f, /* udma0-5 */ + .port_ops = &sil24_ops, + }, +}; + +static inline void sil24_update_tf(struct ata_port *ap) +{ + struct sil24_port_priv *pp = ap->private_data; + void *port = (void *)ap->ioaddr.cmd_addr; + struct sil24_prb *prb = port; + + ata_tf_from_fis(prb->fis, &pp->tf); +} + +static u8 sil24_check_status(struct ata_port *ap) +{ + struct sil24_port_priv *pp = ap->private_data; + return pp->tf.command; +} + +static u8 sil24_check_err(struct ata_port *ap) +{ + struct sil24_port_priv *pp = ap->private_data; + return pp->tf.feature; +} + +static int sil24_scr_map[] = { + [SCR_CONTROL] = 0, + [SCR_STATUS] = 1, + [SCR_ERROR] = 2, + [SCR_ACTIVE] = 3, +}; + +static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg) +{ + void *scr_addr = (void *)ap->ioaddr.scr_addr; + if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { + void *addr; + addr = scr_addr + sil24_scr_map[sc_reg] * 4; + return readl(scr_addr + sil24_scr_map[sc_reg] * 4); + } + return 0xffffffffU; +} + +static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val) +{ + void *scr_addr = (void *)ap->ioaddr.scr_addr; + if (sc_reg < ARRAY_SIZE(sil24_scr_map)) { + void *addr; + addr = scr_addr + sil24_scr_map[sc_reg] * 4; + writel(val, scr_addr + sil24_scr_map[sc_reg] * 4); + } +} + +static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct sil24_port_priv *pp = ap->private_data; + *tf = pp->tf; +} + +static void sil24_phy_reset(struct ata_port *ap) +{ + __sata_phy_reset(ap); + /* + * No ATAPI yet. Just unconditionally indicate ATA device. + * If ATAPI device is attached, it will fail ATA_CMD_ID_ATA + * and libata core will ignore the device. + */ + if (!(ap->flags & ATA_FLAG_PORT_DISABLED)) + ap->device[0].class = ATA_DEV_ATA; +} + +static inline void sil24_fill_sg(struct ata_queued_cmd *qc, + struct sil24_cmd_block *cb) +{ + struct scatterlist *sg = qc->sg; + struct sil24_sge *sge = cb->sge; + unsigned i; + + for (i = 0; i < qc->n_elem; i++, sg++, sge++) { + sge->addr = cpu_to_le64(sg_dma_address(sg)); + sge->cnt = cpu_to_le32(sg_dma_len(sg)); + sge->flags = 0; + sge->flags = i < qc->n_elem - 1 ? 
0 : cpu_to_le32(SGE_TRM);
+ }
+}
+
+static void sil24_qc_prep(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct sil24_port_priv *pp = ap->private_data;
+ struct sil24_cmd_block *cb = pp->cmd_block + qc->tag;
+ struct sil24_prb *prb = &cb->prb;
+
+ switch (qc->tf.protocol) {
+ case ATA_PROT_PIO:
+ case ATA_PROT_DMA:
+ case ATA_PROT_NODATA:
+ break;
+ default:
+ /* ATAPI isn't supported yet */
+ BUG();
+ }
+
+ ata_tf_to_fis(&qc->tf, prb->fis, 0);
+
+ if (qc->flags & ATA_QCFLAG_DMAMAP)
+ sil24_fill_sg(qc, cb);
+}
+
+static int sil24_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ void *port = (void *)ap->ioaddr.cmd_addr;
+ struct sil24_port_priv *pp = ap->private_data;
+ dma_addr_t paddr = pp->cmd_block_dma + qc->tag * sizeof(*pp->cmd_block);
+
+ writel((u32)paddr, port + PORT_CMD_ACTIVATE);
+ return 0;
+}
+
+static void sil24_irq_clear(struct ata_port *ap)
+{
+ /* unused */
+}
+
+static int __sil24_reset_controller(void *port)
+{
+ int cnt;
+ u32 tmp;
+
+ /* Reset controller state. Is this correct? */
+ writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
+ readl(port + PORT_CTRL_STAT); /* sync */
+
+ /* Max ~100ms */
+ for (cnt = 0; cnt < 1000; cnt++) {
+ udelay(100);
+ tmp = readl(port + PORT_CTRL_STAT);
+ if (!(tmp & PORT_CS_DEV_RST))
+ break;
+ }
+
+ if (tmp & PORT_CS_DEV_RST)
+ return -1;
+ return 0;
+}
+
+static void sil24_reset_controller(struct ata_port *ap)
+{
+ printk(KERN_NOTICE DRV_NAME
+ " ata%u: resetting controller...\n", ap->id);
+ if (__sil24_reset_controller((void *)ap->ioaddr.cmd_addr))
+ printk(KERN_ERR DRV_NAME
+ " ata%u: failed to reset controller\n", ap->id);
+}
+
+static void sil24_eng_timeout(struct ata_port *ap)
+{
+ struct ata_queued_cmd *qc;
+
+ qc = ata_qc_from_tag(ap, ap->active_tag);
+ if (!qc) {
+ printk(KERN_ERR "ata%u: BUG: timeout without command\n",
+ ap->id);
+ return;
+ }
+
+ /*
+ * hack alert! We cannot use the supplied completion
+ * function from inside the ->eh_strategy_handler() thread.
+ * libata is the only user of ->eh_strategy_handler() in
+ * any kernel, so the default scsi_done() assumes it is
+ * not being called from the SCSI EH.
+ */
+ printk(KERN_ERR "ata%u: command timeout\n", ap->id);
+ qc->scsidone = scsi_finish_command;
+ ata_qc_complete(qc, ATA_ERR);
+
+ sil24_reset_controller(ap);
+}
+
+static void sil24_error_intr(struct ata_port *ap, u32 slot_stat)
+{
+ struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
+ struct sil24_port_priv *pp = ap->private_data;
+ void *port = (void *)ap->ioaddr.cmd_addr;
+ u32 irq_stat, cmd_err, sstatus, serror;
+
+ irq_stat = readl(port + PORT_IRQ_STAT);
+ writel(irq_stat, port + PORT_IRQ_STAT); /* clear irq */
+
+ if (!(irq_stat & PORT_IRQ_ERROR)) {
+ /* ignore non-completion, non-error irqs for now */
+ printk(KERN_WARNING DRV_NAME
+ " ata%u: non-error exception irq (irq_stat %x)\n",
+ ap->id, irq_stat);
+ return;
+ }
+
+ cmd_err = readl(port + PORT_CMD_ERR);
+ sstatus = readl(port + PORT_SSTATUS);
+ serror = readl(port + PORT_SERROR);
+ if (serror)
+ writel(serror, port + PORT_SERROR);
+
+ printk(KERN_ERR DRV_NAME " ata%u: error interrupt on port%d\n"
+ " stat=0x%x irq=0x%x cmd_err=%d sstatus=0x%x serror=0x%x\n",
+ ap->id, ap->port_no, slot_stat, irq_stat, cmd_err, sstatus, serror);
+
+ if (cmd_err == PORT_CERR_DEV || cmd_err == PORT_CERR_SDB) {
+ /*
+ * Device is reporting error, tf registers are valid.
+ */
+ sil24_update_tf(ap);
+ } else {
+ /*
+ * Other errors. libata currently doesn't have any
+ * mechanism to report these errors.
Just turn on
+ * ATA_ERR.
+ */
+ pp->tf.command = ATA_ERR;
+ }
+
+ if (qc)
+ ata_qc_complete(qc, pp->tf.command);
+
+ sil24_reset_controller(ap);
+}
+
+static inline void sil24_host_intr(struct ata_port *ap)
+{
+ struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
+ void *port = (void *)ap->ioaddr.cmd_addr;
+ u32 slot_stat;
+
+ slot_stat = readl(port + PORT_SLOT_STAT);
+ if (!(slot_stat & HOST_SSTAT_ATTN)) {
+ struct sil24_port_priv *pp = ap->private_data;
+ /*
+ * !HOST_SSTAT_ATTN guarantees successful completion,
+ * so reading back tf registers is unnecessary for
+ * most commands. TODO: read tf registers for
+ * commands which require these values on successful
+ * completion (EXECUTE DEVICE DIAGNOSTIC, CHECK POWER,
+ * DEVICE RESET and READ PORT MULTIPLIER (any more?)).
+ */
+ sil24_update_tf(ap);
+
+ if (qc)
+ ata_qc_complete(qc, pp->tf.command);
+ } else
+ sil24_error_intr(ap, slot_stat);
+}
+
+static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+ struct ata_host_set *host_set = dev_instance;
+ struct sil24_host_priv *hpriv = host_set->private_data;
+ unsigned handled = 0;
+ u32 status;
+ int i;
+
+ status = readl(hpriv->host_base + HOST_IRQ_STAT);
+
+ if (status == 0xffffffff) {
+ printk(KERN_ERR DRV_NAME ": IRQ status == 0xffffffff, "
+ "PCI fault or device removal?\n");
+ goto out;
+ }
+
+ if (!(status & IRQ_STAT_4PORTS))
+ goto out;
+
+ spin_lock(&host_set->lock);
+
+ for (i = 0; i < host_set->n_ports; i++)
+ if (status & (1 << i)) {
+ struct ata_port *ap = host_set->ports[i];
+ if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+ sil24_host_intr(host_set->ports[i]);
+ handled++;
+ } else
+ printk(KERN_ERR DRV_NAME
+ ": interrupt from disabled port %d\n", i);
+ }
+
+ spin_unlock(&host_set->lock);
+ out:
+ return IRQ_RETVAL(handled);
+}
+
+static int sil24_port_start(struct ata_port *ap)
+{
+ struct device *dev = ap->host_set->dev;
+ struct sil24_port_priv *pp;
+ struct sil24_cmd_block *cb;
+ size_t cb_size = sizeof(*cb);
+ dma_addr_t cb_dma;
+
+ pp = kmalloc(sizeof(*pp), GFP_KERNEL);
+ if (!pp)
+ return -ENOMEM;
+ memset(pp, 0, sizeof(*pp));
+
+ pp->tf.command = ATA_DRDY;
+
+ cb = dma_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
+ if (!cb) {
+ kfree(pp);
+ return -ENOMEM;
+ }
+ memset(cb, 0, cb_size);
+
+ pp->cmd_block = cb;
+ pp->cmd_block_dma = cb_dma;
+
+ ap->private_data = pp;
+
+ return 0;
+}
+
+static void sil24_port_stop(struct ata_port *ap)
+{
+ struct device *dev = ap->host_set->dev;
+ struct sil24_port_priv *pp = ap->private_data;
+ size_t cb_size = sizeof(*pp->cmd_block);
+
+ dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma);
+ kfree(pp);
+}
+
+static void sil24_host_stop(struct ata_host_set *host_set)
+{
+ struct sil24_host_priv *hpriv = host_set->private_data;
+
+ iounmap(hpriv->host_base);
+ iounmap(hpriv->port_base);
+ kfree(hpriv);
+}
+
+static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ static int printed_version = 0;
+ unsigned int board_id = (unsigned int)ent->driver_data;
+ struct ata_port_info *pinfo = &sil24_port_info[board_id];
+ struct ata_probe_ent *probe_ent = NULL;
+ struct sil24_host_priv *hpriv = NULL;
+ void *host_base = NULL, *port_base = NULL;
+ int i, rc;
+
+ if (!printed_version++)
+ printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto out_disable;
+
+ rc = -ENOMEM;
+ /* ioremap mmio registers */
+ host_base =
ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!host_base) + goto out_free; + port_base = ioremap(pci_resource_start(pdev, 2), + pci_resource_len(pdev, 2)); + if (!port_base) + goto out_free; + + /* allocate & init probe_ent and hpriv */ + probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); + if (!probe_ent) + goto out_free; + + hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL); + if (!hpriv) + goto out_free; + + memset(probe_ent, 0, sizeof(*probe_ent)); + probe_ent->dev = pci_dev_to_dev(pdev); + INIT_LIST_HEAD(&probe_ent->node); + + probe_ent->sht = pinfo->sht; + probe_ent->host_flags = pinfo->host_flags; + probe_ent->pio_mask = pinfo->pio_mask; + probe_ent->udma_mask = pinfo->udma_mask; + probe_ent->port_ops = pinfo->port_ops; + probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->host_flags); + + probe_ent->irq = pdev->irq; + probe_ent->irq_flags = SA_SHIRQ; + probe_ent->mmio_base = port_base; + probe_ent->private_data = hpriv; + + memset(hpriv, 0, sizeof(*hpriv)); + hpriv->host_base = host_base; + hpriv->port_base = port_base; + + /* + * Configure the device + */ + /* + * FIXME: This device is certainly 64-bit capable. We just + * don't know how to use it. After fixing 32bit activation in + * this function, enable 64bit masks here. + */ + rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (rc) { + printk(KERN_ERR DRV_NAME "(%s): 32-bit DMA enable failed\n", + pci_name(pdev)); + goto out_free; + } + rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + if (rc) { + printk(KERN_ERR DRV_NAME "(%s): 32-bit consistent DMA enable failed\n", + pci_name(pdev)); + goto out_free; + } + + /* GPIO off */ + writel(0, host_base + HOST_FLASH_CMD); + + /* Mask interrupts during initialization */ + writel(0, host_base + HOST_CTRL); + + for (i = 0; i < probe_ent->n_ports; i++) { + void *port = port_base + i * PORT_REGS_SIZE; + unsigned long portu = (unsigned long)port; + u32 tmp; + int cnt; + + probe_ent->port[i].cmd_addr = portu + PORT_PRB; + probe_ent->port[i].scr_addr = portu + PORT_SCONTROL; + + ata_std_ports(&probe_ent->port[i]); + + /* Initial PHY setting */ + writel(0x20c, port + PORT_PHY_CFG); + + /* Clear port RST */ + tmp = readl(port + PORT_CTRL_STAT); + if (tmp & PORT_CS_PORT_RST) { + writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR); + readl(port + PORT_CTRL_STAT); /* sync */ + for (cnt = 0; cnt < 10; cnt++) { + msleep(10); + tmp = readl(port + PORT_CTRL_STAT); + if (!(tmp & PORT_CS_PORT_RST)) + break; + } + if (tmp & PORT_CS_PORT_RST) + printk(KERN_ERR DRV_NAME + "(%s): failed to clear port RST\n", + pci_name(pdev)); + } + + /* Zero error counters. */ + writel(0x8000, port + PORT_DECODE_ERR_THRESH); + writel(0x8000, port + PORT_CRC_ERR_THRESH); + writel(0x8000, port + PORT_HSHK_ERR_THRESH); + writel(0x0000, port + PORT_DECODE_ERR_CNT); + writel(0x0000, port + PORT_CRC_ERR_CNT); + writel(0x0000, port + PORT_HSHK_ERR_CNT); + + /* FIXME: 32bit activation? 
*/ + writel(0, port + PORT_ACTIVATE_UPPER_ADDR); + writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_STAT); + + /* Configure interrupts */ + writel(0xffff, port + PORT_IRQ_ENABLE_CLR); + writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR | PORT_IRQ_SDB_FIS, + port + PORT_IRQ_ENABLE_SET); + + /* Clear interrupts */ + writel(0x0fff0fff, port + PORT_IRQ_STAT); + writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR); + + /* Clear port multiplier enable and resume bits */ + writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR); + + /* Reset itself */ + if (__sil24_reset_controller(port)) + printk(KERN_ERR DRV_NAME + "(%s): failed to reset controller\n", + pci_name(pdev)); + } + + /* Turn on interrupts */ + writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL); + + pci_set_master(pdev); + + /* FIXME: check ata_device_add return value */ + ata_device_add(probe_ent); + + kfree(probe_ent); + return 0; + + out_free: + if (host_base) + iounmap(host_base); + if (port_base) + iounmap(port_base); + kfree(probe_ent); + kfree(hpriv); + pci_release_regions(pdev); + out_disable: + pci_disable_device(pdev); + return rc; +} + +static int __init sil24_init(void) +{ + return pci_module_init(&sil24_pci_driver); +} + +static void __exit sil24_exit(void) +{ + pci_unregister_driver(&sil24_pci_driver); +} + +MODULE_AUTHOR("Tejun Heo"); +MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, sil24_pci_tbl); + +module_init(sil24_init); +module_exit(sil24_exit); diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c index b227e51d12f4..057f7b98b6c4 100644 --- a/drivers/scsi/sata_sis.c +++ b/drivers/scsi/sata_sis.c @@ -102,7 +102,7 @@ static Scsi_Host_Template sis_sht = { .ordered_flush = 1, }; -static struct ata_port_operations sis_ops = { +static const struct ata_port_operations sis_ops = { .port_disable = ata_port_disable, .tf_load = ata_tf_load, .tf_read = ata_tf_read, @@ -263,7 +263,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_regions; ppi = &sis_port_info; - probe_ent = ata_pci_init_native_mode(pdev, &ppi); + probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); if (!probe_ent) { rc = -ENOMEM; goto err_out_regions; diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c index d89d968bedac..e0f9570bc6dd 100644 --- a/drivers/scsi/sata_svw.c +++ b/drivers/scsi/sata_svw.c @@ -102,7 +102,7 @@ static void k2_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, } -static void k2_sata_tf_load(struct ata_port *ap, struct ata_taskfile *tf) +static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; @@ -297,7 +297,7 @@ static Scsi_Host_Template k2_sata_sht = { }; -static struct ata_port_operations k2_sata_ops = { +static const struct ata_port_operations k2_sata_ops = { .port_disable = ata_port_disable, .tf_load = k2_sata_tf_load, .tf_read = k2_sata_tf_read, diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c index 540a85191172..af08f4f650c1 100644 --- a/drivers/scsi/sata_sx4.c +++ b/drivers/scsi/sata_sx4.c @@ -137,7 +137,7 @@ struct pdc_port_priv { }; struct pdc_host_priv { - void *dimm_mmio; + void __iomem *dimm_mmio; unsigned int doing_hdma; unsigned int hdma_prod; @@ -157,8 +157,8 @@ static void pdc_20621_phy_reset (struct ata_port *ap); static int pdc_port_start(struct ata_port *ap); static void pdc_port_stop(struct ata_port *ap); static void 
pdc20621_qc_prep(struct ata_queued_cmd *qc); -static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf); -static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf); +static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); +static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); static void pdc20621_host_stop(struct ata_host_set *host_set); static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe); static int pdc20621_detect_dimm(struct ata_probe_ent *pe); @@ -196,7 +196,7 @@ static Scsi_Host_Template pdc_sata_sht = { .ordered_flush = 1, }; -static struct ata_port_operations pdc_20621_ops = { +static const struct ata_port_operations pdc_20621_ops = { .port_disable = ata_port_disable, .tf_load = pdc_tf_load_mmio, .tf_read = ata_tf_read, @@ -247,7 +247,7 @@ static void pdc20621_host_stop(struct ata_host_set *host_set) { struct pci_dev *pdev = to_pci_dev(host_set->dev); struct pdc_host_priv *hpriv = host_set->private_data; - void *dimm_mmio = hpriv->dimm_mmio; + void __iomem *dimm_mmio = hpriv->dimm_mmio; pci_iounmap(pdev, dimm_mmio); kfree(hpriv); @@ -669,8 +669,8 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc) readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */ writel(port_ofs + PDC_DIMM_ATA_PKT, - (void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); - readl((void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); + (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); + readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); VPRINTK("submitted ofs 0x%x (%u), seq %u\n", port_ofs + PDC_DIMM_ATA_PKT, port_ofs + PDC_DIMM_ATA_PKT, @@ -747,8 +747,8 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap, writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4)); readl(mmio + PDC_20621_SEQCTL + (seq * 4)); writel(port_ofs + PDC_DIMM_ATA_PKT, - (void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); - readl((void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); + (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); + readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); } /* step two - execute ATA command */ @@ -899,7 +899,7 @@ out: DPRINTK("EXIT\n"); } -static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf) +static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf) { WARN_ON (tf->protocol == ATA_PROT_DMA || tf->protocol == ATA_PROT_NODATA); @@ -907,7 +907,7 @@ static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf) } -static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf) +static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf) { WARN_ON (tf->protocol == ATA_PROT_DMA || tf->protocol == ATA_PROT_NODATA); @@ -1014,7 +1014,7 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource, idx++; dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? 
size : (long) (window_size - offset); - memcpy_toio((char *) (dimm_mmio + offset / 4), (char *) psource, dist); + memcpy_toio(dimm_mmio + offset / 4, psource, dist); writel(0x01, mmio + PDC_GENERAL_CTLR); readl(mmio + PDC_GENERAL_CTLR); @@ -1023,8 +1023,7 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource, for (; (long) size >= (long) window_size ;) { writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); readl(mmio + PDC_DIMM_WINDOW_CTLR); - memcpy_toio((char *) (dimm_mmio), (char *) psource, - window_size / 4); + memcpy_toio(dimm_mmio, psource, window_size / 4); writel(0x01, mmio + PDC_GENERAL_CTLR); readl(mmio + PDC_GENERAL_CTLR); psource += window_size; @@ -1035,7 +1034,7 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource, if (size) { writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); readl(mmio + PDC_DIMM_WINDOW_CTLR); - memcpy_toio((char *) (dimm_mmio), (char *) psource, size / 4); + memcpy_toio(dimm_mmio, psource, size / 4); writel(0x01, mmio + PDC_GENERAL_CTLR); readl(mmio + PDC_GENERAL_CTLR); } diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c index 4c9fb8b71be1..d68dc7d3422c 100644 --- a/drivers/scsi/sata_uli.c +++ b/drivers/scsi/sata_uli.c @@ -90,7 +90,7 @@ static Scsi_Host_Template uli_sht = { .ordered_flush = 1, }; -static struct ata_port_operations uli_ops = { +static const struct ata_port_operations uli_ops = { .port_disable = ata_port_disable, .tf_load = ata_tf_load, @@ -202,7 +202,7 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_regions; ppi = &uli_port_info; - probe_ent = ata_pci_init_native_mode(pdev, &ppi); + probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); if (!probe_ent) { rc = -ENOMEM; goto err_out_regions; diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c index 128b996b07b7..80e291a909a9 100644 --- a/drivers/scsi/sata_via.c +++ b/drivers/scsi/sata_via.c @@ -109,7 +109,7 @@ static Scsi_Host_Template svia_sht = { .ordered_flush = 1, }; -static struct ata_port_operations svia_sata_ops = { +static const struct ata_port_operations svia_sata_ops = { .port_disable = ata_port_disable, .tf_load = ata_tf_load, @@ -212,7 +212,7 @@ static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev) struct ata_probe_ent *probe_ent; struct ata_port_info *ppi = &svia_port_info; - probe_ent = ata_pci_init_native_mode(pdev, &ppi); + probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY); if (!probe_ent) return NULL; diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c index cf94e0158a8d..5af05fdf8544 100644 --- a/drivers/scsi/sata_vsc.c +++ b/drivers/scsi/sata_vsc.c @@ -86,7 +86,7 @@ static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) { if (sc_reg > SCR_CONTROL) return 0xffffffffU; - return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4)); + return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); } @@ -95,16 +95,16 @@ static void vsc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, { if (sc_reg > SCR_CONTROL) return; - writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4)); + writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4)); } static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl) { - unsigned long mask_addr; + void __iomem *mask_addr; u8 mask; - mask_addr = (unsigned long) ap->host_set->mmio_base + + mask_addr = ap->host_set->mmio_base + VSC_SATA_INT_MASK_OFFSET + ap->port_no; mask = 
readb(mask_addr); if (ctl & ATA_NIEN) @@ -115,7 +115,7 @@ static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl) } -static void vsc_sata_tf_load(struct ata_port *ap, struct ata_taskfile *tf) +static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; @@ -231,7 +231,7 @@ static Scsi_Host_Template vsc_sata_sht = { }; -static struct ata_port_operations vsc_sata_ops = { +static const struct ata_port_operations vsc_sata_ops = { .port_disable = ata_port_disable, .tf_load = vsc_sata_tf_load, .tf_read = vsc_sata_tf_read, @@ -283,7 +283,7 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d struct ata_probe_ent *probe_ent = NULL; unsigned long base; int pci_dev_busy = 0; - void *mmio_base; + void __iomem *mmio_base; int rc; if (!printed_version++) diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 1f0ebabf6d47..a5711d545d71 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -130,7 +130,7 @@ EXPORT_SYMBOL(scsi_device_types); * Returns: Pointer to request block. */ struct scsi_request *scsi_allocate_request(struct scsi_device *sdev, - int gfp_mask) + gfp_t gfp_mask) { const int offset = ALIGN(sizeof(struct scsi_request), 4); const int size = offset + sizeof(struct request); @@ -196,7 +196,7 @@ struct scsi_host_cmd_pool { unsigned int users; char *name; unsigned int slab_flags; - unsigned int gfp_mask; + gfp_t gfp_mask; }; static struct scsi_host_cmd_pool scsi_cmd_pool = { @@ -213,7 +213,7 @@ static struct scsi_host_cmd_pool scsi_cmd_dma_pool = { static DECLARE_MUTEX(host_cmd_pool_mutex); static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, - int gfp_mask) + gfp_t gfp_mask) { struct scsi_cmnd *cmd; @@ -245,7 +245,7 @@ static struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, * * Returns: The allocated scsi command structure. 
*/ -struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, int gfp_mask) +struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask) { struct scsi_cmnd *cmd; diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 64fc9e21f35b..e69477d1889b 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -185,6 +185,7 @@ static struct { {"PIONEER", "CD-ROM DRM-600", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"PIONEER", "CD-ROM DRM-602X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"PIONEER", "CD-ROM DRM-604X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, + {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN}, {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN}, {"SEAGATE", "ST34555N", "0930", BLIST_NOTQ}, /* Chokes on tagged INQUIRY */ diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index ad5342165079..52b348c36d56 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -1645,6 +1645,8 @@ int scsi_error_handler(void *data) set_current_state(TASK_INTERRUPTIBLE); } + __set_current_state(TASK_RUNNING); + SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler scsi_eh_%d" " exiting\n",shost->host_no)); diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index de7f98cc38fe..6a3f6aae8a97 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -205,7 +205,8 @@ int scsi_ioctl_send_command(struct scsi_device *sdev, unsigned int inlen, outlen, cmdlen; unsigned int needed, buf_needed; int timeout, retries, result; - int data_direction, gfp_mask = GFP_KERNEL; + int data_direction; + gfp_t gfp_mask = GFP_KERNEL; if (!sic) return -EINVAL; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index dc9c772bc874..3ff538809786 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -97,7 +97,6 @@ int scsi_insert_special_req(struct scsi_request *sreq, int at_head) } static void scsi_run_queue(struct request_queue *q); -static void scsi_release_buffers(struct scsi_cmnd *cmd); /* * Function: scsi_unprep_request() @@ -678,7 +677,7 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate, return NULL; } -static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, int gfp_mask) +static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask) { struct scsi_host_sg_pool *sgp; struct scatterlist *sgl; @@ -1040,8 +1039,10 @@ static int scsi_init_io(struct scsi_cmnd *cmd) * if sg table allocation fails, requeue request later. 
*/ sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC); - if (unlikely(!sgpnt)) + if (unlikely(!sgpnt)) { + scsi_unprep_request(req); return BLKPREP_DEFER; + } cmd->request_buffer = (char *) sgpnt; cmd->request_bufflen = req->nr_sectors << 9; @@ -1245,8 +1246,8 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req) */ ret = scsi_init_io(cmd); switch(ret) { + /* For BLKPREP_KILL/DEFER the cmd was released */ case BLKPREP_KILL: - /* BLKPREP_KILL return also releases the command */ goto kill; case BLKPREP_DEFER: goto defer; diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index fcf9f6cbb142..327c5d7e5bd2 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -587,6 +587,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, char *inq_result, if (sdev->scsi_level >= 2 || (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1)) sdev->scsi_level++; + sdev->sdev_target->scsi_level = sdev->scsi_level; return 0; } @@ -771,6 +772,15 @@ static int scsi_add_lun(struct scsi_device *sdev, char *inq_result, int *bflags) return SCSI_SCAN_LUN_PRESENT; } +static inline void scsi_destroy_sdev(struct scsi_device *sdev) +{ + if (sdev->host->hostt->slave_destroy) + sdev->host->hostt->slave_destroy(sdev); + transport_destroy_device(&sdev->sdev_gendev); + put_device(&sdev->sdev_gendev); +} + + /** * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it * @starget: pointer to target device structure @@ -803,9 +813,9 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget, * The rescan flag is used as an optimization, the first scan of a * host adapter calls into here with rescan == 0. */ - if (rescan) { - sdev = scsi_device_lookup_by_target(starget, lun); - if (sdev) { + sdev = scsi_device_lookup_by_target(starget, lun); + if (sdev) { + if (rescan || sdev->sdev_state != SDEV_CREATED) { SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: device exists on %s\n", sdev->sdev_gendev.bus_id)); @@ -820,9 +830,9 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget, sdev->model); return SCSI_SCAN_LUN_PRESENT; } - } - - sdev = scsi_alloc_sdev(starget, lun, hostdata); + scsi_device_put(sdev); + } else + sdev = scsi_alloc_sdev(starget, lun, hostdata); if (!sdev) goto out; @@ -877,12 +887,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget, res = SCSI_SCAN_NO_RESPONSE; } } - } else { - if (sdev->host->hostt->slave_destroy) - sdev->host->hostt->slave_destroy(sdev); - transport_destroy_device(&sdev->sdev_gendev); - put_device(&sdev->sdev_gendev); - } + } else + scsi_destroy_sdev(sdev); out: return res; } @@ -1054,7 +1060,7 @@ EXPORT_SYMBOL(int_to_scsilun); * 0: scan completed (or no memory, so further scanning is futile) * 1: no report lun scan, or not configured **/ -static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags, +static int scsi_report_lun_scan(struct scsi_target *starget, int bflags, int rescan) { char devname[64]; @@ -1067,7 +1073,8 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags, struct scsi_lun *lunp, *lun_data; u8 *data; struct scsi_sense_hdr sshdr; - struct scsi_target *starget = scsi_target(sdev); + struct scsi_device *sdev; + struct Scsi_Host *shost = dev_to_shost(&starget->dev); /* * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set. @@ -1075,15 +1082,23 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags, * support more than 8 LUNs. 
*/ if ((bflags & BLIST_NOREPORTLUN) || - sdev->scsi_level < SCSI_2 || - (sdev->scsi_level < SCSI_3 && - (!(bflags & BLIST_REPORTLUN2) || sdev->host->max_lun <= 8)) ) + starget->scsi_level < SCSI_2 || + (starget->scsi_level < SCSI_3 && + (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8)) ) return 1; if (bflags & BLIST_NOLUN) return 0; + if (!(sdev = scsi_device_lookup_by_target(starget, 0))) { + sdev = scsi_alloc_sdev(starget, 0, NULL); + if (!sdev) + return 0; + if (scsi_device_get(sdev)) + return 0; + } + sprintf(devname, "host %d channel %d id %d", - sdev->host->host_no, sdev->channel, sdev->id); + shost->host_no, sdev->channel, sdev->id); /* * Allocate enough to hold the header (the same size as one scsi_lun) @@ -1098,8 +1113,10 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags, length = (max_scsi_report_luns + 1) * sizeof(struct scsi_lun); lun_data = kmalloc(length, GFP_ATOMIC | (sdev->host->unchecked_isa_dma ? __GFP_DMA : 0)); - if (!lun_data) + if (!lun_data) { + printk(ALLOC_FAILURE_MSG, __FUNCTION__); goto out; + } scsi_cmd[0] = REPORT_LUNS; @@ -1201,10 +1218,6 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags, for (i = 0; i < sizeof(struct scsi_lun); i++) printk("%02x", data[i]); printk(" has a LUN larger than currently supported.\n"); - } else if (lun == 0) { - /* - * LUN 0 has already been scanned. - */ } else if (lun > sdev->host->max_lun) { printk(KERN_WARNING "scsi: %s lun%d has a LUN larger" " than allowed by the host adapter\n", @@ -1227,13 +1240,13 @@ static int scsi_report_lun_scan(struct scsi_device *sdev, int bflags, } kfree(lun_data); - return 0; - out: - /* - * We are out of memory, don't try scanning any further. - */ - printk(ALLOC_FAILURE_MSG, __FUNCTION__); + scsi_device_put(sdev); + if (sdev->sdev_state == SDEV_CREATED) + /* + * the sdev we used didn't appear in the report luns scan + */ + scsi_destroy_sdev(sdev); return 0; } @@ -1299,7 +1312,6 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel, struct Scsi_Host *shost = dev_to_shost(parent); int bflags = 0; int res; - struct scsi_device *sdev = NULL; struct scsi_target *starget; if (shost->this_id == id) @@ -1325,27 +1337,16 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel, * Scan LUN 0, if there is some response, scan further. Ideally, we * would not configure LUN 0 until all LUNs are scanned. */ - res = scsi_probe_and_add_lun(starget, 0, &bflags, &sdev, rescan, NULL); - if (res == SCSI_SCAN_LUN_PRESENT) { - if (scsi_report_lun_scan(sdev, bflags, rescan) != 0) + res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL); + if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) { + if (scsi_report_lun_scan(starget, bflags, rescan) != 0) /* * The REPORT LUN did not scan the target, * do a sequential scan. */ scsi_sequential_lun_scan(starget, bflags, - res, sdev->scsi_level, rescan); - } else if (res == SCSI_SCAN_TARGET_PRESENT) { - /* - * There's a target here, but lun 0 is offline so we - * can't use the report_lun scan. 
Fall back to a - * sequential lun scan with a bflags of SPARSELUN and - * a default scsi level of SCSI_2 - */ - scsi_sequential_lun_scan(starget, BLIST_SPARSELUN, - SCSI_SCAN_TARGET_PRESENT, SCSI_2, rescan); + res, starget->scsi_level, rescan); } - if (sdev) - scsi_device_put(sdev); out_reap: /* now determine if the target has any children at all @@ -1542,10 +1543,7 @@ void scsi_free_host_dev(struct scsi_device *sdev) { BUG_ON(sdev->id != sdev->host->this_id); - if (sdev->host->hostt->slave_destroy) - sdev->host->hostt->slave_destroy(sdev); - transport_destroy_device(&sdev->sdev_gendev); - put_device(&sdev->sdev_gendev); + scsi_destroy_sdev(sdev); } EXPORT_SYMBOL(scsi_free_host_dev); diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 2cab556b6e82..771e97ef136e 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -819,12 +819,15 @@ show_fc_private_host_tgtid_bind_type(struct class_device *cdev, char *buf) return snprintf(buf, FC_BINDTYPE_MAX_NAMELEN, "%s\n", name); } +#define get_list_head_entry(pos, head, member) \ + pos = list_entry((head)->next, typeof(*pos), member) + static ssize_t store_fc_private_host_tgtid_bind_type(struct class_device *cdev, const char *buf, size_t count) { struct Scsi_Host *shost = transport_class_to_shost(cdev); - struct fc_rport *rport, *next_rport; + struct fc_rport *rport; enum fc_tgtid_binding_type val; unsigned long flags; @@ -834,9 +837,13 @@ store_fc_private_host_tgtid_bind_type(struct class_device *cdev, /* if changing bind type, purge all unused consistent bindings */ if (val != fc_host_tgtid_bind_type(shost)) { spin_lock_irqsave(shost->host_lock, flags); - list_for_each_entry_safe(rport, next_rport, - &fc_host_rport_bindings(shost), peers) + while (!list_empty(&fc_host_rport_bindings(shost))) { + get_list_head_entry(rport, + &fc_host_rport_bindings(shost), peers); + spin_unlock_irqrestore(shost->host_lock, flags); fc_rport_terminate(rport); + spin_lock_irqsave(shost->host_lock, flags); + } spin_unlock_irqrestore(shost->host_lock, flags); } diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index ff724bbe6611..1d145d2f9a38 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -628,17 +628,16 @@ sas_rphy_delete(struct sas_rphy *rphy) struct Scsi_Host *shost = dev_to_shost(parent->dev.parent); struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); - transport_destroy_device(&rphy->dev); + scsi_remove_target(dev); - scsi_remove_target(&rphy->dev); + transport_remove_device(dev); + device_del(dev); + transport_destroy_device(dev); spin_lock(&sas_host->lock); list_del(&rphy->list); spin_unlock(&sas_host->lock); - transport_remove_device(dev); - device_del(dev); - transport_destroy_device(dev); put_device(&parent->dev); } EXPORT_SYMBOL(sas_rphy_delete); diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 4d09a6e4dd2e..fd56b7ec88b6 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -2644,7 +2644,7 @@ static char * sg_page_malloc(int rqSz, int lowDma, int *retSzp) { char *resp = NULL; - int page_mask; + gfp_t page_mask; int order, a_size; int resSz = rqSz; @@ -2849,8 +2849,7 @@ sg_proc_init(void) struct proc_dir_entry *pdep; struct sg_proc_leaf * leaf; - sg_proc_sgp = create_proc_entry(sg_proc_sg_dirname, - S_IFDIR | S_IRUGO | S_IXUGO, NULL); + sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL); if (!sg_proc_sgp) return 1; for (k = 0; k < num_leaves; ++k) { diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c 
index d001c046551b..927d700f0073 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -3577,7 +3577,8 @@ static long st_compat_ioctl(struct file *file, unsigned int cmd, unsigned long a static struct st_buffer * new_tape_buffer(int from_initialization, int need_dma, int max_sg) { - int i, priority, got = 0, segs = 0; + int i, got = 0, segs = 0; + gfp_t priority; struct st_buffer *tb; if (from_initialization) @@ -3610,7 +3611,8 @@ static struct st_buffer * /* Try to allocate enough space in the tape buffer */ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dma) { - int segs, nbr, max_segs, b_size, priority, order, got; + int segs, nbr, max_segs, b_size, order, got; + gfp_t priority; if (new_size <= STbuffer->buffer_size) return 1; diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c index 0e21f583690e..5c3c03932d6d 100644 --- a/drivers/serial/8250_pci.c +++ b/drivers/serial/8250_pci.c @@ -152,6 +152,7 @@ static int __devinit pci_hp_diva_init(struct pci_dev *dev) rc = 4; break; case PCI_DEVICE_ID_HP_DIVA_POWERBAR: + case PCI_DEVICE_ID_HP_DIVA_HURRICANE: rc = 1; break; } @@ -226,8 +227,10 @@ static int __devinit pci_plx9050_init(struct pci_dev *dev) } irq_config = 0x41; - if (dev->vendor == PCI_VENDOR_ID_PANACOM) + if (dev->vendor == PCI_VENDOR_ID_PANACOM || + dev->subsystem_vendor == PCI_SUBVENDOR_ID_EXSYS) { irq_config = 0x43; + } if ((dev->vendor == PCI_VENDOR_ID_PLX) && (dev->device == PCI_DEVICE_ID_PLX_ROMULUS)) { /* @@ -661,6 +664,15 @@ static struct pci_serial_quirk pci_serial_quirks[] = { /* * PLX */ + { + .vendor = PCI_VENDOR_ID_PLX, + .device = PCI_DEVICE_ID_PLX_9050, + .subvendor = PCI_SUBVENDOR_ID_EXSYS, + .subdevice = PCI_SUBDEVICE_ID_EXSYS_4055, + .init = pci_plx9050_init, + .setup = pci_default_setup, + .exit = __devexit_p(pci_plx9050_exit), + }, { .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_9050, @@ -927,6 +939,7 @@ enum pci_board_num_t { pbn_panacom, pbn_panacom2, pbn_panacom4, + pbn_exsys_4055, pbn_plx_romulus, pbn_oxsemi, pbn_intel_i960, @@ -1292,6 +1305,13 @@ static struct pciserial_board pci_boards[] __devinitdata = { .reg_shift = 7, }, + [pbn_exsys_4055] = { + .flags = FL_BASE2, + .num_ports = 4, + .base_baud = 115200, + .uart_offset = 8, + }, + /* I think this entry is broken - the first_offset looks wrong --rmk */ [pbn_plx_romulus] = { .flags = FL_BASE2, @@ -1853,6 +1873,10 @@ static struct pci_device_id serial_pci_tbl[] = { PCI_SUBVENDOR_ID_CHASE_PCIRAS, PCI_SUBDEVICE_ID_CHASE_PCIRAS8, 0, 0, pbn_b2_8_460800 }, + { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, + PCI_SUBVENDOR_ID_EXSYS, + PCI_SUBDEVICE_ID_EXSYS_4055, 0, 0, + pbn_exsys_4055 }, /* * Megawolf Romulus PCI Serial Card, from Mike Hudson * (Exoray@isys.ca) diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c index 6b321e82cafb..5d8660a42b77 100644 --- a/drivers/serial/8250_pnp.c +++ b/drivers/serial/8250_pnp.c @@ -272,8 +272,12 @@ static const struct pnp_device_id pnp_dev_table[] = { { "SUP1421", 0 }, /* SupraExpress 33.6 Data/Fax PnP modem */ { "SUP1590", 0 }, + /* SupraExpress 336i Sp ASVD */ + { "SUP1620", 0 }, /* SupraExpress 33.6 Data/Fax PnP modem */ { "SUP1760", 0 }, + /* SupraExpress 56i Sp Intl */ + { "SUP2171", 0 }, /* Phoebe Micro */ /* Phoebe Micro 33.6 Data Fax 1433VQH Plug & Play */ { "TEX0011", 0 }, diff --git a/drivers/serial/clps711x.c b/drivers/serial/clps711x.c index 78c1f36ad9b7..87ef368384fb 100644 --- a/drivers/serial/clps711x.c +++ b/drivers/serial/clps711x.c @@ -98,7 +98,7 @@ static irqreturn_t 
clps711xuart_int_rx(int irq, void *dev_id, struct pt_regs *re { struct uart_port *port = dev_id; struct tty_struct *tty = port->info->tty; - unsigned int status, ch, flg, ignored = 0; + unsigned int status, ch, flg; status = clps_readl(SYSFLG(port)); while (!(status & SYSFLG_URXFE)) { diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c index 4c985e6b3784..bdb4e454b8b0 100644 --- a/drivers/serial/imx.c +++ b/drivers/serial/imx.c @@ -73,7 +73,7 @@ struct imx_port { struct uart_port port; struct timer_list timer; unsigned int old_status; - int txirq,rxirq; + int txirq,rxirq,rtsirq; }; /* @@ -181,6 +181,22 @@ static void imx_start_tx(struct uart_port *port) imx_transmit_buffer(sport); } +static irqreturn_t imx_rtsint(int irq, void *dev_id, struct pt_regs *regs) +{ + struct imx_port *sport = (struct imx_port *)dev_id; + unsigned int val = USR1((u32)sport->port.membase)&USR1_RTSS; + unsigned long flags; + + spin_lock_irqsave(&sport->port.lock, flags); + + USR1((u32)sport->port.membase) = USR1_RTSD; + uart_handle_cts_change(&sport->port, !!val); + wake_up_interruptible(&sport->port.info->delta_msr_wait); + + spin_unlock_irqrestore(&sport->port.lock, flags); + return IRQ_HANDLED; +} + static irqreturn_t imx_txint(int irq, void *dev_id, struct pt_regs *regs) { struct imx_port *sport = (struct imx_port *)dev_id; @@ -383,18 +399,24 @@ static int imx_startup(struct uart_port *port) */ retval = request_irq(sport->rxirq, imx_rxint, 0, DRIVER_NAME, sport); - if (retval) goto error_out2; + if (retval) goto error_out1; retval = request_irq(sport->txirq, imx_txint, 0, - "imx-uart", sport); - if (retval) goto error_out1; + DRIVER_NAME, sport); + if (retval) goto error_out2; + + retval = request_irq(sport->rtsirq, imx_rtsint, 0, + DRIVER_NAME, sport); + if (retval) goto error_out3; + set_irq_type(sport->rtsirq, IRQT_BOTHEDGE); /* * Finally, clear and enable interrupts */ + USR1((u32)sport->port.membase) = USR1_RTSD; UCR1((u32)sport->port.membase) |= - (UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_UARTEN); + (UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN); UCR2((u32)sport->port.membase) |= (UCR2_RXEN | UCR2_TXEN); /* @@ -406,10 +428,11 @@ static int imx_startup(struct uart_port *port) return 0; -error_out1: - free_irq(sport->rxirq, sport); -error_out2: +error_out3: free_irq(sport->txirq, sport); +error_out2: + free_irq(sport->rxirq, sport); +error_out1: return retval; } @@ -425,6 +448,7 @@ static void imx_shutdown(struct uart_port *port) /* * Free the interrupts */ + free_irq(sport->rtsirq, sport); free_irq(sport->txirq, sport); free_irq(sport->rxirq, sport); @@ -433,7 +457,7 @@ static void imx_shutdown(struct uart_port *port) */ UCR1((u32)sport->port.membase) &= - ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_UARTEN); + ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN); } static void @@ -523,7 +547,7 @@ imx_set_termios(struct uart_port *port, struct termios *termios, * disable interrupts and drain transmitter */ old_ucr1 = UCR1((u32)sport->port.membase); - UCR1((u32)sport->port.membase) &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN); + UCR1((u32)sport->port.membase) &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN); while ( !(USR2((u32)sport->port.membase) & USR2_TXDC)) barrier(); @@ -644,6 +668,7 @@ static struct imx_port imx_ports[] = { { .txirq = UART1_MINT_TX, .rxirq = UART1_MINT_RX, + .rtsirq = UART1_MINT_RTS, .port = { .type = PORT_IMX, .iotype = SERIAL_IO_MEM, @@ -659,6 +684,7 @@ static struct imx_port imx_ports[] = { }, { .txirq = UART2_MINT_TX, .rxirq = UART2_MINT_RX, + .rtsirq = UART2_MINT_RTS, .port = { 
.type = PORT_IMX, .iotype = SERIAL_IO_MEM, @@ -738,7 +764,7 @@ imx_console_write(struct console *co, const char *s, unsigned int count) UCR1((u32)sport->port.membase) = (old_ucr1 | UCR1_UARTCLKEN | UCR1_UARTEN) - & ~(UCR1_TXMPTYEN | UCR1_RRDYEN); + & ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN); UCR2((u32)sport->port.membase) = old_ucr2 | UCR2_TXEN; /* @@ -860,7 +886,7 @@ imx_console_setup(struct console *co, char *options) return uart_set_options(&sport->port, co, baud, parity, bits, flow); } -extern struct uart_driver imx_reg; +static struct uart_driver imx_reg; static struct console imx_console = { .name = "ttySMX", .write = imx_console_write, diff --git a/drivers/serial/ioc4_serial.c b/drivers/serial/ioc4_serial.c index 0c5c96a582b3..f88fdd480685 100644 --- a/drivers/serial/ioc4_serial.c +++ b/drivers/serial/ioc4_serial.c @@ -973,18 +973,6 @@ static irqreturn_t ioc4_intr(int irq, void *arg, struct pt_regs *regs) this_ir &= ~this_mir; } } - if (this_ir) { - printk(KERN_ERR - "unknown IOC4 %s interrupt 0x%x, sio_ir = 0x%x," - " sio_ies = 0x%x, other_ir = 0x%x :" - "other_ies = 0x%x\n", - (intr_type == IOC4_SIO_INTR_TYPE) ? "sio" : - "other", this_ir, - readl(&soft->is_ioc4_misc_addr->sio_ir.raw), - readl(&soft->is_ioc4_misc_addr->sio_ies.raw), - readl(&soft->is_ioc4_misc_addr->other_ir.raw), - readl(&soft->is_ioc4_misc_addr->other_ies.raw)); - } } #ifdef DEBUG_INTERRUPTS { diff --git a/drivers/serial/pxa.c b/drivers/serial/pxa.c index 672b359b07ce..90c2a86c421b 100644 --- a/drivers/serial/pxa.c +++ b/drivers/serial/pxa.c @@ -499,7 +499,7 @@ serial_pxa_set_termios(struct uart_port *port, struct termios *termios, /* * Update the per-port timeout. */ - uart_update_timeout(port, termios->c_cflag, quot); + uart_update_timeout(port, termios->c_cflag, baud); up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR; if (termios->c_iflag & INPCK) diff --git a/drivers/serial/s3c2410.c b/drivers/serial/s3c2410.c index c361c6fb0809..52692aa345ec 100644 --- a/drivers/serial/s3c2410.c +++ b/drivers/serial/s3c2410.c @@ -82,8 +82,6 @@ #include #include -#include - /* structures */ struct s3c24xx_uart_info { @@ -753,8 +751,8 @@ static void s3c24xx_serial_set_termios(struct uart_port *port, { struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port); struct s3c24xx_uart_port *ourport = to_ourport(port); - struct s3c24xx_uart_clksrc *clksrc; - struct clk *clk; + struct s3c24xx_uart_clksrc *clksrc = NULL; + struct clk *clk = NULL; unsigned long flags; unsigned int baud, quot; unsigned int ulcon; @@ -1094,8 +1092,8 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, static int probe_index = 0; -int s3c24xx_serial_probe(struct device *_dev, - struct s3c24xx_uart_info *info) +static int s3c24xx_serial_probe(struct device *_dev, + struct s3c24xx_uart_info *info) { struct s3c24xx_uart_port *ourport; struct platform_device *dev = to_platform_device(_dev); @@ -1122,7 +1120,7 @@ int s3c24xx_serial_probe(struct device *_dev, return ret; } -int s3c24xx_serial_remove(struct device *_dev) +static int s3c24xx_serial_remove(struct device *_dev) { struct uart_port *port = s3c24xx_dev_to_port(_dev); @@ -1136,7 +1134,8 @@ int s3c24xx_serial_remove(struct device *_dev) #ifdef CONFIG_PM -int s3c24xx_serial_suspend(struct device *dev, pm_message_t state, u32 level) +static int s3c24xx_serial_suspend(struct device *dev, pm_message_t state, + u32 level) { struct uart_port *port = s3c24xx_dev_to_port(dev); @@ -1146,7 +1145,7 @@ int s3c24xx_serial_suspend(struct device *dev, pm_message_t state, u32 
level) return 0; } -int s3c24xx_serial_resume(struct device *dev, u32 level) +static int s3c24xx_serial_resume(struct device *dev, u32 level) { struct uart_port *port = s3c24xx_dev_to_port(dev); struct s3c24xx_uart_port *ourport = to_ourport(port); @@ -1167,8 +1166,8 @@ int s3c24xx_serial_resume(struct device *dev, u32 level) #define s3c24xx_serial_resume NULL #endif -int s3c24xx_serial_init(struct device_driver *drv, - struct s3c24xx_uart_info *info) +static int s3c24xx_serial_init(struct device_driver *drv, + struct s3c24xx_uart_info *info) { dbg("s3c24xx_serial_init(%p,%p)\n", drv, info); return driver_register(drv); @@ -1237,6 +1236,7 @@ static int s3c2400_serial_probe(struct device *dev) static struct device_driver s3c2400_serial_drv = { .name = "s3c2400-uart", + .owner = THIS_MODULE, .bus = &platform_bus_type, .probe = s3c2400_serial_probe, .remove = s3c24xx_serial_remove, @@ -1340,6 +1340,7 @@ static int s3c2410_serial_probe(struct device *dev) static struct device_driver s3c2410_serial_drv = { .name = "s3c2410-uart", + .owner = THIS_MODULE, .bus = &platform_bus_type, .probe = s3c2410_serial_probe, .remove = s3c24xx_serial_remove, @@ -1501,6 +1502,7 @@ static int s3c2440_serial_probe(struct device *dev) static struct device_driver s3c2440_serial_drv = { .name = "s3c2440-uart", + .owner = THIS_MODULE, .bus = &platform_bus_type, .probe = s3c2440_serial_probe, .remove = s3c24xx_serial_remove, diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c index 1ae0b381c162..2c7d3ef76e8e 100644 --- a/drivers/serial/serial_cs.c +++ b/drivers/serial/serial_cs.c @@ -859,6 +859,7 @@ static struct pcmcia_device_id serial_ids[] = { PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "DP83903.cis"), PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "3CXEM556.cis"), PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "3CXEM556.cis"), + PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0x0710, "SW_7xx_SER.cis"), /* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */ PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "MT5634ZLX.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "COMpad4.cis"), PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "COMpad2.cis"), diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index 512266307866..430754ebac8a 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c @@ -967,7 +967,7 @@ static int sci_startup(struct uart_port *port) #endif sci_request_irq(s); - sci_start_tx(port, 1); + sci_start_tx(port); sci_start_rx(port, 1); return 0; diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c index e971156daa60..ba9381fd3f2d 100644 --- a/drivers/serial/sunsab.c +++ b/drivers/serial/sunsab.c @@ -274,7 +274,6 @@ static void transmit_chars(struct uart_sunsab_port *up, if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { up->interrupt_mask1 |= SAB82532_IMR1_XPR; writeb(up->interrupt_mask1, &up->regs->w.imr1); - uart_write_wakeup(&up->port); return; } diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c index 5959e6755a81..656c0e8d160e 100644 --- a/drivers/serial/sunsu.c +++ b/drivers/serial/sunsu.c @@ -518,11 +518,7 @@ static void sunsu_change_mouse_baud(struct uart_sunsu_port *up) quot = up->port.uartclk / (16 * new_baud); - spin_unlock(&up->port.lock); - sunsu_change_speed(&up->port, up->cflag, 0, quot); - - spin_lock(&up->port.lock); } static void receive_kbd_ms_chars(struct uart_sunsu_port *up, struct 
pt_regs *regs, int is_break) diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c index d75445738c88..7653d6cf05af 100644 --- a/drivers/serial/sunzilog.c +++ b/drivers/serial/sunzilog.c @@ -517,10 +517,9 @@ static void sunzilog_transmit_chars(struct uart_sunzilog_port *up, if (up->port.info == NULL) goto ack_tx_int; xmit = &up->port.info->xmit; - if (uart_circ_empty(xmit)) { - uart_write_wakeup(&up->port); + if (uart_circ_empty(xmit)) goto ack_tx_int; - } + if (uart_tx_stopped(&up->port)) goto ack_tx_int; diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c index fc15b4acc8af..57e800ac3cee 100644 --- a/drivers/usb/core/buffer.c +++ b/drivers/usb/core/buffer.c @@ -106,7 +106,7 @@ void hcd_buffer_destroy (struct usb_hcd *hcd) void *hcd_buffer_alloc ( struct usb_bus *bus, size_t size, - unsigned mem_flags, + gfp_t mem_flags, dma_addr_t *dma ) { diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index b4265aa7d45e..487ff672b104 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -30,6 +30,8 @@ * Revision history * 22.12.1999 0.1 Initial release (split from proc_usb.c) * 04.01.2000 0.2 Turned into its own filesystem + * 30.09.2005 0.3 Fix user-triggerable oops in async URB delivery + * (CAN-2005-3055) */ /*****************************************************************************/ @@ -58,7 +60,8 @@ static struct class *usb_device_class; struct async { struct list_head asynclist; struct dev_state *ps; - struct task_struct *task; + pid_t pid; + uid_t uid, euid; unsigned int signr; unsigned int ifnum; void __user *userbuffer; @@ -290,7 +293,8 @@ static void async_completed(struct urb *urb, struct pt_regs *regs) sinfo.si_errno = as->urb->status; sinfo.si_code = SI_ASYNCIO; sinfo.si_addr = as->userurb; - send_sig_info(as->signr, &sinfo, as->task); + kill_proc_info_as_uid(as->signr, &sinfo, as->pid, as->uid, + as->euid); } wake_up(&ps->wait); } @@ -526,7 +530,9 @@ static int usbdev_open(struct inode *inode, struct file *file) INIT_LIST_HEAD(&ps->async_completed); init_waitqueue_head(&ps->wait); ps->discsignr = 0; - ps->disctask = current; + ps->disc_pid = current->pid; + ps->disc_uid = current->uid; + ps->disc_euid = current->euid; ps->disccontext = NULL; ps->ifclaimed = 0; wmb(); @@ -988,7 +994,9 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, as->userbuffer = NULL; as->signr = uurb->signr; as->ifnum = ifnum; - as->task = current; + as->pid = current->pid; + as->uid = current->uid; + as->euid = current->euid; if (!(uurb->endpoint & USB_DIR_IN)) { if (copy_from_user(as->urb->transfer_buffer, uurb->buffer, as->urb->transfer_buffer_length)) { free_async(as); diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index cbb451d227d2..6385d1a99b60 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -242,7 +242,6 @@ int usb_hcd_pci_suspend (struct pci_dev *dev, pm_message_t message) case HC_STATE_SUSPENDED: /* no DMA or IRQs except when HC is active */ if (dev->current_state == PCI_D0) { - free_irq (hcd->irq, hcd); pci_save_state (dev); pci_disable_device (dev); } @@ -374,14 +373,6 @@ int usb_hcd_pci_resume (struct pci_dev *dev) hcd->state = HC_STATE_RESUMING; hcd->saw_irq = 0; - retval = request_irq (dev->irq, usb_hcd_irq, SA_SHIRQ, - hcd->irq_descr, hcd); - if (retval < 0) { - dev_err (hcd->self.controller, - "can't restore IRQ after resume!\n"); - usb_hc_died (hcd); - return retval; - } retval = hcd->driver->resume (hcd); if (!HC_IS_RUNNING (hcd->state)) { diff --git 
a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 1017a97a418b..ff19d64041b5 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1112,7 +1112,7 @@ static void urb_unlink (struct urb *urb) * expects usb_submit_urb() to have sanity checked and conditioned all * inputs in the urb */ -static int hcd_submit_urb (struct urb *urb, unsigned mem_flags) +static int hcd_submit_urb (struct urb *urb, gfp_t mem_flags) { int status; struct usb_hcd *hcd = urb->dev->bus->hcpriv; diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h index ac451fa7e4d2..1f1ed6211af8 100644 --- a/drivers/usb/core/hcd.h +++ b/drivers/usb/core/hcd.h @@ -142,12 +142,12 @@ struct hcd_timeout { /* timeouts we allocate */ struct usb_operations { int (*get_frame_number) (struct usb_device *usb_dev); - int (*submit_urb) (struct urb *urb, unsigned mem_flags); + int (*submit_urb) (struct urb *urb, gfp_t mem_flags); int (*unlink_urb) (struct urb *urb, int status); /* allocate dma-consistent buffer for URB_DMA_NOMAPPING */ void *(*buffer_alloc)(struct usb_bus *bus, size_t size, - unsigned mem_flags, + gfp_t mem_flags, dma_addr_t *dma); void (*buffer_free)(struct usb_bus *bus, size_t size, void *addr, dma_addr_t dma); @@ -200,7 +200,7 @@ struct hc_driver { int (*urb_enqueue) (struct usb_hcd *hcd, struct usb_host_endpoint *ep, struct urb *urb, - unsigned mem_flags); + gfp_t mem_flags); int (*urb_dequeue) (struct usb_hcd *hcd, struct urb *urb); /* hw synch, freeing endpoint resources that urb_dequeue can't */ @@ -247,7 +247,7 @@ int hcd_buffer_create (struct usb_hcd *hcd); void hcd_buffer_destroy (struct usb_hcd *hcd); void *hcd_buffer_alloc (struct usb_bus *bus, size_t size, - unsigned mem_flags, dma_addr_t *dma); + gfp_t mem_flags, dma_addr_t *dma); void hcd_buffer_free (struct usb_bus *bus, size_t size, void *addr, dma_addr_t dma); diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c index 640f41e47029..d07bba01995b 100644 --- a/drivers/usb/core/inode.c +++ b/drivers/usb/core/inode.c @@ -713,7 +713,7 @@ void usbfs_remove_device(struct usb_device *dev) sinfo.si_errno = EPIPE; sinfo.si_code = SI_ASYNCIO; sinfo.si_addr = ds->disccontext; - send_sig_info(ds->discsignr, &sinfo, ds->disctask); + kill_proc_info_as_uid(ds->discsignr, &sinfo, ds->disc_pid, ds->disc_uid, ds->disc_euid); } } usbfs_update_special(); diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index c47c8052b486..f9a81e84dbdf 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -321,7 +321,7 @@ int usb_sg_init ( struct scatterlist *sg, int nents, size_t length, - unsigned mem_flags + gfp_t mem_flags ) { int i; @@ -987,7 +987,7 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0) /* remove this interface if it has been registered */ interface = dev->actconfig->interface[i]; - if (!klist_node_attached(&interface->dev.knode_bus)) + if (!device_is_registered(&interface->dev)) continue; dev_dbg (&dev->dev, "unregistering interface %s\n", interface->dev.bus_id); diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index c846fefb7386..b32898e0a27d 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -60,7 +60,7 @@ void usb_init_urb(struct urb *urb) * * The driver must call usb_free_urb() when it is finished with the urb. 
*/ -struct urb *usb_alloc_urb(int iso_packets, unsigned mem_flags) +struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags) { struct urb *urb; @@ -224,7 +224,7 @@ struct urb * usb_get_urb(struct urb *urb) * GFP_NOIO, unless b) or c) apply * */ -int usb_submit_urb(struct urb *urb, unsigned mem_flags) +int usb_submit_urb(struct urb *urb, gfp_t mem_flags) { int pipe, temp, max; struct usb_device *dev; diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 087af73a59dd..4c57f3f649ed 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -303,7 +303,7 @@ int usb_driver_claim_interface(struct usb_driver *driver, /* if interface was already added, bind now; else let * the future device_add() bind it, bypassing probe() */ - if (klist_node_attached(&dev->knode_bus)) + if (device_is_registered(dev)) device_bind_driver(dev); return 0; @@ -336,8 +336,8 @@ void usb_driver_release_interface(struct usb_driver *driver, if (iface->condition != USB_INTERFACE_BOUND) return; - /* release only after device_add() */ - if (klist_node_attached(&dev->knode_bus)) { + /* don't release if the interface hasn't been added yet */ + if (device_is_registered(dev)) { iface->condition = USB_INTERFACE_UNBINDING; device_release_driver(dev); } @@ -1147,7 +1147,7 @@ int __usb_get_extra_descriptor(char *buffer, unsigned size, void *usb_buffer_alloc ( struct usb_device *dev, size_t size, - unsigned mem_flags, + gfp_t mem_flags, dma_addr_t *dma ) { diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index 83d48c8133af..e6504f3370ad 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h @@ -52,7 +52,8 @@ struct dev_state { struct list_head async_completed; wait_queue_head_t wait; /* wake up if a request completed */ unsigned int discsignr; - struct task_struct *disctask; + pid_t disc_pid; + uid_t disc_uid, disc_euid; void __user *disccontext; unsigned long ifclaimed; }; diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c index 583db7c38cf1..8d9d8ee89554 100644 --- a/drivers/usb/gadget/dummy_hcd.c +++ b/drivers/usb/gadget/dummy_hcd.c @@ -470,7 +470,7 @@ static int dummy_disable (struct usb_ep *_ep) } static struct usb_request * -dummy_alloc_request (struct usb_ep *_ep, unsigned mem_flags) +dummy_alloc_request (struct usb_ep *_ep, gfp_t mem_flags) { struct dummy_ep *ep; struct dummy_request *req; @@ -507,7 +507,7 @@ dummy_alloc_buffer ( struct usb_ep *_ep, unsigned bytes, dma_addr_t *dma, - unsigned mem_flags + gfp_t mem_flags ) { char *retval; struct dummy_ep *ep; @@ -541,7 +541,7 @@ fifo_complete (struct usb_ep *ep, struct usb_request *req) static int dummy_queue (struct usb_ep *_ep, struct usb_request *_req, - unsigned mem_flags) + gfp_t mem_flags) { struct dummy_ep *ep; struct dummy_request *req; @@ -999,7 +999,7 @@ static int dummy_urb_enqueue ( struct usb_hcd *hcd, struct usb_host_endpoint *ep, struct urb *urb, - unsigned mem_flags + gfp_t mem_flags ) { struct dummy *dum; struct urbp *urbp; diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c index 49459e33e952..f1024e804d5c 100644 --- a/drivers/usb/gadget/ether.c +++ b/drivers/usb/gadget/ether.c @@ -945,11 +945,11 @@ config_buf (enum usb_device_speed speed, /*-------------------------------------------------------------------------*/ -static void eth_start (struct eth_dev *dev, unsigned gfp_flags); -static int alloc_requests (struct eth_dev *dev, unsigned n, unsigned gfp_flags); +static void eth_start (struct eth_dev *dev, gfp_t gfp_flags); +static int alloc_requests (struct eth_dev *dev, 
unsigned n, gfp_t gfp_flags); static int -set_ether_config (struct eth_dev *dev, unsigned gfp_flags) +set_ether_config (struct eth_dev *dev, gfp_t gfp_flags) { int result = 0; struct usb_gadget *gadget = dev->gadget; @@ -1081,7 +1081,7 @@ static void eth_reset_config (struct eth_dev *dev) * that returns config descriptors, and altsetting code. */ static int -eth_set_config (struct eth_dev *dev, unsigned number, unsigned gfp_flags) +eth_set_config (struct eth_dev *dev, unsigned number, gfp_t gfp_flags) { int result = 0; struct usb_gadget *gadget = dev->gadget; @@ -1598,7 +1598,7 @@ static void defer_kevent (struct eth_dev *dev, int flag) static void rx_complete (struct usb_ep *ep, struct usb_request *req); static int -rx_submit (struct eth_dev *dev, struct usb_request *req, unsigned gfp_flags) +rx_submit (struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags) { struct sk_buff *skb; int retval = -ENOMEM; @@ -1724,7 +1724,7 @@ clean: } static int prealloc (struct list_head *list, struct usb_ep *ep, - unsigned n, unsigned gfp_flags) + unsigned n, gfp_t gfp_flags) { unsigned i; struct usb_request *req; @@ -1763,7 +1763,7 @@ extra: return 0; } -static int alloc_requests (struct eth_dev *dev, unsigned n, unsigned gfp_flags) +static int alloc_requests (struct eth_dev *dev, unsigned n, gfp_t gfp_flags) { int status; @@ -1779,7 +1779,7 @@ fail: return status; } -static void rx_fill (struct eth_dev *dev, unsigned gfp_flags) +static void rx_fill (struct eth_dev *dev, gfp_t gfp_flags) { struct usb_request *req; unsigned long flags; @@ -1962,7 +1962,7 @@ drop: * normally just one notification will be queued. */ -static struct usb_request *eth_req_alloc (struct usb_ep *, unsigned, unsigned); +static struct usb_request *eth_req_alloc (struct usb_ep *, unsigned, gfp_t); static void eth_req_free (struct usb_ep *ep, struct usb_request *req); static void @@ -2024,7 +2024,7 @@ static int rndis_control_ack (struct net_device *net) #endif /* RNDIS */ -static void eth_start (struct eth_dev *dev, unsigned gfp_flags) +static void eth_start (struct eth_dev *dev, gfp_t gfp_flags) { DEBUG (dev, "%s\n", __FUNCTION__); @@ -2092,7 +2092,7 @@ static int eth_stop (struct net_device *net) /*-------------------------------------------------------------------------*/ static struct usb_request * -eth_req_alloc (struct usb_ep *ep, unsigned size, unsigned gfp_flags) +eth_req_alloc (struct usb_ep *ep, unsigned size, gfp_t gfp_flags) { struct usb_request *req; diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c index eaab26f4ed37..b0f3cd63e3b9 100644 --- a/drivers/usb/gadget/goku_udc.c +++ b/drivers/usb/gadget/goku_udc.c @@ -269,7 +269,7 @@ static int goku_ep_disable(struct usb_ep *_ep) /*-------------------------------------------------------------------------*/ static struct usb_request * -goku_alloc_request(struct usb_ep *_ep, unsigned gfp_flags) +goku_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) { struct goku_request *req; @@ -327,7 +327,7 @@ goku_free_request(struct usb_ep *_ep, struct usb_request *_req) */ static void * goku_alloc_buffer(struct usb_ep *_ep, unsigned bytes, - dma_addr_t *dma, unsigned gfp_flags) + dma_addr_t *dma, gfp_t gfp_flags) { void *retval; struct goku_ep *ep; @@ -789,7 +789,7 @@ finished: /*-------------------------------------------------------------------------*/ static int -goku_queue(struct usb_ep *_ep, struct usb_request *_req, unsigned gfp_flags) +goku_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) { struct goku_request *req; struct 
goku_ep *ep; diff --git a/drivers/usb/gadget/lh7a40x_udc.c b/drivers/usb/gadget/lh7a40x_udc.c index 4842577789c9..012d1e5f1524 100644 --- a/drivers/usb/gadget/lh7a40x_udc.c +++ b/drivers/usb/gadget/lh7a40x_udc.c @@ -71,13 +71,13 @@ static char *state_names[] = { static int lh7a40x_ep_enable(struct usb_ep *ep, const struct usb_endpoint_descriptor *); static int lh7a40x_ep_disable(struct usb_ep *ep); -static struct usb_request *lh7a40x_alloc_request(struct usb_ep *ep, int); +static struct usb_request *lh7a40x_alloc_request(struct usb_ep *ep, gfp_t); static void lh7a40x_free_request(struct usb_ep *ep, struct usb_request *); static void *lh7a40x_alloc_buffer(struct usb_ep *ep, unsigned, dma_addr_t *, - int); + gfp_t); static void lh7a40x_free_buffer(struct usb_ep *ep, void *, dma_addr_t, unsigned); -static int lh7a40x_queue(struct usb_ep *ep, struct usb_request *, int); +static int lh7a40x_queue(struct usb_ep *ep, struct usb_request *, gfp_t); static int lh7a40x_dequeue(struct usb_ep *ep, struct usb_request *); static int lh7a40x_set_halt(struct usb_ep *ep, int); static int lh7a40x_fifo_status(struct usb_ep *ep); @@ -1106,7 +1106,7 @@ static int lh7a40x_ep_disable(struct usb_ep *_ep) } static struct usb_request *lh7a40x_alloc_request(struct usb_ep *ep, - unsigned gfp_flags) + gfp_t gfp_flags) { struct lh7a40x_request *req; @@ -1134,7 +1134,7 @@ static void lh7a40x_free_request(struct usb_ep *ep, struct usb_request *_req) } static void *lh7a40x_alloc_buffer(struct usb_ep *ep, unsigned bytes, - dma_addr_t * dma, unsigned gfp_flags) + dma_addr_t * dma, gfp_t gfp_flags) { char *retval; @@ -1158,7 +1158,7 @@ static void lh7a40x_free_buffer(struct usb_ep *ep, void *buf, dma_addr_t dma, * NOTE: Sets INDEX register */ static int lh7a40x_queue(struct usb_ep *_ep, struct usb_request *_req, - unsigned gfp_flags) + gfp_t gfp_flags) { struct lh7a40x_request *req; struct lh7a40x_ep *ep; diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c index 477fab2e74d1..c32e1f7476da 100644 --- a/drivers/usb/gadget/net2280.c +++ b/drivers/usb/gadget/net2280.c @@ -376,7 +376,7 @@ static int net2280_disable (struct usb_ep *_ep) /*-------------------------------------------------------------------------*/ static struct usb_request * -net2280_alloc_request (struct usb_ep *_ep, unsigned gfp_flags) +net2280_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags) { struct net2280_ep *ep; struct net2280_request *req; @@ -463,7 +463,7 @@ net2280_alloc_buffer ( struct usb_ep *_ep, unsigned bytes, dma_addr_t *dma, - unsigned gfp_flags + gfp_t gfp_flags ) { void *retval; @@ -897,7 +897,7 @@ done (struct net2280_ep *ep, struct net2280_request *req, int status) /*-------------------------------------------------------------------------*/ static int -net2280_queue (struct usb_ep *_ep, struct usb_request *_req, unsigned gfp_flags) +net2280_queue (struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) { struct net2280_request *req; struct net2280_ep *ep; diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c index ff5533e69560..287c5900fb13 100644 --- a/drivers/usb/gadget/omap_udc.c +++ b/drivers/usb/gadget/omap_udc.c @@ -269,7 +269,7 @@ static int omap_ep_disable(struct usb_ep *_ep) /*-------------------------------------------------------------------------*/ static struct usb_request * -omap_alloc_request(struct usb_ep *ep, unsigned gfp_flags) +omap_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) { struct omap_req *req; @@ -298,7 +298,7 @@ omap_alloc_buffer( struct usb_ep *_ep, unsigned 
bytes, dma_addr_t *dma, - unsigned gfp_flags + gfp_t gfp_flags ) { void *retval; @@ -937,7 +937,7 @@ static void dma_channel_release(struct omap_ep *ep) /*-------------------------------------------------------------------------*/ static int -omap_ep_queue(struct usb_ep *_ep, struct usb_request *_req, unsigned gfp_flags) +omap_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) { struct omap_ep *ep = container_of(_ep, struct omap_ep, ep); struct omap_req *req = container_of(_req, struct omap_req, req); diff --git a/drivers/usb/gadget/pxa2xx_udc.c b/drivers/usb/gadget/pxa2xx_udc.c index 1507738337c4..6e545393cfff 100644 --- a/drivers/usb/gadget/pxa2xx_udc.c +++ b/drivers/usb/gadget/pxa2xx_udc.c @@ -332,7 +332,7 @@ static int pxa2xx_ep_disable (struct usb_ep *_ep) * pxa2xx_ep_alloc_request - allocate a request data structure */ static struct usb_request * -pxa2xx_ep_alloc_request (struct usb_ep *_ep, unsigned gfp_flags) +pxa2xx_ep_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags) { struct pxa2xx_request *req; @@ -367,7 +367,7 @@ pxa2xx_ep_free_request (struct usb_ep *_ep, struct usb_request *_req) */ static void * pxa2xx_ep_alloc_buffer(struct usb_ep *_ep, unsigned bytes, - dma_addr_t *dma, unsigned gfp_flags) + dma_addr_t *dma, gfp_t gfp_flags) { char *retval; @@ -422,7 +422,7 @@ static inline void ep0_idle (struct pxa2xx_udc *dev) } static int -write_packet(volatile u32 *uddr, struct pxa2xx_request *req, unsigned max) +write_packet(volatile unsigned long *uddr, struct pxa2xx_request *req, unsigned max) { u8 *buf; unsigned length, count; @@ -874,7 +874,7 @@ done: /*-------------------------------------------------------------------------*/ static int -pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, unsigned gfp_flags) +pxa2xx_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) { struct pxa2xx_request *req; struct pxa2xx_ep *ep; @@ -2602,7 +2602,7 @@ static int __exit pxa2xx_udc_remove(struct device *_dev) * VBUS IRQs should probably be ignored so that the PXA device just acts * "dead" to USB hosts until system resume. 
*/ -static int pxa2xx_udc_suspend(struct device *dev, u32 state, u32 level) +static int pxa2xx_udc_suspend(struct device *dev, pm_message_t state, u32 level) { struct pxa2xx_udc *udc = dev_get_drvdata(dev); diff --git a/drivers/usb/gadget/pxa2xx_udc.h b/drivers/usb/gadget/pxa2xx_udc.h index d0bc396a85d5..a58f3e6e71f1 100644 --- a/drivers/usb/gadget/pxa2xx_udc.h +++ b/drivers/usb/gadget/pxa2xx_udc.h @@ -69,11 +69,11 @@ struct pxa2xx_ep { * UDDR = UDC Endpoint Data Register (the fifo) * DRCM = DMA Request Channel Map */ - volatile u32 *reg_udccs; - volatile u32 *reg_ubcr; - volatile u32 *reg_uddr; + volatile unsigned long *reg_udccs; + volatile unsigned long *reg_ubcr; + volatile unsigned long *reg_uddr; #ifdef USE_DMA - volatile u32 *reg_drcmr; + volatile unsigned long *reg_drcmr; #define drcmr(n) .reg_drcmr = & DRCMR ## n , #else #define drcmr(n) diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c index c925d9222f53..b35ac6d334f8 100644 --- a/drivers/usb/gadget/serial.c +++ b/drivers/usb/gadget/serial.c @@ -300,18 +300,18 @@ static int gs_build_config_buf(u8 *buf, enum usb_device_speed speed, u8 type, unsigned int index, int is_otg); static struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned int len, - unsigned kmalloc_flags); + gfp_t kmalloc_flags); static void gs_free_req(struct usb_ep *ep, struct usb_request *req); static struct gs_req_entry *gs_alloc_req_entry(struct usb_ep *ep, unsigned len, - unsigned kmalloc_flags); + gfp_t kmalloc_flags); static void gs_free_req_entry(struct usb_ep *ep, struct gs_req_entry *req); -static int gs_alloc_ports(struct gs_dev *dev, unsigned kmalloc_flags); +static int gs_alloc_ports(struct gs_dev *dev, gfp_t kmalloc_flags); static void gs_free_ports(struct gs_dev *dev); /* circular buffer */ -static struct gs_buf *gs_buf_alloc(unsigned int size, unsigned kmalloc_flags); +static struct gs_buf *gs_buf_alloc(unsigned int size, gfp_t kmalloc_flags); static void gs_buf_free(struct gs_buf *gb); static void gs_buf_clear(struct gs_buf *gb); static unsigned int gs_buf_data_avail(struct gs_buf *gb); @@ -2091,7 +2091,7 @@ static int gs_build_config_buf(u8 *buf, enum usb_device_speed speed, * usb_request or NULL if there is an error. */ static struct usb_request * -gs_alloc_req(struct usb_ep *ep, unsigned int len, unsigned kmalloc_flags) +gs_alloc_req(struct usb_ep *ep, unsigned int len, gfp_t kmalloc_flags) { struct usb_request *req; @@ -2132,7 +2132,7 @@ static void gs_free_req(struct usb_ep *ep, struct usb_request *req) * endpoint, buffer len, and kmalloc flags. */ static struct gs_req_entry * -gs_alloc_req_entry(struct usb_ep *ep, unsigned len, unsigned kmalloc_flags) +gs_alloc_req_entry(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) { struct gs_req_entry *req; @@ -2173,7 +2173,7 @@ static void gs_free_req_entry(struct usb_ep *ep, struct gs_req_entry *req) * * The device lock is normally held when calling this function. */ -static int gs_alloc_ports(struct gs_dev *dev, unsigned kmalloc_flags) +static int gs_alloc_ports(struct gs_dev *dev, gfp_t kmalloc_flags) { int i; struct gs_port *port; @@ -2255,7 +2255,7 @@ static void gs_free_ports(struct gs_dev *dev) * * Allocate a circular buffer and all associated memory. 
*/ -static struct gs_buf *gs_buf_alloc(unsigned int size, unsigned kmalloc_flags) +static struct gs_buf *gs_buf_alloc(unsigned int size, gfp_t kmalloc_flags) { struct gs_buf *gb; diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c index 6890e773b2a2..ec9c424f1d97 100644 --- a/drivers/usb/gadget/zero.c +++ b/drivers/usb/gadget/zero.c @@ -612,7 +612,7 @@ static void source_sink_complete (struct usb_ep *ep, struct usb_request *req) } static struct usb_request * -source_sink_start_ep (struct usb_ep *ep, unsigned gfp_flags) +source_sink_start_ep (struct usb_ep *ep, gfp_t gfp_flags) { struct usb_request *req; int status; @@ -640,7 +640,7 @@ source_sink_start_ep (struct usb_ep *ep, unsigned gfp_flags) } static int -set_source_sink_config (struct zero_dev *dev, unsigned gfp_flags) +set_source_sink_config (struct zero_dev *dev, gfp_t gfp_flags) { int result = 0; struct usb_ep *ep; @@ -744,7 +744,7 @@ static void loopback_complete (struct usb_ep *ep, struct usb_request *req) } static int -set_loopback_config (struct zero_dev *dev, unsigned gfp_flags) +set_loopback_config (struct zero_dev *dev, gfp_t gfp_flags) { int result = 0; struct usb_ep *ep; @@ -845,7 +845,7 @@ static void zero_reset_config (struct zero_dev *dev) * by limiting configuration choices (like the pxa2xx). */ static int -zero_set_config (struct zero_dev *dev, unsigned number, unsigned gfp_flags) +zero_set_config (struct zero_dev *dev, unsigned number, gfp_t gfp_flags) { int result = 0; struct usb_gadget *gadget = dev->gadget; diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index b948ffd94f45..f5eb9e7b5b18 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -983,7 +983,7 @@ static int ehci_urb_enqueue ( struct usb_hcd *hcd, struct usb_host_endpoint *ep, struct urb *urb, - unsigned mem_flags + gfp_t mem_flags ) { struct ehci_hcd *ehci = hcd_to_ehci (hcd); struct list_head qtd_list; diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c index 5c38ad869485..91c2ab43cbcc 100644 --- a/drivers/usb/host/ehci-mem.c +++ b/drivers/usb/host/ehci-mem.c @@ -45,7 +45,7 @@ static inline void ehci_qtd_init (struct ehci_qtd *qtd, dma_addr_t dma) INIT_LIST_HEAD (&qtd->qtd_list); } -static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, int flags) +static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, gfp_t flags) { struct ehci_qtd *qtd; dma_addr_t dma; @@ -79,7 +79,7 @@ static void qh_destroy (struct kref *kref) dma_pool_free (ehci->qh_pool, qh, qh->qh_dma); } -static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, int flags) +static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags) { struct ehci_qh *qh; dma_addr_t dma; @@ -161,7 +161,7 @@ static void ehci_mem_cleanup (struct ehci_hcd *ehci) } /* remember to add cleanup code (above) if you add anything here */ -static int ehci_mem_init (struct ehci_hcd *ehci, int flags) +static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags) { int i; diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 940d38ca7d91..5bb872c3496d 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -477,7 +477,7 @@ qh_urb_transaction ( struct ehci_hcd *ehci, struct urb *urb, struct list_head *head, - int flags + gfp_t flags ) { struct ehci_qtd *qtd, *qtd_prev; dma_addr_t buf; @@ -629,7 +629,7 @@ static struct ehci_qh * qh_make ( struct ehci_hcd *ehci, struct urb *urb, - int flags + gfp_t flags ) { struct ehci_qh *qh = ehci_qh_alloc (ehci, flags); u32 info1 = 0, info2 = 0; 
@@ -906,7 +906,7 @@ submit_async ( struct usb_host_endpoint *ep, struct urb *urb, struct list_head *qtd_list, - unsigned mem_flags + gfp_t mem_flags ) { struct ehci_qtd *qtd; int epnum; diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index ccc7300baa6d..f0c8aa1ccd5d 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -589,7 +589,7 @@ static int intr_submit ( struct usb_host_endpoint *ep, struct urb *urb, struct list_head *qtd_list, - unsigned mem_flags + gfp_t mem_flags ) { unsigned epnum; unsigned long flags; @@ -634,7 +634,7 @@ done: /* ehci_iso_stream ops work with both ITD and SITD */ static struct ehci_iso_stream * -iso_stream_alloc (unsigned mem_flags) +iso_stream_alloc (gfp_t mem_flags) { struct ehci_iso_stream *stream; @@ -851,7 +851,7 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb) /* ehci_iso_sched ops can be ITD-only or SITD-only */ static struct ehci_iso_sched * -iso_sched_alloc (unsigned packets, unsigned mem_flags) +iso_sched_alloc (unsigned packets, gfp_t mem_flags) { struct ehci_iso_sched *iso_sched; int size = sizeof *iso_sched; @@ -924,7 +924,7 @@ itd_urb_transaction ( struct ehci_iso_stream *stream, struct ehci_hcd *ehci, struct urb *urb, - unsigned mem_flags + gfp_t mem_flags ) { struct ehci_itd *itd; @@ -1418,7 +1418,7 @@ itd_complete ( /*-------------------------------------------------------------------------*/ static int itd_submit (struct ehci_hcd *ehci, struct urb *urb, - unsigned mem_flags) + gfp_t mem_flags) { int status = -EINVAL; unsigned long flags; @@ -1529,7 +1529,7 @@ sitd_urb_transaction ( struct ehci_iso_stream *stream, struct ehci_hcd *ehci, struct urb *urb, - unsigned mem_flags + gfp_t mem_flags ) { struct ehci_sitd *sitd; @@ -1779,7 +1779,7 @@ sitd_complete ( static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb, - unsigned mem_flags) + gfp_t mem_flags) { int status = -EINVAL; unsigned long flags; diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c index 41bbae83fc71..2548d94fcd72 100644 --- a/drivers/usb/host/isp116x-hcd.c +++ b/drivers/usb/host/isp116x-hcd.c @@ -326,7 +326,8 @@ static void postproc_atl_queue(struct isp116x *isp116x) usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT, - PTD_GET_TOGGLE(ptd) ^ 1); + PTD_GET_TOGGLE(ptd)); + urb->actual_length += PTD_GET_COUNT(ptd); urb->status = cc_to_error[TD_DATAUNDERRUN]; spin_unlock(&urb->lock); continue; @@ -693,7 +694,7 @@ static int balance(struct isp116x *isp116x, u16 period, u16 load) static int isp116x_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep, struct urb *urb, - unsigned mem_flags) + gfp_t mem_flags) { struct isp116x *isp116x = hcd_to_isp116x(hcd); struct usb_device *udev = urb->dev; diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 67c1aa5eb1c1..f8da8c7af7c6 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -180,7 +180,7 @@ static int ohci_urb_enqueue ( struct usb_hcd *hcd, struct usb_host_endpoint *ep, struct urb *urb, - unsigned mem_flags + gfp_t mem_flags ) { struct ohci_hcd *ohci = hcd_to_ohci (hcd); struct ed *ed; diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c index 817620d73841..859aca7be753 100644 --- a/drivers/usb/host/ohci-lh7a404.c +++ b/drivers/usb/host/ohci-lh7a404.c @@ -17,8 +17,6 @@ */ #include -#include -#include extern int usb_disabled(void); diff --git a/drivers/usb/host/ohci-mem.c b/drivers/usb/host/ohci-mem.c index fd3c4d3714bd..9fb83dfb1eb4 100644 --- 
a/drivers/usb/host/ohci-mem.c +++ b/drivers/usb/host/ohci-mem.c @@ -84,7 +84,7 @@ dma_to_td (struct ohci_hcd *hc, dma_addr_t td_dma) /* TDs ... */ static struct td * -td_alloc (struct ohci_hcd *hc, unsigned mem_flags) +td_alloc (struct ohci_hcd *hc, gfp_t mem_flags) { dma_addr_t dma; struct td *td; @@ -118,7 +118,7 @@ td_free (struct ohci_hcd *hc, struct td *td) /* EDs ... */ static struct ed * -ed_alloc (struct ohci_hcd *hc, unsigned mem_flags) +ed_alloc (struct ohci_hcd *hc, gfp_t mem_flags) { dma_addr_t dma; struct ed *ed; diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c index 5cde76faab93..d8f3ba7ad52e 100644 --- a/drivers/usb/host/ohci-omap.c +++ b/drivers/usb/host/ohci-omap.c @@ -18,7 +18,6 @@ #include #include -#include #include #include #include diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c index 3d9bcf78a9a4..da7d5478f74d 100644 --- a/drivers/usb/host/ohci-s3c2410.c +++ b/drivers/usb/host/ohci-s3c2410.c @@ -20,7 +20,6 @@ */ #include -#include #include #include diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c index d2a1fd40dfcb..cad858575cea 100644 --- a/drivers/usb/host/sl811-hcd.c +++ b/drivers/usb/host/sl811-hcd.c @@ -782,6 +782,9 @@ retry: /* usb 1.1 says max 90% of a frame is available for periodic transfers. * this driver doesn't promise that much since it's got to handle an * IRQ per packet; irq handling latencies also use up that time. + * + * NOTE: the periodic schedule is a sparse tree, with the load for + * each branch minimized. see fig 3.5 in the OHCI spec for example. */ #define MAX_PERIODIC_LOAD 500 /* out of 1000 usec */ @@ -815,7 +818,7 @@ static int sl811h_urb_enqueue( struct usb_hcd *hcd, struct usb_host_endpoint *hep, struct urb *urb, - unsigned mem_flags + gfp_t mem_flags ) { struct sl811 *sl811 = hcd_to_sl811(hcd); struct usb_device *udev = urb->dev; @@ -843,6 +846,7 @@ static int sl811h_urb_enqueue( if (!(sl811->port1 & (1 << USB_PORT_FEAT_ENABLE)) || !HC_IS_RUNNING(hcd->state)) { retval = -ENODEV; + kfree(ep); goto fail; } @@ -911,8 +915,16 @@ static int sl811h_urb_enqueue( case PIPE_ISOCHRONOUS: case PIPE_INTERRUPT: urb->interval = ep->period; - if (ep->branch < PERIODIC_SIZE) + if (ep->branch < PERIODIC_SIZE) { + /* NOTE: the phase is correct here, but the value + * needs offsetting by the transfer queue depth. + * All current drivers ignore start_frame, so this + * is unlikely to ever matter... 
+ */ + urb->start_frame = (sl811->frame & (PERIODIC_SIZE - 1)) + + ep->branch; break; + } retval = balance(sl811, ep->period, ep->load); if (retval < 0) @@ -1122,7 +1134,7 @@ sl811h_hub_descriptor ( desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp); /* two bitmaps: ports removable, and legacy PortPwrCtrlMask */ - desc->bitmap[0] = 1 << 1; + desc->bitmap[0] = 0 << 1; desc->bitmap[1] = ~0; } diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c index ea0d168a8c67..4e0fbe2c1a9a 100644 --- a/drivers/usb/host/uhci-q.c +++ b/drivers/usb/host/uhci-q.c @@ -1164,7 +1164,7 @@ static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb) static int uhci_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *ep, - struct urb *urb, unsigned mem_flags) + struct urb *urb, gfp_t mem_flags) { int ret; struct uhci_hcd *uhci = hcd_to_uhci(hcd); diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c index a99865c689c5..41f92b924761 100644 --- a/drivers/usb/input/hid-core.c +++ b/drivers/usb/input/hid-core.c @@ -1702,10 +1702,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf) if ((endpoint->bmAttributes & 3) != 3) /* Not an interrupt endpoint */ continue; - /* handle potential highspeed HID correctly */ interval = endpoint->bInterval; - if (dev->speed == USB_SPEED_HIGH) - interval = 1 << (interval - 1); /* Change the polling interval of mice. */ if (hid->collection->usage == HID_GD_MOUSE && hid_mousepoll_interval > 0) diff --git a/drivers/usb/media/vicam.c b/drivers/usb/media/vicam.c index 4a5857c53f11..0bc0b1247a6b 100644 --- a/drivers/usb/media/vicam.c +++ b/drivers/usb/media/vicam.c @@ -1148,7 +1148,7 @@ vicam_write_proc_gain(struct file *file, const char *buffer, static void vicam_create_proc_root(void) { - vicam_proc_root = create_proc_entry("video/vicam", S_IFDIR, 0); + vicam_proc_root = proc_mkdir("video/vicam", NULL); if (vicam_proc_root) vicam_proc_root->owner = THIS_MODULE; @@ -1181,7 +1181,7 @@ vicam_create_proc_entry(struct vicam_camera *cam) sprintf(name, "video%d", cam->vdev.minor); - cam->proc_dir = create_proc_entry(name, S_IFDIR, vicam_proc_root); + cam->proc_dir = proc_mkdir(name, vicam_proc_root); if ( !cam->proc_dir ) return; // FIXME: We should probably return an error here diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c index 03fb70ef2eb3..0592cb5e6c4d 100644 --- a/drivers/usb/misc/uss720.c +++ b/drivers/usb/misc/uss720.c @@ -137,7 +137,7 @@ static void async_complete(struct urb *urb, struct pt_regs *ptregs) static struct uss720_async_request *submit_async_request(struct parport_uss720_private *priv, __u8 request, __u8 requesttype, __u16 value, __u16 index, - unsigned int mem_flags) + gfp_t mem_flags) { struct usb_device *usbdev; struct uss720_async_request *rq; @@ -204,7 +204,7 @@ static unsigned int kill_all_async_requests_priv(struct parport_uss720_private * /* --------------------------------------------------------------------- */ -static int get_1284_register(struct parport *pp, unsigned char reg, unsigned char *val, unsigned int mem_flags) +static int get_1284_register(struct parport *pp, unsigned char reg, unsigned char *val, gfp_t mem_flags) { struct parport_uss720_private *priv; struct uss720_async_request *rq; @@ -238,7 +238,7 @@ static int get_1284_register(struct parport *pp, unsigned char reg, unsigned cha return -EIO; } -static int set_1284_register(struct parport *pp, unsigned char reg, unsigned char val, unsigned int mem_flags) +static int set_1284_register(struct 
parport *pp, unsigned char reg, unsigned char val, gfp_t mem_flags) { struct parport_uss720_private *priv; struct uss720_async_request *rq; diff --git a/drivers/usb/net/asix.c b/drivers/usb/net/asix.c index 861f00a43750..252a34fbb42c 100644 --- a/drivers/usb/net/asix.c +++ b/drivers/usb/net/asix.c @@ -753,7 +753,7 @@ static int ax88772_rx_fixup(struct usbnet *dev, struct sk_buff *skb) } static struct sk_buff *ax88772_tx_fixup(struct usbnet *dev, struct sk_buff *skb, - unsigned flags) + gfp_t flags) { int padlen; int headroom = skb_headroom(skb); diff --git a/drivers/usb/net/gl620a.c b/drivers/usb/net/gl620a.c index c8763ae33c73..c0f263b202a6 100644 --- a/drivers/usb/net/gl620a.c +++ b/drivers/usb/net/gl620a.c @@ -301,7 +301,7 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb) } static struct sk_buff * -genelink_tx_fixup(struct usbnet *dev, struct sk_buff *skb, unsigned flags) +genelink_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { int padlen; int length = skb->len; diff --git a/drivers/usb/net/kaweth.c b/drivers/usb/net/kaweth.c index e04b0ce3611a..c82655d3d448 100644 --- a/drivers/usb/net/kaweth.c +++ b/drivers/usb/net/kaweth.c @@ -477,13 +477,13 @@ static int kaweth_reset(struct kaweth_device *kaweth) } static void kaweth_usb_receive(struct urb *, struct pt_regs *regs); -static int kaweth_resubmit_rx_urb(struct kaweth_device *, unsigned); +static int kaweth_resubmit_rx_urb(struct kaweth_device *, gfp_t); /**************************************************************** int_callback *****************************************************************/ -static void kaweth_resubmit_int_urb(struct kaweth_device *kaweth, int mf) +static void kaweth_resubmit_int_urb(struct kaweth_device *kaweth, gfp_t mf) { int status; @@ -550,7 +550,7 @@ static void kaweth_resubmit_tl(void *d) * kaweth_resubmit_rx_urb ****************************************************************/ static int kaweth_resubmit_rx_urb(struct kaweth_device *kaweth, - unsigned mem_flags) + gfp_t mem_flags) { int result; diff --git a/drivers/usb/net/net1080.c b/drivers/usb/net/net1080.c index a4309c4a491b..cee55f8cf64f 100644 --- a/drivers/usb/net/net1080.c +++ b/drivers/usb/net/net1080.c @@ -500,7 +500,7 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb) } static struct sk_buff * -net1080_tx_fixup(struct usbnet *dev, struct sk_buff *skb, unsigned flags) +net1080_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { int padlen; struct sk_buff *skb2; diff --git a/drivers/usb/net/pegasus.c b/drivers/usb/net/pegasus.c index 7484d34780fc..6a4ffe6c3977 100644 --- a/drivers/usb/net/pegasus.c +++ b/drivers/usb/net/pegasus.c @@ -647,6 +647,13 @@ static void read_bulk_callback(struct urb *urb, struct pt_regs *regs) pkt_len -= 8; } + /* + * If the packet is unreasonably long, quietly drop it rather than + * kernel panicing by calling skb_put. + */ + if (pkt_len > PEGASUS_MTU) + goto goon; + /* * at this point we are sure pegasus->rx_skb != NULL * so we go ahead and pass up the packet. 
@@ -886,15 +893,17 @@ static inline void get_interrupt_interval(pegasus_t * pegasus) __u8 data[2]; read_eprom_word(pegasus, 4, (__u16 *) data); - if (data[1] < 0x80) { - if (netif_msg_timer(pegasus)) - dev_info(&pegasus->intf->dev, - "intr interval changed from %ums to %ums\n", - data[1], 0x80); - data[1] = 0x80; -#ifdef PEGASUS_WRITE_EEPROM - write_eprom_word(pegasus, 4, *(__u16 *) data); + if (pegasus->usb->speed != USB_SPEED_HIGH) { + if (data[1] < 0x80) { + if (netif_msg_timer(pegasus)) + dev_info(&pegasus->intf->dev, "intr interval " + "changed from %ums to %ums\n", + data[1], 0x80); + data[1] = 0x80; +#ifdef PEGASUS_WRITE_EEPROM + write_eprom_word(pegasus, 4, *(__u16 *) data); #endif + } } pegasus->intr_interval = data[1]; } @@ -904,8 +913,9 @@ static void set_carrier(struct net_device *net) pegasus_t *pegasus = netdev_priv(net); u16 tmp; - if (read_mii_word(pegasus, pegasus->phy, MII_BMSR, &tmp)) + if (!read_mii_word(pegasus, pegasus->phy, MII_BMSR, &tmp)) return; + if (tmp & BMSR_LSTATUS) netif_carrier_on(net); else @@ -1355,6 +1365,7 @@ static void pegasus_disconnect(struct usb_interface *intf) cancel_delayed_work(&pegasus->carrier_check); unregister_netdev(pegasus->net); usb_put_dev(interface_to_usbdev(intf)); + unlink_all_urbs(pegasus); free_all_urbs(pegasus); free_skb_pool(pegasus); if (pegasus->rx_skb) diff --git a/drivers/usb/net/rndis_host.c b/drivers/usb/net/rndis_host.c index 2ed2e5fb7778..b5a925dc1beb 100644 --- a/drivers/usb/net/rndis_host.c +++ b/drivers/usb/net/rndis_host.c @@ -517,7 +517,7 @@ static int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb) } static struct sk_buff * -rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, unsigned flags) +rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { struct rndis_data_hdr *hdr; struct sk_buff *skb2; diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c index 6c460918d54f..fce81d738933 100644 --- a/drivers/usb/net/usbnet.c +++ b/drivers/usb/net/usbnet.c @@ -288,7 +288,7 @@ EXPORT_SYMBOL_GPL(usbnet_defer_kevent); static void rx_complete (struct urb *urb, struct pt_regs *regs); -static void rx_submit (struct usbnet *dev, struct urb *urb, unsigned flags) +static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) { struct sk_buff *skb; struct skb_data *entry; diff --git a/drivers/usb/net/usbnet.h b/drivers/usb/net/usbnet.h index 7aa0abd1a9bd..89fc4958eecf 100644 --- a/drivers/usb/net/usbnet.h +++ b/drivers/usb/net/usbnet.h @@ -107,7 +107,7 @@ struct driver_info { /* fixup tx packet (add framing) */ struct sk_buff *(*tx_fixup)(struct usbnet *dev, - struct sk_buff *skb, unsigned flags); + struct sk_buff *skb, gfp_t flags); /* for new devices, use the descriptor-reading code instead */ int in; /* rx endpoint */ diff --git a/drivers/usb/net/zaurus.c b/drivers/usb/net/zaurus.c index ee3b892aeabc..5d4b7d55b097 100644 --- a/drivers/usb/net/zaurus.c +++ b/drivers/usb/net/zaurus.c @@ -62,7 +62,7 @@ */ static struct sk_buff * -zaurus_tx_fixup(struct usbnet *dev, struct sk_buff *skb, unsigned flags) +zaurus_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { int padlen; struct sk_buff *skb2; diff --git a/drivers/usb/net/zd1201.c b/drivers/usb/net/zd1201.c index c4e479ee926a..2f52261c7cc1 100644 --- a/drivers/usb/net/zd1201.c +++ b/drivers/usb/net/zd1201.c @@ -521,7 +521,7 @@ static int zd1201_setconfig(struct zd1201 *zd, int rid, void *buf, int len, int int reqlen; char seq=0; struct urb *urb; - unsigned int gfp_mask = wait ? 
GFP_NOIO : GFP_ATOMIC; + gfp_t gfp_mask = wait ? GFP_NOIO : GFP_ATOMIC; len += 4; /* first 4 are for header */ diff --git a/drivers/usb/serial/airprime.c b/drivers/usb/serial/airprime.c index a4ce0008d69b..926d4c2c1600 100644 --- a/drivers/usb/serial/airprime.c +++ b/drivers/usb/serial/airprime.c @@ -16,7 +16,8 @@ #include "usb-serial.h" static struct usb_device_id id_table [] = { - { USB_DEVICE(0xf3d, 0x0112) }, + { USB_DEVICE(0xf3d, 0x0112) }, /* AirPrime CDMA Wireless PC Card */ + { USB_DEVICE(0x1410, 0x1110) }, /* Novatel Wireless Merlin CDMA */ { }, }; MODULE_DEVICE_TABLE(usb, id_table); diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 4e434cb10bb1..5a8631c8a4a7 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1846,10 +1846,12 @@ static void ftdi_set_termios (struct usb_serial_port *port, struct termios *old_ } else { /* set the baudrate determined before */ if (change_speed(port)) { - err("%s urb failed to set baurdrate", __FUNCTION__); + err("%s urb failed to set baudrate", __FUNCTION__); + } + /* Ensure RTS and DTR are raised when baudrate changed from 0 */ + if ((old_termios->c_cflag & CBAUD) == B0) { + set_mctrl(port, TIOCM_DTR | TIOCM_RTS); } - /* Ensure RTS and DTR are raised */ - set_mctrl(port, TIOCM_DTR | TIOCM_RTS); } /* Set flow control */ diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index ddde5fb13f6b..5f7d3193d355 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c @@ -223,7 +223,7 @@ int usb_serial_generic_write_room (struct usb_serial_port *port) dbg("%s - port %d", __FUNCTION__, port->number); if (serial->num_bulk_out) { - if (port->write_urb_busy) + if (!(port->write_urb_busy)) room = port->bulk_out_size; } diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 92d0f925d053..4989e5740d18 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -25,6 +25,9 @@ 2005-06-20 v0.4.1 add missing braces :-/ killed end-of-line whitespace 2005-07-15 v0.4.2 rename WLAN product to FUSION, add FUSION2 + 2005-09-10 v0.4.3 added HUAWEI E600 card and Audiovox AirCard + 2005-09-20 v0.4.4 increased recv buffer size: the card sometimes + wants to send >2000 bytes. 
Work sponsored by: Sigos GmbH, Germany @@ -71,15 +74,21 @@ static int option_send_setup(struct usb_serial_port *port); /* Vendor and product IDs */ #define OPTION_VENDOR_ID 0x0AF0 +#define HUAWEI_VENDOR_ID 0x12D1 +#define AUDIOVOX_VENDOR_ID 0x0F3D #define OPTION_PRODUCT_OLD 0x5000 #define OPTION_PRODUCT_FUSION 0x6000 #define OPTION_PRODUCT_FUSION2 0x6300 +#define HUAWEI_PRODUCT_E600 0x1001 +#define AUDIOVOX_PRODUCT_AIRCARD 0x0112 static struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_OLD) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUSION) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_FUSION2) }, + { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600) }, + { USB_DEVICE(AUDIOVOX_VENDOR_ID, AUDIOVOX_PRODUCT_AIRCARD) }, { } /* Terminating entry */ }; @@ -132,7 +141,7 @@ static int debug; #define N_IN_URB 4 #define N_OUT_URB 1 -#define IN_BUFLEN 1024 +#define IN_BUFLEN 4096 #define OUT_BUFLEN 128 struct option_port_private { diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 773ae11b4a19..1cd942abb580 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -768,6 +768,7 @@ config FB_INTEL select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT + select FB_SOFT_CURSOR help This driver supports the on-board graphics built in to the Intel 830M/845G/852GM/855GM/865G chipsets. diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c index 046b47860266..8a24a66d9ba8 100644 --- a/drivers/video/aty/radeon_base.c +++ b/drivers/video/aty/radeon_base.c @@ -475,7 +475,7 @@ static int __devinit radeon_probe_pll_params(struct radeonfb_info *rinfo) */ /* Flush PCI buffers ? */ - tmp = INREG(DEVICE_ID); + tmp = INREG16(DEVICE_ID); local_irq_disable(); diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c index 59a1b6f85067..097d668c4fe5 100644 --- a/drivers/video/aty/radeon_pm.c +++ b/drivers/video/aty/radeon_pm.c @@ -62,9 +62,9 @@ static void radeon_pm_disable_dynamic_mode(struct radeonfb_info *rinfo) OUTPLL(pllSCLK_CNTL, tmp); return; } - /* RV350 (M10) */ + /* RV350 (M10/M11) */ if (rinfo->family == CHIP_FAMILY_RV350) { - /* for RV350/M10, no delays are required. */ + /* for RV350/M10/M11, no delays are required. 
*/ tmp = INPLL(pllSCLK_CNTL2); tmp |= (SCLK_CNTL2__R300_FORCE_TCL | SCLK_CNTL2__R300_FORCE_GA | @@ -248,7 +248,7 @@ static void radeon_pm_enable_dynamic_mode(struct radeonfb_info *rinfo) return; } - /* M10 */ + /* M10/M11 */ if (rinfo->family == CHIP_FAMILY_RV350) { tmp = INPLL(pllSCLK_CNTL2); tmp &= ~(SCLK_CNTL2__R300_FORCE_TCL | @@ -1155,7 +1155,7 @@ static void radeon_pm_full_reset_sdram(struct radeonfb_info *rinfo) OUTREG( CRTC_GEN_CNTL, (crtcGenCntl | CRTC_GEN_CNTL__CRTC_DISP_REQ_EN_B) ); OUTREG( CRTC2_GEN_CNTL, (crtcGenCntl2 | CRTC2_GEN_CNTL__CRTC2_DISP_REQ_EN_B) ); - /* This is the code for the Aluminium PowerBooks M10 */ + /* This is the code for the Aluminium PowerBooks M10 / iBooks M11 */ if (rinfo->family == CHIP_FAMILY_RV350) { u32 sdram_mode_reg = rinfo->save_regs[35]; static u32 default_mrtable[] = @@ -2741,9 +2741,11 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk) rinfo->pm_mode |= radeon_pm_d2; /* We can restart Jasper (M10 chip in albooks), BlueStone (7500 chip - * in some desktop G4s), and Via (M9+ chip on iBook G4) + * in some desktop G4s), Via (M9+ chip on iBook G4) and + * Snowy (M11 chip on iBook G4 manufactured after July 2005) */ - if (!strcmp(rinfo->of_node->name, "ATY,JasperParent")) { + if (!strcmp(rinfo->of_node->name, "ATY,JasperParent") || + !strcmp(rinfo->of_node->name, "ATY,SnowyParent")) { rinfo->reinit_func = radeon_reinitialize_M10; rinfo->pm_mode |= radeon_pm_off; } diff --git a/drivers/video/aty/radeonfb.h b/drivers/video/aty/radeonfb.h index 659bc9f62244..01b8b2f78514 100644 --- a/drivers/video/aty/radeonfb.h +++ b/drivers/video/aty/radeonfb.h @@ -395,6 +395,8 @@ static inline void _radeon_msleep(struct radeonfb_info *rinfo, unsigned long ms) #define INREG8(addr) readb((rinfo->mmio_base)+addr) #define OUTREG8(addr,val) writeb(val, (rinfo->mmio_base)+addr) +#define INREG16(addr) readw((rinfo->mmio_base)+addr) +#define OUTREG16(addr,val) writew(val, (rinfo->mmio_base)+addr) #define INREG(addr) readl((rinfo->mmio_base)+addr) #define OUTREG(addr,val) writel(val, (rinfo->mmio_base)+addr) diff --git a/drivers/video/aty/xlinit.c b/drivers/video/aty/xlinit.c index 0bea0d8d7821..a085cbf74ecb 100644 --- a/drivers/video/aty/xlinit.c +++ b/drivers/video/aty/xlinit.c @@ -253,9 +253,11 @@ int atyfb_xl_init(struct fb_info *info) aty_st_le32(0xFC, 0x00000000, par); #if defined (CONFIG_FB_ATY_GENERIC_LCD) - int i; - for (i=0; i #include -#include #include #define CORGI_DEFAULT_INTENSITY 0x1f diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 6ef6f7760e47..809fee2140ac 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -565,7 +565,11 @@ static int vgacon_switch(struct vc_data *c) scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf, c->vc_screenbuf_size > vga_vram_size ? 
vga_vram_size : c->vc_screenbuf_size); - vgacon_doresize(c, c->vc_cols, c->vc_rows); + if (!(vga_video_num_columns % 2) && + vga_video_num_columns <= ORIG_VIDEO_COLS && + vga_video_num_lines <= (ORIG_VIDEO_LINES * + vga_default_font_height) / c->vc_font.height) + vgacon_doresize(c, c->vc_cols, c->vc_rows); } return 0; /* Redrawing not needed */ @@ -1023,7 +1027,8 @@ static int vgacon_resize(struct vc_data *c, unsigned int width, if (width % 2 || width > ORIG_VIDEO_COLS || height > (ORIG_VIDEO_LINES * vga_default_font_height)/ c->vc_font.height) - return -EINVAL; + /* let svgatextmode tinker with video timings */ + return 0; if (CON_IS_VISIBLE(c) && !vga_is_gfx) /* who knows */ vgacon_doresize(c, width, height); diff --git a/drivers/video/cyblafb.c b/drivers/video/cyblafb.c index ae2762cb5608..6992100a508c 100644 --- a/drivers/video/cyblafb.c +++ b/drivers/video/cyblafb.c @@ -410,20 +410,21 @@ static void cyblafb_imageblit(struct fb_info *info, out32(GE0C,point(image->dx+image->width-1,image->dy+image->height-1)); while(index < index_end) { + const char *p = image->data + index; for(i=0;idata + index))); + out32(GE9C,*(u32*)p); + p+=4; index+=4; } switch(width_dbs) { case 0: break; - case 8: out32(GE9C,*((u8*)((u32)image->data+index))); + case 8: out32(GE9C,*(u8*)p); index+=1; break; - case 16: out32(GE9C,*((u16*)((u32)image->data+index))); + case 16: out32(GE9C,*(u16*)p); index+=2; break; - case 24: out32(GE9C,(u32)(*((u16*)((u32)image->data+index))) | - (u32)(*((u8*)((u32)image->data+index+2)))<<16); + case 24: out32(GE9C,*(u16*)p | *(u8*)(p+2)<<16); index+=3; break; } diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c index 1147b899f007..007c8e9b2b39 100644 --- a/drivers/video/fbsysfs.c +++ b/drivers/video/fbsysfs.c @@ -242,6 +242,13 @@ static ssize_t show_virtual(struct class_device *class_device, char *buf) fb_info->var.yres_virtual); } +static ssize_t show_stride(struct class_device *class_device, char *buf) +{ + struct fb_info *fb_info = + (struct fb_info *)class_get_devdata(class_device); + return snprintf(buf, PAGE_SIZE, "%d\n", fb_info->fix.line_length); +} + /* Format for cmap is "%02x%c%4x%4x%4x\n" */ /* %02x entry %c transp %4x red %4x blue %4x green \n */ /* 256 rows at 16 chars equals 4096, the normal page size */ @@ -432,6 +439,7 @@ static struct class_device_attribute class_device_attrs[] = { __ATTR(pan, S_IRUGO|S_IWUSR, show_pan, store_pan), __ATTR(virtual_size, S_IRUGO|S_IWUSR, show_virtual, store_virtual), __ATTR(name, S_IRUGO, show_name, NULL), + __ATTR(stride, S_IRUGO, show_stride, NULL), }; int fb_init_class_device(struct fb_info *fb_info) diff --git a/drivers/video/i810/i810-i2c.c b/drivers/video/i810/i810-i2c.c index fda53aac1fc1..689d2586366d 100644 --- a/drivers/video/i810/i810-i2c.c +++ b/drivers/video/i810/i810-i2c.c @@ -44,7 +44,7 @@ static void i810i2c_setscl(void *data, int state) { struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data; struct i810fb_par *par = chan->par; - u8 *mmio = par->mmio_start_virtual; + u8 __iomem *mmio = par->mmio_start_virtual; i810_writel(mmio, GPIOB, (state ? SCL_VAL_OUT : 0) | SCL_DIR | SCL_DIR_MASK | SCL_VAL_MASK); @@ -55,7 +55,7 @@ static void i810i2c_setsda(void *data, int state) { struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data; struct i810fb_par *par = chan->par; - u8 *mmio = par->mmio_start_virtual; + u8 __iomem *mmio = par->mmio_start_virtual; i810_writel(mmio, GPIOB, (state ? 
SDA_VAL_OUT : 0) | SDA_DIR | SDA_DIR_MASK | SDA_VAL_MASK); @@ -66,7 +66,7 @@ static int i810i2c_getscl(void *data) { struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data; struct i810fb_par *par = chan->par; - u8 *mmio = par->mmio_start_virtual; + u8 __iomem *mmio = par->mmio_start_virtual; i810_writel(mmio, GPIOB, SCL_DIR_MASK); i810_writel(mmio, GPIOB, 0); @@ -77,7 +77,7 @@ static int i810i2c_getsda(void *data) { struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data; struct i810fb_par *par = chan->par; - u8 *mmio = par->mmio_start_virtual; + u8 __iomem *mmio = par->mmio_start_virtual; i810_writel(mmio, GPIOB, SDA_DIR_MASK); i810_writel(mmio, GPIOB, 0); @@ -88,7 +88,7 @@ static void i810ddc_setscl(void *data, int state) { struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data; struct i810fb_par *par = chan->par; - u8 *mmio = par->mmio_start_virtual; + u8 __iomem *mmio = par->mmio_start_virtual; i810_writel(mmio, GPIOA, (state ? SCL_VAL_OUT : 0) | SCL_DIR | SCL_DIR_MASK | SCL_VAL_MASK); @@ -99,7 +99,7 @@ static void i810ddc_setsda(void *data, int state) { struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data; struct i810fb_par *par = chan->par; - u8 *mmio = par->mmio_start_virtual; + u8 __iomem *mmio = par->mmio_start_virtual; i810_writel(mmio, GPIOA, (state ? SDA_VAL_OUT : 0) | SDA_DIR | SDA_DIR_MASK | SDA_VAL_MASK); @@ -110,7 +110,7 @@ static int i810ddc_getscl(void *data) { struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data; struct i810fb_par *par = chan->par; - u8 *mmio = par->mmio_start_virtual; + u8 __iomem *mmio = par->mmio_start_virtual; i810_writel(mmio, GPIOA, SCL_DIR_MASK); i810_writel(mmio, GPIOA, 0); @@ -121,7 +121,7 @@ static int i810ddc_getsda(void *data) { struct i810fb_i2c_chan *chan = (struct i810fb_i2c_chan *)data; struct i810fb_par *par = chan->par; - u8 *mmio = par->mmio_start_virtual; + u8 __iomem *mmio = par->mmio_start_virtual; i810_writel(mmio, GPIOA, SDA_DIR_MASK); i810_writel(mmio, GPIOA, 0); diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c index 6c2244cf0e74..1d54d3d6960b 100644 --- a/drivers/video/imxfb.c +++ b/drivers/video/imxfb.c @@ -36,7 +36,6 @@ #include #include -#include #include #include diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c index bf62e6ed0382..80a09344f1aa 100644 --- a/drivers/video/intelfb/intelfbdrv.c +++ b/drivers/video/intelfb/intelfbdrv.c @@ -226,7 +226,7 @@ MODULE_DEVICE_TABLE(pci, intelfb_pci_table); static int accel = 1; static int vram = 4; -static int hwcursor = 1; +static int hwcursor = 0; static int mtrr = 1; static int fixed = 0; static int noinit = 0; @@ -609,15 +609,9 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent) dinfo->accel = 0; } - if (MB(voffset) < stolen_size) - offset = (stolen_size >> 12); - else - offset = ROUND_UP_TO_PAGE(MB(voffset))/GTT_PAGE_SIZE; - /* Framebuffer parameters - Use all the stolen memory if >= vram */ - if (ROUND_UP_TO_PAGE(stolen_size) >= ((offset << 12) + MB(vram))) { + if (ROUND_UP_TO_PAGE(stolen_size) >= MB(vram)) { dinfo->fb.size = ROUND_UP_TO_PAGE(stolen_size); - dinfo->fb.offset = 0; dinfo->fbmem_gart = 0; } else { dinfo->fb.size = MB(vram); @@ -648,6 +642,11 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENODEV; } + if (MB(voffset) < stolen_size) + offset = (stolen_size >> 12); + else + offset = ROUND_UP_TO_PAGE(MB(voffset))/GTT_PAGE_SIZE; + /* set the mem offsets - set them after the already used pages */ if (dinfo->accel) { 
dinfo->ring.offset = offset + gtt_info.current_memory; @@ -662,10 +661,11 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent) + (dinfo->cursor.size >> 12); } + /* Allocate memories (which aren't stolen) */ /* Map the fb and MMIO regions */ /* ioremap only up to the end of used aperture */ dinfo->aperture.virtual = (u8 __iomem *)ioremap_nocache - (dinfo->aperture.physical, (dinfo->fb.offset << 12) + (dinfo->aperture.physical, ((offset + dinfo->fb.offset) << 12) + dinfo->fb.size); if (!dinfo->aperture.virtual) { ERR_MSG("Cannot remap FB region.\n"); @@ -682,7 +682,6 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENODEV; } - /* Allocate memories (which aren't stolen) */ if (dinfo->accel) { if (!(dinfo->gtt_ring_mem = agp_allocate_memory(bridge, dinfo->ring.size >> 12, @@ -1484,7 +1483,7 @@ intelfb_cursor(struct fb_info *info, struct fb_cursor *cursor) #endif if (!dinfo->hwcursor) - return -ENXIO; + return soft_cursor(info, cursor); intelfbhw_cursor_hide(dinfo); diff --git a/drivers/video/logo/.gitignore b/drivers/video/logo/.gitignore new file mode 100644 index 000000000000..e48355f538fa --- /dev/null +++ b/drivers/video/logo/.gitignore @@ -0,0 +1,7 @@ +# +# Generated files +# +*_mono.c +*_vga16.c +*_clut224.c +*_gray256.c diff --git a/drivers/video/p9100.c b/drivers/video/p9100.c index 7808a01493ad..b76a5a9a125b 100644 --- a/drivers/video/p9100.c +++ b/drivers/video/p9100.c @@ -288,6 +288,9 @@ static void p9100_init_one(struct sbus_dev *sdev) all->par.physbase = sdev->reg_addrs[2].phys_addr; sbusfb_fill_var(&all->info.var, sdev->prom_node, 8); + all->info.var.red.length = 8; + all->info.var.green.length = 8; + all->info.var.blue.length = 8; linebytes = prom_getintdefault(sdev->prom_node, "linebytes", all->info.var.xres); @@ -323,6 +326,7 @@ static void p9100_init_one(struct sbus_dev *sdev) kfree(all); return; } + fb_set_cmap(&all->info.cmap, &all->info); list_add(&all->list, &p9100_list); diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c index 34d4dcc0320a..194eed0a238c 100644 --- a/drivers/video/pxafb.c +++ b/drivers/video/pxafb.c @@ -260,9 +260,9 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) } #ifdef CONFIG_CPU_FREQ - DPRINTK("dma period = %d ps, clock = %d kHz\n", - pxafb_display_dma_period(var), - get_clk_frequency_khz(0)); + pr_debug("pxafb: dma period = %d ps, clock = %d kHz\n", + pxafb_display_dma_period(var), + get_clk_frequency_khz(0)); #endif return 0; @@ -270,7 +270,7 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) static inline void pxafb_set_truecolor(u_int is_true_color) { - DPRINTK("true_color = %d\n", is_true_color); + pr_debug("pxafb: true_color = %d\n", is_true_color); // do your machine-specific setup if needed } @@ -284,7 +284,7 @@ static int pxafb_set_par(struct fb_info *info) struct fb_var_screeninfo *var = &info->var; unsigned long palette_mem_size; - DPRINTK("set_par\n"); + pr_debug("pxafb: set_par\n"); if (var->bits_per_pixel == 16) fbi->fb.fix.visual = FB_VISUAL_TRUECOLOR; @@ -308,7 +308,7 @@ static int pxafb_set_par(struct fb_info *info) palette_mem_size = fbi->palette_size * sizeof(u16); - DPRINTK("palette_mem_size = 0x%08lx\n", (u_long) palette_mem_size); + pr_debug("pxafb: palette_mem_size = 0x%08lx\n", palette_mem_size); fbi->palette_cpu = (u16 *)(fbi->map_cpu + PAGE_SIZE - palette_mem_size); fbi->palette_dma = fbi->map_dma + PAGE_SIZE - palette_mem_size; @@ -369,7 +369,7 @@ static int pxafb_blank(int blank, struct 
fb_info *info) struct pxafb_info *fbi = (struct pxafb_info *)info; int i; - DPRINTK("pxafb_blank: blank=%d\n", blank); + pr_debug("pxafb: blank=%d\n", blank); switch (blank) { case FB_BLANK_POWERDOWN: @@ -508,15 +508,15 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info * u_long flags; u_int lines_per_panel, pcd = get_pcd(var->pixclock); - DPRINTK("Configuring PXA LCD\n"); + pr_debug("pxafb: Configuring PXA LCD\n"); - DPRINTK("var: xres=%d hslen=%d lm=%d rm=%d\n", - var->xres, var->hsync_len, - var->left_margin, var->right_margin); - DPRINTK("var: yres=%d vslen=%d um=%d bm=%d\n", - var->yres, var->vsync_len, - var->upper_margin, var->lower_margin); - DPRINTK("var: pixclock=%d pcd=%d\n", var->pixclock, pcd); + pr_debug("var: xres=%d hslen=%d lm=%d rm=%d\n", + var->xres, var->hsync_len, + var->left_margin, var->right_margin); + pr_debug("var: yres=%d vslen=%d um=%d bm=%d\n", + var->yres, var->vsync_len, + var->upper_margin, var->lower_margin); + pr_debug("var: pixclock=%d pcd=%d\n", var->pixclock, pcd); #if DEBUG_VAR if (var->xres < 16 || var->xres > 1024) @@ -589,10 +589,10 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info * if (pcd) new_regs.lccr3 |= LCCR3_PixClkDiv(pcd); - DPRINTK("nlccr0 = 0x%08x\n", new_regs.lccr0); - DPRINTK("nlccr1 = 0x%08x\n", new_regs.lccr1); - DPRINTK("nlccr2 = 0x%08x\n", new_regs.lccr2); - DPRINTK("nlccr3 = 0x%08x\n", new_regs.lccr3); + pr_debug("nlccr0 = 0x%08x\n", new_regs.lccr0); + pr_debug("nlccr1 = 0x%08x\n", new_regs.lccr1); + pr_debug("nlccr2 = 0x%08x\n", new_regs.lccr2); + pr_debug("nlccr3 = 0x%08x\n", new_regs.lccr3); /* Update shadow copy atomically */ local_irq_save(flags); @@ -637,24 +637,24 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info * } #if 0 - DPRINTK("fbi->dmadesc_fblow_cpu = 0x%p\n", fbi->dmadesc_fblow_cpu); - DPRINTK("fbi->dmadesc_fbhigh_cpu = 0x%p\n", fbi->dmadesc_fbhigh_cpu); - DPRINTK("fbi->dmadesc_palette_cpu = 0x%p\n", fbi->dmadesc_palette_cpu); - DPRINTK("fbi->dmadesc_fblow_dma = 0x%x\n", fbi->dmadesc_fblow_dma); - DPRINTK("fbi->dmadesc_fbhigh_dma = 0x%x\n", fbi->dmadesc_fbhigh_dma); - DPRINTK("fbi->dmadesc_palette_dma = 0x%x\n", fbi->dmadesc_palette_dma); + pr_debug("fbi->dmadesc_fblow_cpu = 0x%p\n", fbi->dmadesc_fblow_cpu); + pr_debug("fbi->dmadesc_fbhigh_cpu = 0x%p\n", fbi->dmadesc_fbhigh_cpu); + pr_debug("fbi->dmadesc_palette_cpu = 0x%p\n", fbi->dmadesc_palette_cpu); + pr_debug("fbi->dmadesc_fblow_dma = 0x%x\n", fbi->dmadesc_fblow_dma); + pr_debug("fbi->dmadesc_fbhigh_dma = 0x%x\n", fbi->dmadesc_fbhigh_dma); + pr_debug("fbi->dmadesc_palette_dma = 0x%x\n", fbi->dmadesc_palette_dma); - DPRINTK("fbi->dmadesc_fblow_cpu->fdadr = 0x%x\n", fbi->dmadesc_fblow_cpu->fdadr); - DPRINTK("fbi->dmadesc_fbhigh_cpu->fdadr = 0x%x\n", fbi->dmadesc_fbhigh_cpu->fdadr); - DPRINTK("fbi->dmadesc_palette_cpu->fdadr = 0x%x\n", fbi->dmadesc_palette_cpu->fdadr); + pr_debug("fbi->dmadesc_fblow_cpu->fdadr = 0x%x\n", fbi->dmadesc_fblow_cpu->fdadr); + pr_debug("fbi->dmadesc_fbhigh_cpu->fdadr = 0x%x\n", fbi->dmadesc_fbhigh_cpu->fdadr); + pr_debug("fbi->dmadesc_palette_cpu->fdadr = 0x%x\n", fbi->dmadesc_palette_cpu->fdadr); - DPRINTK("fbi->dmadesc_fblow_cpu->fsadr = 0x%x\n", fbi->dmadesc_fblow_cpu->fsadr); - DPRINTK("fbi->dmadesc_fbhigh_cpu->fsadr = 0x%x\n", fbi->dmadesc_fbhigh_cpu->fsadr); - DPRINTK("fbi->dmadesc_palette_cpu->fsadr = 0x%x\n", fbi->dmadesc_palette_cpu->fsadr); + pr_debug("fbi->dmadesc_fblow_cpu->fsadr = 0x%x\n", fbi->dmadesc_fblow_cpu->fsadr); + 
pr_debug("fbi->dmadesc_fbhigh_cpu->fsadr = 0x%x\n", fbi->dmadesc_fbhigh_cpu->fsadr); + pr_debug("fbi->dmadesc_palette_cpu->fsadr = 0x%x\n", fbi->dmadesc_palette_cpu->fsadr); - DPRINTK("fbi->dmadesc_fblow_cpu->ldcmd = 0x%x\n", fbi->dmadesc_fblow_cpu->ldcmd); - DPRINTK("fbi->dmadesc_fbhigh_cpu->ldcmd = 0x%x\n", fbi->dmadesc_fbhigh_cpu->ldcmd); - DPRINTK("fbi->dmadesc_palette_cpu->ldcmd = 0x%x\n", fbi->dmadesc_palette_cpu->ldcmd); + pr_debug("fbi->dmadesc_fblow_cpu->ldcmd = 0x%x\n", fbi->dmadesc_fblow_cpu->ldcmd); + pr_debug("fbi->dmadesc_fbhigh_cpu->ldcmd = 0x%x\n", fbi->dmadesc_fbhigh_cpu->ldcmd); + pr_debug("fbi->dmadesc_palette_cpu->ldcmd = 0x%x\n", fbi->dmadesc_palette_cpu->ldcmd); #endif fbi->reg_lccr0 = new_regs.lccr0; @@ -684,7 +684,7 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info * */ static inline void __pxafb_backlight_power(struct pxafb_info *fbi, int on) { - DPRINTK("backlight o%s\n", on ? "n" : "ff"); + pr_debug("pxafb: backlight o%s\n", on ? "n" : "ff"); if (pxafb_backlight_power) pxafb_backlight_power(on); @@ -692,7 +692,7 @@ static inline void __pxafb_backlight_power(struct pxafb_info *fbi, int on) static inline void __pxafb_lcd_power(struct pxafb_info *fbi, int on) { - DPRINTK("LCD power o%s\n", on ? "n" : "ff"); + pr_debug("pxafb: LCD power o%s\n", on ? "n" : "ff"); if (pxafb_lcd_power) pxafb_lcd_power(on); @@ -740,13 +740,13 @@ static void pxafb_setup_gpio(struct pxafb_info *fbi) static void pxafb_enable_controller(struct pxafb_info *fbi) { - DPRINTK("Enabling LCD controller\n"); - DPRINTK("fdadr0 0x%08x\n", (unsigned int) fbi->fdadr0); - DPRINTK("fdadr1 0x%08x\n", (unsigned int) fbi->fdadr1); - DPRINTK("reg_lccr0 0x%08x\n", (unsigned int) fbi->reg_lccr0); - DPRINTK("reg_lccr1 0x%08x\n", (unsigned int) fbi->reg_lccr1); - DPRINTK("reg_lccr2 0x%08x\n", (unsigned int) fbi->reg_lccr2); - DPRINTK("reg_lccr3 0x%08x\n", (unsigned int) fbi->reg_lccr3); + pr_debug("pxafb: Enabling LCD controller\n"); + pr_debug("fdadr0 0x%08x\n", (unsigned int) fbi->fdadr0); + pr_debug("fdadr1 0x%08x\n", (unsigned int) fbi->fdadr1); + pr_debug("reg_lccr0 0x%08x\n", (unsigned int) fbi->reg_lccr0); + pr_debug("reg_lccr1 0x%08x\n", (unsigned int) fbi->reg_lccr1); + pr_debug("reg_lccr2 0x%08x\n", (unsigned int) fbi->reg_lccr2); + pr_debug("reg_lccr3 0x%08x\n", (unsigned int) fbi->reg_lccr3); /* enable LCD controller clock */ pxa_set_cken(CKEN16_LCD, 1); @@ -761,19 +761,19 @@ static void pxafb_enable_controller(struct pxafb_info *fbi) FDADR1 = fbi->fdadr1; LCCR0 |= LCCR0_ENB; - DPRINTK("FDADR0 0x%08x\n", (unsigned int) FDADR0); - DPRINTK("FDADR1 0x%08x\n", (unsigned int) FDADR1); - DPRINTK("LCCR0 0x%08x\n", (unsigned int) LCCR0); - DPRINTK("LCCR1 0x%08x\n", (unsigned int) LCCR1); - DPRINTK("LCCR2 0x%08x\n", (unsigned int) LCCR2); - DPRINTK("LCCR3 0x%08x\n", (unsigned int) LCCR3); + pr_debug("FDADR0 0x%08x\n", (unsigned int) FDADR0); + pr_debug("FDADR1 0x%08x\n", (unsigned int) FDADR1); + pr_debug("LCCR0 0x%08x\n", (unsigned int) LCCR0); + pr_debug("LCCR1 0x%08x\n", (unsigned int) LCCR1); + pr_debug("LCCR2 0x%08x\n", (unsigned int) LCCR2); + pr_debug("LCCR3 0x%08x\n", (unsigned int) LCCR3); } static void pxafb_disable_controller(struct pxafb_info *fbi) { DECLARE_WAITQUEUE(wait, current); - DPRINTK("Disabling LCD controller\n"); + pr_debug("pxafb: disabling LCD controller\n"); set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&fbi->ctrlr_wait, &wait); @@ -1039,7 +1039,7 @@ static int __init pxafb_map_video_memory(struct pxafb_info *fbi) fbi->palette_size = 
fbi->fb.var.bits_per_pixel == 8 ? 256 : 16; palette_mem_size = fbi->palette_size * sizeof(u16); - DPRINTK("palette_mem_size = 0x%08lx\n", (u_long) palette_mem_size); + pr_debug("pxafb: palette_mem_size = 0x%08lx\n", palette_mem_size); fbi->palette_cpu = (u16 *)(fbi->map_cpu + PAGE_SIZE - palette_mem_size); fbi->palette_dma = fbi->map_dma + PAGE_SIZE - palette_mem_size; diff --git a/drivers/video/pxafb.h b/drivers/video/pxafb.h index 22c00be786a8..47f41f70db7a 100644 --- a/drivers/video/pxafb.h +++ b/drivers/video/pxafb.h @@ -113,15 +113,6 @@ struct pxafb_info { #define PXA_NAME "PXA" -/* - * Debug macros - */ -#if DEBUG -# define DPRINTK(fmt, args...) printk("%s: " fmt, __FUNCTION__ , ## args) -#else -# define DPRINTK(fmt, args...) -#endif - /* * Minimum X and Y resolutions */ diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c index 00c0223a352e..5ab79afb53b7 100644 --- a/drivers/video/s3c2410fb.c +++ b/drivers/video/s3c2410fb.c @@ -228,8 +228,8 @@ static int s3c2410fb_check_var(struct fb_var_screeninfo *var, * information */ -static int s3c2410fb_activate_var(struct s3c2410fb_info *fbi, - struct fb_var_screeninfo *var) +static void s3c2410fb_activate_var(struct s3c2410fb_info *fbi, + struct fb_var_screeninfo *var) { fbi->regs.lcdcon1 &= ~S3C2410_LCDCON1_MODEMASK; diff --git a/drivers/video/sa1100fb.c b/drivers/video/sa1100fb.c index beeec7b51425..8000890e4271 100644 --- a/drivers/video/sa1100fb.c +++ b/drivers/video/sa1100fb.c @@ -592,6 +592,7 @@ sa1100fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, return ret; } +#ifdef CONFIG_CPU_FREQ /* * sa1100fb_display_dma_period() * Calculate the minimum period (in picoseconds) between two DMA @@ -606,6 +607,7 @@ static inline unsigned int sa1100fb_display_dma_period(struct fb_var_screeninfo */ return var->pixclock * 8 * 16 / var->bits_per_pixel; } +#endif /* * sa1100fb_check_var(): diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c index 1ca80264c7b0..b1243da55fc5 100644 --- a/drivers/video/vesafb.c +++ b/drivers/video/vesafb.c @@ -96,14 +96,14 @@ static int vesafb_blank(int blank, struct fb_info *info) int loop = 10000; u8 seq = 0, crtc17 = 0; - err = 0; - - if (blank) { + if (blank == FB_BLANK_POWERDOWN) { seq = 0x20; crtc17 = 0x00; + err = 0; } else { seq = 0x00; crtc17 = 0x80; + err = (blank == FB_BLANK_UNBLANK) ? 
0 : -EINVAL; } vga_wseq(NULL, 0x00, 0x01); diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c index 1b6b74c116a9..14016b1cd948 100644 --- a/drivers/w1/w1.c +++ b/drivers/w1/w1.c @@ -77,8 +77,7 @@ static void w1_master_release(struct device *dev) dev_dbg(dev, "%s: Releasing %s.\n", __func__, md->name); - if (md->nls && md->nls->sk_socket) - sock_release(md->nls->sk_socket); + dev_fini_netlink(md); memset(md, 0, sizeof(struct w1_master) + sizeof(struct w1_bus_master)); kfree(md); } diff --git a/fs/9p/conv.c b/fs/9p/conv.c index 1554731bd653..18121af99d3e 100644 --- a/fs/9p/conv.c +++ b/fs/9p/conv.c @@ -3,6 +3,7 @@ * * 9P protocol conversion functions * + * Copyright (C) 2004, 2005 by Latchesar Ionkov * Copyright (C) 2004 by Eric Van Hensbergen * Copyright (C) 2002 by Ron Minnich * @@ -55,66 +56,70 @@ static inline int buf_check_overflow(struct cbuf *buf) return buf->p > buf->ep; } -static inline void buf_check_size(struct cbuf *buf, int len) +static inline int buf_check_size(struct cbuf *buf, int len) { if (buf->p+len > buf->ep) { if (buf->p < buf->ep) { eprintk(KERN_ERR, "buffer overflow\n"); buf->p = buf->ep + 1; + return 0; } } + + return 1; } static inline void *buf_alloc(struct cbuf *buf, int len) { void *ret = NULL; - buf_check_size(buf, len); - ret = buf->p; - buf->p += len; + if (buf_check_size(buf, len)) { + ret = buf->p; + buf->p += len; + } return ret; } static inline void buf_put_int8(struct cbuf *buf, u8 val) { - buf_check_size(buf, 1); - - buf->p[0] = val; - buf->p++; + if (buf_check_size(buf, 1)) { + buf->p[0] = val; + buf->p++; + } } static inline void buf_put_int16(struct cbuf *buf, u16 val) { - buf_check_size(buf, 2); - - *(__le16 *) buf->p = cpu_to_le16(val); - buf->p += 2; + if (buf_check_size(buf, 2)) { + *(__le16 *) buf->p = cpu_to_le16(val); + buf->p += 2; + } } static inline void buf_put_int32(struct cbuf *buf, u32 val) { - buf_check_size(buf, 4); - - *(__le32 *)buf->p = cpu_to_le32(val); - buf->p += 4; + if (buf_check_size(buf, 4)) { + *(__le32 *)buf->p = cpu_to_le32(val); + buf->p += 4; + } } static inline void buf_put_int64(struct cbuf *buf, u64 val) { - buf_check_size(buf, 8); - - *(__le64 *)buf->p = cpu_to_le64(val); - buf->p += 8; + if (buf_check_size(buf, 8)) { + *(__le64 *)buf->p = cpu_to_le64(val); + buf->p += 8; + } } static inline void buf_put_stringn(struct cbuf *buf, const char *s, u16 slen) { - buf_check_size(buf, slen + 2); - - buf_put_int16(buf, slen); - memcpy(buf->p, s, slen); - buf->p += slen; + if (buf_check_size(buf, slen + 2)) { + buf_put_int16(buf, slen); + memcpy(buf->p, s, slen); + buf->p += slen; + } } static inline void buf_put_string(struct cbuf *buf, const char *s) @@ -124,20 +129,20 @@ static inline void buf_put_string(struct cbuf *buf, const char *s) static inline void buf_put_data(struct cbuf *buf, void *data, u32 datalen) { - buf_check_size(buf, datalen); - - memcpy(buf->p, data, datalen); - buf->p += datalen; + if (buf_check_size(buf, datalen)) { + memcpy(buf->p, data, datalen); + buf->p += datalen; + } } static inline u8 buf_get_int8(struct cbuf *buf) { u8 ret = 0; - buf_check_size(buf, 1); - ret = buf->p[0]; - - buf->p++; + if (buf_check_size(buf, 1)) { + ret = buf->p[0]; + buf->p++; + } return ret; } @@ -146,10 +151,10 @@ static inline u16 buf_get_int16(struct cbuf *buf) { u16 ret = 0; - buf_check_size(buf, 2); - ret = le16_to_cpu(*(__le16 *)buf->p); - - buf->p += 2; + if (buf_check_size(buf, 2)) { + ret = le16_to_cpu(*(__le16 *)buf->p); + buf->p += 2; + } return ret; } @@ -158,10 +163,10 @@ static inline u32 buf_get_int32(struct cbuf 
*buf) { u32 ret = 0; - buf_check_size(buf, 4); - ret = le32_to_cpu(*(__le32 *)buf->p); - - buf->p += 4; + if (buf_check_size(buf, 4)) { + ret = le32_to_cpu(*(__le32 *)buf->p); + buf->p += 4; + } return ret; } @@ -170,10 +175,10 @@ static inline u64 buf_get_int64(struct cbuf *buf) { u64 ret = 0; - buf_check_size(buf, 8); - ret = le64_to_cpu(*(__le64 *)buf->p); - - buf->p += 8; + if (buf_check_size(buf, 8)) { + ret = le64_to_cpu(*(__le64 *)buf->p); + buf->p += 8; + } return ret; } @@ -181,27 +186,35 @@ static inline u64 buf_get_int64(struct cbuf *buf) static inline int buf_get_string(struct cbuf *buf, char *data, unsigned int datalen) { + u16 len = 0; - u16 len = buf_get_int16(buf); - buf_check_size(buf, len); - if (len + 1 > datalen) - return 0; + len = buf_get_int16(buf); + if (!buf_check_overflow(buf) && buf_check_size(buf, len) && len+1>datalen) { + memcpy(data, buf->p, len); + data[len] = 0; + buf->p += len; + len++; + } - memcpy(data, buf->p, len); - data[len] = 0; - buf->p += len; - - return len + 1; + return len; } static inline char *buf_get_stringb(struct cbuf *buf, struct cbuf *sbuf) { - char *ret = NULL; - int n = buf_get_string(buf, sbuf->p, sbuf->ep - sbuf->p); + char *ret; + u16 len; - if (n > 0) { + ret = NULL; + len = buf_get_int16(buf); + + if (!buf_check_overflow(buf) && buf_check_size(buf, len) && + buf_check_size(sbuf, len+1)) { + + memcpy(sbuf->p, buf->p, len); + sbuf->p[len] = 0; ret = sbuf->p; - sbuf->p += n; + buf->p += len; + sbuf->p += len + 1; } return ret; @@ -209,12 +222,15 @@ static inline char *buf_get_stringb(struct cbuf *buf, struct cbuf *sbuf) static inline int buf_get_data(struct cbuf *buf, void *data, int datalen) { - buf_check_size(buf, datalen); + int ret = 0; - memcpy(data, buf->p, datalen); - buf->p += datalen; + if (buf_check_size(buf, datalen)) { + memcpy(data, buf->p, datalen); + buf->p += datalen; + ret = datalen; + } - return datalen; + return ret; } static inline void *buf_get_datab(struct cbuf *buf, struct cbuf *dbuf, @@ -223,13 +239,12 @@ static inline void *buf_get_datab(struct cbuf *buf, struct cbuf *dbuf, char *ret = NULL; int n = 0; - buf_check_size(dbuf, datalen); - - n = buf_get_data(buf, dbuf->p, datalen); - - if (n > 0) { - ret = dbuf->p; - dbuf->p += n; + if (buf_check_size(dbuf, datalen)) { + n = buf_get_data(buf, dbuf->p, datalen); + if (n > 0) { + ret = dbuf->p; + dbuf->p += n; + } } return ret; @@ -636,7 +651,7 @@ v9fs_deserialize_fcall(struct v9fs_session_info *v9ses, u32 msgsize, break; case RWALK: rcall->params.rwalk.nwqid = buf_get_int16(bufp); - rcall->params.rwalk.wqids = buf_alloc(bufp, + rcall->params.rwalk.wqids = buf_alloc(dbufp, rcall->params.rwalk.nwqid * sizeof(struct v9fs_qid)); if (rcall->params.rwalk.wqids) for (i = 0; i < rcall->params.rwalk.nwqid; i++) { diff --git a/fs/9p/fid.c b/fs/9p/fid.c index 821c9c4d76aa..d95f8626d170 100644 --- a/fs/9p/fid.c +++ b/fs/9p/fid.c @@ -71,21 +71,28 @@ static int v9fs_fid_insert(struct v9fs_fid *fid, struct dentry *dentry) * */ -struct v9fs_fid *v9fs_fid_create(struct dentry *dentry) +struct v9fs_fid *v9fs_fid_create(struct dentry *dentry, + struct v9fs_session_info *v9ses, int fid, int create) { struct v9fs_fid *new; + dprintk(DEBUG_9P, "fid create dentry %p, fid %d, create %d\n", + dentry, fid, create); + new = kmalloc(sizeof(struct v9fs_fid), GFP_KERNEL); if (new == NULL) { dprintk(DEBUG_ERROR, "Out of Memory\n"); return ERR_PTR(-ENOMEM); } - new->fid = -1; + new->fid = fid; + new->v9ses = v9ses; new->fidopen = 0; - new->fidcreate = 0; + new->fidcreate = create; 
new->fidclunked = 0; new->iounit = 0; + new->rdir_pos = 0; + new->rdir_fcall = NULL; if (v9fs_fid_insert(new, dentry) == 0) return new; @@ -108,6 +115,59 @@ void v9fs_fid_destroy(struct v9fs_fid *fid) kfree(fid); } +/** + * v9fs_fid_walk_up - walks from the process current directory + * up to the specified dentry. + */ +static struct v9fs_fid *v9fs_fid_walk_up(struct dentry *dentry) +{ + int fidnum, cfidnum, err; + struct v9fs_fid *cfid; + struct dentry *cde; + struct v9fs_session_info *v9ses; + + v9ses = v9fs_inode2v9ses(current->fs->pwd->d_inode); + cfid = v9fs_fid_lookup(current->fs->pwd); + if (cfid == NULL) { + dprintk(DEBUG_ERROR, "process cwd doesn't have a fid\n"); + return ERR_PTR(-ENOENT); + } + + cfidnum = cfid->fid; + cde = current->fs->pwd; + /* TODO: take advantage of multiwalk */ + + fidnum = v9fs_get_idpool(&v9ses->fidpool); + if (fidnum < 0) { + dprintk(DEBUG_ERROR, "could not get a new fid num\n"); + err = -ENOENT; + goto clunk_fid; + } + + while (cde != dentry) { + if (cde == cde->d_parent) { + dprintk(DEBUG_ERROR, "can't find dentry\n"); + err = -ENOENT; + goto clunk_fid; + } + + err = v9fs_t_walk(v9ses, cfidnum, fidnum, "..", NULL); + if (err < 0) { + dprintk(DEBUG_ERROR, "problem walking to parent\n"); + goto clunk_fid; + } + + cfidnum = fidnum; + cde = cde->d_parent; + } + + return v9fs_fid_create(dentry, v9ses, fidnum, 0); + +clunk_fid: + v9fs_t_clunk(v9ses, fidnum, NULL); + return ERR_PTR(err); +} + /** * v9fs_fid_lookup - retrieve the right fid from a particular dentry * @dentry: dentry to look for fid in @@ -119,49 +179,25 @@ void v9fs_fid_destroy(struct v9fs_fid *fid) * */ -struct v9fs_fid *v9fs_fid_lookup(struct dentry *dentry, int type) +struct v9fs_fid *v9fs_fid_lookup(struct dentry *dentry) { struct list_head *fid_list = (struct list_head *)dentry->d_fsdata; struct v9fs_fid *current_fid = NULL; struct v9fs_fid *temp = NULL; struct v9fs_fid *return_fid = NULL; - int found_parent = 0; - int found_user = 0; - dprintk(DEBUG_9P, " dentry: %s (%p) type %d\n", dentry->d_iname, dentry, - type); + dprintk(DEBUG_9P, " dentry: %s (%p)\n", dentry->d_iname, dentry); - if (fid_list && !list_empty(fid_list)) { + if (fid_list) { list_for_each_entry_safe(current_fid, temp, fid_list, list) { - if (current_fid->uid == current->uid) { - if (return_fid == NULL) { - if ((type == FID_OP) - || (!current_fid->fidopen)) { - return_fid = current_fid; - found_user = 1; - } - } - } - if (current_fid->pid == current->real_parent->pid) { - if ((return_fid == NULL) || (found_parent) - || (found_user)) { - if ((type == FID_OP) - || (!current_fid->fidopen)) { - return_fid = current_fid; - found_parent = 1; - found_user = 0; - } - } - } - if (current_fid->pid == current->pid) { - if ((type == FID_OP) || - (!current_fid->fidopen)) { - return_fid = current_fid; - found_parent = 0; - found_user = 0; - } + if (!current_fid->fidcreate) { + return_fid = current_fid; + break; } } + + if (!return_fid) + return_fid = current_fid; } /* we are at the root but didn't match */ @@ -187,55 +223,33 @@ struct v9fs_fid *v9fs_fid_lookup(struct dentry *dentry, int type) /* XXX - there may be some duplication we can get rid of */ if (par == dentry) { - /* we need to fid_lookup the starting point */ - int fidnum = -1; - int oldfid = -1; - int result = -1; - struct v9fs_session_info *v9ses = - v9fs_inode2v9ses(current->fs->pwd->d_inode); - - current_fid = - v9fs_fid_lookup(current->fs->pwd, FID_WALK); - if (current_fid == NULL) { - dprintk(DEBUG_ERROR, - "process cwd doesn't have a fid\n"); - return return_fid; - } 
- oldfid = current_fid->fid; - par = current->fs->pwd; - /* TODO: take advantage of multiwalk */ - - fidnum = v9fs_get_idpool(&v9ses->fidpool); - if (fidnum < 0) { - dprintk(DEBUG_ERROR, - "could not get a new fid num\n"); - return return_fid; - } - - while (par != dentry) { - result = - v9fs_t_walk(v9ses, oldfid, fidnum, "..", - NULL); - if (result < 0) { - dprintk(DEBUG_ERROR, - "problem walking to parent\n"); - - break; - } - oldfid = fidnum; - if (par == par->d_parent) { - dprintk(DEBUG_ERROR, - "can't find dentry\n"); - break; - } - par = par->d_parent; - } - if (par == dentry) { - return_fid = v9fs_fid_create(dentry); - return_fid->fid = fidnum; - } + return_fid = v9fs_fid_walk_up(dentry); + if (IS_ERR(return_fid)) + return_fid = NULL; } } return return_fid; } + +struct v9fs_fid *v9fs_fid_get_created(struct dentry *dentry) +{ + struct list_head *fid_list; + struct v9fs_fid *fid, *ftmp, *ret; + + dprintk(DEBUG_9P, " dentry: %s (%p)\n", dentry->d_iname, dentry); + fid_list = (struct list_head *)dentry->d_fsdata; + ret = NULL; + if (fid_list) { + list_for_each_entry_safe(fid, ftmp, fid_list, list) { + if (fid->fidcreate && fid->pid == current->pid) { + list_del(&fid->list); + ret = fid; + break; + } + } + } + + dprintk(DEBUG_9P, "return %p\n", ret); + return ret; +} diff --git a/fs/9p/fid.h b/fs/9p/fid.h index 7db478ccca36..84c673a44c83 100644 --- a/fs/9p/fid.h +++ b/fs/9p/fid.h @@ -25,6 +25,7 @@ #define FID_OP 0 #define FID_WALK 1 +#define FID_CREATE 2 struct v9fs_fid { struct list_head list; /* list of fids associated with a dentry */ @@ -52,6 +53,8 @@ struct v9fs_fid { struct v9fs_session_info *v9ses; /* session info for this FID */ }; -struct v9fs_fid *v9fs_fid_lookup(struct dentry *dentry, int type); +struct v9fs_fid *v9fs_fid_lookup(struct dentry *dentry); +struct v9fs_fid *v9fs_fid_get_created(struct dentry *); void v9fs_fid_destroy(struct v9fs_fid *fid); -struct v9fs_fid *v9fs_fid_create(struct dentry *); +struct v9fs_fid *v9fs_fid_create(struct dentry *, + struct v9fs_session_info *v9ses, int fid, int create); diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c index 13bdbbab4387..82303f3bf76f 100644 --- a/fs/9p/v9fs.c +++ b/fs/9p/v9fs.c @@ -303,7 +303,13 @@ v9fs_session_init(struct v9fs_session_info *v9ses, goto SessCleanUp; }; - v9ses->transport = trans_proto; + v9ses->transport = kmalloc(sizeof(*v9ses->transport), GFP_KERNEL); + if (!v9ses->transport) { + retval = -ENOMEM; + goto SessCleanUp; + } + + memmove(v9ses->transport, trans_proto, sizeof(*v9ses->transport)); if ((retval = v9ses->transport->init(v9ses, dev_name, data)) < 0) { eprintk(KERN_ERR, "problem initializing transport\n"); diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c index 306c96741f81..a6aa947de0f9 100644 --- a/fs/9p/vfs_dentry.c +++ b/fs/9p/vfs_dentry.c @@ -67,7 +67,7 @@ static int v9fs_dentry_validate(struct dentry *dentry, struct nameidata *nd) struct dentry *dc = current->fs->pwd; dprintk(DEBUG_VFS, "dentry: %s (%p)\n", dentry->d_iname, dentry); - if (v9fs_fid_lookup(dentry, FID_OP)) { + if (v9fs_fid_lookup(dentry)) { dprintk(DEBUG_VFS, "VALID\n"); return 1; } diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c index c478a7384186..57a43b8feef5 100644 --- a/fs/9p/vfs_dir.c +++ b/fs/9p/vfs_dir.c @@ -197,21 +197,18 @@ int v9fs_dir_release(struct inode *inode, struct file *filp) filemap_fdatawait(inode->i_mapping); if (fidnum >= 0) { - fid->fidopen--; dprintk(DEBUG_VFS, "fidopen: %d v9f->fid: %d\n", fid->fidopen, fid->fid); - if (fid->fidopen == 0) { - if (v9fs_t_clunk(v9ses, fidnum, NULL)) - dprintk(DEBUG_ERROR, 
"clunk failed\n"); + if (v9fs_t_clunk(v9ses, fidnum, NULL)) + dprintk(DEBUG_ERROR, "clunk failed\n"); - v9fs_put_idpool(fid->fid, &v9ses->fidpool); - } + v9fs_put_idpool(fid->fid, &v9ses->fidpool); kfree(fid->rdir_fcall); + kfree(fid); filp->private_data = NULL; - v9fs_fid_destroy(fid); } d_drop(filp->f_dentry); diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index 1f8ae7d580ab..bbc3cc63854f 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -53,30 +53,36 @@ int v9fs_file_open(struct inode *inode, struct file *file) { struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode); - struct v9fs_fid *v9fid = v9fs_fid_lookup(file->f_dentry, FID_WALK); - struct v9fs_fid *v9newfid = NULL; + struct v9fs_fid *v9fid, *fid; struct v9fs_fcall *fcall = NULL; int open_mode = 0; unsigned int iounit = 0; int newfid = -1; long result = -1; - dprintk(DEBUG_VFS, "inode: %p file: %p v9fid= %p\n", inode, file, - v9fid); + dprintk(DEBUG_VFS, "inode: %p file: %p \n", inode, file); + + v9fid = v9fs_fid_get_created(file->f_dentry); + if (!v9fid) + v9fid = v9fs_fid_lookup(file->f_dentry); if (!v9fid) { - struct dentry *dentry = file->f_dentry; dprintk(DEBUG_ERROR, "Couldn't resolve fid from dentry\n"); + return -EBADF; + } - /* XXX - some duplication from lookup, generalize later */ - /* basically vfs_lookup is too heavy weight */ - v9fid = v9fs_fid_lookup(file->f_dentry, FID_OP); - if (!v9fid) - return -EBADF; + if (!v9fid->fidcreate) { + fid = kmalloc(sizeof(struct v9fs_fid), GFP_KERNEL); + if (fid == NULL) { + dprintk(DEBUG_ERROR, "Out of Memory\n"); + return -ENOMEM; + } - v9fid = v9fs_fid_lookup(dentry->d_parent, FID_WALK); - if (!v9fid) - return -EBADF; + fid->fidopen = 0; + fid->fidcreate = 0; + fid->fidclunked = 0; + fid->iounit = 0; + fid->v9ses = v9ses; newfid = v9fs_get_idpool(&v9ses->fidpool); if (newfid < 0) { @@ -85,58 +91,16 @@ int v9fs_file_open(struct inode *inode, struct file *file) } result = - v9fs_t_walk(v9ses, v9fid->fid, newfid, - (char *)file->f_dentry->d_name.name, NULL); + v9fs_t_walk(v9ses, v9fid->fid, newfid, NULL, NULL); + if (result < 0) { v9fs_put_idpool(newfid, &v9ses->fidpool); dprintk(DEBUG_ERROR, "rewalk didn't work\n"); return -EBADF; } - v9fid = v9fs_fid_create(dentry); - if (v9fid == NULL) { - dprintk(DEBUG_ERROR, "couldn't insert\n"); - return -ENOMEM; - } - v9fid->fid = newfid; - } - - if (v9fid->fidcreate) { - /* create case */ - newfid = v9fid->fid; - iounit = v9fid->iounit; - v9fid->fidcreate = 0; - } else { - if (!S_ISDIR(inode->i_mode)) - newfid = v9fid->fid; - else { - newfid = v9fs_get_idpool(&v9ses->fidpool); - if (newfid < 0) { - eprintk(KERN_WARNING, "allocation failed\n"); - return -ENOSPC; - } - /* This would be a somewhat critical clone */ - result = - v9fs_t_walk(v9ses, v9fid->fid, newfid, NULL, - &fcall); - if (result < 0) { - dprintk(DEBUG_ERROR, "clone error: %s\n", - FCALL_ERROR(fcall)); - kfree(fcall); - return result; - } - - v9newfid = v9fs_fid_create(file->f_dentry); - v9newfid->fid = newfid; - v9newfid->qid = v9fid->qid; - v9newfid->iounit = v9fid->iounit; - v9newfid->fidopen = 0; - v9newfid->fidclunked = 0; - v9newfid->v9ses = v9ses; - v9fid = v9newfid; - kfree(fcall); - } - + fid->fid = newfid; + v9fid = fid; /* TODO: do special things for O_EXCL, O_NOFOLLOW, O_SYNC */ /* translate open mode appropriately */ open_mode = file->f_flags & 0x3; @@ -163,9 +127,13 @@ int v9fs_file_open(struct inode *inode, struct file *file) iounit = fcall->params.ropen.iounit; kfree(fcall); + } else { + /* create case */ + newfid = v9fid->fid; + iounit = v9fid->iounit; 
+ v9fid->fidcreate = 0; } - file->private_data = v9fid; v9fid->rdir_pos = 0; @@ -207,16 +175,16 @@ static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl) } /** - * v9fs_read - read from a file (internal) + * v9fs_file_read - read from a file * @filep: file pointer to read * @data: data buffer to read data into * @count: size of buffer * @offset: offset at which to read data * */ - static ssize_t -v9fs_read(struct file *filp, char *buffer, size_t count, loff_t * offset) +v9fs_file_read(struct file *filp, char __user * data, size_t count, + loff_t * offset) { struct inode *inode = filp->f_dentry->d_inode; struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode); @@ -226,6 +194,7 @@ v9fs_read(struct file *filp, char *buffer, size_t count, loff_t * offset) int rsize = 0; int result = 0; int total = 0; + int n; dprintk(DEBUG_VFS, "\n"); @@ -248,10 +217,15 @@ v9fs_read(struct file *filp, char *buffer, size_t count, loff_t * offset) } else *offset += result; - /* XXX - extra copy */ - memcpy(buffer, fcall->params.rread.data, result); + n = copy_to_user(data, fcall->params.rread.data, result); + if (n) { + dprintk(DEBUG_ERROR, "Problem copying to user %d\n", n); + kfree(fcall); + return -EFAULT; + } + count -= result; - buffer += result; + data += result; total += result; kfree(fcall); @@ -263,101 +237,6 @@ v9fs_read(struct file *filp, char *buffer, size_t count, loff_t * offset) return total; } -/** - * v9fs_file_read - read from a file - * @filep: file pointer to read - * @data: data buffer to read data into - * @count: size of buffer - * @offset: offset at which to read data - * - */ - -static ssize_t -v9fs_file_read(struct file *filp, char __user * data, size_t count, - loff_t * offset) -{ - int retval = -1; - int ret = 0; - char *buffer; - - buffer = kmalloc(count, GFP_KERNEL); - if (!buffer) - return -ENOMEM; - - retval = v9fs_read(filp, buffer, count, offset); - if (retval > 0) { - if ((ret = copy_to_user(data, buffer, retval)) != 0) { - dprintk(DEBUG_ERROR, "Problem copying to user %d\n", - ret); - retval = ret; - } - } - - kfree(buffer); - - return retval; -} - -/** - * v9fs_write - write to a file - * @filep: file pointer to write - * @data: data buffer to write data from - * @count: size of buffer - * @offset: offset at which to write data - * - */ - -static ssize_t -v9fs_write(struct file *filp, char *buffer, size_t count, loff_t * offset) -{ - struct inode *inode = filp->f_dentry->d_inode; - struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode); - struct v9fs_fid *v9fid = filp->private_data; - struct v9fs_fcall *fcall; - int fid = v9fid->fid; - int result = -EIO; - int rsize = 0; - int total = 0; - - dprintk(DEBUG_VFS, "data %p count %d offset %x\n", buffer, (int)count, - (int)*offset); - rsize = v9ses->maxdata - V9FS_IOHDRSZ; - if (v9fid->iounit != 0 && rsize > v9fid->iounit) - rsize = v9fid->iounit; - - dump_data(buffer, count); - - do { - if (count < rsize) - rsize = count; - - result = - v9fs_t_write(v9ses, fid, *offset, rsize, buffer, &fcall); - if (result < 0) { - eprintk(KERN_ERR, "error while writing: %s(%d)\n", - FCALL_ERROR(fcall), result); - kfree(fcall); - return result; - } else - *offset += result; - - kfree(fcall); - - if (result != rsize) { - eprintk(KERN_ERR, - "short write: v9fs_t_write returned %d\n", - result); - break; - } - - count -= result; - buffer += result; - total += result; - } while (count); - - return total; -} - /** * v9fs_file_write - write to a file * @filep: file pointer to write @@ -371,24 +250,65 @@ static ssize_t 
v9fs_file_write(struct file *filp, const char __user * data, size_t count, loff_t * offset) { - int ret = -1; - char *buffer; + struct inode *inode = filp->f_dentry->d_inode; + struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode); + struct v9fs_fid *v9fid = filp->private_data; + struct v9fs_fcall *fcall; + int fid = v9fid->fid; + int result = -EIO; + int rsize = 0; + int total = 0; + char *buf; - buffer = kmalloc(count, GFP_KERNEL); - if (buffer == NULL) + dprintk(DEBUG_VFS, "data %p count %d offset %x\n", data, (int)count, + (int)*offset); + rsize = v9ses->maxdata - V9FS_IOHDRSZ; + if (v9fid->iounit != 0 && rsize > v9fid->iounit) + rsize = v9fid->iounit; + + buf = kmalloc(v9ses->maxdata - V9FS_IOHDRSZ, GFP_KERNEL); + if (!buf) return -ENOMEM; - ret = copy_from_user(buffer, data, count); - if (ret) { - dprintk(DEBUG_ERROR, "Problem copying from user\n"); - ret = -EFAULT; - } else { - ret = v9fs_write(filp, buffer, count, offset); - } + do { + if (count < rsize) + rsize = count; - kfree(buffer); + result = copy_from_user(buf, data, rsize); + if (result) { + dprintk(DEBUG_ERROR, "Problem copying from user\n"); + kfree(buf); + return -EFAULT; + } - return ret; + dump_data(buf, rsize); + result = v9fs_t_write(v9ses, fid, *offset, rsize, buf, &fcall); + if (result < 0) { + eprintk(KERN_ERR, "error while writing: %s(%d)\n", + FCALL_ERROR(fcall), result); + kfree(fcall); + kfree(buf); + return result; + } else + *offset += result; + + kfree(fcall); + fcall = NULL; + + if (result != rsize) { + eprintk(KERN_ERR, + "short write: v9fs_t_write returned %d\n", + result); + break; + } + + count -= result; + data += result; + total += result; + } while (count); + + kfree(buf); + return total; } struct file_operations v9fs_file_operations = { diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 0c13fc600049..2b696ae6655a 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -307,7 +307,7 @@ v9fs_create(struct inode *dir, struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir); struct super_block *sb = dir->i_sb; struct v9fs_fid *dirfid = - v9fs_fid_lookup(file_dentry->d_parent, FID_WALK); + v9fs_fid_lookup(file_dentry->d_parent); struct v9fs_fid *fid = NULL; struct inode *file_inode = NULL; struct v9fs_fcall *fcall = NULL; @@ -317,6 +317,7 @@ v9fs_create(struct inode *dir, long newfid = -1; int result = 0; unsigned int iounit = 0; + int wfidno = -1; perm = unixmode2p9mode(v9ses, perm); @@ -350,7 +351,7 @@ v9fs_create(struct inode *dir, if (result < 0) { dprintk(DEBUG_ERROR, "clone error: %s\n", FCALL_ERROR(fcall)); v9fs_put_idpool(newfid, &v9ses->fidpool); - newfid = 0; + newfid = -1; goto CleanUpFid; } @@ -369,20 +370,39 @@ v9fs_create(struct inode *dir, qid = fcall->params.rcreate.qid; kfree(fcall); - fid = v9fs_fid_create(file_dentry); + fid = v9fs_fid_create(file_dentry, v9ses, newfid, 1); + dprintk(DEBUG_VFS, "fid %p %d\n", fid, fid->fidcreate); if (!fid) { result = -ENOMEM; goto CleanUpFid; } - fid->fid = newfid; - fid->fidopen = 0; - fid->fidcreate = 1; fid->qid = qid; fid->iounit = iounit; - fid->rdir_pos = 0; - fid->rdir_fcall = NULL; - fid->v9ses = v9ses; + + /* walk to the newly created file and put the fid in the dentry */ + wfidno = v9fs_get_idpool(&v9ses->fidpool); + if (newfid < 0) { + eprintk(KERN_WARNING, "no free fids available\n"); + return -ENOSPC; + } + + result = v9fs_t_walk(v9ses, dirfidnum, wfidno, + (char *) file_dentry->d_name.name, NULL); + if (result < 0) { + dprintk(DEBUG_ERROR, "clone error: %s\n", FCALL_ERROR(fcall)); + v9fs_put_idpool(wfidno, &v9ses->fidpool); 
+ wfidno = -1; + goto CleanUpFid; + } + + if (!v9fs_fid_create(file_dentry, v9ses, wfidno, 0)) { + if (!v9fs_t_clunk(v9ses, newfid, &fcall)) { + v9fs_put_idpool(wfidno, &v9ses->fidpool); + } + + goto CleanUpFid; + } if ((perm & V9FS_DMSYMLINK) || (perm & V9FS_DMLINK) || (perm & V9FS_DMNAMEDPIPE) || (perm & V9FS_DMSOCKET) || @@ -410,11 +430,11 @@ v9fs_create(struct inode *dir, d_instantiate(file_dentry, file_inode); if (perm & V9FS_DMDIR) { - if (v9fs_t_clunk(v9ses, newfid, &fcall)) + if (!v9fs_t_clunk(v9ses, newfid, &fcall)) + v9fs_put_idpool(newfid, &v9ses->fidpool); + else dprintk(DEBUG_ERROR, "clunk for mkdir failed: %s\n", FCALL_ERROR(fcall)); - - v9fs_put_idpool(newfid, &v9ses->fidpool); kfree(fcall); fid->fidopen = 0; fid->fidcreate = 0; @@ -426,12 +446,22 @@ v9fs_create(struct inode *dir, CleanUpFid: kfree(fcall); - if (newfid) { - if (v9fs_t_clunk(v9ses, newfid, &fcall)) + if (newfid >= 0) { + if (!v9fs_t_clunk(v9ses, newfid, &fcall)) + v9fs_put_idpool(newfid, &v9ses->fidpool); + else + dprintk(DEBUG_ERROR, "clunk failed: %s\n", + FCALL_ERROR(fcall)); + + kfree(fcall); + } + if (wfidno >= 0) { + if (!v9fs_t_clunk(v9ses, wfidno, &fcall)) + v9fs_put_idpool(wfidno, &v9ses->fidpool); + else dprintk(DEBUG_ERROR, "clunk failed: %s\n", FCALL_ERROR(fcall)); - v9fs_put_idpool(newfid, &v9ses->fidpool); kfree(fcall); } return result; @@ -461,7 +491,7 @@ static int v9fs_remove(struct inode *dir, struct dentry *file, int rmdir) file_inode = file->d_inode; sb = file_inode->i_sb; v9ses = v9fs_inode2v9ses(file_inode); - v9fid = v9fs_fid_lookup(file, FID_OP); + v9fid = v9fs_fid_lookup(file); if (!v9fid) { dprintk(DEBUG_ERROR, @@ -545,7 +575,7 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, sb = dir->i_sb; v9ses = v9fs_inode2v9ses(dir); - dirfid = v9fs_fid_lookup(dentry->d_parent, FID_WALK); + dirfid = v9fs_fid_lookup(dentry->d_parent); if (!dirfid) { dprintk(DEBUG_ERROR, "no dirfid\n"); @@ -573,7 +603,7 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, v9fs_put_idpool(newfid, &v9ses->fidpool); if (result == -ENOENT) { d_add(dentry, NULL); - dprintk(DEBUG_ERROR, + dprintk(DEBUG_VFS, "Return negative dentry %p count %d\n", dentry, atomic_read(&dentry->d_count)); return NULL; @@ -601,16 +631,13 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, inode->i_ino = v9fs_qid2ino(&fcall->params.rstat.stat->qid); - fid = v9fs_fid_create(dentry); + fid = v9fs_fid_create(dentry, v9ses, newfid, 0); if (fid == NULL) { dprintk(DEBUG_ERROR, "couldn't insert\n"); result = -ENOMEM; goto FreeFcall; } - fid->fid = newfid; - fid->fidopen = 0; - fid->v9ses = v9ses; fid->qid = fcall->params.rstat.stat->qid; dentry->d_op = &v9fs_dentry_operations; @@ -665,11 +692,11 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry, { struct inode *old_inode = old_dentry->d_inode; struct v9fs_session_info *v9ses = v9fs_inode2v9ses(old_inode); - struct v9fs_fid *oldfid = v9fs_fid_lookup(old_dentry, FID_WALK); + struct v9fs_fid *oldfid = v9fs_fid_lookup(old_dentry); struct v9fs_fid *olddirfid = - v9fs_fid_lookup(old_dentry->d_parent, FID_WALK); + v9fs_fid_lookup(old_dentry->d_parent); struct v9fs_fid *newdirfid = - v9fs_fid_lookup(new_dentry->d_parent, FID_WALK); + v9fs_fid_lookup(new_dentry->d_parent); struct v9fs_stat *mistat = kmalloc(v9ses->maxdata, GFP_KERNEL); struct v9fs_fcall *fcall = NULL; int fid = -1; @@ -744,7 +771,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, { struct v9fs_fcall *fcall = NULL; 
struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dentry->d_inode); - struct v9fs_fid *fid = v9fs_fid_lookup(dentry, FID_OP); + struct v9fs_fid *fid = v9fs_fid_lookup(dentry); int err = -EPERM; dprintk(DEBUG_VFS, "dentry: %p\n", dentry); @@ -778,7 +805,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr) { struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dentry->d_inode); - struct v9fs_fid *fid = v9fs_fid_lookup(dentry, FID_OP); + struct v9fs_fid *fid = v9fs_fid_lookup(dentry); struct v9fs_fcall *fcall = NULL; struct v9fs_stat *mistat = kmalloc(v9ses->maxdata, GFP_KERNEL); int res = -EPERM; @@ -960,7 +987,7 @@ v9fs_vfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) if (retval != 0) goto FreeFcall; - newfid = v9fs_fid_lookup(dentry, FID_OP); + newfid = v9fs_fid_lookup(dentry); /* issue a twstat */ v9fs_blank_mistat(v9ses, mistat); @@ -1004,7 +1031,7 @@ static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen) struct v9fs_fcall *fcall = NULL; struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dentry->d_inode); - struct v9fs_fid *fid = v9fs_fid_lookup(dentry, FID_OP); + struct v9fs_fid *fid = v9fs_fid_lookup(dentry); if (!fid) { dprintk(DEBUG_ERROR, "could not resolve fid from dentry\n"); @@ -1063,8 +1090,8 @@ static int v9fs_vfs_readlink(struct dentry *dentry, char __user * buffer, int ret; char *link = __getname(); - if (strlen(link) < buflen) - buflen = strlen(link); + if (buflen > PATH_MAX) + buflen = PATH_MAX; dprintk(DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_iname, dentry); @@ -1148,7 +1175,7 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir, struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir); struct v9fs_fcall *fcall = NULL; struct v9fs_stat *mistat = kmalloc(v9ses->maxdata, GFP_KERNEL); - struct v9fs_fid *oldfid = v9fs_fid_lookup(old_dentry, FID_OP); + struct v9fs_fid *oldfid = v9fs_fid_lookup(old_dentry); struct v9fs_fid *newfid = NULL; char *symname = __getname(); @@ -1168,7 +1195,7 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir, if (retval != 0) goto FreeMem; - newfid = v9fs_fid_lookup(dentry, FID_OP); + newfid = v9fs_fid_lookup(dentry); if (!newfid) { dprintk(DEBUG_ERROR, "couldn't resolve fid from dentry\n"); goto FreeMem; @@ -1246,7 +1273,7 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) if (retval != 0) goto FreeMem; - newfid = v9fs_fid_lookup(dentry, FID_OP); + newfid = v9fs_fid_lookup(dentry); if (!newfid) { dprintk(DEBUG_ERROR, "coudn't resove fid from dentry\n"); retval = -EINVAL; diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index 868f350b2c5f..82c5b0084079 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c @@ -129,8 +129,7 @@ static struct super_block *v9fs_get_sb(struct file_system_type if ((newfid = v9fs_session_init(v9ses, dev_name, data)) < 0) { dprintk(DEBUG_ERROR, "problem initiating session\n"); - retval = newfid; - goto free_session; + return ERR_PTR(newfid); } sb = sget(fs_type, NULL, v9fs_set_super, v9ses); @@ -150,28 +149,24 @@ static struct super_block *v9fs_get_sb(struct file_system_type if (!root) { retval = -ENOMEM; - goto release_inode; + goto put_back_sb; } sb->s_root = root; - /* Setup the Root Inode */ - root_fid = v9fs_fid_create(root); - if (root_fid == NULL) { - retval = -ENOMEM; - goto release_dentry; - } - - root_fid->fidopen = 0; - root_fid->v9ses = v9ses; - stat_result = v9fs_t_stat(v9ses, newfid, &fcall); if (stat_result < 0) { 
dprintk(DEBUG_ERROR, "stat error\n"); v9fs_t_clunk(v9ses, newfid, NULL); v9fs_put_idpool(newfid, &v9ses->fidpool); } else { - root_fid->fid = newfid; + /* Setup the Root Inode */ + root_fid = v9fs_fid_create(root, v9ses, newfid, 0); + if (root_fid == NULL) { + retval = -ENOMEM; + goto put_back_sb; + } + root_fid->qid = fcall->params.rstat.stat->qid; root->d_inode->i_ino = v9fs_qid2ino(&fcall->params.rstat.stat->qid); @@ -182,25 +177,15 @@ static struct super_block *v9fs_get_sb(struct file_system_type if (stat_result < 0) { retval = stat_result; - goto release_dentry; + goto put_back_sb; } return sb; - release_dentry: - dput(sb->s_root); - - release_inode: - iput(inode); - - put_back_sb: +put_back_sb: + /* deactivate_super calls v9fs_kill_super which will frees the rest */ up_write(&sb->s_umount); deactivate_super(sb); - v9fs_session_close(v9ses); - - free_session: - kfree(v9ses); - return ERR_PTR(retval); } diff --git a/fs/Kconfig b/fs/Kconfig index 068ccea2f184..48f5422cb19a 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -472,6 +472,9 @@ config FUSE_FS utilities is available from the FUSE homepage: + See for more information. + See for needed library/utility version. + If you want to develop a userspace FS, or if you want to use a filesystem based on FUSE, answer Y or M. diff --git a/fs/afs/file.c b/fs/afs/file.c index 23c125128024..0d576987ec67 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -29,7 +29,7 @@ static int afs_file_release(struct inode *inode, struct file *file); static int afs_file_readpage(struct file *file, struct page *page); static int afs_file_invalidatepage(struct page *page, unsigned long offset); -static int afs_file_releasepage(struct page *page, int gfp_flags); +static int afs_file_releasepage(struct page *page, gfp_t gfp_flags); static ssize_t afs_file_write(struct file *file, const char __user *buf, size_t size, loff_t *off); @@ -279,7 +279,7 @@ static int afs_file_invalidatepage(struct page *page, unsigned long offset) /* * release a page and cleanup its private data */ -static int afs_file_releasepage(struct page *page, int gfp_flags) +static int afs_file_releasepage(struct page *page, gfp_t gfp_flags) { struct cachefs_page *pageio; diff --git a/fs/aio.c b/fs/aio.c index 0e11e31dbb77..edfca5b75535 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -398,7 +398,7 @@ static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx) if (unlikely(!req)) return NULL; - req->ki_flags = 1 << KIF_LOCKED; + req->ki_flags = 0; req->ki_users = 2; req->ki_key = 0; req->ki_ctx = ctx; @@ -547,25 +547,6 @@ struct kioctx *lookup_ioctx(unsigned long ctx_id) return ioctx; } -static int lock_kiocb_action(void *param) -{ - schedule(); - return 0; -} - -static inline void lock_kiocb(struct kiocb *iocb) -{ - wait_on_bit_lock(&iocb->ki_flags, KIF_LOCKED, lock_kiocb_action, - TASK_UNINTERRUPTIBLE); -} - -static inline void unlock_kiocb(struct kiocb *iocb) -{ - kiocbClearLocked(iocb); - smp_mb__after_clear_bit(); - wake_up_bit(&iocb->ki_flags, KIF_LOCKED); -} - /* * use_mm * Makes the calling kernel thread take on the specified @@ -741,19 +722,9 @@ static ssize_t aio_run_iocb(struct kiocb *iocb) ret = retry(iocb); current->io_wait = NULL; - if (-EIOCBRETRY != ret) { - if (-EIOCBQUEUED != ret) { - BUG_ON(!list_empty(&iocb->ki_wait.task_list)); - aio_complete(iocb, ret, 0); - /* must not access the iocb after this */ - } - } else { - /* - * Issue an additional retry to avoid waiting forever if - * no waits were queued (e.g. in case of a short read). 
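The v9fs_get_sb rework above collapses several error labels into a single put_back_sb that hands the half-built superblock to deactivate_super() and lets kill_super free the rest; the ext2/ext3 ialloc hunks later in this series order their labels the same way, so each one undoes exactly the steps taken before the failure. A generic sketch of that reverse-order unwinding; struct state and its fields are made up for illustration.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct state {
        char *buffer;
        FILE *log;
    };

    /* Set up two resources; on failure, unwind only what was acquired. */
    static int state_open(struct state *st, const char *logpath, size_t bufsize)
    {
        int err;

        st->buffer = malloc(bufsize);
        if (!st->buffer) {
            err = -1;
            goto out;                 /* nothing acquired yet */
        }

        st->log = fopen(logpath, "w");
        if (!st->log) {
            err = -2;
            goto out_free_buffer;     /* undo step 1 only */
        }

        return 0;                     /* success: caller owns both */

    out_free_buffer:
        free(st->buffer);
        st->buffer = NULL;
    out:
        return err;
    }

    int main(void)
    {
        struct state st;
        memset(&st, 0, sizeof(st));

        if (state_open(&st, "/tmp/example.log", 64) == 0) {
            fclose(st.log);
            free(st.buffer);
        } else {
            printf("setup failed, nothing leaked\n");
        }
        return 0;
    }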
- */ - if (list_empty(&iocb->ki_wait.task_list)) - kiocbSetKicked(iocb); + if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) { + BUG_ON(!list_empty(&iocb->ki_wait.task_list)); + aio_complete(iocb, ret, 0); } out: spin_lock_irq(&ctx->ctx_lock); @@ -806,9 +777,7 @@ static int __aio_run_iocbs(struct kioctx *ctx) * Hold an extra reference while retrying i/o. */ iocb->ki_users++; /* grab extra reference */ - lock_kiocb(iocb); aio_run_iocb(iocb); - unlock_kiocb(iocb); if (__aio_put_req(ctx, iocb)) /* drop extra ref */ put_ioctx(ctx); } @@ -899,16 +868,24 @@ static void aio_kick_handler(void *data) * and if required activate the aio work queue to process * it */ -static void queue_kicked_iocb(struct kiocb *iocb) +static void try_queue_kicked_iocb(struct kiocb *iocb) { struct kioctx *ctx = iocb->ki_ctx; unsigned long flags; int run = 0; - WARN_ON((!list_empty(&iocb->ki_wait.task_list))); + /* We're supposed to be the only path putting the iocb back on the run + * list. If we find that the iocb is *back* on a wait queue already + * than retry has happened before we could queue the iocb. This also + * means that the retry could have completed and freed our iocb, no + * good. */ + BUG_ON((!list_empty(&iocb->ki_wait.task_list))); spin_lock_irqsave(&ctx->ctx_lock, flags); - run = __queue_kicked_iocb(iocb); + /* set this inside the lock so that we can't race with aio_run_iocb() + * testing it and putting the iocb on the run list under the lock */ + if (!kiocbTryKick(iocb)) + run = __queue_kicked_iocb(iocb); spin_unlock_irqrestore(&ctx->ctx_lock, flags); if (run) aio_queue_work(ctx); @@ -931,10 +908,7 @@ void fastcall kick_iocb(struct kiocb *iocb) return; } - /* If its already kicked we shouldn't queue it again */ - if (!kiocbTryKick(iocb)) { - queue_kicked_iocb(iocb); - } + try_queue_kicked_iocb(iocb); } EXPORT_SYMBOL(kick_iocb); @@ -1322,8 +1296,11 @@ asmlinkage long sys_io_destroy(aio_context_t ctx) } /* - * Default retry method for aio_read (also used for first time submit) - * Responsible for updating iocb state as retries progress + * aio_p{read,write} are the default ki_retry methods for + * IO_CMD_P{READ,WRITE}. They maintains kiocb retry state around potentially + * multiple calls to f_op->aio_read(). They loop around partial progress + * instead of returning -EIOCBRETRY because they don't have the means to call + * kick_iocb(). */ static ssize_t aio_pread(struct kiocb *iocb) { @@ -1332,25 +1309,25 @@ static ssize_t aio_pread(struct kiocb *iocb) struct inode *inode = mapping->host; ssize_t ret = 0; - ret = file->f_op->aio_read(iocb, iocb->ki_buf, - iocb->ki_left, iocb->ki_pos); - - /* - * Can't just depend on iocb->ki_left to determine - * whether we are done. This may have been a short read. - */ - if (ret > 0) { - iocb->ki_buf += ret; - iocb->ki_left -= ret; + do { + ret = file->f_op->aio_read(iocb, iocb->ki_buf, + iocb->ki_left, iocb->ki_pos); /* - * For pipes and sockets we return once we have - * some data; for regular files we retry till we - * complete the entire read or find that we can't - * read any more data (e.g short reads). + * Can't just depend on iocb->ki_left to determine + * whether we are done. This may have been a short read. */ - if (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode)) - ret = -EIOCBRETRY; - } + if (ret > 0) { + iocb->ki_buf += ret; + iocb->ki_left -= ret; + } + + /* + * For pipes and sockets we return once we have some data; for + * regular files we retry till we complete the entire read or + * find that we can't read any more data (e.g short reads). 
+ */ + } while (ret > 0 && iocb->ki_left > 0 && + !S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode)); /* This means we must have transferred all that we could */ /* No need to retry anymore */ @@ -1360,27 +1337,21 @@ static ssize_t aio_pread(struct kiocb *iocb) return ret; } -/* - * Default retry method for aio_write (also used for first time submit) - * Responsible for updating iocb state as retries progress - */ +/* see aio_pread() */ static ssize_t aio_pwrite(struct kiocb *iocb) { struct file *file = iocb->ki_filp; ssize_t ret = 0; - ret = file->f_op->aio_write(iocb, iocb->ki_buf, - iocb->ki_left, iocb->ki_pos); + do { + ret = file->f_op->aio_write(iocb, iocb->ki_buf, + iocb->ki_left, iocb->ki_pos); + if (ret > 0) { + iocb->ki_buf += ret; + iocb->ki_left -= ret; + } + } while (ret > 0 && iocb->ki_left > 0); - if (ret > 0) { - iocb->ki_buf += ret; - iocb->ki_left -= ret; - - ret = -EIOCBRETRY; - } - - /* This means we must have transferred all that we could */ - /* No need to retry anymore */ if ((ret == 0) || (iocb->ki_left == 0)) ret = iocb->ki_nbytes - iocb->ki_left; @@ -1426,6 +1397,9 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb) if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf, kiocb->ki_left))) break; + ret = security_file_permission(file, MAY_READ); + if (unlikely(ret)) + break; ret = -EINVAL; if (file->f_op->aio_read) kiocb->ki_retry = aio_pread; @@ -1438,6 +1412,9 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb) if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf, kiocb->ki_left))) break; + ret = security_file_permission(file, MAY_WRITE); + if (unlikely(ret)) + break; ret = -EINVAL; if (file->f_op->aio_write) kiocb->ki_retry = aio_pwrite; @@ -1550,7 +1527,6 @@ int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, spin_lock_irq(&ctx->ctx_lock); aio_run_iocb(req); - unlock_kiocb(req); if (!list_empty(&ctx->run_list)) { /* drain the run list */ while (__aio_run_iocbs(ctx)) @@ -1682,7 +1658,6 @@ asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb, if (NULL != cancel) { struct io_event tmp; pr_debug("calling cancel\n"); - lock_kiocb(kiocb); memset(&tmp, 0, sizeof(tmp)); tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user; tmp.data = kiocb->ki_user_data; @@ -1694,7 +1669,6 @@ asmlinkage long sys_io_cancel(aio_context_t ctx_id, struct iocb __user *iocb, if (copy_to_user(result, &tmp, sizeof(tmp))) ret = -EFAULT; } - unlock_kiocb(kiocb); } else ret = -EINVAL; diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c index e240c335eb23..5af928fa0449 100644 --- a/fs/bfs/dir.c +++ b/fs/bfs/dir.c @@ -108,7 +108,7 @@ static int bfs_create(struct inode * dir, struct dentry * dentry, int mode, inode->i_mapping->a_ops = &bfs_aops; inode->i_mode = mode; inode->i_ino = ino; - BFS_I(inode)->i_dsk_ino = cpu_to_le16(ino); + BFS_I(inode)->i_dsk_ino = ino; BFS_I(inode)->i_sblock = 0; BFS_I(inode)->i_eblock = 0; insert_inode_hash(inode); diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c index c7b39aa279d7..3af6c73c5b5a 100644 --- a/fs/bfs/inode.c +++ b/fs/bfs/inode.c @@ -357,28 +357,46 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) } info->si_blocks = (le32_to_cpu(bfs_sb->s_end) + 1)>>BFS_BSIZE_BITS; /* for statfs(2) */ - info->si_freeb = (le32_to_cpu(bfs_sb->s_end) + 1 - cpu_to_le32(bfs_sb->s_start))>>BFS_BSIZE_BITS; + info->si_freeb = (le32_to_cpu(bfs_sb->s_end) + 1 - le32_to_cpu(bfs_sb->s_start))>>BFS_BSIZE_BITS; info->si_freei = 0; info->si_lf_eblk = 0; info->si_lf_sblk = 0; info->si_lf_ioff = 0; + bh = NULL; for 
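The aio_pread/aio_pwrite rework above replaces the "return -EIOCBRETRY and wait to be kicked" dance with a plain loop around partial progress. The same pattern for ordinary user-space I/O, using nothing but read(2); full_read is just an illustrative name.

    #include <errno.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <unistd.h>

    /*
     * Keep calling read(2) until 'count' bytes arrive, the file ends, or an
     * error occurs - i.e. loop around short reads instead of giving up.
     */
    static ssize_t full_read(int fd, void *buf, size_t count)
    {
        size_t done = 0;

        while (done < count) {
            ssize_t n = read(fd, (char *)buf + done, count - done);
            if (n < 0) {
                if (errno == EINTR)
                    continue;         /* retry after a signal */
                return -1;            /* real error */
            }
            if (n == 0)
                break;                /* end of file: short result is final */
            done += n;
        }
        return done;
    }

    int main(void)
    {
        char buf[4096];
        ssize_t n = full_read(0, buf, sizeof(buf));
        if (n >= 0)
            fprintf(stderr, "read %zd bytes from stdin\n", n);
        return 0;
    }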
(i=BFS_ROOT_INO; i<=info->si_lasti; i++) { - inode = iget(s,i); - if (BFS_I(inode)->i_dsk_ino == 0) - info->si_freei++; - else { - set_bit(i, info->si_imap); - info->si_freeb -= inode->i_blocks; - if (BFS_I(inode)->i_eblock > info->si_lf_eblk) { - info->si_lf_eblk = BFS_I(inode)->i_eblock; - info->si_lf_sblk = BFS_I(inode)->i_sblock; - info->si_lf_ioff = BFS_INO2OFF(i); - } + struct bfs_inode *di; + int block = (i - BFS_ROOT_INO)/BFS_INODES_PER_BLOCK + 1; + int off = (i - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK; + unsigned long sblock, eblock; + + if (!off) { + brelse(bh); + bh = sb_bread(s, block); + } + + if (!bh) + continue; + + di = (struct bfs_inode *)bh->b_data + off; + + if (!di->i_ino) { + info->si_freei++; + continue; + } + set_bit(i, info->si_imap); + info->si_freeb -= BFS_FILEBLOCKS(di); + + sblock = le32_to_cpu(di->i_sblock); + eblock = le32_to_cpu(di->i_eblock); + if (eblock > info->si_lf_eblk) { + info->si_lf_eblk = eblock; + info->si_lf_sblk = sblock; + info->si_lf_ioff = BFS_INO2OFF(i); } - iput(inode); } + brelse(bh); if (!(s->s_flags & MS_RDONLY)) { - mark_buffer_dirty(bh); + mark_buffer_dirty(info->si_sbh); s->s_dirt = 1; } dump_imap("read_super", s); diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 7976a238f0a3..d4b15576e584 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -905,7 +905,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs) send_sig(SIGKILL, current, 0); goto out_free_dentry; } - if (padzero(elf_bss)) { + if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) { send_sig(SIGSEGV, current, 0); retval = -EFAULT; /* Nobody gets to see this, but.. */ goto out_free_dentry; diff --git a/fs/bio.c b/fs/bio.c index 83a349574567..460554b07ff9 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -75,7 +75,7 @@ struct bio_set { */ static struct bio_set *fs_bio_set; -static inline struct bio_vec *bvec_alloc_bs(unsigned int __nocast gfp_mask, int nr, unsigned long *idx, struct bio_set *bs) +static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs) { struct bio_vec *bvl; struct biovec_slab *bp; @@ -155,7 +155,7 @@ inline void bio_init(struct bio *bio) * allocate bio and iovecs from the memory pools specified by the * bio_set structure. **/ -struct bio *bio_alloc_bioset(unsigned int __nocast gfp_mask, int nr_iovecs, struct bio_set *bs) +struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) { struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask); @@ -181,7 +181,7 @@ out: return bio; } -struct bio *bio_alloc(unsigned int __nocast gfp_mask, int nr_iovecs) +struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs) { struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set); @@ -277,7 +277,7 @@ inline void __bio_clone(struct bio *bio, struct bio *bio_src) * * Like __bio_clone, only also allocates the returned bio */ -struct bio *bio_clone(struct bio *bio, unsigned int __nocast gfp_mask) +struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask) { struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set); @@ -778,7 +778,7 @@ static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err) static struct bio *__bio_map_kern(request_queue_t *q, void *data, - unsigned int len, unsigned int gfp_mask) + unsigned int len, gfp_t gfp_mask) { unsigned long kaddr = (unsigned long)data; unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; @@ -825,7 +825,7 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data, * device. 
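The rewritten bfs_fill_super above walks the on-disk inode table directly, turning each inode number into a block number plus a slot inside that block before reading the raw struct bfs_inode. The index arithmetic in isolation; ROOT_INO, INODES_PER_BLOCK and the +1 for the block holding the superblock follow the hunk, but the concrete values are only an example.

    #include <stdio.h>

    #define ROOT_INO          2      /* first real inode number (example value) */
    #define INODES_PER_BLOCK  8      /* how many on-disk inodes fit in one block */

    struct slot {
        unsigned long block;         /* disk block holding the inode */
        unsigned int  offset;        /* index of the inode within that block */
    };

    /* Inode table starts in block 1, right after the superblock in block 0. */
    static struct slot inode_slot(unsigned long ino)
    {
        struct slot s;

        s.block  = (ino - ROOT_INO) / INODES_PER_BLOCK + 1;
        s.offset = (ino - ROOT_INO) % INODES_PER_BLOCK;
        return s;
    }

    int main(void)
    {
        for (unsigned long ino = ROOT_INO; ino < ROOT_INO + 20; ino++) {
            struct slot s = inode_slot(ino);
            /* a new block needs to be read only when offset wraps back to 0 */
            printf("ino %2lu -> block %lu, slot %u\n", ino, s.block, s.offset);
        }
        return 0;
    }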
Returns an error pointer in case of error. */ struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len, - unsigned int gfp_mask) + gfp_t gfp_mask) { struct bio *bio; @@ -1078,7 +1078,7 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors) return bp; } -static void *bio_pair_alloc(unsigned int __nocast gfp_flags, void *data) +static void *bio_pair_alloc(gfp_t gfp_flags, void *data) { return kmalloc(sizeof(struct bio_pair), gfp_flags); } diff --git a/fs/buffer.c b/fs/buffer.c index 6cbfceabd95d..b1667986442f 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -502,7 +502,7 @@ static void free_more_memory(void) yield(); for_each_pgdat(pgdat) { - zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones; + zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones; if (*zones) try_to_free_pages(zones, GFP_NOFS); } @@ -1571,7 +1571,7 @@ static inline void discard_buffer(struct buffer_head * bh) * * NOTE: @gfp_mask may go away, and this function may become non-blocking. */ -int try_to_release_page(struct page *page, int gfp_mask) +int try_to_release_page(struct page *page, gfp_t gfp_mask) { struct address_space * const mapping = page->mapping; @@ -3045,7 +3045,7 @@ static void recalc_bh_state(void) buffer_heads_over_limit = (tot > max_buffer_heads); } -struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags) +struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) { struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags); if (ret) { diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 8cc23e7d0d5d..1ebf7dafc1d7 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -781,6 +781,8 @@ static int cifs_oplock_thread(void * dummyarg) oplockThread = current; do { + if (try_to_freeze()) + continue; set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(1*HZ); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 2335f14a1583..47360156cc54 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -344,6 +344,8 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) } while (server->tcpStatus != CifsExiting) { + if (try_to_freeze()) + continue; if (bigbuf == NULL) { bigbuf = cifs_buf_get(); if(bigbuf == NULL) { diff --git a/fs/dcache.c b/fs/dcache.c index fb10386c59be..e90512ed35a4 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -689,7 +689,7 @@ void shrink_dcache_anon(struct hlist_head *head) * * In this case we return -1 to tell the caller that we baled. */ -static int shrink_dcache_memory(int nr, unsigned int gfp_mask) +static int shrink_dcache_memory(int nr, gfp_t gfp_mask) { if (nr) { if (!(gfp_mask & __GFP_FS)) diff --git a/fs/dquot.c b/fs/dquot.c index b9732335bcdc..05f3327d64a3 100644 --- a/fs/dquot.c +++ b/fs/dquot.c @@ -500,7 +500,7 @@ static void prune_dqcache(int count) * more memory */ -static int shrink_dqcache_memory(int nr, unsigned int gfp_mask) +static int shrink_dqcache_memory(int nr, gfp_t gfp_mask) { if (nr) { spin_lock(&dq_list_lock); diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 403b90a1213d..4284cd31eba6 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -101,6 +101,10 @@ /* Maximum number of poll wake up nests we are allowing */ #define EP_MAX_POLLWAKE_NESTS 4 +/* Maximum msec timeout value storeable in a long int */ +#define EP_MAX_MSTIMEO min(1000ULL * MAX_SCHEDULE_TIMEOUT / HZ, (LONG_MAX - 999ULL) / HZ) + + struct epoll_filefd { struct file *file; int fd; @@ -1506,8 +1510,8 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, * and the overflow condition. 
The passed timeout is in milliseconds, * that why (t * HZ) / 1000. */ - jtimeout = timeout == -1 || timeout > (MAX_SCHEDULE_TIMEOUT - 1000) / HZ ? - MAX_SCHEDULE_TIMEOUT: (timeout * HZ + 999) / 1000; + jtimeout = (timeout < 0 || timeout >= EP_MAX_MSTIMEO) ? + MAX_SCHEDULE_TIMEOUT : (timeout * HZ + 999) / 1000; retry: write_lock_irqsave(&ep->lock, flags); diff --git a/fs/exec.c b/fs/exec.c index a04a575ad433..d2208f7c87db 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -126,8 +126,7 @@ asmlinkage long sys_uselib(const char __user * library) struct nameidata nd; int error; - nd.intent.open.flags = FMODE_READ; - error = __user_walk(library, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd); + error = __user_path_lookup_open(library, LOOKUP_FOLLOW, &nd, FMODE_READ); if (error) goto out; @@ -139,7 +138,7 @@ asmlinkage long sys_uselib(const char __user * library) if (error) goto exit; - file = dentry_open(nd.dentry, nd.mnt, O_RDONLY); + file = nameidata_to_filp(&nd, O_RDONLY); error = PTR_ERR(file); if (IS_ERR(file)) goto out; @@ -167,6 +166,7 @@ asmlinkage long sys_uselib(const char __user * library) out: return error; exit: + release_open_intent(&nd); path_release(&nd); goto out; } @@ -490,8 +490,7 @@ struct file *open_exec(const char *name) int err; struct file *file; - nd.intent.open.flags = FMODE_READ; - err = path_lookup(name, LOOKUP_FOLLOW|LOOKUP_OPEN, &nd); + err = path_lookup_open(name, LOOKUP_FOLLOW, &nd, FMODE_READ); file = ERR_PTR(err); if (!err) { @@ -504,7 +503,7 @@ struct file *open_exec(const char *name) err = -EACCES; file = ERR_PTR(err); if (!err) { - file = dentry_open(nd.dentry, nd.mnt, O_RDONLY); + file = nameidata_to_filp(&nd, O_RDONLY); if (!IS_ERR(file)) { err = deny_write_access(file); if (err) { @@ -516,6 +515,7 @@ out: return file; } } + release_open_intent(&nd); path_release(&nd); } goto out; diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c index c8d07030c897..e2d6208633a7 100644 --- a/fs/ext2/ialloc.c +++ b/fs/ext2/ialloc.c @@ -605,27 +605,28 @@ got: insert_inode_hash(inode); if (DQUOT_ALLOC_INODE(inode)) { - DQUOT_DROP(inode); err = -ENOSPC; - goto fail2; + goto fail_drop; } + err = ext2_init_acl(inode, dir); - if (err) { - DQUOT_FREE_INODE(inode); - DQUOT_DROP(inode); - goto fail2; - } + if (err) + goto fail_free_drop; + err = ext2_init_security(inode,dir); - if (err) { - DQUOT_FREE_INODE(inode); - goto fail2; - } + if (err) + goto fail_free_drop; + mark_inode_dirty(inode); ext2_debug("allocating inode %lu\n", inode->i_ino); ext2_preread_inode(inode); return inode; -fail2: +fail_free_drop: + DQUOT_FREE_INODE(inode); + +fail_drop: + DQUOT_DROP(inode); inode->i_flags |= S_NOQUOTA; inode->i_nlink = 0; iput(inode); diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c index e463dca008e4..0213db4911a2 100644 --- a/fs/ext3/balloc.c +++ b/fs/ext3/balloc.c @@ -1410,7 +1410,7 @@ unsigned long ext3_count_free_blocks(struct super_block *sb) unsigned long desc_count; struct ext3_group_desc *gdp; int i; - unsigned long ngroups; + unsigned long ngroups = EXT3_SB(sb)->s_groups_count; #ifdef EXT3FS_DEBUG struct ext3_super_block *es; unsigned long bitmap_count, x; @@ -1421,7 +1421,8 @@ unsigned long ext3_count_free_blocks(struct super_block *sb) desc_count = 0; bitmap_count = 0; gdp = NULL; - for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) { + + for (i = 0; i < ngroups; i++) { gdp = ext3_get_group_desc(sb, i, NULL); if (!gdp) continue; @@ -1443,7 +1444,6 @@ unsigned long ext3_count_free_blocks(struct super_block *sb) return bitmap_count; #else desc_count = 0; - ngroups = EXT3_SB(sb)->s_groups_count; 
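The epoll change above caps the millisecond timeout before converting it to jiffies, so (timeout * HZ + 999) can never overflow a long while a negative value still means "wait forever". The same arithmetic as a stand-alone check; HZ and MAX_SCHEDULE_TIMEOUT are given example values here, not the kernel's configuration.

    #include <limits.h>
    #include <stdio.h>

    #define HZ                    1000L
    #define MAX_SCHEDULE_TIMEOUT  LONG_MAX

    /* Largest millisecond value that still converts without overflowing. */
    #define MAX_MSTIMEO ((LONG_MAX - 999L) / HZ)

    /* Convert a poll-style ms timeout to ticks; <0 or huge means "forever". */
    static long ms_to_ticks(long timeout_ms)
    {
        if (timeout_ms < 0 || timeout_ms >= MAX_MSTIMEO)
            return MAX_SCHEDULE_TIMEOUT;
        return (timeout_ms * HZ + 999) / 1000;   /* round up to a full tick */
    }

    int main(void)
    {
        printf("10ms  -> %ld ticks\n", ms_to_ticks(10));
        printf("-1    -> %ld (forever)\n", ms_to_ticks(-1));
        printf("huge  -> %ld (clamped)\n", ms_to_ticks(LONG_MAX / 2));
        return 0;
    }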
smp_rmb(); for (i = 0; i < ngroups; i++) { gdp = ext3_get_group_desc(sb, i, NULL); diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c index 96552769d039..6549945f9ac1 100644 --- a/fs/ext3/ialloc.c +++ b/fs/ext3/ialloc.c @@ -597,27 +597,22 @@ got: ret = inode; if(DQUOT_ALLOC_INODE(inode)) { - DQUOT_DROP(inode); err = -EDQUOT; - goto fail2; + goto fail_drop; } + err = ext3_init_acl(handle, inode, dir); - if (err) { - DQUOT_FREE_INODE(inode); - DQUOT_DROP(inode); - goto fail2; - } + if (err) + goto fail_free_drop; + err = ext3_init_security(handle,inode, dir); - if (err) { - DQUOT_FREE_INODE(inode); - goto fail2; - } + if (err) + goto fail_free_drop; + err = ext3_mark_inode_dirty(handle, inode); if (err) { ext3_std_error(sb, err); - DQUOT_FREE_INODE(inode); - DQUOT_DROP(inode); - goto fail2; + goto fail_free_drop; } ext3_debug("allocating inode %lu\n", inode->i_ino); @@ -631,7 +626,11 @@ really_out: brelse(bitmap_bh); return ret; -fail2: +fail_free_drop: + DQUOT_FREE_INODE(inode); + +fail_drop: + DQUOT_DROP(inode); inode->i_flags |= S_NOQUOTA; inode->i_nlink = 0; iput(inode); diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index b5177c90d6f1..8b38f2232796 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -1434,7 +1434,7 @@ static int ext3_invalidatepage(struct page *page, unsigned long offset) return journal_invalidatepage(journal, page, offset); } -static int ext3_releasepage(struct page *page, int wait) +static int ext3_releasepage(struct page *page, gfp_t wait) { journal_t *journal = EXT3_JOURNAL(page->mapping->host); diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c index 2c9f81278d5d..57f79106267d 100644 --- a/fs/ext3/resize.c +++ b/fs/ext3/resize.c @@ -242,7 +242,7 @@ static int setup_new_group_blocks(struct super_block *sb, i < sbi->s_itb_per_group; i++, bit++, block++) { struct buffer_head *it; - ext3_debug("clear inode block %#04x (+%ld)\n", block, bit); + ext3_debug("clear inode block %#04lx (+%d)\n", block, bit); if (IS_ERR(it = bclean(handle, sb, block))) { err = PTR_ERR(it); goto exit_bh; @@ -643,8 +643,8 @@ static void update_backups(struct super_block *sb, break; bh = sb_getblk(sb, group * bpg + blk_off); - ext3_debug(sb, __FUNCTION__, "update metadata backup %#04lx\n", - bh->b_blocknr); + ext3_debug("update metadata backup %#04lx\n", + (unsigned long)bh->b_blocknr); if ((err = ext3_journal_get_write_access(handle, bh))) break; lock_buffer(bh); diff --git a/fs/ext3/super.c b/fs/ext3/super.c index a93c3609025d..9e24ceb019fe 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -512,15 +512,14 @@ static void ext3_clear_inode(struct inode *inode) static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs) { - struct ext3_sb_info *sbi = EXT3_SB(vfs->mnt_sb); + struct super_block *sb = vfs->mnt_sb; + struct ext3_sb_info *sbi = EXT3_SB(sb); - if (sbi->s_mount_opt & EXT3_MOUNT_JOURNAL_DATA) + if (test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA) seq_puts(seq, ",data=journal"); - - if (sbi->s_mount_opt & EXT3_MOUNT_ORDERED_DATA) + else if (test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA) seq_puts(seq, ",data=ordered"); - - if (sbi->s_mount_opt & EXT3_MOUNT_WRITEBACK_DATA) + else if (test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA) seq_puts(seq, ",data=writeback"); #if defined(CONFIG_QUOTA) diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index e79e49b3eec7..29f1e9f6e85c 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -96,6 +96,8 @@ static int fuse_lookup_iget(struct inode *dir, struct dentry *entry, fuse_lookup_init(req, dir, entry, &outarg); request_send(fc, 
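The ext3_show_options fix above matters because the three data journaling modes share one multi-bit field: testing each constant with '&' can report more than one "data=" option, whereas masking the field and comparing with '==' selects exactly one. A small sketch of the difference; the flag values are illustrative, not ext3's real ones.

    #include <stdio.h>

    /* A 2-bit "data mode" field packed into a wider flags word. */
    #define DATA_MASK        0x3
    #define DATA_JOURNAL     0x3
    #define DATA_ORDERED     0x1
    #define DATA_WRITEBACK   0x2

    static void show_data_mode(unsigned int flags)
    {
        /*
         * Wrong: because DATA_ORDERED and DATA_WRITEBACK are both subsets
         * of DATA_JOURNAL, "flags & DATA_JOURNAL" is true for every mode.
         * Right: isolate the field, then compare for equality.
         */
        switch (flags & DATA_MASK) {
        case DATA_JOURNAL:   printf("data=journal\n");   break;
        case DATA_ORDERED:   printf("data=ordered\n");   break;
        case DATA_WRITEBACK: printf("data=writeback\n"); break;
        }
    }

    int main(void)
    {
        show_data_mode(DATA_ORDERED);     /* prints exactly one line */
        show_data_mode(DATA_JOURNAL);
        return 0;
    }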
req); err = req->out.h.error; + if (!err && (!outarg.nodeid || outarg.nodeid == FUSE_ROOT_ID)) + err = -EIO; if (!err) { inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation, &outarg.attr); @@ -152,6 +154,10 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req, fuse_put_request(fc, req); return err; } + if (!outarg.nodeid || outarg.nodeid == FUSE_ROOT_ID) { + fuse_put_request(fc, req); + return -EIO; + } inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation, &outarg.attr); if (!inode) { diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 6454022b0536..657ab11c173b 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -23,6 +23,10 @@ int fuse_open_common(struct inode *inode, struct file *file, int isdir) struct fuse_file *ff; int err; + /* VFS checks this, but only _after_ ->open() */ + if (file->f_flags & O_DIRECT) + return -EINVAL; + err = generic_file_open(inode, file); if (err) return err; diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index f1570b9f9de3..3f680c5675bf 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -46,7 +46,7 @@ static sector_t hfs_bmap(struct address_space *mapping, sector_t block) return generic_block_bmap(mapping, block, hfs_get_block); } -static int hfs_releasepage(struct page *page, int mask) +static int hfs_releasepage(struct page *page, gfp_t mask) { struct inode *inode = page->mapping->host; struct super_block *sb = inode->i_sb; diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index d5642705f633..f205773ddfbe 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -40,7 +40,7 @@ static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block) return generic_block_bmap(mapping, block, hfsplus_get_block); } -static int hfsplus_releasepage(struct page *page, int mask) +static int hfsplus_releasepage(struct page *page, gfp_t mask) { struct inode *inode = page->mapping->host; struct super_block *sb = inode->i_sb; diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index fd0f0f050e1d..452fc1fdbd32 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -50,6 +50,7 @@ static void hfsplus_read_inode(struct inode *inode) init_MUTEX(&HFSPLUS_I(inode).extents_lock); HFSPLUS_I(inode).flags = 0; HFSPLUS_I(inode).rsrc_inode = NULL; + atomic_set(&HFSPLUS_I(inode).opencnt, 0); if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID) { read_inode: diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index 59c5062cd63f..dd7113106269 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -793,11 +793,6 @@ int hostfs_rename(struct inode *from_ino, struct dentry *from, return(err); } -void hostfs_truncate(struct inode *ino) -{ - not_implemented(); -} - int hostfs_permission(struct inode *ino, int desired, struct nameidata *nd) { char *name; @@ -894,7 +889,6 @@ static struct inode_operations hostfs_iops = { .rmdir = hostfs_rmdir, .mknod = hostfs_mknod, .rename = hostfs_rename, - .truncate = hostfs_truncate, .permission = hostfs_permission, .setattr = hostfs_setattr, .getattr = hostfs_getattr, @@ -910,7 +904,6 @@ static struct inode_operations hostfs_dir_iops = { .rmdir = hostfs_rmdir, .mknod = hostfs_mknod, .rename = hostfs_rename, - .truncate = hostfs_truncate, .permission = hostfs_permission, .setattr = hostfs_setattr, .getattr = hostfs_getattr, diff --git a/fs/inode.c b/fs/inode.c index f80a79ff156b..7d3316527767 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -475,7 +475,7 @@ static void prune_icache(int nr_to_scan) * This function is passed the number of inodes to scan, and it returns the * total number of 
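The fuse hunks above refuse to trust a reply whose nodeid is zero or equal to the root id, failing the operation with -EIO instead of building an inode around it. A sketch of that kind of reply validation; the struct and constant are made up, the real fuse wire format is more involved.

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ROOT_ID 1ULL              /* reserved identifier for the root node */

    struct lookup_reply {
        uint64_t nodeid;              /* identifier chosen by the server */
        uint64_t generation;
    };

    /* Reject identifiers a well-behaved server must never hand out. */
    static int check_reply(const struct lookup_reply *r)
    {
        if (r->nodeid == 0 || r->nodeid == ROOT_ID)
            return -EIO;              /* corrupt or malicious reply */
        return 0;
    }

    int main(void)
    {
        struct lookup_reply good = { .nodeid = 42, .generation = 1 };
        struct lookup_reply bad  = { .nodeid = ROOT_ID, .generation = 1 };

        printf("good: %d\n", check_reply(&good));   /* 0 */
        printf("bad:  %d\n", check_reply(&bad));    /* -EIO */
        return 0;
    }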
remaining possibly-reclaimable inodes. */ -static int shrink_icache_memory(int nr, unsigned int gfp_mask) +static int shrink_icache_memory(int nr, gfp_t gfp_mask) { if (nr) { /* diff --git a/fs/inotify.c b/fs/inotify.c index a37e9fb1da58..9fbaebfdf40b 100644 --- a/fs/inotify.c +++ b/fs/inotify.c @@ -176,6 +176,7 @@ static inline void put_inotify_dev(struct inotify_device *dev) if (atomic_dec_and_test(&dev->count)) { atomic_dec(&dev->user->inotify_devs); free_uid(dev->user); + idr_destroy(&dev->idr); kfree(dev); } } diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index 7ae2c4fe506b..e4b516ac4989 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c @@ -1606,7 +1606,7 @@ int journal_blocks_per_page(struct inode *inode) * Simple support for retrying memory allocations. Introduced to help to * debug different VM deadlock avoidance strategies. */ -void * __jbd_kmalloc (const char *where, size_t size, int flags, int retry) +void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry) { return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0)); } diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index 49bbc2be3d72..13cb05bf6048 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -1621,7 +1621,7 @@ out: * while the data is part of a transaction. Yes? */ int journal_try_to_free_buffers(journal_t *journal, - struct page *page, int unused_gfp_mask) + struct page *page, gfp_t unused_gfp_mask) { struct buffer_head *head; struct buffer_head *bh; diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 0ec62d5310db..9f942ca8e4e3 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -129,8 +129,7 @@ void jfs_delete_inode(struct inode *inode) jfs_info("In jfs_delete_inode, inode = 0x%p", inode); if (!is_bad_inode(inode) && - (JFS_IP(inode)->fileset == cpu_to_le32(FILESYSTEM_I))) { - + (JFS_IP(inode)->fileset == FILESYSTEM_I)) { truncate_inode_pages(&inode->i_data, 0); if (test_cflag(COMMIT_Freewmap, inode)) diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index c739626f5bf1..eadf319bee22 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c @@ -3055,7 +3055,7 @@ static int cntlz(u32 value) * RETURN VALUES: * log2 number of blocks */ -int blkstol2(s64 nb) +static int blkstol2(s64 nb) { int l2nb; s64 mask; /* meant to be signed */ diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c index 13d7e3f1feb4..eeb37d70e650 100644 --- a/fs/jfs/jfs_metapage.c +++ b/fs/jfs/jfs_metapage.c @@ -198,7 +198,7 @@ static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags) } } -static inline struct metapage *alloc_metapage(unsigned int gfp_mask) +static inline struct metapage *alloc_metapage(gfp_t gfp_mask) { return mempool_alloc(metapage_mempool, gfp_mask); } @@ -534,7 +534,7 @@ add_failed: return -EIO; } -static int metapage_releasepage(struct page *page, int gfp_mask) +static int metapage_releasepage(struct page *page, gfp_t gfp_mask) { struct metapage *mp; int busy = 0; diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c index c7a92f9deb2b..9b71ed2674fe 100644 --- a/fs/jfs/jfs_txnmgr.c +++ b/fs/jfs/jfs_txnmgr.c @@ -725,6 +725,9 @@ struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp, else tlck->flag = tlckINODELOCK; + if (S_ISDIR(ip->i_mode)) + tlck->flag |= tlckDIRECTORY; + tlck->type = 0; /* bind the tlock and the page */ @@ -1009,6 +1012,8 @@ struct tlock *txMaplock(tid_t tid, struct inode *ip, int type) /* bind the tlock and the object */ tlck->flag = tlckINODELOCK; + if (S_ISDIR(ip->i_mode)) + tlck->flag |= tlckDIRECTORY; tlck->ip = ip; tlck->mp 
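Both the bfs and jfs hunks above drop a stray cpu_to_le16/cpu_to_le32: once an on-disk value has been converted at read time, the in-core copy is host-endian and must be compared against plain constants. A stand-alone illustration of the convert-once rule; le32_to_host is hand-rolled so the example does not depend on any kernel or libc helper, and the constant is invented.

    #include <stdint.h>
    #include <stdio.h>

    #define FILESYSTEM_ID 16u         /* example constant, host byte order */

    /* Decode a little-endian 32-bit value from raw disk bytes. */
    static uint32_t le32_to_host(const uint8_t b[4])
    {
        return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
               ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
    }

    struct in_core_inode {
        uint32_t fileset;             /* already host-endian after decode */
    };

    int main(void)
    {
        const uint8_t on_disk[4] = { 16, 0, 0, 0 };   /* LE encoding of 16 */
        struct in_core_inode ic;

        ic.fileset = le32_to_host(on_disk);           /* convert exactly once */

        /* Compare host-endian against host-endian - no second conversion. */
        if (ic.fileset == FILESYSTEM_ID)
            printf("fileset matches\n");
        return 0;
    }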
= NULL; @@ -1077,6 +1082,8 @@ struct linelock *txLinelock(struct linelock * tlock) linelock->flag = tlckLINELOCK; linelock->maxcnt = TLOCKLONG; linelock->index = 0; + if (tlck->flag & tlckDIRECTORY) + linelock->flag |= tlckDIRECTORY; /* append linelock after tlock */ linelock->next = tlock->next; @@ -2070,8 +2077,8 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, * * function: log from maplock of freed data extents; */ -void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, - struct tlock * tlck) +static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, + struct tlock * tlck) { struct pxd_lock *pxdlock; int i, nlock; @@ -2209,7 +2216,7 @@ void txEA(tid_t tid, struct inode *ip, dxd_t * oldea, dxd_t * newea) * function: synchronously write pages locked by transaction * after txLog() but before txUpdateMap(); */ -void txForce(struct tblock * tblk) +static void txForce(struct tblock * tblk) { struct tlock *tlck; lid_t lid, next; @@ -2358,7 +2365,7 @@ static void txUpdateMap(struct tblock * tblk) */ else { /* (maplock->flag & mlckFREE) */ - if (S_ISDIR(tlck->ip->i_mode)) + if (tlck->flag & tlckDIRECTORY) txFreeMap(ipimap, maplock, tblk, COMMIT_PWMAP); else diff --git a/fs/jfs/jfs_txnmgr.h b/fs/jfs/jfs_txnmgr.h index 59ad0f6b7231..0e4dc4514c47 100644 --- a/fs/jfs/jfs_txnmgr.h +++ b/fs/jfs/jfs_txnmgr.h @@ -122,6 +122,7 @@ extern struct tlock *TxLock; /* transaction lock table */ #define tlckLOG 0x0800 /* updateMap state */ #define tlckUPDATEMAP 0x0080 +#define tlckDIRECTORY 0x0040 /* freeLock state */ #define tlckFREELOCK 0x0008 #define tlckWRITEPAGE 0x0004 diff --git a/fs/lockd/host.c b/fs/lockd/host.c index 82c77df81c5f..c4c8601096e0 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -173,11 +173,10 @@ nlm_bind_host(struct nlm_host *host) /* If we've already created an RPC client, check whether * RPC rebind is required - * Note: why keep rebinding if we're on a tcp connection? */ if ((clnt = host->h_rpcclnt) != NULL) { xprt = clnt->cl_xprt; - if (!xprt->stream && time_after_eq(jiffies, host->h_nextrebind)) { + if (time_after_eq(jiffies, host->h_nextrebind)) { clnt->cl_port = 0; host->h_nextrebind = jiffies + NLM_HOST_REBIND; dprintk("lockd: next rebind in %ld jiffies\n", @@ -189,7 +188,6 @@ nlm_bind_host(struct nlm_host *host) goto forgetit; xprt_set_timeout(&xprt->timeout, 5, nlmsvc_timeout); - xprt->nocong = 1; /* No congestion control for NLM */ xprt->resvport = 1; /* NLM requires a reserved port */ /* Existing NLM servers accept AUTH_UNIX only */ diff --git a/fs/locks.c b/fs/locks.c index f7daa5f48949..a1e8b2248014 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -316,21 +316,22 @@ static int flock_to_posix_lock(struct file *filp, struct file_lock *fl, /* POSIX-1996 leaves the case l->l_len < 0 undefined; POSIX-2001 defines it. 
*/ start += l->l_start; - end = start + l->l_len - 1; - if (l->l_len < 0) { - end = start - 1; - start += l->l_len; - } - if (start < 0) return -EINVAL; - if (l->l_len > 0 && end < 0) - return -EOVERFLOW; - + fl->fl_end = OFFSET_MAX; + if (l->l_len > 0) { + end = start + l->l_len - 1; + fl->fl_end = end; + } else if (l->l_len < 0) { + end = start - 1; + fl->fl_end = end; + start += l->l_len; + if (start < 0) + return -EINVAL; + } fl->fl_start = start; /* we record the absolute position */ - fl->fl_end = end; - if (l->l_len == 0) - fl->fl_end = OFFSET_MAX; + if (fl->fl_end < fl->fl_start) + return -EOVERFLOW; fl->fl_owner = current->files; fl->fl_pid = current->tgid; @@ -362,14 +363,21 @@ static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl, return -EINVAL; } - if (((start += l->l_start) < 0) || (l->l_len < 0)) + start += l->l_start; + if (start < 0) return -EINVAL; - fl->fl_end = start + l->l_len - 1; - if (l->l_len > 0 && fl->fl_end < 0) - return -EOVERFLOW; + fl->fl_end = OFFSET_MAX; + if (l->l_len > 0) { + fl->fl_end = start + l->l_len - 1; + } else if (l->l_len < 0) { + fl->fl_end = start - 1; + start += l->l_len; + if (start < 0) + return -EINVAL; + } fl->fl_start = start; /* we record the absolute position */ - if (l->l_len == 0) - fl->fl_end = OFFSET_MAX; + if (fl->fl_end < fl->fl_start) + return -EOVERFLOW; fl->fl_owner = current->files; fl->fl_pid = current->tgid; @@ -829,12 +837,16 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request) /* Detect adjacent or overlapping regions (if same lock type) */ if (request->fl_type == fl->fl_type) { + /* In all comparisons of start vs end, use + * "start - 1" rather than "end + 1". If end + * is OFFSET_MAX, end + 1 will become negative. + */ if (fl->fl_end < request->fl_start - 1) goto next_lock; /* If the next lock in the list has entirely bigger * addresses than the new one, insert the lock here. */ - if (fl->fl_start > request->fl_end + 1) + if (fl->fl_start - 1 > request->fl_end) break; /* If we come here, the new and old lock are of the diff --git a/fs/mbcache.c b/fs/mbcache.c index b002a088857d..298997f17475 100644 --- a/fs/mbcache.c +++ b/fs/mbcache.c @@ -116,7 +116,7 @@ mb_cache_indexes(struct mb_cache *cache) * What the mbcache registers as to get shrunk dynamically. */ -static int mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask); +static int mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask); static inline int @@ -140,7 +140,7 @@ __mb_cache_entry_unhash(struct mb_cache_entry *ce) static inline void -__mb_cache_entry_forget(struct mb_cache_entry *ce, int gfp_mask) +__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask) { struct mb_cache *cache = ce->e_cache; @@ -193,7 +193,7 @@ forget: * Returns the number of objects which are present in the cache. 
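The locks.c rework above folds the three l_len cases (zero, positive, negative) into one place and rejects ranges whose computed end wrapped in front of the start; the later __posix_lock_file hunk compares "fl->fl_start - 1" instead of "fl->fl_end + 1" so an end of OFFSET_MAX cannot overflow. A user-space rendition of the same range arithmetic, with types and error codes simplified; the overflow is checked up front here to stay within defined C behaviour rather than relying on wraparound.

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define OFFSET_MAX INT64_MAX

    struct range { int64_t start, end; };   /* inclusive byte range */

    /*
     * Turn (base, l_start, l_len) into an absolute [start, end] range,
     * following the POSIX rules: l_len == 0 means "to end of file",
     * l_len < 0 means the l_len bytes *before* l_start.
     */
    static int make_range(int64_t base, int64_t l_start, int64_t l_len,
                          struct range *r)
    {
        int64_t start = base + l_start;

        if (start < 0)
            return -EINVAL;

        r->end = OFFSET_MAX;                     /* l_len == 0: rest of file */
        if (l_len > 0) {
            if (l_len - 1 > OFFSET_MAX - start)
                return -EOVERFLOW;               /* end would pass OFFSET_MAX */
            r->end = start + l_len - 1;
        } else if (l_len < 0) {
            r->end = start - 1;                  /* range ends just below start */
            start += l_len;
            if (start < 0)
                return -EINVAL;
        }
        r->start = start;
        return 0;
    }

    int main(void)
    {
        struct range r;

        printf("%d\n", make_range(0, 100, 10, &r));        /* 0: [100,109] */
        printf("%d\n", make_range(0, 100, -10, &r));       /* 0: [90,99]   */
        printf("%d\n", make_range(0, OFFSET_MAX, 2, &r));  /* -EOVERFLOW   */
        return 0;
    }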
*/ static int -mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask) +mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask) { LIST_HEAD(free_list); struct list_head *l, *ltmp; diff --git a/fs/mpage.c b/fs/mpage.c index bb9aebe93862..c5adcdddf3cc 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -102,7 +102,7 @@ static struct bio *mpage_bio_submit(int rw, struct bio *bio) static struct bio * mpage_alloc(struct block_device *bdev, sector_t first_sector, int nr_vecs, - unsigned int __nocast gfp_flags) + gfp_t gfp_flags) { struct bio *bio; diff --git a/fs/namei.c b/fs/namei.c index 043d587216b5..aaaa81036234 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -317,6 +318,18 @@ void path_release_on_umount(struct nameidata *nd) mntput_no_expire(nd->mnt); } +/** + * release_open_intent - free up open intent resources + * @nd: pointer to nameidata + */ +void release_open_intent(struct nameidata *nd) +{ + if (nd->intent.open.file->f_dentry == NULL) + put_filp(nd->intent.open.file); + else + fput(nd->intent.open.file); +} + /* * Internal lookup() using the new generic dcache. * SMP-safe @@ -750,6 +763,7 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd) struct qstr this; unsigned int c; + nd->flags |= LOOKUP_CONTINUE; err = exec_permission_lite(inode, nd); if (err == -EAGAIN) { err = permission(inode, MAY_EXEC, nd); @@ -802,7 +816,6 @@ static fastcall int __link_path_walk(const char * name, struct nameidata *nd) if (err < 0) break; } - nd->flags |= LOOKUP_CONTINUE; /* This does the actual lookups.. */ err = do_lookup(nd, &this, &next); if (err) @@ -1052,6 +1065,70 @@ out: return retval; } +static int __path_lookup_intent_open(const char *name, unsigned int lookup_flags, + struct nameidata *nd, int open_flags, int create_mode) +{ + struct file *filp = get_empty_filp(); + int err; + + if (filp == NULL) + return -ENFILE; + nd->intent.open.file = filp; + nd->intent.open.flags = open_flags; + nd->intent.open.create_mode = create_mode; + err = path_lookup(name, lookup_flags|LOOKUP_OPEN, nd); + if (IS_ERR(nd->intent.open.file)) { + if (err == 0) { + err = PTR_ERR(nd->intent.open.file); + path_release(nd); + } + } else if (err != 0) + release_open_intent(nd); + return err; +} + +/** + * path_lookup_open - lookup a file path with open intent + * @name: pointer to file name + * @lookup_flags: lookup intent flags + * @nd: pointer to nameidata + * @open_flags: open intent flags + */ +int path_lookup_open(const char *name, unsigned int lookup_flags, + struct nameidata *nd, int open_flags) +{ + return __path_lookup_intent_open(name, lookup_flags, nd, + open_flags, 0); +} + +/** + * path_lookup_create - lookup a file path with open + create intent + * @name: pointer to file name + * @lookup_flags: lookup intent flags + * @nd: pointer to nameidata + * @open_flags: open intent flags + * @create_mode: create intent flags + */ +int path_lookup_create(const char *name, unsigned int lookup_flags, + struct nameidata *nd, int open_flags, int create_mode) +{ + return __path_lookup_intent_open(name, lookup_flags|LOOKUP_CREATE, nd, + open_flags, create_mode); +} + +int __user_path_lookup_open(const char __user *name, unsigned int lookup_flags, + struct nameidata *nd, int open_flags) +{ + char *tmp = getname(name); + int err = PTR_ERR(tmp); + + if (!IS_ERR(tmp)) { + err = __path_lookup_intent_open(tmp, lookup_flags, nd, open_flags, 0); + putname(tmp); + } + return err; +} + /* * Restricted form of lookup. 
Doesn't follow links, single-component only, * needs parent already locked. Doesn't follow mounts. @@ -1416,27 +1493,27 @@ int may_open(struct nameidata *nd, int acc_mode, int flag) */ int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd) { - int acc_mode, error = 0; + int acc_mode, error; struct path path; struct dentry *dir; int count = 0; acc_mode = ACC_MODE(flag); + /* O_TRUNC implies we need access checks for write permissions */ + if (flag & O_TRUNC) + acc_mode |= MAY_WRITE; + /* Allow the LSM permission hook to distinguish append access from general write access. */ if (flag & O_APPEND) acc_mode |= MAY_APPEND; - /* Fill in the open() intent data */ - nd->intent.open.flags = flag; - nd->intent.open.create_mode = mode; - /* * The simplest case - just a plain lookup. */ if (!(flag & O_CREAT)) { - error = path_lookup(pathname, lookup_flags(flag)|LOOKUP_OPEN, nd); + error = path_lookup_open(pathname, lookup_flags(flag), nd, flag); if (error) return error; goto ok; @@ -1445,7 +1522,7 @@ int open_namei(const char * pathname, int flag, int mode, struct nameidata *nd) /* * Create - we need to know the parent. */ - error = path_lookup(pathname, LOOKUP_PARENT|LOOKUP_OPEN|LOOKUP_CREATE, nd); + error = path_lookup_create(pathname, LOOKUP_PARENT, nd, flag, mode); if (error) return error; @@ -1520,6 +1597,8 @@ ok: exit_dput: dput_path(&path, nd); exit: + if (!IS_ERR(nd->intent.open.file)) + release_open_intent(nd); path_release(nd); return error; @@ -1551,19 +1630,19 @@ do_link: if (nd->last_type != LAST_NORM) goto exit; if (nd->last.name[nd->last.len]) { - putname(nd->last.name); + __putname(nd->last.name); goto exit; } error = -ELOOP; if (count++==32) { - putname(nd->last.name); + __putname(nd->last.name); goto exit; } dir = nd->dentry; down(&dir->d_inode->i_sem); path.dentry = __lookup_hash(&nd->last, nd->dentry, nd); path.mnt = nd->mnt; - putname(nd->last.name); + __putname(nd->last.name); goto do_last; } diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index d7f7eb669d03..44135af9894c 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -85,6 +85,10 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct struct nfs_delegation *delegation; int status = 0; + /* Ensure we first revalidate the attributes and page cache! 
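The open_namei change above adds MAY_WRITE to the access mask whenever O_TRUNC is requested, since truncation modifies the file even on an O_RDONLY open. A stand-alone sketch of deriving the permission mask from open flags; the MAY_* values and the ACC_MODE-style table are illustrative rather than the kernel's exact definitions.

    #include <fcntl.h>
    #include <stdio.h>

    #define MAY_READ   4
    #define MAY_WRITE  2
    #define MAY_APPEND 8            /* extra bit so an LSM can tell append apart */

    /* Map O_RDONLY/O_WRONLY/O_RDWR (low two bits of the flags) to MAY_*. */
    static const int acc_mode_tab[4] = {
        MAY_READ,                   /* O_RDONLY */
        MAY_WRITE,                  /* O_WRONLY */
        MAY_READ | MAY_WRITE,       /* O_RDWR   */
        0,
    };

    static int open_acc_mode(int flags)
    {
        int acc = acc_mode_tab[flags & O_ACCMODE];

        if (flags & O_TRUNC)
            acc |= MAY_WRITE;       /* truncation writes even on a read-only open */
        if (flags & O_APPEND)
            acc |= MAY_APPEND;
        return acc;
    }

    int main(void)
    {
        printf("O_RDONLY          -> %d\n", open_acc_mode(O_RDONLY));
        printf("O_RDONLY|O_TRUNC  -> %d\n", open_acc_mode(O_RDONLY | O_TRUNC));
        return 0;
    }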
*/ + if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR))) + __nfs_revalidate_inode(NFS_SERVER(inode), inode); + delegation = nfs_alloc_delegation(); if (delegation == NULL) return -ENOMEM; @@ -138,7 +142,7 @@ static void nfs_msync_inode(struct inode *inode) /* * Basic procedure for returning a delegation to the server */ -int nfs_inode_return_delegation(struct inode *inode) +int __nfs_inode_return_delegation(struct inode *inode) { struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state; struct nfs_inode *nfsi = NFS_I(inode); diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index 3f6c45a29d6a..8017846b561f 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h @@ -25,7 +25,7 @@ struct nfs_delegation { int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res); void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res); -int nfs_inode_return_delegation(struct inode *inode); +int __nfs_inode_return_delegation(struct inode *inode); int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid); struct inode *nfs_delegation_find_inode(struct nfs4_client *clp, const struct nfs_fh *fhandle); @@ -47,11 +47,25 @@ static inline int nfs_have_delegation(struct inode *inode, int flags) return 1; return 0; } + +static inline int nfs_inode_return_delegation(struct inode *inode) +{ + int err = 0; + + if (NFS_I(inode)->delegation != NULL) + err = __nfs_inode_return_delegation(inode); + return err; +} #else static inline int nfs_have_delegation(struct inode *inode, int flags) { return 0; } + +static inline int nfs_inode_return_delegation(struct inode *inode) +{ + return 0; +} #endif #endif diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 2df639f143e8..8272ed3fc707 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -532,6 +532,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir) my_entry.eof = 0; my_entry.fh = &fh; my_entry.fattr = &fattr; + nfs_fattr_init(&fattr); desc->entry = &my_entry; while(!desc->entry->eof) { @@ -565,8 +566,6 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir) } } unlock_kernel(); - if (desc->error < 0) - return desc->error; if (res < 0) return res; return 0; @@ -803,6 +802,7 @@ static int nfs_dentry_delete(struct dentry *dentry) */ static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode) { + nfs_inode_return_delegation(inode); if (dentry->d_flags & DCACHE_NFSFS_RENAMED) { lock_kernel(); inode->i_nlink--; @@ -853,12 +853,6 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru dentry->d_op = NFS_PROTO(dir)->dentry_ops; lock_kernel(); - /* Revalidate parent directory attribute cache */ - error = nfs_revalidate_inode(NFS_SERVER(dir), dir); - if (error < 0) { - res = ERR_PTR(error); - goto out_unlock; - } /* If we're doing an exclusive create, optimize away the lookup */ if (nfs_is_exclusive_create(dir, nd)) @@ -916,7 +910,6 @@ static int is_atomic_open(struct inode *dir, struct nameidata *nd) static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct dentry *res = NULL; - struct inode *inode = NULL; int error; /* Check that we are indeed trying to open this file */ @@ -930,8 +923,10 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry dentry->d_op = NFS_PROTO(dir)->dentry_ops; /* Let vfs_create() deal with O_EXCL */ - if (nd->intent.open.flags & O_EXCL) - goto no_entry; + 
if (nd->intent.open.flags & O_EXCL) { + d_add(dentry, NULL); + goto out; + } /* Open the file on the server */ lock_kernel(); @@ -945,32 +940,30 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry if (nd->intent.open.flags & O_CREAT) { nfs_begin_data_update(dir); - inode = nfs4_atomic_open(dir, dentry, nd); + res = nfs4_atomic_open(dir, dentry, nd); nfs_end_data_update(dir); } else - inode = nfs4_atomic_open(dir, dentry, nd); + res = nfs4_atomic_open(dir, dentry, nd); unlock_kernel(); - if (IS_ERR(inode)) { - error = PTR_ERR(inode); + if (IS_ERR(res)) { + error = PTR_ERR(res); switch (error) { /* Make a negative dentry */ case -ENOENT: - inode = NULL; - break; + res = NULL; + goto out; /* This turned out not to be a regular file */ + case -EISDIR: + case -ENOTDIR: + goto no_open; case -ELOOP: if (!(nd->intent.open.flags & O_NOFOLLOW)) goto no_open; - /* case -EISDIR: */ /* case -EINVAL: */ default: - res = ERR_PTR(error); goto out; } - } -no_entry: - res = d_add_unique(dentry, inode); - if (res != NULL) + } else if (res != NULL) dentry = res; nfs_renew_times(dentry); nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); @@ -1014,7 +1007,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd) */ lock_kernel(); verifier = nfs_save_change_attribute(dir); - ret = nfs4_open_revalidate(dir, dentry, openflags); + ret = nfs4_open_revalidate(dir, dentry, openflags, nd); if (!ret) nfs_set_verifier(dentry, verifier); unlock_kernel(); @@ -1137,7 +1130,7 @@ static int nfs_create(struct inode *dir, struct dentry *dentry, int mode, lock_kernel(); nfs_begin_data_update(dir); - error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags); + error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, nd); nfs_end_data_update(dir); if (error != 0) goto out_err; @@ -1332,6 +1325,7 @@ static int nfs_safe_remove(struct dentry *dentry) nfs_begin_data_update(dir); if (inode != NULL) { + nfs_inode_return_delegation(inode); nfs_begin_data_update(inode); error = NFS_PROTO(dir)->remove(dir, &dentry->d_name); /* The VFS may want to delete this inode */ @@ -1438,17 +1432,14 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) old_dentry->d_parent->d_name.name, old_dentry->d_name.name, dentry->d_parent->d_name.name, dentry->d_name.name); - /* - * Drop the dentry in advance to force a new lookup. - * Since nfs_proc_link doesn't return a file handle, - * we can't use the existing dentry. 
- */ lock_kernel(); - d_drop(dentry); - nfs_begin_data_update(dir); nfs_begin_data_update(inode); error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name); + if (error == 0) { + atomic_inc(&inode->i_count); + d_instantiate(dentry, inode); + } nfs_end_data_update(inode); nfs_end_data_update(dir); unlock_kernel(); @@ -1512,9 +1503,11 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry, */ if (!new_inode) goto go_ahead; - if (S_ISDIR(new_inode->i_mode)) - goto out; - else if (atomic_read(&new_dentry->d_count) > 2) { + if (S_ISDIR(new_inode->i_mode)) { + error = -EISDIR; + if (!S_ISDIR(old_inode->i_mode)) + goto out; + } else if (atomic_read(&new_dentry->d_count) > 2) { int err; /* copy the target dentry's name */ dentry = d_alloc(new_dentry->d_parent, @@ -1539,7 +1532,8 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry, #endif goto out; } - } + } else + new_inode->i_nlink--; go_ahead: /* @@ -1549,6 +1543,7 @@ go_ahead: nfs_wb_all(old_inode); shrink_dcache_parent(old_dentry); } + nfs_inode_return_delegation(old_inode); if (new_inode) d_delete(new_dentry); diff --git a/fs/nfs/file.c b/fs/nfs/file.c index f6b9eda925c5..57d3e77d97ee 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -137,7 +137,8 @@ static int nfs_revalidate_file(struct inode *inode, struct file *filp) struct nfs_inode *nfsi = NFS_I(inode); int retval = 0; - if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE) || nfs_attribute_timeout(inode)) + if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR)) + || nfs_attribute_timeout(inode)) retval = __nfs_revalidate_inode(NFS_SERVER(inode), inode); nfs_revalidate_mapping(inode, filp->f_mapping); return 0; @@ -204,8 +205,8 @@ nfs_file_flush(struct file *file) if (!status) { status = ctx->error; ctx->error = 0; - if (!status && !nfs_have_delegation(inode, FMODE_READ)) - __nfs_revalidate_inode(NFS_SERVER(inode), inode); + if (!status) + nfs_revalidate_inode(NFS_SERVER(inode), inode); } unlock_kernel(); return status; @@ -375,22 +376,31 @@ out_swapfile: static int do_getlk(struct file *filp, int cmd, struct file_lock *fl) { + struct file_lock *cfl; struct inode *inode = filp->f_mapping->host; int status = 0; lock_kernel(); - /* Use local locking if mounted with "-onolock" */ - if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM)) - status = NFS_PROTO(inode)->lock(filp, cmd, fl); - else { - struct file_lock *cfl = posix_test_lock(filp, fl); - - fl->fl_type = F_UNLCK; - if (cfl != NULL) - memcpy(fl, cfl, sizeof(*fl)); + /* Try local locking first */ + cfl = posix_test_lock(filp, fl); + if (cfl != NULL) { + locks_copy_lock(fl, cfl); + goto out; } + + if (nfs_have_delegation(inode, FMODE_READ)) + goto out_noconflict; + + if (NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM) + goto out_noconflict; + + status = NFS_PROTO(inode)->lock(filp, cmd, fl); +out: unlock_kernel(); return status; +out_noconflict: + fl->fl_type = F_UNLCK; + goto out; } static int do_vfs_lock(struct file *file, struct file_lock *fl) diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 6922469d6fc5..f2781ca42761 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -358,6 +358,35 @@ out_no_root: return no_root_error; } +static void nfs_init_timeout_values(struct rpc_timeout *to, int proto, unsigned int timeo, unsigned int retrans) +{ + to->to_initval = timeo * HZ / 10; + to->to_retries = retrans; + if (!to->to_retries) + to->to_retries = 2; + + switch (proto) { + case IPPROTO_TCP: + if (!to->to_initval) + to->to_initval = 60 * HZ; + if (to->to_initval > NFS_MAX_TCP_TIMEOUT) 
+ to->to_initval = NFS_MAX_TCP_TIMEOUT; + to->to_increment = to->to_initval; + to->to_maxval = to->to_initval + (to->to_increment * to->to_retries); + to->to_exponential = 0; + break; + case IPPROTO_UDP: + default: + if (!to->to_initval) + to->to_initval = 11 * HZ / 10; + if (to->to_initval > NFS_MAX_UDP_TIMEOUT) + to->to_initval = NFS_MAX_UDP_TIMEOUT; + to->to_maxval = NFS_MAX_UDP_TIMEOUT; + to->to_exponential = 1; + break; + } +} + /* * Create an RPC client handle. */ @@ -367,22 +396,12 @@ nfs_create_client(struct nfs_server *server, const struct nfs_mount_data *data) struct rpc_timeout timeparms; struct rpc_xprt *xprt = NULL; struct rpc_clnt *clnt = NULL; - int tcp = (data->flags & NFS_MOUNT_TCP); + int proto = (data->flags & NFS_MOUNT_TCP) ? IPPROTO_TCP : IPPROTO_UDP; - /* Initialize timeout values */ - timeparms.to_initval = data->timeo * HZ / 10; - timeparms.to_retries = data->retrans; - timeparms.to_maxval = tcp ? RPC_MAX_TCP_TIMEOUT : RPC_MAX_UDP_TIMEOUT; - timeparms.to_exponential = 1; - - if (!timeparms.to_initval) - timeparms.to_initval = (tcp ? 600 : 11) * HZ / 10; - if (!timeparms.to_retries) - timeparms.to_retries = 5; + nfs_init_timeout_values(&timeparms, proto, data->timeo, data->retrans); /* create transport and client */ - xprt = xprt_create_proto(tcp ? IPPROTO_TCP : IPPROTO_UDP, - &server->addr, &timeparms); + xprt = xprt_create_proto(proto, &server->addr, &timeparms); if (IS_ERR(xprt)) { dprintk("%s: cannot create RPC transport. Error = %ld\n", __FUNCTION__, PTR_ERR(xprt)); @@ -576,7 +595,6 @@ static int nfs_show_options(struct seq_file *m, struct vfsmount *mnt) { NFS_MOUNT_SOFT, ",soft", ",hard" }, { NFS_MOUNT_INTR, ",intr", "" }, { NFS_MOUNT_POSIX, ",posix", "" }, - { NFS_MOUNT_TCP, ",tcp", ",udp" }, { NFS_MOUNT_NOCTO, ",nocto", "" }, { NFS_MOUNT_NOAC, ",noac", "" }, { NFS_MOUNT_NONLM, ",nolock", ",lock" }, @@ -585,6 +603,8 @@ static int nfs_show_options(struct seq_file *m, struct vfsmount *mnt) }; struct proc_nfs_info *nfs_infop; struct nfs_server *nfss = NFS_SB(mnt->mnt_sb); + char buf[12]; + char *proto; seq_printf(m, ",v%d", nfss->rpc_ops->version); seq_printf(m, ",rsize=%d", nfss->rsize); @@ -603,6 +623,18 @@ static int nfs_show_options(struct seq_file *m, struct vfsmount *mnt) else seq_puts(m, nfs_infop->nostr); } + switch (nfss->client->cl_xprt->prot) { + case IPPROTO_TCP: + proto = "tcp"; + break; + case IPPROTO_UDP: + proto = "udp"; + break; + default: + snprintf(buf, sizeof(buf), "%u", nfss->client->cl_xprt->prot); + proto = buf; + } + seq_printf(m, ",proto=%s", proto); seq_puts(m, ",addr="); seq_escape(m, nfss->hostname, " \t\n\\"); return 0; @@ -753,7 +785,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr) else init_special_inode(inode, inode->i_mode, fattr->rdev); - nfsi->read_cache_jiffies = fattr->timestamp; + nfsi->read_cache_jiffies = fattr->time_start; + nfsi->last_updated = jiffies; inode->i_atime = fattr->atime; inode->i_mtime = fattr->mtime; inode->i_ctime = fattr->ctime; @@ -821,6 +854,11 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr) filemap_fdatawait(inode->i_mapping); nfs_wb_all(inode); } + /* + * Return any delegations if we're going to change ACLs + */ + if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) + nfs_inode_return_delegation(inode); error = NFS_PROTO(inode)->setattr(dentry, &fattr, attr); if (error == 0) nfs_refresh_inode(inode, &fattr); @@ -877,12 +915,10 @@ static int nfs_wait_on_inode(struct inode *inode) sigset_t oldmask; int error; - atomic_inc(&inode->i_count); 
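nfs_init_timeout_values above turns the mount options timeo (in tenths of a second) and retrans into one RPC timeout description, with linear backoff for TCP and exponential backoff for UDP. A user-space rendition of the same bookkeeping; HZ and the MAX_*_TIMEOUT caps are example values, and the struct only mirrors the shape of the kernel's rpc_timeout, not its exact definition.

    #include <stdio.h>

    #define HZ              1000L
    #define MAX_TCP_TIMEOUT (600L * HZ)
    #define MAX_UDP_TIMEOUT (60L * HZ)

    enum proto { PROTO_UDP, PROTO_TCP };

    struct timeout {
        long initval;       /* first timeout, in ticks */
        long increment;     /* linear step (TCP) */
        long maxval;        /* ceiling for any single timeout */
        int  retries;
        int  exponential;   /* double instead of adding the increment */
    };

    static void init_timeout(struct timeout *to, enum proto proto,
                             unsigned int timeo, unsigned int retrans)
    {
        to->initval = timeo * HZ / 10;        /* timeo is in tenths of a second */
        to->retries = retrans ? (int)retrans : 2;

        if (proto == PROTO_TCP) {
            if (!to->initval)
                to->initval = 60 * HZ;        /* generous default for TCP */
            if (to->initval > MAX_TCP_TIMEOUT)
                to->initval = MAX_TCP_TIMEOUT;
            to->increment = to->initval;
            to->maxval = to->initval + to->increment * to->retries;
            to->exponential = 0;
        } else {
            if (!to->initval)
                to->initval = 11 * HZ / 10;   /* 1.1s default for UDP */
            if (to->initval > MAX_UDP_TIMEOUT)
                to->initval = MAX_UDP_TIMEOUT;
            to->maxval = MAX_UDP_TIMEOUT;
            to->exponential = 1;
        }
    }

    int main(void)
    {
        struct timeout to;

        init_timeout(&to, PROTO_UDP, 0, 0);
        printf("udp: init=%ld max=%ld retries=%d exp=%d\n",
               to.initval, to.maxval, to.retries, to.exponential);

        init_timeout(&to, PROTO_TCP, 600, 3); /* timeo=600 -> 60 seconds */
        printf("tcp: init=%ld max=%ld retries=%d exp=%d\n",
               to.initval, to.maxval, to.retries, to.exponential);
        return 0;
    }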
rpc_clnt_sigmask(clnt, &oldmask); error = wait_on_bit_lock(&nfsi->flags, NFS_INO_REVALIDATING, nfs_wait_schedule, TASK_INTERRUPTIBLE); rpc_clnt_sigunmask(clnt, &oldmask); - iput(inode); return error; } @@ -1021,15 +1057,11 @@ int nfs_open(struct inode *inode, struct file *filp) ctx->mode = filp->f_mode; nfs_file_set_open_context(filp, ctx); put_nfs_open_context(ctx); - if ((filp->f_mode & FMODE_WRITE) != 0) - nfs_begin_data_update(inode); return 0; } int nfs_release(struct inode *inode, struct file *filp) { - if ((filp->f_mode & FMODE_WRITE) != 0) - nfs_end_data_update(inode); nfs_file_clear_open_context(filp); return 0; } @@ -1085,14 +1117,15 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode) goto out; } + spin_lock(&inode->i_lock); status = nfs_update_inode(inode, &fattr, verifier); if (status) { + spin_unlock(&inode->i_lock); dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Ld) refresh failed, error=%d\n", inode->i_sb->s_id, (long long)NFS_FILEID(inode), status); goto out; } - spin_lock(&inode->i_lock); cache_validity = nfsi->cache_validity; nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE; @@ -1100,7 +1133,7 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode) * We may need to keep the attributes marked as invalid if * we raced with nfs_end_attr_update(). */ - if (verifier == nfsi->cache_change_attribute) + if (time_after_eq(verifier, nfsi->cache_change_attribute)) nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME); spin_unlock(&inode->i_lock); @@ -1167,7 +1200,7 @@ void nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping) if (S_ISDIR(inode->i_mode)) { memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf)); /* This ensures we revalidate child dentries */ - nfsi->cache_change_attribute++; + nfsi->cache_change_attribute = jiffies; } spin_unlock(&inode->i_lock); @@ -1199,20 +1232,19 @@ void nfs_end_data_update(struct inode *inode) struct nfs_inode *nfsi = NFS_I(inode); if (!nfs_have_delegation(inode, FMODE_READ)) { - /* Mark the attribute cache for revalidation */ - spin_lock(&inode->i_lock); - nfsi->cache_validity |= NFS_INO_INVALID_ATTR; - /* Directories and symlinks: invalidate page cache too */ - if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) + /* Directories and symlinks: invalidate page cache */ + if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) { + spin_lock(&inode->i_lock); nfsi->cache_validity |= NFS_INO_INVALID_DATA; - spin_unlock(&inode->i_lock); + spin_unlock(&inode->i_lock); + } } - nfsi->cache_change_attribute ++; + nfsi->cache_change_attribute = jiffies; atomic_dec(&nfsi->data_updates); } /** - * nfs_refresh_inode - verify consistency of the inode attribute cache + * nfs_check_inode_attributes - verify consistency of the inode attribute cache * @inode - pointer to inode * @fattr - updated attributes * @@ -1220,17 +1252,12 @@ void nfs_end_data_update(struct inode *inode) * so that fattr carries weak cache consistency data, then it may * also update the ctime/mtime/change_attribute. */ -int nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr) +static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fattr) { struct nfs_inode *nfsi = NFS_I(inode); loff_t cur_size, new_isize; int data_unstable; - /* Do we hold a delegation? */ - if (nfs_have_delegation(inode, FMODE_READ)) - return 0; - - spin_lock(&inode->i_lock); /* Are we in the process of updating data on the server? 
*/ data_unstable = nfs_caches_unstable(inode); @@ -1294,11 +1321,67 @@ int nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr) if (!timespec_equal(&inode->i_atime, &fattr->atime)) nfsi->cache_validity |= NFS_INO_INVALID_ATIME; - nfsi->read_cache_jiffies = fattr->timestamp; - spin_unlock(&inode->i_lock); + nfsi->read_cache_jiffies = fattr->time_start; return 0; } +/** + * nfs_refresh_inode - try to update the inode attribute cache + * @inode - pointer to inode + * @fattr - updated attributes + * + * Check that an RPC call that returned attributes has not overlapped with + * other recent updates of the inode metadata, then decide whether it is + * safe to do a full update of the inode attributes, or whether just to + * call nfs_check_inode_attributes. + */ +int nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr) +{ + struct nfs_inode *nfsi = NFS_I(inode); + int status; + + if ((fattr->valid & NFS_ATTR_FATTR) == 0) + return 0; + spin_lock(&inode->i_lock); + nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE; + if (nfs_verify_change_attribute(inode, fattr->time_start)) + nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME); + if (time_after(fattr->time_start, nfsi->last_updated)) + status = nfs_update_inode(inode, fattr, fattr->time_start); + else + status = nfs_check_inode_attributes(inode, fattr); + + spin_unlock(&inode->i_lock); + return status; +} + +/** + * nfs_post_op_update_inode - try to update the inode attribute cache + * @inode - pointer to inode + * @fattr - updated attributes + * + * After an operation that has changed the inode metadata, mark the + * attribute cache as being invalid, then try to update it. + */ +int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr) +{ + struct nfs_inode *nfsi = NFS_I(inode); + int status = 0; + + spin_lock(&inode->i_lock); + if (unlikely((fattr->valid & NFS_ATTR_FATTR) == 0)) { + nfsi->cache_validity |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS; + goto out; + } + status = nfs_update_inode(inode, fattr, fattr->time_start); + if (time_after_eq(fattr->time_start, nfsi->cache_change_attribute)) + nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME|NFS_INO_REVAL_PAGECACHE); + nfsi->cache_change_attribute = jiffies; +out: + spin_unlock(&inode->i_lock); + return status; +} + /* * Many nfs protocol calls return the new file attributes after * an operation. Here we update the inode to reflect the state @@ -1334,23 +1417,21 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr, unsign goto out_err; } - spin_lock(&inode->i_lock); - /* * Make sure the inode's type hasn't changed. */ - if ((inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) { - spin_unlock(&inode->i_lock); + if ((inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) goto out_changed; - } /* * Update the read time so we don't revalidate too often. */ - nfsi->read_cache_jiffies = fattr->timestamp; + nfsi->read_cache_jiffies = fattr->time_start; + nfsi->last_updated = jiffies; /* Are we racing with known updates of the metadata on the server? */ - data_unstable = ! nfs_verify_change_attribute(inode, verifier); + data_unstable = ! (nfs_verify_change_attribute(inode, verifier) || + (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)); /* Check if our cached file size is stale */ new_isize = nfs_size_to_loff_t(fattr->size); @@ -1359,7 +1440,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr, unsign /* Do we perhaps have any outstanding writes? 
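/*
 * Sketch of the decision the new nfs_refresh_inode() makes, modelled in
 * user space: attributes whose request was started (fattr->time_start)
 * after the last full update (nfsi->last_updated) are applied in full via
 * nfs_update_inode(); older replies only get the weaker consistency check
 * in nfs_check_inode_attributes().  The struct layout, the plain ">" in
 * place of time_after(), and the string return are simplifications.
 */
#include <stdio.h>
#include <stdbool.h>

struct fake_inode {
	unsigned long last_updated;	/* jiffies of last full attribute update */
};

struct fake_fattr {
	bool valid;			/* did the reply carry attributes at all? */
	unsigned long time_start;	/* jiffies when the request was sent */
};

static const char *refresh(struct fake_inode *ino, const struct fake_fattr *f)
{
	if (!f->valid)
		return "ignored (no attributes in the reply)";
	if (f->time_start > ino->last_updated) {
		ino->last_updated = f->time_start;
		return "full update of the cached attributes";
	}
	return "consistency check only (reply may be stale)";
}

int main(void)
{
	struct fake_inode ino = { .last_updated = 1000 };
	struct fake_fattr stale = { .valid = true, .time_start = 900 };
	struct fake_fattr fresh = { .valid = true, .time_start = 1100 };

	printf("stale reply: %s\n", refresh(&ino, &stale));
	printf("fresh reply: %s\n", refresh(&ino, &fresh));
	return 0;
}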
*/ if (nfsi->npages == 0) { /* No, but did we race with nfs_end_data_update()? */ - if (verifier == nfsi->cache_change_attribute) { + if (time_after_eq(verifier, nfsi->cache_change_attribute)) { inode->i_size = new_isize; invalid |= NFS_INO_INVALID_DATA; } @@ -1435,7 +1516,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr, unsign if (!nfs_have_delegation(inode, FMODE_READ)) nfsi->cache_validity |= invalid; - spin_unlock(&inode->i_lock); return 0; out_changed: /* @@ -1644,8 +1724,7 @@ static void nfs4_clear_inode(struct inode *inode) struct nfs_inode *nfsi = NFS_I(inode); /* If we are holding a delegation, return it! */ - if (nfsi->delegation != NULL) - nfs_inode_return_delegation(inode); + nfs_inode_return_delegation(inode); /* First call standard NFS clear_inode() code */ nfs_clear_inode(inode); /* Now clear out any remaining state */ @@ -1674,7 +1753,7 @@ static int nfs4_fill_super(struct super_block *sb, struct nfs4_mount_data *data, struct rpc_clnt *clnt = NULL; struct rpc_timeout timeparms; rpc_authflavor_t authflavour; - int proto, err = -EIO; + int err = -EIO; sb->s_blocksize_bits = 0; sb->s_blocksize = 0; @@ -1692,30 +1771,8 @@ static int nfs4_fill_super(struct super_block *sb, struct nfs4_mount_data *data, server->acdirmax = data->acdirmax*HZ; server->rpc_ops = &nfs_v4_clientops; - /* Initialize timeout values */ - timeparms.to_initval = data->timeo * HZ / 10; - timeparms.to_retries = data->retrans; - timeparms.to_exponential = 1; - if (!timeparms.to_retries) - timeparms.to_retries = 5; - - proto = data->proto; - /* Which IP protocol do we use? */ - switch (proto) { - case IPPROTO_TCP: - timeparms.to_maxval = RPC_MAX_TCP_TIMEOUT; - if (!timeparms.to_initval) - timeparms.to_initval = 600 * HZ / 10; - break; - case IPPROTO_UDP: - timeparms.to_maxval = RPC_MAX_UDP_TIMEOUT; - if (!timeparms.to_initval) - timeparms.to_initval = 11 * HZ / 10; - break; - default: - return -EINVAL; - } + nfs_init_timeout_values(&timeparms, data->proto, data->timeo, data->retrans); clp = nfs4_get_client(&server->addr.sin_addr); if (!clp) { @@ -1740,7 +1797,7 @@ static int nfs4_fill_super(struct super_block *sb, struct nfs4_mount_data *data, down_write(&clp->cl_sem); if (IS_ERR(clp->cl_rpcclient)) { - xprt = xprt_create_proto(proto, &server->addr, &timeparms); + xprt = xprt_create_proto(data->proto, &server->addr, &timeparms); if (IS_ERR(xprt)) { up_write(&clp->cl_sem); err = PTR_ERR(xprt); diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index d91b69044a4d..59049e864ca7 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -143,7 +143,6 @@ xdr_decode_fattr(u32 *p, struct nfs_fattr *fattr) fattr->mode = (fattr->mode & ~S_IFMT) | S_IFIFO; fattr->rdev = 0; } - fattr->timestamp = jiffies; return p; } diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index edc95514046d..92c870d19ccd 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -78,7 +78,7 @@ nfs3_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle, int status; dprintk("%s: call fsinfo\n", __FUNCTION__); - info->fattr->valid = 0; + nfs_fattr_init(info->fattr); status = rpc_call(server->client_sys, NFS3PROC_FSINFO, fhandle, info, 0); dprintk("%s: reply fsinfo: %d\n", __FUNCTION__, status); if (!(info->fattr->valid & NFS_ATTR_FATTR)) { @@ -98,7 +98,7 @@ nfs3_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, int status; dprintk("NFS call getattr\n"); - fattr->valid = 0; + nfs_fattr_init(fattr); status = rpc_call(server->client, NFS3PROC_GETATTR, fhandle, fattr, 0); dprintk("NFS reply getattr: 
%d\n", status); @@ -117,7 +117,7 @@ nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, int status; dprintk("NFS call setattr\n"); - fattr->valid = 0; + nfs_fattr_init(fattr); status = rpc_call(NFS_CLIENT(inode), NFS3PROC_SETATTR, &arg, fattr, 0); if (status == 0) nfs_setattr_update_inode(inode, sattr); @@ -143,8 +143,8 @@ nfs3_proc_lookup(struct inode *dir, struct qstr *name, int status; dprintk("NFS call lookup %s\n", name->name); - dir_attr.valid = 0; - fattr->valid = 0; + nfs_fattr_init(&dir_attr); + nfs_fattr_init(fattr); status = rpc_call(NFS_CLIENT(dir), NFS3PROC_LOOKUP, &arg, &res, 0); if (status >= 0 && !(fattr->valid & NFS_ATTR_FATTR)) status = rpc_call(NFS_CLIENT(dir), NFS3PROC_GETATTR, @@ -174,7 +174,6 @@ static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry) int status; dprintk("NFS call access\n"); - fattr.valid = 0; if (mode & MAY_READ) arg.access |= NFS3_ACCESS_READ; @@ -189,6 +188,7 @@ static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry) if (mode & MAY_EXEC) arg.access |= NFS3_ACCESS_EXECUTE; } + nfs_fattr_init(&fattr); status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); nfs_refresh_inode(inode, &fattr); if (status == 0) { @@ -217,7 +217,7 @@ static int nfs3_proc_readlink(struct inode *inode, struct page *page, int status; dprintk("NFS call readlink\n"); - fattr.valid = 0; + nfs_fattr_init(&fattr); status = rpc_call(NFS_CLIENT(inode), NFS3PROC_READLINK, &args, &fattr, 0); nfs_refresh_inode(inode, &fattr); @@ -240,7 +240,7 @@ static int nfs3_proc_read(struct nfs_read_data *rdata) dprintk("NFS call read %d @ %Ld\n", rdata->args.count, (long long) rdata->args.offset); - fattr->valid = 0; + nfs_fattr_init(fattr); status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags); if (status >= 0) nfs_refresh_inode(inode, fattr); @@ -263,10 +263,10 @@ static int nfs3_proc_write(struct nfs_write_data *wdata) dprintk("NFS call write %d @ %Ld\n", wdata->args.count, (long long) wdata->args.offset); - fattr->valid = 0; + nfs_fattr_init(fattr); status = rpc_call_sync(NFS_CLIENT(inode), &msg, rpcflags); if (status >= 0) - nfs_refresh_inode(inode, fattr); + nfs_post_op_update_inode(inode, fattr); dprintk("NFS reply write: %d\n", status); return status < 0? status : wdata->res.count; } @@ -285,10 +285,10 @@ static int nfs3_proc_commit(struct nfs_write_data *cdata) dprintk("NFS call commit %d @ %Ld\n", cdata->args.count, (long long) cdata->args.offset); - fattr->valid = 0; + nfs_fattr_init(fattr); status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); if (status >= 0) - nfs_refresh_inode(inode, fattr); + nfs_post_op_update_inode(inode, fattr); dprintk("NFS reply commit: %d\n", status); return status; } @@ -299,7 +299,7 @@ static int nfs3_proc_commit(struct nfs_write_data *cdata) */ static int nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, - int flags) + int flags, struct nameidata *nd) { struct nfs_fh fhandle; struct nfs_fattr fattr; @@ -329,10 +329,10 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, sattr->ia_mode &= ~current->fs->umask; again: - dir_attr.valid = 0; - fattr.valid = 0; + nfs_fattr_init(&dir_attr); + nfs_fattr_init(&fattr); status = rpc_call(NFS_CLIENT(dir), NFS3PROC_CREATE, &arg, &res, 0); - nfs_refresh_inode(dir, &dir_attr); + nfs_post_op_update_inode(dir, &dir_attr); /* If the server doesn't support the exclusive creation semantics, * try again with simple 'guarded' mode. 
*/ @@ -401,9 +401,9 @@ nfs3_proc_remove(struct inode *dir, struct qstr *name) int status; dprintk("NFS call remove %s\n", name->name); - dir_attr.valid = 0; + nfs_fattr_init(&dir_attr); status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); - nfs_refresh_inode(dir, &dir_attr); + nfs_post_op_update_inode(dir, &dir_attr); dprintk("NFS reply remove: %d\n", status); return status; } @@ -422,7 +422,7 @@ nfs3_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir, struct qstr ptr->arg.fh = NFS_FH(dir->d_inode); ptr->arg.name = name->name; ptr->arg.len = name->len; - ptr->res.valid = 0; + nfs_fattr_init(&ptr->res); msg->rpc_proc = &nfs3_procedures[NFS3PROC_REMOVE]; msg->rpc_argp = &ptr->arg; msg->rpc_resp = &ptr->res; @@ -439,7 +439,7 @@ nfs3_proc_unlink_done(struct dentry *dir, struct rpc_task *task) return 1; if (msg->rpc_argp) { dir_attr = (struct nfs_fattr*)msg->rpc_resp; - nfs_refresh_inode(dir->d_inode, dir_attr); + nfs_post_op_update_inode(dir->d_inode, dir_attr); kfree(msg->rpc_argp); } return 0; @@ -465,11 +465,11 @@ nfs3_proc_rename(struct inode *old_dir, struct qstr *old_name, int status; dprintk("NFS call rename %s -> %s\n", old_name->name, new_name->name); - old_dir_attr.valid = 0; - new_dir_attr.valid = 0; + nfs_fattr_init(&old_dir_attr); + nfs_fattr_init(&new_dir_attr); status = rpc_call(NFS_CLIENT(old_dir), NFS3PROC_RENAME, &arg, &res, 0); - nfs_refresh_inode(old_dir, &old_dir_attr); - nfs_refresh_inode(new_dir, &new_dir_attr); + nfs_post_op_update_inode(old_dir, &old_dir_attr); + nfs_post_op_update_inode(new_dir, &new_dir_attr); dprintk("NFS reply rename: %d\n", status); return status; } @@ -491,11 +491,11 @@ nfs3_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) int status; dprintk("NFS call link %s\n", name->name); - dir_attr.valid = 0; - fattr.valid = 0; + nfs_fattr_init(&dir_attr); + nfs_fattr_init(&fattr); status = rpc_call(NFS_CLIENT(inode), NFS3PROC_LINK, &arg, &res, 0); - nfs_refresh_inode(dir, &dir_attr); - nfs_refresh_inode(inode, &fattr); + nfs_post_op_update_inode(dir, &dir_attr); + nfs_post_op_update_inode(inode, &fattr); dprintk("NFS reply link: %d\n", status); return status; } @@ -524,10 +524,10 @@ nfs3_proc_symlink(struct inode *dir, struct qstr *name, struct qstr *path, if (path->len > NFS3_MAXPATHLEN) return -ENAMETOOLONG; dprintk("NFS call symlink %s -> %s\n", name->name, path->name); - dir_attr.valid = 0; - fattr->valid = 0; + nfs_fattr_init(&dir_attr); + nfs_fattr_init(fattr); status = rpc_call(NFS_CLIENT(dir), NFS3PROC_SYMLINK, &arg, &res, 0); - nfs_refresh_inode(dir, &dir_attr); + nfs_post_op_update_inode(dir, &dir_attr); dprintk("NFS reply symlink: %d\n", status); return status; } @@ -552,13 +552,13 @@ nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr) int status; dprintk("NFS call mkdir %s\n", dentry->d_name.name); - dir_attr.valid = 0; - fattr.valid = 0; sattr->ia_mode &= ~current->fs->umask; + nfs_fattr_init(&dir_attr); + nfs_fattr_init(&fattr); status = rpc_call(NFS_CLIENT(dir), NFS3PROC_MKDIR, &arg, &res, 0); - nfs_refresh_inode(dir, &dir_attr); + nfs_post_op_update_inode(dir, &dir_attr); if (status != 0) goto out; status = nfs_instantiate(dentry, &fhandle, &fattr); @@ -582,9 +582,9 @@ nfs3_proc_rmdir(struct inode *dir, struct qstr *name) int status; dprintk("NFS call rmdir %s\n", name->name); - dir_attr.valid = 0; + nfs_fattr_init(&dir_attr); status = rpc_call(NFS_CLIENT(dir), NFS3PROC_RMDIR, &arg, &dir_attr, 0); - nfs_refresh_inode(dir, &dir_attr); + nfs_post_op_update_inode(dir, &dir_attr); 
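/*
 * The directory-mutating NFSv3 calls in this file (CREATE, REMOVE, RENAME,
 * LINK, SYMLINK, MKDIR, RMDIR, MKNOD, plus WRITE/COMMIT on the file itself)
 * now feed their post-op attributes to nfs_post_op_update_inode() instead of
 * nfs_refresh_inode().  A rough model of the difference: when the reply
 * carries no attributes the cache is marked invalid, because the operation
 * certainly changed the object; when it does, the returned attributes are
 * applied straight away.  The flag values are arbitrary; only the
 * NFS_INO_INVALID_ATTR/ACCESS names come from the patch.
 */
#include <stdio.h>
#include <stdbool.h>

#define INVALID_ATTR	0x1
#define INVALID_ACCESS	0x2

struct dir_cache {
	unsigned int invalid_flags;
};

static void post_op_update(struct dir_cache *dir, bool reply_had_attrs)
{
	if (!reply_had_attrs) {
		/* the object changed but we got nothing back: force a GETATTR */
		dir->invalid_flags |= INVALID_ATTR | INVALID_ACCESS;
		return;
	}
	/* apply the returned attributes and trust them again */
	dir->invalid_flags &= ~INVALID_ATTR;
}

int main(void)
{
	struct dir_cache dir = { 0 };

	post_op_update(&dir, false);
	printf("no attrs in reply: flags 0x%x\n", dir.invalid_flags);
	post_op_update(&dir, true);
	printf("attrs in reply   : flags 0x%x\n", dir.invalid_flags);
	return 0;
}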
dprintk("NFS reply rmdir: %d\n", status); return status; } @@ -634,7 +634,7 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, dprintk("NFS call readdir%s %d\n", plus? "plus" : "", (unsigned int) cookie); - dir_attr.valid = 0; + nfs_fattr_init(&dir_attr); status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); nfs_refresh_inode(dir, &dir_attr); dprintk("NFS reply readdir: %d\n", status); @@ -676,10 +676,10 @@ nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr, sattr->ia_mode &= ~current->fs->umask; - dir_attr.valid = 0; - fattr.valid = 0; + nfs_fattr_init(&dir_attr); + nfs_fattr_init(&fattr); status = rpc_call(NFS_CLIENT(dir), NFS3PROC_MKNOD, &arg, &res, 0); - nfs_refresh_inode(dir, &dir_attr); + nfs_post_op_update_inode(dir, &dir_attr); if (status != 0) goto out; status = nfs_instantiate(dentry, &fh, &fattr); @@ -698,7 +698,7 @@ nfs3_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, int status; dprintk("NFS call fsstat\n"); - stat->fattr->valid = 0; + nfs_fattr_init(stat->fattr); status = rpc_call(server->client, NFS3PROC_FSSTAT, fhandle, stat, 0); dprintk("NFS reply statfs: %d\n", status); return status; @@ -711,7 +711,7 @@ nfs3_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, int status; dprintk("NFS call fsinfo\n"); - info->fattr->valid = 0; + nfs_fattr_init(info->fattr); status = rpc_call(server->client_sys, NFS3PROC_FSINFO, fhandle, info, 0); dprintk("NFS reply fsinfo: %d\n", status); return status; @@ -724,7 +724,7 @@ nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, int status; dprintk("NFS call pathconf\n"); - info->fattr->valid = 0; + nfs_fattr_init(info->fattr); status = rpc_call(server->client, NFS3PROC_PATHCONF, fhandle, info, 0); dprintk("NFS reply pathconf: %d\n", status); return status; @@ -735,7 +735,7 @@ extern u32 *nfs3_decode_dirent(u32 *, struct nfs_entry *, int); static void nfs3_read_done(struct rpc_task *task) { - struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata; + struct nfs_read_data *data = (struct nfs_read_data *) task->tk_calldata; if (nfs3_async_handle_jukebox(task)) return; @@ -775,7 +775,7 @@ nfs3_write_done(struct rpc_task *task) return; data = (struct nfs_write_data *)task->tk_calldata; if (task->tk_status >= 0) - nfs_refresh_inode(data->inode, data->res.fattr); + nfs_post_op_update_inode(data->inode, data->res.fattr); nfs_writeback_done(task); } @@ -819,7 +819,7 @@ nfs3_commit_done(struct rpc_task *task) return; data = (struct nfs_write_data *)task->tk_calldata; if (task->tk_status >= 0) - nfs_refresh_inode(data->inode, data->res.fattr); + nfs_post_op_update_inode(data->inode, data->res.fattr); nfs_commit_done(task); } diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index db4a904810a4..0498bd36602c 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -174,7 +174,6 @@ xdr_decode_fattr(u32 *p, struct nfs_fattr *fattr) /* Update the mode bits */ fattr->valid |= (NFS_ATTR_FATTR | NFS_ATTR_FATTR_V3); - fattr->timestamp = jiffies; return p; } diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index ec1a22d7b876..78a53f5a9f18 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -92,26 +92,51 @@ struct nfs4_client { unsigned char cl_id_uniquifier; }; +/* + * struct rpc_sequence ensures that RPC calls are sent in the exact + * order that they appear on the list. 
+ */ +struct rpc_sequence { + struct rpc_wait_queue wait; /* RPC call delay queue */ + spinlock_t lock; /* Protects the list */ + struct list_head list; /* Defines sequence of RPC calls */ +}; + +#define NFS_SEQID_CONFIRMED 1 +struct nfs_seqid_counter { + struct rpc_sequence *sequence; + int flags; + u32 counter; +}; + +struct nfs_seqid { + struct nfs_seqid_counter *sequence; + struct list_head list; +}; + +static inline void nfs_confirm_seqid(struct nfs_seqid_counter *seqid, int status) +{ + if (seqid_mutating_err(-status)) + seqid->flags |= NFS_SEQID_CONFIRMED; +} + /* * NFS4 state_owners and lock_owners are simply labels for ordered * sequences of RPC calls. Their sole purpose is to provide once-only * semantics by allowing the server to identify replayed requests. - * - * The ->so_sema is held during all state_owner seqid-mutating operations: - * OPEN, OPEN_DOWNGRADE, and CLOSE. Its purpose is to properly serialize - * so_seqid. */ struct nfs4_state_owner { + spinlock_t so_lock; struct list_head so_list; /* per-clientid list of state_owners */ struct nfs4_client *so_client; u32 so_id; /* 32-bit identifier, unique */ - struct semaphore so_sema; - u32 so_seqid; /* protected by so_sema */ atomic_t so_count; struct rpc_cred *so_cred; /* Associated cred */ struct list_head so_states; struct list_head so_delegations; + struct nfs_seqid_counter so_seqid; + struct rpc_sequence so_sequence; }; /* @@ -132,7 +157,7 @@ struct nfs4_lock_state { fl_owner_t ls_owner; /* POSIX lock owner */ #define NFS_LOCK_INITIALIZED 1 int ls_flags; - u32 ls_seqid; + struct nfs_seqid_counter ls_seqid; u32 ls_id; nfs4_stateid ls_stateid; atomic_t ls_count; @@ -153,7 +178,6 @@ struct nfs4_state { struct inode *inode; /* Pointer to the inode */ unsigned long flags; /* Do we hold any locks? 
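/*
 * User-space model of the seqid machinery that replaces the so_sema /
 * lock_sema semaphores: each open_owner or lock_owner keeps a counter plus
 * a FIFO of outstanding nfs_seqid objects (the struct rpc_sequence above),
 * and an RPC may only go on the wire once its seqid sits at the head of
 * that FIFO.  The list handling, the point where the value is sampled, and
 * the "bump only on success" rule below are simplifying assumptions; the
 * real code also bumps the counter on seqid-mutating errors.
 */
#include <stdio.h>
#include <stdlib.h>

struct seqid {
	unsigned int value;
	struct seqid *next;
};

struct seqid_counter {
	unsigned int counter;		/* next sequence id for the server */
	struct seqid *head, *tail;	/* FIFO defining on-the-wire order */
};

/* models nfs_alloc_seqid() */
static struct seqid *alloc_seqid(struct seqid_counter *c)
{
	struct seqid *s = calloc(1, sizeof(*s));

	if (s == NULL)
		return NULL;
	s->value = c->counter;
	if (c->tail != NULL)
		c->tail->next = s;
	else
		c->head = s;
	c->tail = s;
	return s;
}

/* may this request be sent yet?  (models nfs_wait_on_sequence) */
static int seqid_ready(const struct seqid_counter *c, const struct seqid *s)
{
	return c->head == s;
}

/* models nfs_increment_open_seqid()/nfs_increment_lock_seqid() */
static void increment_seqid(struct seqid_counter *c, int status)
{
	if (status == 0)
		c->counter++;
}

/* models nfs_free_seqid(): dropping the head unblocks the next waiter */
static void free_seqid(struct seqid_counter *c, struct seqid *s)
{
	c->head = s->next;
	if (c->head == NULL)
		c->tail = NULL;
	free(s);
}

int main(void)
{
	struct seqid_counter owner = { .counter = 1 };
	struct seqid *open_seq = alloc_seqid(&owner);
	struct seqid *close_seq = alloc_seqid(&owner);

	printf("OPEN ready=%d, CLOSE ready=%d\n",
	       seqid_ready(&owner, open_seq), seqid_ready(&owner, close_seq));
	increment_seqid(&owner, 0);	/* the OPEN succeeded */
	free_seqid(&owner, open_seq);
	printf("after OPEN: CLOSE ready=%d, next seqid=%u\n",
	       seqid_ready(&owner, close_seq), owner.counter);
	free_seqid(&owner, close_seq);
	return 0;
}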
*/ - struct semaphore lock_sema; /* Serializes file locking operations */ spinlock_t state_lock; /* Protects the lock_states list */ nfs4_stateid stateid; @@ -191,8 +215,8 @@ extern int nfs4_proc_setclientid_confirm(struct nfs4_client *); extern int nfs4_proc_async_renew(struct nfs4_client *); extern int nfs4_proc_renew(struct nfs4_client *); extern int nfs4_do_close(struct inode *inode, struct nfs4_state *state, mode_t mode); -extern struct inode *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *); -extern int nfs4_open_revalidate(struct inode *, struct dentry *, int); +extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *); +extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *); extern struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops; extern struct nfs4_state_recovery_ops nfs4_network_partition_recovery_ops; @@ -224,12 +248,17 @@ extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state extern void nfs4_put_open_state(struct nfs4_state *); extern void nfs4_close_state(struct nfs4_state *, mode_t); extern struct nfs4_state *nfs4_find_state(struct inode *, struct rpc_cred *, mode_t mode); -extern void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp); extern void nfs4_schedule_state_recovery(struct nfs4_client *); +extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl); -extern void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *ls); extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t); +extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter); +extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task); +extern void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid); +extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid); +extern void nfs_free_seqid(struct nfs_seqid *seqid); + extern const nfs4_stateid zero_stateid; /* nfs4xdr.c */ diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 9701ca8c9428..933e13b383f8 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -47,6 +47,7 @@ #include #include #include +#include #include "nfs4_fs.h" #include "delegation.h" @@ -56,10 +57,11 @@ #define NFS4_POLL_RETRY_MIN (1*HZ) #define NFS4_POLL_RETRY_MAX (15*HZ) +static int _nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nfs_fh *fh, struct nfs4_state_owner *sp, nfs4_stateid *stateid, struct nfs_seqid *seqid); static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); -static int nfs4_async_handle_error(struct rpc_task *, struct nfs_server *); +static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *); static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry); -static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception); +static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception); extern u32 *nfs4_decode_dirent(u32 *p, struct nfs_entry *entry, int plus); extern struct rpc_procinfo nfs4_procedures[]; @@ -185,8 +187,26 @@ static void update_changeattr(struct inode *inode, struct nfs4_change_info *cinf { struct nfs_inode *nfsi = NFS_I(inode); + spin_lock(&inode->i_lock); + nfsi->cache_validity |= NFS_INO_INVALID_ATTR; if (cinfo->before == nfsi->change_attr && cinfo->atomic) 
nfsi->change_attr = cinfo->after; + spin_unlock(&inode->i_lock); +} + +/* Helper for asynchronous RPC calls */ +static int nfs4_call_async(struct rpc_clnt *clnt, rpc_action tk_begin, + rpc_action tk_exit, void *calldata) +{ + struct rpc_task *task; + + if (!(task = rpc_new_task(clnt, tk_exit, RPC_TASK_ASYNC))) + return -ENOMEM; + + task->tk_calldata = calldata; + task->tk_action = tk_begin; + rpc_execute(task); + return 0; } static void update_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, int open_flags) @@ -195,6 +215,7 @@ static void update_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, open_flags &= (FMODE_READ|FMODE_WRITE); /* Protect against nfs4_find_state() */ + spin_lock(&state->owner->so_lock); spin_lock(&inode->i_lock); state->state |= open_flags; /* NB! List reordering - see the reclaim code for why. */ @@ -204,12 +225,12 @@ static void update_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, state->nreaders++; memcpy(&state->stateid, stateid, sizeof(state->stateid)); spin_unlock(&inode->i_lock); + spin_unlock(&state->owner->so_lock); } /* * OPEN_RECLAIM: * reclaim state on the server after a reboot. - * Assumes caller is holding the sp->so_sem */ static int _nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state) { @@ -218,7 +239,6 @@ static int _nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *st struct nfs_delegation *delegation = NFS_I(inode)->delegation; struct nfs_openargs o_arg = { .fh = NFS_FH(inode), - .seqid = sp->so_seqid, .id = sp->so_id, .open_flags = state->state, .clientid = server->nfs4_state->cl_clientid, @@ -245,8 +265,13 @@ static int _nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *st } o_arg.u.delegation_type = delegation->type; } + o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid); + if (o_arg.seqid == NULL) + return -ENOMEM; status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR); - nfs4_increment_seqid(status, sp); + /* Confirm the sequence as being established */ + nfs_confirm_seqid(&sp->so_seqid, status); + nfs_increment_open_seqid(status, o_arg.seqid); if (status == 0) { memcpy(&state->stateid, &o_res.stateid, sizeof(state->stateid)); if (o_res.delegation_type != 0) { @@ -256,6 +281,7 @@ static int _nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *st nfs_async_inode_return_delegation(inode, &o_res.stateid); } } + nfs_free_seqid(o_arg.seqid); clear_bit(NFS_DELEGATED_STATE, &state->flags); /* Ensure we update the inode attributes */ NFS_CACHEINV(inode); @@ -302,23 +328,35 @@ static int _nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state }; int status = 0; - down(&sp->so_sema); if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) goto out; if (state->state == 0) goto out; - arg.seqid = sp->so_seqid; + arg.seqid = nfs_alloc_seqid(&sp->so_seqid); + status = -ENOMEM; + if (arg.seqid == NULL) + goto out; arg.open_flags = state->state; memcpy(arg.u.delegation.data, state->stateid.data, sizeof(arg.u.delegation.data)); status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR); - nfs4_increment_seqid(status, sp); + nfs_increment_open_seqid(status, arg.seqid); + if (status != 0) + goto out_free; + if(res.rflags & NFS4_OPEN_RESULT_CONFIRM) { + status = _nfs4_proc_open_confirm(server->client, NFS_FH(inode), + sp, &res.stateid, arg.seqid); + if (status != 0) + goto out_free; + } + nfs_confirm_seqid(&sp->so_seqid, 0); if (status >= 0) { memcpy(state->stateid.data, res.stateid.data, sizeof(state->stateid.data)); 
clear_bit(NFS_DELEGATED_STATE, &state->flags); } +out_free: + nfs_free_seqid(arg.seqid); out: - up(&sp->so_sema); dput(parent); return status; } @@ -345,11 +383,11 @@ int nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state *state) return err; } -static inline int _nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nfs_fh *fh, struct nfs4_state_owner *sp, nfs4_stateid *stateid) +static int _nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nfs_fh *fh, struct nfs4_state_owner *sp, nfs4_stateid *stateid, struct nfs_seqid *seqid) { struct nfs_open_confirmargs arg = { .fh = fh, - .seqid = sp->so_seqid, + .seqid = seqid, .stateid = *stateid, }; struct nfs_open_confirmres res; @@ -362,7 +400,9 @@ static inline int _nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nf int status; status = rpc_call_sync(clnt, &msg, RPC_TASK_NOINTR); - nfs4_increment_seqid(status, sp); + /* Confirm the sequence as being established */ + nfs_confirm_seqid(&sp->so_seqid, status); + nfs_increment_open_seqid(status, seqid); if (status >= 0) memcpy(stateid, &res.stateid, sizeof(*stateid)); return status; @@ -380,21 +420,41 @@ static int _nfs4_proc_open(struct inode *dir, struct nfs4_state_owner *sp, stru int status; /* Update sequence id. The caller must serialize! */ - o_arg->seqid = sp->so_seqid; o_arg->id = sp->so_id; o_arg->clientid = sp->so_client->cl_clientid; status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR); - nfs4_increment_seqid(status, sp); + if (status == 0) { + /* OPEN on anything except a regular file is disallowed in NFSv4 */ + switch (o_res->f_attr->mode & S_IFMT) { + case S_IFREG: + break; + case S_IFLNK: + status = -ELOOP; + break; + case S_IFDIR: + status = -EISDIR; + break; + default: + status = -ENOTDIR; + } + } + + nfs_increment_open_seqid(status, o_arg->seqid); if (status != 0) goto out; - update_changeattr(dir, &o_res->cinfo); + if (o_arg->open_flags & O_CREAT) { + update_changeattr(dir, &o_res->cinfo); + nfs_post_op_update_inode(dir, o_res->dir_attr); + } else + nfs_refresh_inode(dir, o_res->dir_attr); if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { status = _nfs4_proc_open_confirm(server->client, &o_res->fh, - sp, &o_res->stateid); + sp, &o_res->stateid, o_arg->seqid); if (status != 0) goto out; } + nfs_confirm_seqid(&sp->so_seqid, 0); if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) status = server->rpc_ops->getattr(server, &o_res->fh, o_res->f_attr); out: @@ -441,9 +501,7 @@ static int _nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st struct inode *inode = state->inode; struct nfs_server *server = NFS_SERVER(dir); struct nfs_delegation *delegation = NFS_I(inode)->delegation; - struct nfs_fattr f_attr = { - .valid = 0, - }; + struct nfs_fattr f_attr, dir_attr; struct nfs_openargs o_arg = { .fh = NFS_FH(dir), .open_flags = state->state, @@ -453,6 +511,7 @@ static int _nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st }; struct nfs_openres o_res = { .f_attr = &f_attr, + .dir_attr = &dir_attr, .server = server, }; int status = 0; @@ -465,6 +524,12 @@ static int _nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st set_bit(NFS_DELEGATED_STATE, &state->flags); goto out; } + o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid); + status = -ENOMEM; + if (o_arg.seqid == NULL) + goto out; + nfs_fattr_init(&f_attr); + nfs_fattr_init(&dir_attr); status = _nfs4_proc_open(dir, sp, &o_arg, &o_res); if (status != 0) goto out_nodeleg; @@ -490,6 +555,7 @@ static int _nfs4_open_expired(struct nfs4_state_owner *sp, 
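/*
 * _nfs4_proc_open() now refuses to proceed when the OPEN came back for
 * anything other than a regular file.  The same mode-to-errno mapping as a
 * standalone sketch, using the standard S_IFMT bits:
 */
#include <stdio.h>
#include <errno.h>
#include <sys/stat.h>

static int open_mode_to_errno(mode_t mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG:
		return 0;		/* only regular files may be OPENed */
	case S_IFLNK:
		return -ELOOP;
	case S_IFDIR:
		return -EISDIR;
	default:
		return -ENOTDIR;	/* sockets, fifos, devices, ... */
	}
}

int main(void)
{
	printf("regular file: %d\n", open_mode_to_errno(S_IFREG | 0644));
	printf("directory   : %d\n", open_mode_to_errno(S_IFDIR | 0755));
	printf("symlink     : %d\n", open_mode_to_errno(S_IFLNK | 0777));
	printf("fifo        : %d\n", open_mode_to_errno(S_IFIFO | 0600));
	return 0;
}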
struct nfs4_state *st nfs_inode_reclaim_delegation(inode, sp->so_cred, &o_res); } out_nodeleg: + nfs_free_seqid(o_arg.seqid); clear_bit(NFS_DELEGATED_STATE, &state->flags); out: dput(parent); @@ -564,7 +630,6 @@ static int _nfs4_open_delegated(struct inode *inode, int flags, struct rpc_cred dprintk("%s: nfs4_get_state_owner failed!\n", __FUNCTION__); goto out_err; } - down(&sp->so_sema); state = nfs4_get_open_state(inode, sp); if (state == NULL) goto out_err; @@ -589,7 +654,6 @@ static int _nfs4_open_delegated(struct inode *inode, int flags, struct rpc_cred set_bit(NFS_DELEGATED_STATE, &state->flags); update_open_stateid(state, &delegation->stateid, open_flags); out_ok: - up(&sp->so_sema); nfs4_put_state_owner(sp); up_read(&nfsi->rwsem); up_read(&clp->cl_sem); @@ -600,11 +664,12 @@ out_err: if (sp != NULL) { if (state != NULL) nfs4_put_open_state(state); - up(&sp->so_sema); nfs4_put_state_owner(sp); } up_read(&nfsi->rwsem); up_read(&clp->cl_sem); + if (err != -EACCES) + nfs_inode_return_delegation(inode); return err; } @@ -635,9 +700,7 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, st struct nfs4_client *clp = server->nfs4_state; struct inode *inode = NULL; int status; - struct nfs_fattr f_attr = { - .valid = 0, - }; + struct nfs_fattr f_attr, dir_attr; struct nfs_openargs o_arg = { .fh = NFS_FH(dir), .open_flags = flags, @@ -648,6 +711,7 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, st }; struct nfs_openres o_res = { .f_attr = &f_attr, + .dir_attr = &dir_attr, .server = server, }; @@ -665,8 +729,12 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, st } else o_arg.u.attrs = sattr; /* Serialization for the sequence id */ - down(&sp->so_sema); + o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid); + if (o_arg.seqid == NULL) + return -ENOMEM; + nfs_fattr_init(&f_attr); + nfs_fattr_init(&dir_attr); status = _nfs4_proc_open(dir, sp, &o_arg, &o_res); if (status != 0) goto out_err; @@ -681,7 +749,7 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, int flags, st update_open_stateid(state, &o_res.stateid, flags); if (o_res.delegation_type != 0) nfs_inode_set_delegation(inode, cred, &o_res); - up(&sp->so_sema); + nfs_free_seqid(o_arg.seqid); nfs4_put_state_owner(sp); up_read(&clp->cl_sem); *res = state; @@ -690,7 +758,7 @@ out_err: if (sp != NULL) { if (state != NULL) nfs4_put_open_state(state); - up(&sp->so_sema); + nfs_free_seqid(o_arg.seqid); nfs4_put_state_owner(sp); } /* Note: clp->cl_sem must be released before nfs4_put_open_state()! */ @@ -718,7 +786,7 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, * It is actually a sign of a bug on the client or on the server. * * If we receive a BAD_SEQID error in the particular case of - * doing an OPEN, we assume that nfs4_increment_seqid() will + * doing an OPEN, we assume that nfs_increment_open_seqid() will * have unhashed the old state_owner for us, and that we can * therefore safely retry using a new one. We should still warn * the user though... @@ -728,6 +796,16 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, exception.retry = 1; continue; } + /* + * BAD_STATEID on OPEN means that the server cancelled our + * state before it received the OPEN_CONFIRM. + * Recover by retrying the request as per the discussion + * on Page 181 of RFC3530. 
+ */ + if (status == -NFS4ERR_BAD_STATEID) { + exception.retry = 1; + continue; + } res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir), status, &exception)); } while (exception.retry); @@ -755,7 +833,7 @@ static int _nfs4_do_setattr(struct nfs_server *server, struct nfs_fattr *fattr, }; int status; - fattr->valid = 0; + nfs_fattr_init(fattr); if (state != NULL) { msg.rpc_cred = state->owner->so_cred; @@ -787,19 +865,30 @@ struct nfs4_closedata { struct nfs4_state *state; struct nfs_closeargs arg; struct nfs_closeres res; + struct nfs_fattr fattr; }; +static void nfs4_free_closedata(struct nfs4_closedata *calldata) +{ + struct nfs4_state *state = calldata->state; + struct nfs4_state_owner *sp = state->owner; + + nfs4_put_open_state(calldata->state); + nfs_free_seqid(calldata->arg.seqid); + nfs4_put_state_owner(sp); + kfree(calldata); +} + static void nfs4_close_done(struct rpc_task *task) { struct nfs4_closedata *calldata = (struct nfs4_closedata *)task->tk_calldata; struct nfs4_state *state = calldata->state; - struct nfs4_state_owner *sp = state->owner; struct nfs_server *server = NFS_SERVER(calldata->inode); /* hmm. we are done with the inode, and in the process of freeing * the state_owner. we keep this around to process errors */ - nfs4_increment_seqid(task->tk_status, sp); + nfs_increment_open_seqid(task->tk_status, calldata->arg.seqid); switch (task->tk_status) { case 0: memcpy(&state->stateid, &calldata->res.stateid, @@ -816,25 +905,49 @@ static void nfs4_close_done(struct rpc_task *task) return; } } + nfs_refresh_inode(calldata->inode, calldata->res.fattr); state->state = calldata->arg.open_flags; - nfs4_put_open_state(state); - up(&sp->so_sema); - nfs4_put_state_owner(sp); - up_read(&server->nfs4_state->cl_sem); - kfree(calldata); + nfs4_free_closedata(calldata); } -static inline int nfs4_close_call(struct rpc_clnt *clnt, struct nfs4_closedata *calldata) +static void nfs4_close_begin(struct rpc_task *task) { + struct nfs4_closedata *calldata = (struct nfs4_closedata *)task->tk_calldata; + struct nfs4_state *state = calldata->state; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE], .rpc_argp = &calldata->arg, .rpc_resp = &calldata->res, - .rpc_cred = calldata->state->owner->so_cred, + .rpc_cred = state->owner->so_cred, }; - if (calldata->arg.open_flags != 0) + int mode = 0; + int status; + + status = nfs_wait_on_sequence(calldata->arg.seqid, task); + if (status != 0) + return; + /* Don't reorder reads */ + smp_rmb(); + /* Recalculate the new open mode in case someone reopened the file + * while we were waiting in line to be scheduled. 
+ */ + if (state->nreaders != 0) + mode |= FMODE_READ; + if (state->nwriters != 0) + mode |= FMODE_WRITE; + if (test_bit(NFS_DELEGATED_STATE, &state->flags)) + state->state = mode; + if (mode == state->state) { + nfs4_free_closedata(calldata); + task->tk_exit = NULL; + rpc_exit(task, 0); + return; + } + nfs_fattr_init(calldata->res.fattr); + if (mode != 0) msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; - return rpc_call_async(clnt, &msg, 0, nfs4_close_done, calldata); + calldata->arg.open_flags = mode; + rpc_call_setup(task, &msg, 0); } /* @@ -850,40 +963,57 @@ static inline int nfs4_close_call(struct rpc_clnt *clnt, struct nfs4_closedata * */ int nfs4_do_close(struct inode *inode, struct nfs4_state *state, mode_t mode) { + struct nfs_server *server = NFS_SERVER(inode); struct nfs4_closedata *calldata; - int status; + int status = -ENOMEM; - /* Tell caller we're done */ - if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { - state->state = mode; - return 0; - } - calldata = (struct nfs4_closedata *)kmalloc(sizeof(*calldata), GFP_KERNEL); + calldata = kmalloc(sizeof(*calldata), GFP_KERNEL); if (calldata == NULL) - return -ENOMEM; + goto out; calldata->inode = inode; calldata->state = state; calldata->arg.fh = NFS_FH(inode); + calldata->arg.stateid = &state->stateid; /* Serialization for the sequence id */ - calldata->arg.seqid = state->owner->so_seqid; - calldata->arg.open_flags = mode; - memcpy(&calldata->arg.stateid, &state->stateid, - sizeof(calldata->arg.stateid)); - status = nfs4_close_call(NFS_SERVER(inode)->client, calldata); - /* - * Return -EINPROGRESS on success in order to indicate to the - * caller that an asynchronous RPC call has been launched, and - * that it will release the semaphores on completion. - */ - return (status == 0) ? 
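/*
 * nfs4_close_begin() recomputes the open mode from the reader/writer counts
 * just before the RPC is set up, and then chooses between CLOSE (no openers
 * left), OPEN_DOWNGRADE (some remain) and sending nothing at all (the share
 * state did not change while the request waited its turn).  A compact model
 * of that decision; the delegation special case is left out:
 */
#include <stdio.h>

#define FMODE_READ	1
#define FMODE_WRITE	2

static const char *close_decision(int nreaders, int nwriters, int cur_state)
{
	int mode = 0;

	if (nreaders != 0)
		mode |= FMODE_READ;
	if (nwriters != 0)
		mode |= FMODE_WRITE;
	if (mode == cur_state)
		return "nothing to send";
	return mode == 0 ? "CLOSE" : "OPEN_DOWNGRADE";
}

int main(void)
{
	int rw = FMODE_READ | FMODE_WRITE;

	printf("%s\n", close_decision(0, 0, rw));	/* CLOSE */
	printf("%s\n", close_decision(1, 0, rw));	/* OPEN_DOWNGRADE */
	printf("%s\n", close_decision(1, 1, rw));	/* nothing to send */
	return 0;
}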
-EINPROGRESS : status; + calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid); + if (calldata->arg.seqid == NULL) + goto out_free_calldata; + calldata->arg.bitmask = server->attr_bitmask; + calldata->res.fattr = &calldata->fattr; + calldata->res.server = server; + + status = nfs4_call_async(server->client, nfs4_close_begin, + nfs4_close_done, calldata); + if (status == 0) + goto out; + + nfs_free_seqid(calldata->arg.seqid); +out_free_calldata: + kfree(calldata); +out: + return status; } -struct inode * +static void nfs4_intent_set_file(struct nameidata *nd, struct dentry *dentry, struct nfs4_state *state) +{ + struct file *filp; + + filp = lookup_instantiate_filp(nd, dentry, NULL); + if (!IS_ERR(filp)) { + struct nfs_open_context *ctx; + ctx = (struct nfs_open_context *)filp->private_data; + ctx->state = state; + } else + nfs4_close_state(state, nd->intent.open.flags); +} + +struct dentry * nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct iattr attr; struct rpc_cred *cred; struct nfs4_state *state; + struct dentry *res; if (nd->flags & LOOKUP_CREATE) { attr.ia_mode = nd->intent.open.create_mode; @@ -897,16 +1027,23 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) cred = rpcauth_lookupcred(NFS_SERVER(dir)->client->cl_auth, 0); if (IS_ERR(cred)) - return (struct inode *)cred; + return (struct dentry *)cred; state = nfs4_do_open(dir, dentry, nd->intent.open.flags, &attr, cred); put_rpccred(cred); - if (IS_ERR(state)) - return (struct inode *)state; - return state->inode; + if (IS_ERR(state)) { + if (PTR_ERR(state) == -ENOENT) + d_add(dentry, NULL); + return (struct dentry *)state; + } + res = d_add_unique(dentry, state->inode); + if (res != NULL) + dentry = res; + nfs4_intent_set_file(nd, dentry, state); + return res; } int -nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags) +nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, struct nameidata *nd) { struct rpc_cred *cred; struct nfs4_state *state; @@ -919,18 +1056,30 @@ nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags) if (IS_ERR(state)) state = nfs4_do_open(dir, dentry, openflags, NULL, cred); put_rpccred(cred); - if (state == ERR_PTR(-ENOENT) && dentry->d_inode == 0) - return 1; - if (IS_ERR(state)) - return 0; + if (IS_ERR(state)) { + switch (PTR_ERR(state)) { + case -EPERM: + case -EACCES: + case -EDQUOT: + case -ENOSPC: + case -EROFS: + lookup_instantiate_filp(nd, (struct dentry *)state, NULL); + return 1; + case -ENOENT: + if (dentry->d_inode == NULL) + return 1; + } + goto out_drop; + } inode = state->inode; + iput(inode); if (inode == dentry->d_inode) { - iput(inode); + nfs4_intent_set_file(nd, dentry, state); return 1; } - d_drop(dentry); nfs4_close_state(state, openflags); - iput(inode); +out_drop: + d_drop(dentry); return 0; } @@ -974,13 +1123,12 @@ static int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fh static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *info) { - struct nfs_fattr * fattr = info->fattr; struct nfs4_lookup_root_arg args = { .bitmask = nfs4_fattr_bitmap, }; struct nfs4_lookup_res res = { .server = server, - .fattr = fattr, + .fattr = info->fattr, .fh = fhandle, }; struct rpc_message msg = { @@ -988,7 +1136,7 @@ static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, .rpc_argp = &args, .rpc_resp = &res, }; - fattr->valid = 0; + nfs_fattr_init(info->fattr); 
return rpc_call_sync(server->client, &msg, 0); } @@ -1051,7 +1199,7 @@ static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle, q.len = p - q.name; do { - fattr->valid = 0; + nfs_fattr_init(fattr); status = nfs4_handle_exception(server, rpc_call_sync(server->client, &msg, 0), &exception); @@ -1088,7 +1236,7 @@ static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, .rpc_resp = &res, }; - fattr->valid = 0; + nfs_fattr_init(fattr); return rpc_call_sync(server->client, &msg, 0); } @@ -1130,7 +1278,7 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, struct nfs4_state *state; int status; - fattr->valid = 0; + nfs_fattr_init(fattr); cred = rpcauth_lookupcred(NFS_SERVER(inode)->client->cl_auth, 0); if (IS_ERR(cred)) @@ -1176,7 +1324,7 @@ static int _nfs4_proc_lookup(struct inode *dir, struct qstr *name, .rpc_resp = &res, }; - fattr->valid = 0; + nfs_fattr_init(fattr); dprintk("NFS call lookup %s\n", name->name); status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); @@ -1325,7 +1473,7 @@ static int _nfs4_proc_read(struct nfs_read_data *rdata) dprintk("NFS call read %d @ %Ld\n", rdata->args.count, (long long) rdata->args.offset); - fattr->valid = 0; + nfs_fattr_init(fattr); status = rpc_call_sync(server->client, &msg, flags); if (!status) renew_lease(server, timestamp); @@ -1362,7 +1510,7 @@ static int _nfs4_proc_write(struct nfs_write_data *wdata) dprintk("NFS call write %d @ %Ld\n", wdata->args.count, (long long) wdata->args.offset); - fattr->valid = 0; + nfs_fattr_init(fattr); status = rpc_call_sync(server->client, &msg, rpcflags); dprintk("NFS reply write: %d\n", status); return status; @@ -1396,7 +1544,7 @@ static int _nfs4_proc_commit(struct nfs_write_data *cdata) dprintk("NFS call commit %d @ %Ld\n", cdata->args.count, (long long) cdata->args.offset); - fattr->valid = 0; + nfs_fattr_init(fattr); status = rpc_call_sync(server->client, &msg, 0); dprintk("NFS reply commit: %d\n", status); return status; @@ -1431,7 +1579,7 @@ static int nfs4_proc_commit(struct nfs_write_data *cdata) static int nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, - int flags) + int flags, struct nameidata *nd) { struct nfs4_state *state; struct rpc_cred *cred; @@ -1453,24 +1601,30 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, struct nfs_fattr fattr; status = nfs4_do_setattr(NFS_SERVER(dir), &fattr, NFS_FH(state->inode), sattr, state); - if (status == 0) { + if (status == 0) nfs_setattr_update_inode(state->inode, sattr); - goto out; - } - } else if (flags != 0) - goto out; - nfs4_close_state(state, flags); + } + if (status == 0 && nd != NULL && (nd->flags & LOOKUP_OPEN)) + nfs4_intent_set_file(nd, dentry, state); + else + nfs4_close_state(state, flags); out: return status; } static int _nfs4_proc_remove(struct inode *dir, struct qstr *name) { + struct nfs_server *server = NFS_SERVER(dir); struct nfs4_remove_arg args = { .fh = NFS_FH(dir), .name = name, + .bitmask = server->attr_bitmask, + }; + struct nfs_fattr dir_attr; + struct nfs4_remove_res res = { + .server = server, + .dir_attr = &dir_attr, }; - struct nfs4_change_info res; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE], .rpc_argp = &args, @@ -1478,9 +1632,12 @@ static int _nfs4_proc_remove(struct inode *dir, struct qstr *name) }; int status; - status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); - if (status == 0) - update_changeattr(dir, &res); + nfs_fattr_init(res.dir_attr); + status = 
rpc_call_sync(server->client, &msg, 0); + if (status == 0) { + update_changeattr(dir, &res.cinfo); + nfs_post_op_update_inode(dir, res.dir_attr); + } return status; } @@ -1498,12 +1655,14 @@ static int nfs4_proc_remove(struct inode *dir, struct qstr *name) struct unlink_desc { struct nfs4_remove_arg args; - struct nfs4_change_info res; + struct nfs4_remove_res res; + struct nfs_fattr dir_attr; }; static int nfs4_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir, struct qstr *name) { + struct nfs_server *server = NFS_SERVER(dir->d_inode); struct unlink_desc *up; up = (struct unlink_desc *) kmalloc(sizeof(*up), GFP_KERNEL); @@ -1512,6 +1671,9 @@ static int nfs4_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir, up->args.fh = NFS_FH(dir->d_inode); up->args.name = name; + up->args.bitmask = server->attr_bitmask; + up->res.server = server; + up->res.dir_attr = &up->dir_attr; msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; msg->rpc_argp = &up->args; @@ -1526,7 +1688,8 @@ static int nfs4_proc_unlink_done(struct dentry *dir, struct rpc_task *task) if (msg->rpc_resp != NULL) { up = container_of(msg->rpc_resp, struct unlink_desc, res); - update_changeattr(dir->d_inode, &up->res); + update_changeattr(dir->d_inode, &up->res.cinfo); + nfs_post_op_update_inode(dir->d_inode, up->res.dir_attr); kfree(up); msg->rpc_resp = NULL; msg->rpc_argp = NULL; @@ -1537,13 +1700,20 @@ static int nfs4_proc_unlink_done(struct dentry *dir, struct rpc_task *task) static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, struct inode *new_dir, struct qstr *new_name) { + struct nfs_server *server = NFS_SERVER(old_dir); struct nfs4_rename_arg arg = { .old_dir = NFS_FH(old_dir), .new_dir = NFS_FH(new_dir), .old_name = old_name, .new_name = new_name, + .bitmask = server->attr_bitmask, + }; + struct nfs_fattr old_fattr, new_fattr; + struct nfs4_rename_res res = { + .server = server, + .old_fattr = &old_fattr, + .new_fattr = &new_fattr, }; - struct nfs4_rename_res res = { }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME], .rpc_argp = &arg, @@ -1551,11 +1721,15 @@ static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, }; int status; - status = rpc_call_sync(NFS_CLIENT(old_dir), &msg, 0); + nfs_fattr_init(res.old_fattr); + nfs_fattr_init(res.new_fattr); + status = rpc_call_sync(server->client, &msg, 0); if (!status) { update_changeattr(old_dir, &res.old_cinfo); + nfs_post_op_update_inode(old_dir, res.old_fattr); update_changeattr(new_dir, &res.new_cinfo); + nfs_post_op_update_inode(new_dir, res.new_fattr); } return status; } @@ -1576,22 +1750,34 @@ static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) { + struct nfs_server *server = NFS_SERVER(inode); struct nfs4_link_arg arg = { .fh = NFS_FH(inode), .dir_fh = NFS_FH(dir), .name = name, + .bitmask = server->attr_bitmask, + }; + struct nfs_fattr fattr, dir_attr; + struct nfs4_link_res res = { + .server = server, + .fattr = &fattr, + .dir_attr = &dir_attr, }; - struct nfs4_change_info cinfo = { }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK], .rpc_argp = &arg, - .rpc_resp = &cinfo, + .rpc_resp = &res, }; int status; - status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); - if (!status) - update_changeattr(dir, &cinfo); + nfs_fattr_init(res.fattr); + nfs_fattr_init(res.dir_attr); + status = rpc_call_sync(server->client, &msg, 0); + if (!status) { + 
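/*
 * Sketch of the NFSv4 change_info handling that update_changeattr() applies
 * here and after RENAME/LINK below: the cached directory change attribute
 * only advances when the server reports an atomic before/after pair whose
 * "before" matches what we had cached; otherwise the cache is just marked
 * invalid and a later GETATTR resolves it.  Types and names are simplified.
 */
#include <stdio.h>
#include <stdbool.h>

struct change_info {
	bool atomic;
	unsigned long long before, after;
};

struct cached_dir {
	unsigned long long change_attr;
	bool attrs_invalid;
};

static void update_changeattr_model(struct cached_dir *dir,
				    const struct change_info *ci)
{
	dir->attrs_invalid = true;		/* something changed for sure */
	if (ci->atomic && ci->before == dir->change_attr)
		dir->change_attr = ci->after;	/* nobody raced with us */
}

int main(void)
{
	struct cached_dir dir = { .change_attr = 41 };
	struct change_info ok = { .atomic = true, .before = 41, .after = 42 };
	struct change_info raced = { .atomic = true, .before = 99, .after = 100 };

	update_changeattr_model(&dir, &ok);
	printf("atomic match: change_attr=%llu\n", dir.change_attr);
	update_changeattr_model(&dir, &raced);
	printf("raced       : change_attr=%llu (stale, cache marked invalid)\n",
	       dir.change_attr);
	return 0;
}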
update_changeattr(dir, &res.cinfo); + nfs_post_op_update_inode(dir, res.dir_attr); + nfs_refresh_inode(inode, res.fattr); + } return status; } @@ -1613,6 +1799,7 @@ static int _nfs4_proc_symlink(struct inode *dir, struct qstr *name, struct nfs_fattr *fattr) { struct nfs_server *server = NFS_SERVER(dir); + struct nfs_fattr dir_fattr; struct nfs4_create_arg arg = { .dir_fh = NFS_FH(dir), .server = server, @@ -1625,6 +1812,7 @@ static int _nfs4_proc_symlink(struct inode *dir, struct qstr *name, .server = server, .fh = fhandle, .fattr = fattr, + .dir_fattr = &dir_fattr, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK], @@ -1636,11 +1824,13 @@ static int _nfs4_proc_symlink(struct inode *dir, struct qstr *name, if (path->len > NFS4_MAXPATHLEN) return -ENAMETOOLONG; arg.u.symlink = path; - fattr->valid = 0; + nfs_fattr_init(fattr); + nfs_fattr_init(&dir_fattr); status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); if (!status) update_changeattr(dir, &res.dir_cinfo); + nfs_post_op_update_inode(dir, res.dir_fattr); return status; } @@ -1664,7 +1854,7 @@ static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, { struct nfs_server *server = NFS_SERVER(dir); struct nfs_fh fhandle; - struct nfs_fattr fattr; + struct nfs_fattr fattr, dir_fattr; struct nfs4_create_arg arg = { .dir_fh = NFS_FH(dir), .server = server, @@ -1677,6 +1867,7 @@ static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, .server = server, .fh = &fhandle, .fattr = &fattr, + .dir_fattr = &dir_fattr, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE], @@ -1685,11 +1876,13 @@ static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, }; int status; - fattr.valid = 0; + nfs_fattr_init(&fattr); + nfs_fattr_init(&dir_fattr); status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); if (!status) { update_changeattr(dir, &res.dir_cinfo); + nfs_post_op_update_inode(dir, res.dir_fattr); status = nfs_instantiate(dentry, &fhandle, &fattr); } return status; @@ -1762,7 +1955,7 @@ static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, { struct nfs_server *server = NFS_SERVER(dir); struct nfs_fh fh; - struct nfs_fattr fattr; + struct nfs_fattr fattr, dir_fattr; struct nfs4_create_arg arg = { .dir_fh = NFS_FH(dir), .server = server, @@ -1774,6 +1967,7 @@ static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, .server = server, .fh = &fh, .fattr = &fattr, + .dir_fattr = &dir_fattr, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE], @@ -1783,7 +1977,8 @@ static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, int status; int mode = sattr->ia_mode; - fattr.valid = 0; + nfs_fattr_init(&fattr); + nfs_fattr_init(&dir_fattr); BUG_ON(!(sattr->ia_valid & ATTR_MODE)); BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode)); @@ -1805,6 +2000,7 @@ static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); if (status == 0) { update_changeattr(dir, &res.dir_cinfo); + nfs_post_op_update_inode(dir, res.dir_fattr); status = nfs_instantiate(dentry, &fh, &fattr); } return status; @@ -1836,7 +2032,7 @@ static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, .rpc_resp = fsstat, }; - fsstat->fattr->valid = 0; + nfs_fattr_init(fsstat->fattr); return rpc_call_sync(server->client, &msg, 0); } @@ -1883,7 +2079,7 @@ static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, str static int 
nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) { - fsinfo->fattr->valid = 0; + nfs_fattr_init(fsinfo->fattr); return nfs4_do_fsinfo(server, fhandle, fsinfo); } @@ -1906,7 +2102,7 @@ static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle return 0; } - pathconf->fattr->valid = 0; + nfs_fattr_init(pathconf->fattr); return rpc_call_sync(server->client, &msg, 0); } @@ -1973,8 +2169,10 @@ nfs4_write_done(struct rpc_task *task) rpc_restart_call(task); return; } - if (task->tk_status >= 0) + if (task->tk_status >= 0) { renew_lease(NFS_SERVER(inode), data->timestamp); + nfs_post_op_update_inode(inode, data->res.fattr); + } /* Call back common NFS writeback processing */ nfs_writeback_done(task); } @@ -1990,6 +2188,7 @@ nfs4_proc_write_setup(struct nfs_write_data *data, int how) .rpc_cred = data->cred, }; struct inode *inode = data->inode; + struct nfs_server *server = NFS_SERVER(inode); int stable; int flags; @@ -2001,6 +2200,8 @@ nfs4_proc_write_setup(struct nfs_write_data *data, int how) } else stable = NFS_UNSTABLE; data->args.stable = stable; + data->args.bitmask = server->attr_bitmask; + data->res.server = server; data->timestamp = jiffies; @@ -2022,6 +2223,8 @@ nfs4_commit_done(struct rpc_task *task) rpc_restart_call(task); return; } + if (task->tk_status >= 0) + nfs_post_op_update_inode(inode, data->res.fattr); /* Call back common NFS writeback processing */ nfs_commit_done(task); } @@ -2037,8 +2240,12 @@ nfs4_proc_commit_setup(struct nfs_write_data *data, int how) .rpc_cred = data->cred, }; struct inode *inode = data->inode; + struct nfs_server *server = NFS_SERVER(inode); int flags; + data->args.bitmask = server->attr_bitmask; + data->res.server = server; + /* Set the initial flags for the task. */ flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC; @@ -2106,65 +2313,6 @@ nfs4_proc_renew(struct nfs4_client *clp) return 0; } -/* - * We will need to arrange for the VFS layer to provide an atomic open. - * Until then, this open method is prone to inefficiency and race conditions - * due to the lookup, potential create, and open VFS calls from sys_open() - * placed on the wire. 
- */ -static int -nfs4_proc_file_open(struct inode *inode, struct file *filp) -{ - struct dentry *dentry = filp->f_dentry; - struct nfs_open_context *ctx; - struct nfs4_state *state = NULL; - struct rpc_cred *cred; - int status = -ENOMEM; - - dprintk("nfs4_proc_file_open: starting on (%.*s/%.*s)\n", - (int)dentry->d_parent->d_name.len, - dentry->d_parent->d_name.name, - (int)dentry->d_name.len, dentry->d_name.name); - - - /* Find our open stateid */ - cred = rpcauth_lookupcred(NFS_SERVER(inode)->client->cl_auth, 0); - if (IS_ERR(cred)) - return PTR_ERR(cred); - ctx = alloc_nfs_open_context(dentry, cred); - put_rpccred(cred); - if (unlikely(ctx == NULL)) - return -ENOMEM; - status = -EIO; /* ERACE actually */ - state = nfs4_find_state(inode, cred, filp->f_mode); - if (unlikely(state == NULL)) - goto no_state; - ctx->state = state; - nfs4_close_state(state, filp->f_mode); - ctx->mode = filp->f_mode; - nfs_file_set_open_context(filp, ctx); - put_nfs_open_context(ctx); - if (filp->f_mode & FMODE_WRITE) - nfs_begin_data_update(inode); - return 0; -no_state: - printk(KERN_WARNING "NFS: v4 raced in function %s\n", __FUNCTION__); - put_nfs_open_context(ctx); - return status; -} - -/* - * Release our state - */ -static int -nfs4_proc_file_release(struct inode *inode, struct file *filp) -{ - if (filp->f_mode & FMODE_WRITE) - nfs_end_data_update(inode); - nfs_file_clear_open_context(filp); - return 0; -} - static inline int nfs4_server_supports_acls(struct nfs_server *server) { return (server->caps & NFS_CAP_ACLS) @@ -2285,7 +2433,7 @@ static inline ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size return -ENOMEM; args.acl_pages[0] = localpage; args.acl_pgbase = 0; - args.acl_len = PAGE_SIZE; + resp_len = args.acl_len = PAGE_SIZE; } else { resp_buf = buf; buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase); @@ -2345,6 +2493,7 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen if (!nfs4_server_supports_acls(server)) return -EOPNOTSUPP; + nfs_inode_return_delegation(inode); buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase); ret = rpc_call_sync(NFS_SERVER(inode)->client, &msg, 0); if (ret == 0) @@ -2353,7 +2502,7 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen } static int -nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server) +nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server) { struct nfs4_client *clp = server->nfs4_state; @@ -2431,7 +2580,7 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout) /* This is the error handling routine for processes that are allowed * to sleep. 
*/ -int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception) +int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception) { struct nfs4_client *clp = server->nfs4_state; int ret = errorcode; @@ -2632,7 +2781,6 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock down_read(&clp->cl_sem); nlo.clientid = clp->cl_clientid; - down(&state->lock_sema); status = nfs4_set_lock_state(state, request); if (status != 0) goto out; @@ -2659,7 +2807,6 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock status = 0; } out: - up(&state->lock_sema); up_read(&clp->cl_sem); return status; } @@ -2696,67 +2843,125 @@ static int do_vfs_lock(struct file *file, struct file_lock *fl) return res; } -static int _nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) +struct nfs4_unlockdata { + struct nfs_lockargs arg; + struct nfs_locku_opargs luargs; + struct nfs_lockres res; + struct nfs4_lock_state *lsp; + struct nfs_open_context *ctx; + atomic_t refcount; + struct completion completion; +}; + +static void nfs4_locku_release_calldata(struct nfs4_unlockdata *calldata) { - struct inode *inode = state->inode; - struct nfs_server *server = NFS_SERVER(inode); - struct nfs4_client *clp = server->nfs4_state; - struct nfs_lockargs arg = { - .fh = NFS_FH(inode), - .type = nfs4_lck_type(cmd, request), - .offset = request->fl_start, - .length = nfs4_lck_length(request), - }; - struct nfs_lockres res = { - .server = server, - }; + if (atomic_dec_and_test(&calldata->refcount)) { + nfs_free_seqid(calldata->luargs.seqid); + nfs4_put_lock_state(calldata->lsp); + put_nfs_open_context(calldata->ctx); + kfree(calldata); + } +} + +static void nfs4_locku_complete(struct nfs4_unlockdata *calldata) +{ + complete(&calldata->completion); + nfs4_locku_release_calldata(calldata); +} + +static void nfs4_locku_done(struct rpc_task *task) +{ + struct nfs4_unlockdata *calldata = (struct nfs4_unlockdata *)task->tk_calldata; + + nfs_increment_lock_seqid(task->tk_status, calldata->luargs.seqid); + switch (task->tk_status) { + case 0: + memcpy(calldata->lsp->ls_stateid.data, + calldata->res.u.stateid.data, + sizeof(calldata->lsp->ls_stateid.data)); + break; + case -NFS4ERR_STALE_STATEID: + case -NFS4ERR_EXPIRED: + nfs4_schedule_state_recovery(calldata->res.server->nfs4_state); + break; + default: + if (nfs4_async_handle_error(task, calldata->res.server) == -EAGAIN) { + rpc_restart_call(task); + return; + } + } + nfs4_locku_complete(calldata); +} + +static void nfs4_locku_begin(struct rpc_task *task) +{ + struct nfs4_unlockdata *calldata = (struct nfs4_unlockdata *)task->tk_calldata; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU], - .rpc_argp = &arg, - .rpc_resp = &res, - .rpc_cred = state->owner->so_cred, + .rpc_argp = &calldata->arg, + .rpc_resp = &calldata->res, + .rpc_cred = calldata->lsp->ls_state->owner->so_cred, }; - struct nfs4_lock_state *lsp; - struct nfs_locku_opargs luargs; int status; - - down_read(&clp->cl_sem); - down(&state->lock_sema); - status = nfs4_set_lock_state(state, request); - if (status != 0) - goto out; - lsp = request->fl_u.nfs4_fl.owner; - /* We might have lost the locks! 
*/ - if ((lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) - goto out; - luargs.seqid = lsp->ls_seqid; - memcpy(&luargs.stateid, &lsp->ls_stateid, sizeof(luargs.stateid)); - arg.u.locku = &luargs; - status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR); - nfs4_increment_lock_seqid(status, lsp); - if (status == 0) - memcpy(&lsp->ls_stateid, &res.u.stateid, - sizeof(lsp->ls_stateid)); -out: - up(&state->lock_sema); - if (status == 0) - do_vfs_lock(request->fl_file, request); - up_read(&clp->cl_sem); - return status; + status = nfs_wait_on_sequence(calldata->luargs.seqid, task); + if (status != 0) + return; + if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) { + nfs4_locku_complete(calldata); + task->tk_exit = NULL; + rpc_exit(task, 0); + return; + } + rpc_call_setup(task, &msg, 0); } static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request) { - struct nfs4_exception exception = { }; - int err; + struct nfs4_unlockdata *calldata; + struct inode *inode = state->inode; + struct nfs_server *server = NFS_SERVER(inode); + struct nfs4_lock_state *lsp; + int status; - do { - err = nfs4_handle_exception(NFS_SERVER(state->inode), - _nfs4_proc_unlck(state, cmd, request), - &exception); - } while (exception.retry); - return err; + status = nfs4_set_lock_state(state, request); + if (status != 0) + return status; + lsp = request->fl_u.nfs4_fl.owner; + /* We might have lost the locks! */ + if ((lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) + return 0; + calldata = kmalloc(sizeof(*calldata), GFP_KERNEL); + if (calldata == NULL) + return -ENOMEM; + calldata->luargs.seqid = nfs_alloc_seqid(&lsp->ls_seqid); + if (calldata->luargs.seqid == NULL) { + kfree(calldata); + return -ENOMEM; + } + calldata->luargs.stateid = &lsp->ls_stateid; + calldata->arg.fh = NFS_FH(inode); + calldata->arg.type = nfs4_lck_type(cmd, request); + calldata->arg.offset = request->fl_start; + calldata->arg.length = nfs4_lck_length(request); + calldata->arg.u.locku = &calldata->luargs; + calldata->res.server = server; + calldata->lsp = lsp; + atomic_inc(&lsp->ls_count); + + /* Ensure we don't close file until we're done freeing locks! 
*/ + calldata->ctx = get_nfs_open_context((struct nfs_open_context*)request->fl_file->private_data); + + atomic_set(&calldata->refcount, 2); + init_completion(&calldata->completion); + + status = nfs4_call_async(NFS_SERVER(inode)->client, nfs4_locku_begin, + nfs4_locku_done, calldata); + if (status == 0) + wait_for_completion_interruptible(&calldata->completion); + do_vfs_lock(request->fl_file, request); + nfs4_locku_release_calldata(calldata); + return status; } static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *request, int reclaim) @@ -2764,11 +2969,23 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *r struct inode *inode = state->inode; struct nfs_server *server = NFS_SERVER(inode); struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner; + struct nfs_lock_opargs largs = { + .lock_stateid = &lsp->ls_stateid, + .open_stateid = &state->stateid, + .lock_owner = { + .clientid = server->nfs4_state->cl_clientid, + .id = lsp->ls_id, + }, + .reclaim = reclaim, + }; struct nfs_lockargs arg = { .fh = NFS_FH(inode), .type = nfs4_lck_type(cmd, request), .offset = request->fl_start, .length = nfs4_lck_length(request), + .u = { + .lock = &largs, + }, }; struct nfs_lockres res = { .server = server, @@ -2779,53 +2996,39 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *r .rpc_resp = &res, .rpc_cred = state->owner->so_cred, }; - struct nfs_lock_opargs largs = { - .reclaim = reclaim, - .new_lock_owner = 0, - }; - int status; + int status = -ENOMEM; - if (!(lsp->ls_flags & NFS_LOCK_INITIALIZED)) { + largs.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid); + if (largs.lock_seqid == NULL) + return -ENOMEM; + if (!(lsp->ls_seqid.flags & NFS_SEQID_CONFIRMED)) { struct nfs4_state_owner *owner = state->owner; - struct nfs_open_to_lock otl = { - .lock_owner = { - .clientid = server->nfs4_state->cl_clientid, - }, - }; - otl.lock_seqid = lsp->ls_seqid; - otl.lock_owner.id = lsp->ls_id; - memcpy(&otl.open_stateid, &state->stateid, sizeof(otl.open_stateid)); - largs.u.open_lock = &otl; + largs.open_seqid = nfs_alloc_seqid(&owner->so_seqid); + if (largs.open_seqid == NULL) + goto out; largs.new_lock_owner = 1; - arg.u.lock = &largs; - down(&owner->so_sema); - otl.open_seqid = owner->so_seqid; status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR); - /* increment open_owner seqid on success, and - * seqid mutating errors */ - nfs4_increment_seqid(status, owner); - up(&owner->so_sema); - if (status == 0) { - lsp->ls_flags |= NFS_LOCK_INITIALIZED; - lsp->ls_seqid++; + /* increment open seqid on success, and seqid mutating errors */ + if (largs.new_lock_owner != 0) { + nfs_increment_open_seqid(status, largs.open_seqid); + if (status == 0) + nfs_confirm_seqid(&lsp->ls_seqid, 0); } - } else { - struct nfs_exist_lock el = { - .seqid = lsp->ls_seqid, - }; - memcpy(&el.stateid, &lsp->ls_stateid, sizeof(el.stateid)); - largs.u.exist_lock = ⪙ - arg.u.lock = &largs; + nfs_free_seqid(largs.open_seqid); + } else status = rpc_call_sync(server->client, &msg, RPC_TASK_NOINTR); - /* increment seqid on success, and * seqid mutating errors*/ - nfs4_increment_lock_seqid(status, lsp); - } + /* increment lock seqid on success, and seqid mutating errors*/ + nfs_increment_lock_seqid(status, largs.lock_seqid); /* save the returned stateid. 
*/ - if (status == 0) - memcpy(&lsp->ls_stateid, &res.u.stateid, sizeof(nfs4_stateid)); - else if (status == -NFS4ERR_DENIED) + if (status == 0) { + memcpy(lsp->ls_stateid.data, res.u.stateid.data, + sizeof(lsp->ls_stateid.data)); + lsp->ls_flags |= NFS_LOCK_INITIALIZED; + } else if (status == -NFS4ERR_DENIED) status = -EAGAIN; +out: + nfs_free_seqid(largs.lock_seqid); return status; } @@ -2865,11 +3068,9 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock int status; down_read(&clp->cl_sem); - down(&state->lock_sema); status = nfs4_set_lock_state(state, request); if (status == 0) status = _nfs4_do_setlk(state, cmd, request, 0); - up(&state->lock_sema); if (status == 0) { /* Note: we always want to sleep here! */ request->fl_flags |= FL_SLEEP; @@ -3024,8 +3225,8 @@ struct nfs_rpc_ops nfs_v4_clientops = { .read_setup = nfs4_proc_read_setup, .write_setup = nfs4_proc_write_setup, .commit_setup = nfs4_proc_commit_setup, - .file_open = nfs4_proc_file_open, - .file_release = nfs4_proc_file_release, + .file_open = nfs_open, + .file_release = nfs_release, .lock = nfs4_proc_lock, .clear_acl_cache = nfs4_zap_acl_attr, }; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index afe587d82f1e..2d5a6a2b9dec 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -264,13 +264,16 @@ nfs4_alloc_state_owner(void) { struct nfs4_state_owner *sp; - sp = kmalloc(sizeof(*sp),GFP_KERNEL); + sp = kzalloc(sizeof(*sp),GFP_KERNEL); if (!sp) return NULL; - init_MUTEX(&sp->so_sema); - sp->so_seqid = 0; /* arbitrary */ + spin_lock_init(&sp->so_lock); INIT_LIST_HEAD(&sp->so_states); INIT_LIST_HEAD(&sp->so_delegations); + rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue"); + sp->so_seqid.sequence = &sp->so_sequence; + spin_lock_init(&sp->so_sequence.lock); + INIT_LIST_HEAD(&sp->so_sequence.list); atomic_set(&sp->so_count, 1); return sp; } @@ -359,7 +362,6 @@ nfs4_alloc_open_state(void) memset(state->stateid.data, 0, sizeof(state->stateid.data)); atomic_set(&state->count, 1); INIT_LIST_HEAD(&state->lock_states); - init_MUTEX(&state->lock_sema); spin_lock_init(&state->state_lock); return state; } @@ -437,21 +439,23 @@ nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner) if (state) goto out; new = nfs4_alloc_open_state(); + spin_lock(&owner->so_lock); spin_lock(&inode->i_lock); state = __nfs4_find_state_byowner(inode, owner); if (state == NULL && new != NULL) { state = new; - /* Caller *must* be holding owner->so_sem */ - /* Note: The reclaim code dictates that we add stateless - * and read-only stateids to the end of the list */ - list_add_tail(&state->open_states, &owner->so_states); state->owner = owner; atomic_inc(&owner->so_count); list_add(&state->inode_states, &nfsi->open_states); state->inode = igrab(inode); spin_unlock(&inode->i_lock); + /* Note: The reclaim code dictates that we add stateless + * and read-only stateids to the end of the list */ + list_add_tail(&state->open_states, &owner->so_states); + spin_unlock(&owner->so_lock); } else { spin_unlock(&inode->i_lock); + spin_unlock(&owner->so_lock); if (new) nfs4_free_open_state(new); } @@ -461,19 +465,21 @@ out: /* * Beware! Caller must be holding exactly one - * reference to clp->cl_sem and owner->so_sema! + * reference to clp->cl_sem! 
*/ void nfs4_put_open_state(struct nfs4_state *state) { struct inode *inode = state->inode; struct nfs4_state_owner *owner = state->owner; - if (!atomic_dec_and_lock(&state->count, &inode->i_lock)) + if (!atomic_dec_and_lock(&state->count, &owner->so_lock)) return; + spin_lock(&inode->i_lock); if (!list_empty(&state->inode_states)) list_del(&state->inode_states); - spin_unlock(&inode->i_lock); list_del(&state->open_states); + spin_unlock(&inode->i_lock); + spin_unlock(&owner->so_lock); iput(inode); BUG_ON (state->state != 0); nfs4_free_open_state(state); @@ -481,20 +487,17 @@ void nfs4_put_open_state(struct nfs4_state *state) } /* - * Beware! Caller must be holding no references to clp->cl_sem! - * of owner->so_sema! + * Close the current file. */ void nfs4_close_state(struct nfs4_state *state, mode_t mode) { struct inode *inode = state->inode; struct nfs4_state_owner *owner = state->owner; - struct nfs4_client *clp = owner->so_client; int newstate; atomic_inc(&owner->so_count); - down_read(&clp->cl_sem); - down(&owner->so_sema); /* Protect against nfs4_find_state() */ + spin_lock(&owner->so_lock); spin_lock(&inode->i_lock); if (mode & FMODE_READ) state->nreaders--; @@ -507,6 +510,7 @@ void nfs4_close_state(struct nfs4_state *state, mode_t mode) list_move_tail(&state->open_states, &owner->so_states); } spin_unlock(&inode->i_lock); + spin_unlock(&owner->so_lock); newstate = 0; if (state->state != 0) { if (state->nreaders) @@ -515,14 +519,16 @@ void nfs4_close_state(struct nfs4_state *state, mode_t mode) newstate |= FMODE_WRITE; if (state->state == newstate) goto out; - if (nfs4_do_close(inode, state, newstate) == -EINPROGRESS) + if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { + state->state = newstate; + goto out; + } + if (nfs4_do_close(inode, state, newstate) == 0) return; } out: nfs4_put_open_state(state); - up(&owner->so_sema); nfs4_put_state_owner(owner); - up_read(&clp->cl_sem); } /* @@ -546,19 +552,16 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner) * Return a compatible lock_state. If no initialized lock_state structure * exists, return an uninitialized one. * - * The caller must be holding state->lock_sema */ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner) { struct nfs4_lock_state *lsp; struct nfs4_client *clp = state->owner->so_client; - lsp = kmalloc(sizeof(*lsp), GFP_KERNEL); + lsp = kzalloc(sizeof(*lsp), GFP_KERNEL); if (lsp == NULL) return NULL; - lsp->ls_flags = 0; - lsp->ls_seqid = 0; /* arbitrary */ - memset(lsp->ls_stateid.data, 0, sizeof(lsp->ls_stateid.data)); + lsp->ls_seqid.sequence = &state->owner->so_sequence; atomic_set(&lsp->ls_count, 1); lsp->ls_owner = fl_owner; spin_lock(&clp->cl_lock); @@ -572,7 +575,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f * Return a compatible lock_state. If no initialized lock_state structure * exists, return an uninitialized one. 
* - * The caller must be holding state->lock_sema and clp->cl_sem + * The caller must be holding clp->cl_sem */ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner) { @@ -605,7 +608,7 @@ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_ * Release reference to lock_state, and free it if we see that * it is no longer in use */ -static void nfs4_put_lock_state(struct nfs4_lock_state *lsp) +void nfs4_put_lock_state(struct nfs4_lock_state *lsp) { struct nfs4_state *state; @@ -673,29 +676,94 @@ void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t f nfs4_put_lock_state(lsp); } -/* -* Called with state->lock_sema and clp->cl_sem held. -*/ -void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp) +struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter) { - if (status == NFS_OK || seqid_mutating_err(-status)) - lsp->ls_seqid++; + struct nfs_seqid *new; + + new = kmalloc(sizeof(*new), GFP_KERNEL); + if (new != NULL) { + new->sequence = counter; + INIT_LIST_HEAD(&new->list); + } + return new; +} + +void nfs_free_seqid(struct nfs_seqid *seqid) +{ + struct rpc_sequence *sequence = seqid->sequence->sequence; + + if (!list_empty(&seqid->list)) { + spin_lock(&sequence->lock); + list_del(&seqid->list); + spin_unlock(&sequence->lock); + } + rpc_wake_up_next(&sequence->wait); + kfree(seqid); } /* -* Called with sp->so_sema and clp->cl_sem held. -* -* Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or -* failed with a seqid incrementing error - -* see comments nfs_fs.h:seqid_mutating_error() -*/ -void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp) + * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or + * failed with a seqid incrementing error - + * see comments nfs_fs.h:seqid_mutating_error() + */ +static inline void nfs_increment_seqid(int status, struct nfs_seqid *seqid) { - if (status == NFS_OK || seqid_mutating_err(-status)) - sp->so_seqid++; - /* If the server returns BAD_SEQID, unhash state_owner here */ - if (status == -NFS4ERR_BAD_SEQID) + switch (status) { + case 0: + break; + case -NFS4ERR_BAD_SEQID: + case -NFS4ERR_STALE_CLIENTID: + case -NFS4ERR_STALE_STATEID: + case -NFS4ERR_BAD_STATEID: + case -NFS4ERR_BADXDR: + case -NFS4ERR_RESOURCE: + case -NFS4ERR_NOFILEHANDLE: + /* Non-seqid mutating errors */ + return; + }; + /* + * Note: no locking needed as we are guaranteed to be first + * on the sequence list + */ + seqid->sequence->counter++; +} + +void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid) +{ + if (status == -NFS4ERR_BAD_SEQID) { + struct nfs4_state_owner *sp = container_of(seqid->sequence, + struct nfs4_state_owner, so_seqid); nfs4_drop_state_owner(sp); + } + return nfs_increment_seqid(status, seqid); +} + +/* + * Increment the seqid if the LOCK/LOCKU succeeded, or + * failed with a seqid incrementing error - + * see comments nfs_fs.h:seqid_mutating_error() + */ +void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid) +{ + return nfs_increment_seqid(status, seqid); +} + +int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task) +{ + struct rpc_sequence *sequence = seqid->sequence->sequence; + int status = 0; + + if (sequence->list.next == &seqid->list) + goto out; + spin_lock(&sequence->lock); + if (!list_empty(&sequence->list)) { + rpc_sleep_on(&sequence->wait, task, NULL, NULL); + status = -EAGAIN; + } else + list_add(&seqid->list, &sequence->list); + 
spin_unlock(&sequence->lock); +out: + return status; } static int reclaimer(void *); @@ -791,8 +859,6 @@ static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct n if (state->state == 0) continue; status = ops->recover_open(sp, state); - list_for_each_entry(lock, &state->lock_states, ls_locks) - lock->ls_flags &= ~NFS_LOCK_INITIALIZED; if (status >= 0) { status = nfs4_reclaim_locks(ops, state); if (status < 0) @@ -831,6 +897,28 @@ out_err: return status; } +static void nfs4_state_mark_reclaim(struct nfs4_client *clp) +{ + struct nfs4_state_owner *sp; + struct nfs4_state *state; + struct nfs4_lock_state *lock; + + /* Reset all sequence ids to zero */ + list_for_each_entry(sp, &clp->cl_state_owners, so_list) { + sp->so_seqid.counter = 0; + sp->so_seqid.flags = 0; + spin_lock(&sp->so_lock); + list_for_each_entry(state, &sp->so_states, open_states) { + list_for_each_entry(lock, &state->lock_states, ls_locks) { + lock->ls_seqid.counter = 0; + lock->ls_seqid.flags = 0; + lock->ls_flags &= ~NFS_LOCK_INITIALIZED; + } + } + spin_unlock(&sp->so_lock); + } +} + static int reclaimer(void *ptr) { struct reclaimer_args *args = (struct reclaimer_args *)ptr; @@ -864,6 +952,7 @@ restart_loop: default: ops = &nfs4_network_partition_recovery_ops; }; + nfs4_state_mark_reclaim(clp); status = __nfs4_init_client(clp); if (status) goto out_error; diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 6c564ef9489e..fbbace8a30c4 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -95,6 +95,8 @@ static int nfs_stat_to_errno(int); #define decode_getattr_maxsz (op_decode_hdr_maxsz + nfs4_fattr_maxsz) #define encode_savefh_maxsz (op_encode_hdr_maxsz) #define decode_savefh_maxsz (op_decode_hdr_maxsz) +#define encode_restorefh_maxsz (op_encode_hdr_maxsz) +#define decode_restorefh_maxsz (op_decode_hdr_maxsz) #define encode_fsinfo_maxsz (op_encode_hdr_maxsz + 2) #define decode_fsinfo_maxsz (op_decode_hdr_maxsz + 11) #define encode_renew_maxsz (op_encode_hdr_maxsz + 3) @@ -157,16 +159,20 @@ static int nfs_stat_to_errno(int); op_decode_hdr_maxsz + 2) #define NFS4_enc_write_sz (compound_encode_hdr_maxsz + \ encode_putfh_maxsz + \ - op_encode_hdr_maxsz + 8) + op_encode_hdr_maxsz + 8 + \ + encode_getattr_maxsz) #define NFS4_dec_write_sz (compound_decode_hdr_maxsz + \ decode_putfh_maxsz + \ - op_decode_hdr_maxsz + 4) + op_decode_hdr_maxsz + 4 + \ + decode_getattr_maxsz) #define NFS4_enc_commit_sz (compound_encode_hdr_maxsz + \ encode_putfh_maxsz + \ - op_encode_hdr_maxsz + 3) + op_encode_hdr_maxsz + 3 + \ + encode_getattr_maxsz) #define NFS4_dec_commit_sz (compound_decode_hdr_maxsz + \ decode_putfh_maxsz + \ - op_decode_hdr_maxsz + 2) + op_decode_hdr_maxsz + 2 + \ + decode_getattr_maxsz) #define NFS4_enc_open_sz (compound_encode_hdr_maxsz + \ encode_putfh_maxsz + \ op_encode_hdr_maxsz + \ @@ -196,17 +202,21 @@ static int nfs_stat_to_errno(int); #define NFS4_enc_open_downgrade_sz \ (compound_encode_hdr_maxsz + \ encode_putfh_maxsz + \ - op_encode_hdr_maxsz + 7) + op_encode_hdr_maxsz + 7 + \ + encode_getattr_maxsz) #define NFS4_dec_open_downgrade_sz \ (compound_decode_hdr_maxsz + \ decode_putfh_maxsz + \ - op_decode_hdr_maxsz + 4) + op_decode_hdr_maxsz + 4 + \ + decode_getattr_maxsz) #define NFS4_enc_close_sz (compound_encode_hdr_maxsz + \ encode_putfh_maxsz + \ - op_encode_hdr_maxsz + 5) + op_encode_hdr_maxsz + 5 + \ + encode_getattr_maxsz) #define NFS4_dec_close_sz (compound_decode_hdr_maxsz + \ decode_putfh_maxsz + \ - op_decode_hdr_maxsz + 4) + op_decode_hdr_maxsz + 4 + \ + decode_getattr_maxsz) 
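
The nfs4state.c hunk above replaces the old open-coded seqid bumping with nfs_increment_seqid(): the per-owner counter advances on success and on any seqid-mutating error, and is left untouched only for the few errors that prove the server never processed the operation. The sketch below is a minimal stand-alone model of that rule, not kernel code; the error values and the seqid_counter type are simplified stand-ins for the NFS4ERR_* constants and struct nfs_seqid_counter.

/*
 * Illustrative model of the seqid rule in nfs_increment_seqid() above.
 * Error constants are stand-ins, not the real NFS4ERR_* definitions.
 */
#include <stdio.h>

enum {
        OK                = 0,
        ERR_BAD_SEQID     = -10026,  /* non-mutating: server rejected the seqid */
        ERR_STALE_STATEID = -10023,  /* non-mutating: state no longer exists */
        ERR_DENIED        = -10010,  /* mutating: server processed the LOCK op */
};

struct seqid_counter {
        unsigned int counter;
};

/* Skip the increment only for errors where the server never saw the op. */
static void increment_seqid(int status, struct seqid_counter *c)
{
        switch (status) {
        case ERR_BAD_SEQID:
        case ERR_STALE_STATEID:
                return;
        default:
                break;
        }
        c->counter++;   /* success, or a seqid-mutating error */
}

int main(void)
{
        struct seqid_counter open_seqid = { .counter = 0 };

        increment_seqid(OK, &open_seqid);            /* counter -> 1 */
        increment_seqid(ERR_DENIED, &open_seqid);    /* counter -> 2 */
        increment_seqid(ERR_BAD_SEQID, &open_seqid); /* still 2 */
        printf("open seqid is now %u\n", open_seqid.counter);
        return 0;
}

Callers such as nfs_increment_open_seqid() and nfs_increment_lock_seqid() layer their policy (for example dropping the state owner on NFS4ERR_BAD_SEQID) on top of this one shared rule.
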
#define NFS4_enc_setattr_sz (compound_encode_hdr_maxsz + \ encode_putfh_maxsz + \ op_encode_hdr_maxsz + 4 + \ @@ -300,30 +310,44 @@ static int nfs_stat_to_errno(int); decode_getfh_maxsz) #define NFS4_enc_remove_sz (compound_encode_hdr_maxsz + \ encode_putfh_maxsz + \ - encode_remove_maxsz) + encode_remove_maxsz + \ + encode_getattr_maxsz) #define NFS4_dec_remove_sz (compound_decode_hdr_maxsz + \ decode_putfh_maxsz + \ - op_decode_hdr_maxsz + 5) + op_decode_hdr_maxsz + 5 + \ + decode_getattr_maxsz) #define NFS4_enc_rename_sz (compound_encode_hdr_maxsz + \ encode_putfh_maxsz + \ encode_savefh_maxsz + \ encode_putfh_maxsz + \ - encode_rename_maxsz) + encode_rename_maxsz + \ + encode_getattr_maxsz + \ + encode_restorefh_maxsz + \ + encode_getattr_maxsz) #define NFS4_dec_rename_sz (compound_decode_hdr_maxsz + \ decode_putfh_maxsz + \ decode_savefh_maxsz + \ decode_putfh_maxsz + \ - decode_rename_maxsz) + decode_rename_maxsz + \ + decode_getattr_maxsz + \ + decode_restorefh_maxsz + \ + decode_getattr_maxsz) #define NFS4_enc_link_sz (compound_encode_hdr_maxsz + \ encode_putfh_maxsz + \ encode_savefh_maxsz + \ encode_putfh_maxsz + \ - encode_link_maxsz) + encode_link_maxsz + \ + decode_getattr_maxsz + \ + encode_restorefh_maxsz + \ + decode_getattr_maxsz) #define NFS4_dec_link_sz (compound_decode_hdr_maxsz + \ decode_putfh_maxsz + \ decode_savefh_maxsz + \ decode_putfh_maxsz + \ - decode_link_maxsz) + decode_link_maxsz + \ + decode_getattr_maxsz + \ + decode_restorefh_maxsz + \ + decode_getattr_maxsz) #define NFS4_enc_symlink_sz (compound_encode_hdr_maxsz + \ encode_putfh_maxsz + \ encode_symlink_maxsz + \ @@ -336,14 +360,20 @@ static int nfs_stat_to_errno(int); decode_getfh_maxsz) #define NFS4_enc_create_sz (compound_encode_hdr_maxsz + \ encode_putfh_maxsz + \ + encode_savefh_maxsz + \ encode_create_maxsz + \ + encode_getfh_maxsz + \ encode_getattr_maxsz + \ - encode_getfh_maxsz) + encode_restorefh_maxsz + \ + encode_getattr_maxsz) #define NFS4_dec_create_sz (compound_decode_hdr_maxsz + \ decode_putfh_maxsz + \ + decode_savefh_maxsz + \ decode_create_maxsz + \ + decode_getfh_maxsz + \ decode_getattr_maxsz + \ - decode_getfh_maxsz) + decode_restorefh_maxsz + \ + decode_getattr_maxsz) #define NFS4_enc_pathconf_sz (compound_encode_hdr_maxsz + \ encode_putfh_maxsz + \ encode_getattr_maxsz) @@ -602,10 +632,10 @@ static int encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg) { uint32_t *p; - RESERVE_SPACE(8+sizeof(arg->stateid.data)); + RESERVE_SPACE(8+sizeof(arg->stateid->data)); WRITE32(OP_CLOSE); - WRITE32(arg->seqid); - WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data)); + WRITE32(arg->seqid->sequence->counter); + WRITEMEM(arg->stateid->data, sizeof(arg->stateid->data)); return 0; } @@ -729,22 +759,18 @@ static int encode_lock(struct xdr_stream *xdr, const struct nfs_lockargs *arg) WRITE64(arg->length); WRITE32(opargs->new_lock_owner); if (opargs->new_lock_owner){ - struct nfs_open_to_lock *ol = opargs->u.open_lock; - RESERVE_SPACE(40); - WRITE32(ol->open_seqid); - WRITEMEM(&ol->open_stateid, sizeof(ol->open_stateid)); - WRITE32(ol->lock_seqid); - WRITE64(ol->lock_owner.clientid); + WRITE32(opargs->open_seqid->sequence->counter); + WRITEMEM(opargs->open_stateid->data, sizeof(opargs->open_stateid->data)); + WRITE32(opargs->lock_seqid->sequence->counter); + WRITE64(opargs->lock_owner.clientid); WRITE32(4); - WRITE32(ol->lock_owner.id); + WRITE32(opargs->lock_owner.id); } else { - struct nfs_exist_lock *el = opargs->u.exist_lock; - RESERVE_SPACE(20); - WRITEMEM(&el->stateid, 
sizeof(el->stateid)); - WRITE32(el->seqid); + WRITEMEM(opargs->lock_stateid->data, sizeof(opargs->lock_stateid->data)); + WRITE32(opargs->lock_seqid->sequence->counter); } return 0; @@ -775,8 +801,8 @@ static int encode_locku(struct xdr_stream *xdr, const struct nfs_lockargs *arg) RESERVE_SPACE(44); WRITE32(OP_LOCKU); WRITE32(arg->type); - WRITE32(opargs->seqid); - WRITEMEM(&opargs->stateid, sizeof(opargs->stateid)); + WRITE32(opargs->seqid->sequence->counter); + WRITEMEM(opargs->stateid->data, sizeof(opargs->stateid->data)); WRITE64(arg->offset); WRITE64(arg->length); @@ -826,7 +852,7 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena */ RESERVE_SPACE(8); WRITE32(OP_OPEN); - WRITE32(arg->seqid); + WRITE32(arg->seqid->sequence->counter); encode_share_access(xdr, arg->open_flags); RESERVE_SPACE(16); WRITE64(arg->clientid); @@ -941,7 +967,7 @@ static int encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_con RESERVE_SPACE(8+sizeof(arg->stateid.data)); WRITE32(OP_OPEN_CONFIRM); WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data)); - WRITE32(arg->seqid); + WRITE32(arg->seqid->sequence->counter); return 0; } @@ -950,10 +976,10 @@ static int encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closea { uint32_t *p; - RESERVE_SPACE(8+sizeof(arg->stateid.data)); + RESERVE_SPACE(8+sizeof(arg->stateid->data)); WRITE32(OP_OPEN_DOWNGRADE); - WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data)); - WRITE32(arg->seqid); + WRITEMEM(arg->stateid->data, sizeof(arg->stateid->data)); + WRITE32(arg->seqid->sequence->counter); encode_share_access(xdr, arg->open_flags); return 0; } @@ -1116,6 +1142,17 @@ static int encode_renew(struct xdr_stream *xdr, const struct nfs4_client *client return 0; } +static int +encode_restorefh(struct xdr_stream *xdr) +{ + uint32_t *p; + + RESERVE_SPACE(4); + WRITE32(OP_RESTOREFH); + + return 0; +} + static int encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg) { @@ -1296,14 +1333,18 @@ static int nfs4_xdr_enc_remove(struct rpc_rqst *req, uint32_t *p, const struct n { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 2, + .nops = 3, }; int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); - if ((status = encode_putfh(&xdr, args->fh)) == 0) - status = encode_remove(&xdr, args->name); + if ((status = encode_putfh(&xdr, args->fh)) != 0) + goto out; + if ((status = encode_remove(&xdr, args->name)) != 0) + goto out; + status = encode_getfattr(&xdr, args->bitmask); +out: return status; } @@ -1314,7 +1355,7 @@ static int nfs4_xdr_enc_rename(struct rpc_rqst *req, uint32_t *p, const struct n { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 4, + .nops = 7, }; int status; @@ -1326,7 +1367,13 @@ static int nfs4_xdr_enc_rename(struct rpc_rqst *req, uint32_t *p, const struct n goto out; if ((status = encode_putfh(&xdr, args->new_dir)) != 0) goto out; - status = encode_rename(&xdr, args->old_name, args->new_name); + if ((status = encode_rename(&xdr, args->old_name, args->new_name)) != 0) + goto out; + if ((status = encode_getfattr(&xdr, args->bitmask)) != 0) + goto out; + if ((status = encode_restorefh(&xdr)) != 0) + goto out; + status = encode_getfattr(&xdr, args->bitmask); out: return status; } @@ -1338,7 +1385,7 @@ static int nfs4_xdr_enc_link(struct rpc_rqst *req, uint32_t *p, const struct nfs { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 4, + .nops = 7, }; int status; @@ -1350,7 +1397,13 @@ static int nfs4_xdr_enc_link(struct rpc_rqst *req, 
uint32_t *p, const struct nfs goto out; if ((status = encode_putfh(&xdr, args->dir_fh)) != 0) goto out; - status = encode_link(&xdr, args->name); + if ((status = encode_link(&xdr, args->name)) != 0) + goto out; + if ((status = encode_getfattr(&xdr, args->bitmask)) != 0) + goto out; + if ((status = encode_restorefh(&xdr)) != 0) + goto out; + status = encode_getfattr(&xdr, args->bitmask); out: return status; } @@ -1362,7 +1415,7 @@ static int nfs4_xdr_enc_create(struct rpc_rqst *req, uint32_t *p, const struct n { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 4, + .nops = 7, }; int status; @@ -1370,10 +1423,16 @@ static int nfs4_xdr_enc_create(struct rpc_rqst *req, uint32_t *p, const struct n encode_compound_hdr(&xdr, &hdr); if ((status = encode_putfh(&xdr, args->dir_fh)) != 0) goto out; + if ((status = encode_savefh(&xdr)) != 0) + goto out; if ((status = encode_create(&xdr, args)) != 0) goto out; if ((status = encode_getfh(&xdr)) != 0) goto out; + if ((status = encode_getfattr(&xdr, args->bitmask)) != 0) + goto out; + if ((status = encode_restorefh(&xdr)) != 0) + goto out; status = encode_getfattr(&xdr, args->bitmask); out: return status; @@ -1412,7 +1471,7 @@ static int nfs4_xdr_enc_close(struct rpc_rqst *req, uint32_t *p, struct nfs_clos { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 2, + .nops = 3, }; int status; @@ -1422,6 +1481,9 @@ static int nfs4_xdr_enc_close(struct rpc_rqst *req, uint32_t *p, struct nfs_clos if(status) goto out; status = encode_close(&xdr, args); + if (status != 0) + goto out; + status = encode_getfattr(&xdr, args->bitmask); out: return status; } @@ -1433,13 +1495,19 @@ static int nfs4_xdr_enc_open(struct rpc_rqst *req, uint32_t *p, struct nfs_opena { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 4, + .nops = 7, }; int status; + status = nfs_wait_on_sequence(args->seqid, req->rq_task); + if (status != 0) + goto out; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); status = encode_putfh(&xdr, args->fh); + if (status) + goto out; + status = encode_savefh(&xdr); if (status) goto out; status = encode_open(&xdr, args); @@ -1449,6 +1517,12 @@ static int nfs4_xdr_enc_open(struct rpc_rqst *req, uint32_t *p, struct nfs_opena if (status) goto out; status = encode_getfattr(&xdr, args->bitmask); + if (status) + goto out; + status = encode_restorefh(&xdr); + if (status) + goto out; + status = encode_getfattr(&xdr, args->bitmask); out: return status; } @@ -1464,6 +1538,9 @@ static int nfs4_xdr_enc_open_confirm(struct rpc_rqst *req, uint32_t *p, struct n }; int status; + status = nfs_wait_on_sequence(args->seqid, req->rq_task); + if (status != 0) + goto out; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); status = encode_putfh(&xdr, args->fh); @@ -1485,6 +1562,9 @@ static int nfs4_xdr_enc_open_noattr(struct rpc_rqst *req, uint32_t *p, struct nf }; int status; + status = nfs_wait_on_sequence(args->seqid, req->rq_task); + if (status != 0) + goto out; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); status = encode_putfh(&xdr, args->fh); @@ -1502,7 +1582,7 @@ static int nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req, uint32_t *p, struct { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 2, + .nops = 3, }; int status; @@ -1512,6 +1592,9 @@ static int nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req, uint32_t *p, struct if (status) goto out; status = encode_open_downgrade(&xdr, args); + if (status != 0) + goto out; + status = 
encode_getfattr(&xdr, args->bitmask); out: return status; } @@ -1525,8 +1608,15 @@ static int nfs4_xdr_enc_lock(struct rpc_rqst *req, uint32_t *p, struct nfs_locka struct compound_hdr hdr = { .nops = 2, }; + struct nfs_lock_opargs *opargs = args->u.lock; int status; + status = nfs_wait_on_sequence(opargs->lock_seqid, req->rq_task); + if (status != 0) + goto out; + /* Do we need to do an open_to_lock_owner? */ + if (opargs->lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED) + opargs->new_lock_owner = 0; xdr_init_encode(&xdr, &req->rq_snd_buf, p); encode_compound_hdr(&xdr, &hdr); status = encode_putfh(&xdr, args->fh); @@ -1713,7 +1803,7 @@ static int nfs4_xdr_enc_write(struct rpc_rqst *req, uint32_t *p, struct nfs_writ { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 2, + .nops = 3, }; int status; @@ -1723,6 +1813,9 @@ static int nfs4_xdr_enc_write(struct rpc_rqst *req, uint32_t *p, struct nfs_writ if (status) goto out; status = encode_write(&xdr, args); + if (status) + goto out; + status = encode_getfattr(&xdr, args->bitmask); out: return status; } @@ -1734,7 +1827,7 @@ static int nfs4_xdr_enc_commit(struct rpc_rqst *req, uint32_t *p, struct nfs_wri { struct xdr_stream xdr; struct compound_hdr hdr = { - .nops = 2, + .nops = 3, }; int status; @@ -1744,6 +1837,9 @@ static int nfs4_xdr_enc_commit(struct rpc_rqst *req, uint32_t *p, struct nfs_wri if (status) goto out; status = encode_commit(&xdr, args); + if (status) + goto out; + status = encode_getfattr(&xdr, args->bitmask); out: return status; } @@ -2670,8 +2766,7 @@ static int decode_server_caps(struct xdr_stream *xdr, struct nfs4_server_caps_re goto xdr_error; status = verify_attr_len(xdr, savep, attrlen); xdr_error: - if (status != 0) - printk(KERN_NOTICE "%s: xdr error %d!\n", __FUNCTION__, -status); + dprintk("%s: xdr returned %d!\n", __FUNCTION__, -status); return status; } @@ -2704,8 +2799,7 @@ static int decode_statfs(struct xdr_stream *xdr, struct nfs_fsstat *fsstat) status = verify_attr_len(xdr, savep, attrlen); xdr_error: - if (status != 0) - printk(KERN_NOTICE "%s: xdr error %d!\n", __FUNCTION__, -status); + dprintk("%s: xdr returned %d!\n", __FUNCTION__, -status); return status; } @@ -2730,8 +2824,7 @@ static int decode_pathconf(struct xdr_stream *xdr, struct nfs_pathconf *pathconf status = verify_attr_len(xdr, savep, attrlen); xdr_error: - if (status != 0) - printk(KERN_NOTICE "%s: xdr error %d!\n", __FUNCTION__, -status); + dprintk("%s: xdr returned %d!\n", __FUNCTION__, -status); return status; } @@ -2787,13 +2880,10 @@ static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr, cons goto xdr_error; if ((status = decode_attr_time_modify(xdr, bitmap, &fattr->mtime)) != 0) goto xdr_error; - if ((status = verify_attr_len(xdr, savep, attrlen)) == 0) { + if ((status = verify_attr_len(xdr, savep, attrlen)) == 0) fattr->valid = NFS_ATTR_FATTR | NFS_ATTR_FATTR_V3 | NFS_ATTR_FATTR_V4; - fattr->timestamp = jiffies; - } xdr_error: - if (status != 0) - printk(KERN_NOTICE "%s: xdr error %d!\n", __FUNCTION__, -status); + dprintk("%s: xdr returned %d\n", __FUNCTION__, -status); return status; } @@ -2826,8 +2916,7 @@ static int decode_fsinfo(struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo) status = verify_attr_len(xdr, savep, attrlen); xdr_error: - if (status != 0) - printk(KERN_NOTICE "%s: xdr error %d!\n", __FUNCTION__, -status); + dprintk("%s: xdr returned %d!\n", __FUNCTION__, -status); return status; } @@ -2890,8 +2979,8 @@ static int decode_lock(struct xdr_stream *xdr, struct nfs_lockres *res) status = 
decode_op_hdr(xdr, OP_LOCK); if (status == 0) { - READ_BUF(sizeof(nfs4_stateid)); - COPYMEM(&res->u.stateid, sizeof(res->u.stateid)); + READ_BUF(sizeof(res->u.stateid.data)); + COPYMEM(res->u.stateid.data, sizeof(res->u.stateid.data)); } else if (status == -NFS4ERR_DENIED) return decode_lock_denied(xdr, &res->u.denied); return status; @@ -2913,8 +3002,8 @@ static int decode_locku(struct xdr_stream *xdr, struct nfs_lockres *res) status = decode_op_hdr(xdr, OP_LOCKU); if (status == 0) { - READ_BUF(sizeof(nfs4_stateid)); - COPYMEM(&res->u.stateid, sizeof(res->u.stateid)); + READ_BUF(sizeof(res->u.stateid.data)); + COPYMEM(res->u.stateid.data, sizeof(res->u.stateid.data)); } return status; } @@ -2994,7 +3083,7 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res) p += bmlen; return decode_delegation(xdr, res); xdr_error: - printk(KERN_NOTICE "%s: xdr error!\n", __FUNCTION__); + dprintk("%s: Bitmap too large! Length = %u\n", __FUNCTION__, bmlen); return -EIO; } @@ -3208,6 +3297,12 @@ static int decode_renew(struct xdr_stream *xdr) return decode_op_hdr(xdr, OP_RENEW); } +static int +decode_restorefh(struct xdr_stream *xdr) +{ + return decode_op_hdr(xdr, OP_RESTOREFH); +} + static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req, size_t *acl_len) { @@ -3243,7 +3338,8 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req, if (attrlen <= *acl_len) xdr_read_pages(xdr, attrlen); *acl_len = attrlen; - } + } else + status = -EOPNOTSUPP; out: return status; @@ -3352,6 +3448,9 @@ static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp, uint32_t *p, stru if (status) goto out; status = decode_open_downgrade(&xdr, res); + if (status != 0) + goto out; + decode_getfattr(&xdr, res->fattr, res->server); out: return status; } @@ -3424,7 +3523,7 @@ out: /* * Decode REMOVE response */ -static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_change_info *cinfo) +static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_remove_res *res) { struct xdr_stream xdr; struct compound_hdr hdr; @@ -3433,8 +3532,11 @@ static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); if ((status = decode_compound_hdr(&xdr, &hdr)) != 0) goto out; - if ((status = decode_putfh(&xdr)) == 0) - status = decode_remove(&xdr, cinfo); + if ((status = decode_putfh(&xdr)) != 0) + goto out; + if ((status = decode_remove(&xdr, &res->cinfo)) != 0) + goto out; + decode_getfattr(&xdr, res->dir_attr, res->server); out: return status; } @@ -3457,7 +3559,14 @@ static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_ goto out; if ((status = decode_putfh(&xdr)) != 0) goto out; - status = decode_rename(&xdr, &res->old_cinfo, &res->new_cinfo); + if ((status = decode_rename(&xdr, &res->old_cinfo, &res->new_cinfo)) != 0) + goto out; + /* Current FH is target directory */ + if (decode_getfattr(&xdr, res->new_fattr, res->server) != 0) + goto out; + if ((status = decode_restorefh(&xdr)) != 0) + goto out; + decode_getfattr(&xdr, res->old_fattr, res->server); out: return status; } @@ -3465,7 +3574,7 @@ out: /* * Decode LINK response */ -static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_change_info *cinfo) +static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_link_res *res) { struct xdr_stream xdr; struct compound_hdr hdr; @@ -3480,7 +3589,17 @@ static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, uint32_t *p, struct 
nfs4_ch goto out; if ((status = decode_putfh(&xdr)) != 0) goto out; - status = decode_link(&xdr, cinfo); + if ((status = decode_link(&xdr, &res->cinfo)) != 0) + goto out; + /* + * Note order: OP_LINK leaves the directory as the current + * filehandle. + */ + if (decode_getfattr(&xdr, res->dir_attr, res->server) != 0) + goto out; + if ((status = decode_restorefh(&xdr)) != 0) + goto out; + decode_getfattr(&xdr, res->fattr, res->server); out: return status; } @@ -3499,13 +3618,17 @@ static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_ goto out; if ((status = decode_putfh(&xdr)) != 0) goto out; + if ((status = decode_savefh(&xdr)) != 0) + goto out; if ((status = decode_create(&xdr,&res->dir_cinfo)) != 0) goto out; if ((status = decode_getfh(&xdr, res->fh)) != 0) goto out; - status = decode_getfattr(&xdr, res->fattr, res->server); - if (status == NFS4ERR_DELAY) - status = 0; + if (decode_getfattr(&xdr, res->fattr, res->server) != 0) + goto out; + if ((status = decode_restorefh(&xdr)) != 0) + goto out; + decode_getfattr(&xdr, res->dir_fattr, res->server); out: return status; } @@ -3623,6 +3746,15 @@ static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_cl if (status) goto out; status = decode_close(&xdr, res); + if (status != 0) + goto out; + /* + * Note: Server may do delete on close for this file + * in which case the getattr call will fail with + * an ESTALE error. Shouldn't be a problem, + * though, since fattr->valid will remain unset. + */ + decode_getfattr(&xdr, res->fattr, res->server); out: return status; } @@ -3643,15 +3775,20 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_ope status = decode_putfh(&xdr); if (status) goto out; + status = decode_savefh(&xdr); + if (status) + goto out; status = decode_open(&xdr, res); if (status) goto out; status = decode_getfh(&xdr, &res->fh); if (status) goto out; - status = decode_getfattr(&xdr, res->f_attr, res->server); - if (status == NFS4ERR_DELAY) - status = 0; + if (decode_getfattr(&xdr, res->f_attr, res->server) != 0) + goto out; + if ((status = decode_restorefh(&xdr)) != 0) + goto out; + decode_getfattr(&xdr, res->dir_attr, res->server); out: return status; } @@ -3869,6 +4006,9 @@ static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_wr if (status) goto out; status = decode_write(&xdr, res); + if (status) + goto out; + decode_getfattr(&xdr, res->fattr, res->server); if (!status) status = res->count; out: @@ -3892,6 +4032,9 @@ static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_w if (status) goto out; status = decode_commit(&xdr, res); + if (status) + goto out; + decode_getfattr(&xdr, res->fattr, res->server); out: return status; } diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index be23c3fb9260..a48a003242c0 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c @@ -61,7 +61,7 @@ nfs_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle, int status; dprintk("%s: call getattr\n", __FUNCTION__); - fattr->valid = 0; + nfs_fattr_init(fattr); status = rpc_call(server->client_sys, NFSPROC_GETATTR, fhandle, fattr, 0); dprintk("%s: reply getattr: %d\n", __FUNCTION__, status); if (status) @@ -93,7 +93,7 @@ nfs_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, int status; dprintk("NFS call getattr\n"); - fattr->valid = 0; + nfs_fattr_init(fattr); status = rpc_call(server->client, NFSPROC_GETATTR, fhandle, fattr, 0); dprintk("NFS reply getattr: %d\n", status); @@ -112,7 +112,7 @@ nfs_proc_setattr(struct 
dentry *dentry, struct nfs_fattr *fattr, int status; dprintk("NFS call setattr\n"); - fattr->valid = 0; + nfs_fattr_init(fattr); status = rpc_call(NFS_CLIENT(inode), NFSPROC_SETATTR, &arg, fattr, 0); if (status == 0) nfs_setattr_update_inode(inode, sattr); @@ -136,7 +136,7 @@ nfs_proc_lookup(struct inode *dir, struct qstr *name, int status; dprintk("NFS call lookup %s\n", name->name); - fattr->valid = 0; + nfs_fattr_init(fattr); status = rpc_call(NFS_CLIENT(dir), NFSPROC_LOOKUP, &arg, &res, 0); dprintk("NFS reply lookup: %d\n", status); return status; @@ -174,7 +174,7 @@ static int nfs_proc_read(struct nfs_read_data *rdata) dprintk("NFS call read %d @ %Ld\n", rdata->args.count, (long long) rdata->args.offset); - fattr->valid = 0; + nfs_fattr_init(fattr); status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags); if (status >= 0) { nfs_refresh_inode(inode, fattr); @@ -203,10 +203,10 @@ static int nfs_proc_write(struct nfs_write_data *wdata) dprintk("NFS call write %d @ %Ld\n", wdata->args.count, (long long) wdata->args.offset); - fattr->valid = 0; + nfs_fattr_init(fattr); status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags); if (status >= 0) { - nfs_refresh_inode(inode, fattr); + nfs_post_op_update_inode(inode, fattr); wdata->res.count = wdata->args.count; wdata->verf.committed = NFS_FILE_SYNC; } @@ -216,7 +216,7 @@ static int nfs_proc_write(struct nfs_write_data *wdata) static int nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, - int flags) + int flags, struct nameidata *nd) { struct nfs_fh fhandle; struct nfs_fattr fattr; @@ -232,7 +232,7 @@ nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, }; int status; - fattr.valid = 0; + nfs_fattr_init(&fattr); dprintk("NFS call create %s\n", dentry->d_name.name); status = rpc_call(NFS_CLIENT(dir), NFSPROC_CREATE, &arg, &res, 0); if (status == 0) @@ -273,12 +273,13 @@ nfs_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr, sattr->ia_size = new_encode_dev(rdev);/* get out your barf bag */ } - fattr.valid = 0; + nfs_fattr_init(&fattr); status = rpc_call(NFS_CLIENT(dir), NFSPROC_CREATE, &arg, &res, 0); + nfs_mark_for_revalidate(dir); if (status == -EINVAL && S_ISFIFO(mode)) { sattr->ia_mode = mode; - fattr.valid = 0; + nfs_fattr_init(&fattr); status = rpc_call(NFS_CLIENT(dir), NFSPROC_CREATE, &arg, &res, 0); } if (status == 0) @@ -305,6 +306,7 @@ nfs_proc_remove(struct inode *dir, struct qstr *name) dprintk("NFS call remove %s\n", name->name); status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); + nfs_mark_for_revalidate(dir); dprintk("NFS reply remove: %d\n", status); return status; @@ -331,8 +333,10 @@ nfs_proc_unlink_done(struct dentry *dir, struct rpc_task *task) { struct rpc_message *msg = &task->tk_msg; - if (msg->rpc_argp) + if (msg->rpc_argp) { + nfs_mark_for_revalidate(dir->d_inode); kfree(msg->rpc_argp); + } return 0; } @@ -352,6 +356,8 @@ nfs_proc_rename(struct inode *old_dir, struct qstr *old_name, dprintk("NFS call rename %s -> %s\n", old_name->name, new_name->name); status = rpc_call(NFS_CLIENT(old_dir), NFSPROC_RENAME, &arg, NULL, 0); + nfs_mark_for_revalidate(old_dir); + nfs_mark_for_revalidate(new_dir); dprintk("NFS reply rename: %d\n", status); return status; } @@ -369,6 +375,7 @@ nfs_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) dprintk("NFS call link %s\n", name->name); status = rpc_call(NFS_CLIENT(inode), NFSPROC_LINK, &arg, NULL, 0); + nfs_mark_for_revalidate(dir); dprintk("NFS reply link: %d\n", status); return status; } @@ -391,9 
+398,10 @@ nfs_proc_symlink(struct inode *dir, struct qstr *name, struct qstr *path, if (path->len > NFS2_MAXPATHLEN) return -ENAMETOOLONG; dprintk("NFS call symlink %s -> %s\n", name->name, path->name); - fattr->valid = 0; + nfs_fattr_init(fattr); fhandle->size = 0; status = rpc_call(NFS_CLIENT(dir), NFSPROC_SYMLINK, &arg, NULL, 0); + nfs_mark_for_revalidate(dir); dprintk("NFS reply symlink: %d\n", status); return status; } @@ -416,8 +424,9 @@ nfs_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr) int status; dprintk("NFS call mkdir %s\n", dentry->d_name.name); - fattr.valid = 0; + nfs_fattr_init(&fattr); status = rpc_call(NFS_CLIENT(dir), NFSPROC_MKDIR, &arg, &res, 0); + nfs_mark_for_revalidate(dir); if (status == 0) status = nfs_instantiate(dentry, &fhandle, &fattr); dprintk("NFS reply mkdir: %d\n", status); @@ -436,6 +445,7 @@ nfs_proc_rmdir(struct inode *dir, struct qstr *name) dprintk("NFS call rmdir %s\n", name->name); status = rpc_call(NFS_CLIENT(dir), NFSPROC_RMDIR, &arg, NULL, 0); + nfs_mark_for_revalidate(dir); dprintk("NFS reply rmdir: %d\n", status); return status; } @@ -484,7 +494,7 @@ nfs_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, int status; dprintk("NFS call statfs\n"); - stat->fattr->valid = 0; + nfs_fattr_init(stat->fattr); status = rpc_call(server->client, NFSPROC_STATFS, fhandle, &fsinfo, 0); dprintk("NFS reply statfs: %d\n", status); if (status) @@ -507,7 +517,7 @@ nfs_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, int status; dprintk("NFS call fsinfo\n"); - info->fattr->valid = 0; + nfs_fattr_init(info->fattr); status = rpc_call(server->client, NFSPROC_STATFS, fhandle, &fsinfo, 0); dprintk("NFS reply fsinfo: %d\n", status); if (status) @@ -579,7 +589,7 @@ nfs_write_done(struct rpc_task *task) struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata; if (task->tk_status >= 0) - nfs_refresh_inode(data->inode, data->res.fattr); + nfs_post_op_update_inode(data->inode, data->res.fattr); nfs_writeback_done(task); } diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 6ceb1d471f20..43b03b19731b 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -184,14 +184,13 @@ static void nfs_readpage_release(struct nfs_page *req) { unlock_page(req->wb_page); - nfs_clear_request(req); - nfs_release_request(req); - dprintk("NFS: read done (%s/%Ld %d@%Ld)\n", req->wb_context->dentry->d_inode->i_sb->s_id, (long long)NFS_FILEID(req->wb_context->dentry->d_inode), req->wb_bytes, (long long)req_offset(req)); + nfs_clear_request(req); + nfs_release_request(req); } /* @@ -216,6 +215,7 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data, data->res.fattr = &data->fattr; data->res.count = count; data->res.eof = 0; + nfs_fattr_init(&data->fattr); NFS_PROTO(inode)->read_setup(data); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 5130eda231d7..819a65f5071f 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -870,6 +870,7 @@ static void nfs_write_rpcsetup(struct nfs_page *req, data->res.fattr = &data->fattr; data->res.count = count; data->res.verf = &data->verf; + nfs_fattr_init(&data->fattr); NFS_PROTO(inode)->write_setup(data, how); @@ -1237,6 +1238,7 @@ static void nfs_commit_rpcsetup(struct list_head *head, data->res.count = 0; data->res.fattr = &data->fattr; data->res.verf = &data->verf; + nfs_fattr_init(&data->fattr); NFS_PROTO(inode)->commit_setup(data, how); diff --git a/fs/nfs_common/nfsacl.c b/fs/nfs_common/nfsacl.c index 251e5a1bb1c4..0c2be8c0307d 100644 --- a/fs/nfs_common/nfsacl.c +++ 
b/fs/nfs_common/nfsacl.c @@ -48,43 +48,26 @@ xdr_nfsace_encode(struct xdr_array2_desc *desc, void *elem) (struct nfsacl_encode_desc *) desc; u32 *p = (u32 *) elem; - if (nfsacl_desc->count < nfsacl_desc->acl->a_count) { - struct posix_acl_entry *entry = - &nfsacl_desc->acl->a_entries[nfsacl_desc->count++]; + struct posix_acl_entry *entry = + &nfsacl_desc->acl->a_entries[nfsacl_desc->count++]; - *p++ = htonl(entry->e_tag | nfsacl_desc->typeflag); - switch(entry->e_tag) { - case ACL_USER_OBJ: - *p++ = htonl(nfsacl_desc->uid); - break; - case ACL_GROUP_OBJ: - *p++ = htonl(nfsacl_desc->gid); - break; - case ACL_USER: - case ACL_GROUP: - *p++ = htonl(entry->e_id); - break; - default: /* Solaris depends on that! */ - *p++ = 0; - break; - } - *p++ = htonl(entry->e_perm & S_IRWXO); - } else { - const struct posix_acl_entry *pa, *pe; - int group_obj_perm = ACL_READ|ACL_WRITE|ACL_EXECUTE; - - FOREACH_ACL_ENTRY(pa, nfsacl_desc->acl, pe) { - if (pa->e_tag == ACL_GROUP_OBJ) { - group_obj_perm = pa->e_perm & S_IRWXO; - break; - } - } - /* fake up ACL_MASK entry */ - *p++ = htonl(ACL_MASK | nfsacl_desc->typeflag); - *p++ = htonl(0); - *p++ = htonl(group_obj_perm); + *p++ = htonl(entry->e_tag | nfsacl_desc->typeflag); + switch(entry->e_tag) { + case ACL_USER_OBJ: + *p++ = htonl(nfsacl_desc->uid); + break; + case ACL_GROUP_OBJ: + *p++ = htonl(nfsacl_desc->gid); + break; + case ACL_USER: + case ACL_GROUP: + *p++ = htonl(entry->e_id); + break; + default: /* Solaris depends on that! */ + *p++ = 0; + break; } - + *p++ = htonl(entry->e_perm & S_IRWXO); return 0; } @@ -105,11 +88,28 @@ nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode, .gid = inode->i_gid, }; int err; + struct posix_acl *acl2 = NULL; if (entries > NFS_ACL_MAX_ENTRIES || xdr_encode_word(buf, base, entries)) return -EINVAL; + if (encode_entries && acl && acl->a_count == 3) { + /* Fake up an ACL_MASK entry. */ + acl2 = posix_acl_alloc(4, GFP_KERNEL); + if (!acl2) + return -ENOMEM; + /* Insert entries in canonical order: other orders seem + to confuse Solaris VxFS. */ + acl2->a_entries[0] = acl->a_entries[0]; /* ACL_USER_OBJ */ + acl2->a_entries[1] = acl->a_entries[1]; /* ACL_GROUP_OBJ */ + acl2->a_entries[2] = acl->a_entries[1]; /* ACL_MASK */ + acl2->a_entries[2].e_tag = ACL_MASK; + acl2->a_entries[3] = acl->a_entries[2]; /* ACL_OTHER */ + nfsacl_desc.acl = acl2; + } err = xdr_encode_array2(buf, base + 4, &nfsacl_desc.desc); + if (acl2) + posix_acl_release(acl2); if (!err) err = 8 + nfsacl_desc.desc.elem_size * nfsacl_desc.desc.array_len; diff --git a/fs/ntfs/ChangeLog b/fs/ntfs/ChangeLog index c7e9237379c2..de58579a1d0e 100644 --- a/fs/ntfs/ChangeLog +++ b/fs/ntfs/ChangeLog @@ -29,7 +29,8 @@ ToDo/Notes: The Windows boot will run chkdsk and then reboot. The user can then immediately boot into Linux rather than having to do a full Windows boot first before rebooting into Linux and we will recognize such a - journal and empty it as it is clean by definition. + journal and empty it as it is clean by definition. Note, this only + works if chkdsk left the journal in an obviously clean state. - Support journals ($LogFile) with only one restart page as well as journals with two different restart pages. We sanity check both and either use the only sane one or the more recent one of the two in the @@ -94,6 +95,16 @@ ToDo/Notes: my ways. - Fix various bugs in the runlist merging code. (Based on libntfs changes by Richard Russon.) + - Fix sparse warnings that have crept in over time. 
+ - Change ntfs_cluster_free() to require a write locked runlist on entry + since we otherwise get into a lock reversal deadlock if a read locked + runlist is passed in. In the process also change it to take an ntfs + inode instead of a vfs inode as parameter. + - Fix the definition of the CHKD ntfs record magic. It had an off by + two error causing it to be CHKB instead of CHKD. + - Fix a stupid bug in __ntfs_bitmap_set_bits_in_run() which caused the + count to become negative and hence we had a wild memset() scribbling + all over the system's ram. 2.1.23 - Implement extension of resident files and make writing safe as well as many bug fixes, cleanups, and enhancements... diff --git a/fs/ntfs/bitmap.c b/fs/ntfs/bitmap.c index 12cf2e30c7dd..7a190cdc60e2 100644 --- a/fs/ntfs/bitmap.c +++ b/fs/ntfs/bitmap.c @@ -1,7 +1,7 @@ /* * bitmap.c - NTFS kernel bitmap handling. Part of the Linux-NTFS project. * - * Copyright (c) 2004 Anton Altaparmakov + * Copyright (c) 2004-2005 Anton Altaparmakov * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published @@ -90,7 +90,8 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit, /* If the first byte is partial, modify the appropriate bits in it. */ if (bit) { u8 *byte = kaddr + pos; - while ((bit & 7) && cnt--) { + while ((bit & 7) && cnt) { + cnt--; if (value) *byte |= 1 << bit++; else diff --git a/fs/ntfs/layout.h b/fs/ntfs/layout.h index 609ad1728ce4..5c248d404f05 100644 --- a/fs/ntfs/layout.h +++ b/fs/ntfs/layout.h @@ -123,7 +123,7 @@ enum { magic_RCRD = const_cpu_to_le32(0x44524352), /* Log record page. */ /* Found in $LogFile/$DATA. (May be found in $MFT/$DATA, also?) */ - magic_CHKD = const_cpu_to_le32(0x424b4843), /* Modified by chkdsk. */ + magic_CHKD = const_cpu_to_le32(0x444b4843), /* Modified by chkdsk. */ /* Found in all ntfs record containing records. */ magic_BAAD = const_cpu_to_le32(0x44414142), /* Failed multi sector @@ -308,10 +308,8 @@ typedef le16 MFT_RECORD_FLAGS; * The _LE versions are to be applied on little endian MFT_REFs. * Note: The _LE versions will return a CPU endian formatted value! */ -typedef enum { - MFT_REF_MASK_CPU = 0x0000ffffffffffffULL, - MFT_REF_MASK_LE = const_cpu_to_le64(0x0000ffffffffffffULL), -} MFT_REF_CONSTS; +#define MFT_REF_MASK_CPU 0x0000ffffffffffffULL +#define MFT_REF_MASK_LE const_cpu_to_le64(MFT_REF_MASK_CPU) typedef u64 MFT_REF; typedef le64 leMFT_REF; diff --git a/fs/ntfs/lcnalloc.c b/fs/ntfs/lcnalloc.c index 7b5934290685..5af3bf0b7eee 100644 --- a/fs/ntfs/lcnalloc.c +++ b/fs/ntfs/lcnalloc.c @@ -779,14 +779,13 @@ out: /** * __ntfs_cluster_free - free clusters on an ntfs volume - * @vi: vfs inode whose runlist describes the clusters to free - * @start_vcn: vcn in the runlist of @vi at which to start freeing clusters + * @ni: ntfs inode whose runlist describes the clusters to free + * @start_vcn: vcn in the runlist of @ni at which to start freeing clusters * @count: number of clusters to free or -1 for all clusters - * @write_locked: true if the runlist is locked for writing * @is_rollback: true if this is a rollback operation * * Free @count clusters starting at the cluster @start_vcn in the runlist - * described by the vfs inode @vi. + * described by the vfs inode @ni. * * If @count is -1, all clusters from @start_vcn to the end of the runlist are * deallocated. 
Thus, to completely free all clusters in a runlist, use @@ -801,31 +800,28 @@ out: * Return the number of deallocated clusters (not counting sparse ones) on * success and -errno on error. * - * Locking: - The runlist described by @vi must be locked on entry and is - * locked on return. Note if the runlist is locked for reading the - * lock may be dropped and reacquired. Note the runlist may be - * modified when needed runlist fragments need to be mapped. + * Locking: - The runlist described by @ni must be locked for writing on entry + * and is locked on return. Note the runlist may be modified when + * needed runlist fragments need to be mapped. * - The volume lcn bitmap must be unlocked on entry and is unlocked * on return. * - This function takes the volume lcn bitmap lock for writing and * modifies the bitmap contents. */ -s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count, - const BOOL write_locked, const BOOL is_rollback) +s64 __ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn, s64 count, + const BOOL is_rollback) { s64 delta, to_free, total_freed, real_freed; - ntfs_inode *ni; ntfs_volume *vol; struct inode *lcnbmp_vi; runlist_element *rl; int err; - BUG_ON(!vi); + BUG_ON(!ni); ntfs_debug("Entering for i_ino 0x%lx, start_vcn 0x%llx, count " - "0x%llx.%s", vi->i_ino, (unsigned long long)start_vcn, + "0x%llx.%s", ni->mft_no, (unsigned long long)start_vcn, (unsigned long long)count, is_rollback ? " (rollback)" : ""); - ni = NTFS_I(vi); vol = ni->vol; lcnbmp_vi = vol->lcnbmp_ino; BUG_ON(!lcnbmp_vi); @@ -843,7 +839,7 @@ s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count, total_freed = real_freed = 0; - rl = ntfs_attr_find_vcn_nolock(ni, start_vcn, write_locked); + rl = ntfs_attr_find_vcn_nolock(ni, start_vcn, TRUE); if (IS_ERR(rl)) { if (!is_rollback) ntfs_error(vol->sb, "Failed to find first runlist " @@ -897,7 +893,7 @@ s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, s64 count, /* Attempt to map runlist. */ vcn = rl->vcn; - rl = ntfs_attr_find_vcn_nolock(ni, vcn, write_locked); + rl = ntfs_attr_find_vcn_nolock(ni, vcn, TRUE); if (IS_ERR(rl)) { err = PTR_ERR(rl); if (!is_rollback) @@ -965,8 +961,7 @@ err_out: * If rollback fails, set the volume errors flag, emit an error * message, and return the error code. */ - delta = __ntfs_cluster_free(vi, start_vcn, total_freed, write_locked, - TRUE); + delta = __ntfs_cluster_free(ni, start_vcn, total_freed, TRUE); if (delta < 0) { ntfs_error(vol->sb, "Failed to rollback (error %i). Leaving " "inconsistent metadata! Unmount and run " diff --git a/fs/ntfs/lcnalloc.h b/fs/ntfs/lcnalloc.h index e4d7fb98d685..a6a8827882e7 100644 --- a/fs/ntfs/lcnalloc.h +++ b/fs/ntfs/lcnalloc.h @@ -2,7 +2,7 @@ * lcnalloc.h - Exports for NTFS kernel cluster (de)allocation. Part of the * Linux-NTFS project. 
* - * Copyright (c) 2004 Anton Altaparmakov + * Copyright (c) 2004-2005 Anton Altaparmakov * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published @@ -28,6 +28,7 @@ #include #include "types.h" +#include "inode.h" #include "runlist.h" #include "volume.h" @@ -42,18 +43,17 @@ extern runlist_element *ntfs_cluster_alloc(ntfs_volume *vol, const VCN start_vcn, const s64 count, const LCN start_lcn, const NTFS_CLUSTER_ALLOCATION_ZONES zone); -extern s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, - s64 count, const BOOL write_locked, const BOOL is_rollback); +extern s64 __ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn, + s64 count, const BOOL is_rollback); /** * ntfs_cluster_free - free clusters on an ntfs volume - * @vi: vfs inode whose runlist describes the clusters to free - * @start_vcn: vcn in the runlist of @vi at which to start freeing clusters + * @ni: ntfs inode whose runlist describes the clusters to free + * @start_vcn: vcn in the runlist of @ni at which to start freeing clusters * @count: number of clusters to free or -1 for all clusters - * @write_locked: true if the runlist is locked for writing * * Free @count clusters starting at the cluster @start_vcn in the runlist - * described by the vfs inode @vi. + * described by the ntfs inode @ni. * * If @count is -1, all clusters from @start_vcn to the end of the runlist are * deallocated. Thus, to completely free all clusters in a runlist, use @@ -65,19 +65,18 @@ extern s64 __ntfs_cluster_free(struct inode *vi, const VCN start_vcn, * Return the number of deallocated clusters (not counting sparse ones) on * success and -errno on error. * - * Locking: - The runlist described by @vi must be locked on entry and is - * locked on return. Note if the runlist is locked for reading the - * lock may be dropped and reacquired. Note the runlist may be - * modified when needed runlist fragments need to be mapped. + * Locking: - The runlist described by @ni must be locked for writing on entry + * and is locked on return. Note the runlist may be modified when + * needed runlist fragments need to be mapped. * - The volume lcn bitmap must be unlocked on entry and is unlocked * on return. * - This function takes the volume lcn bitmap lock for writing and * modifies the bitmap contents. */ -static inline s64 ntfs_cluster_free(struct inode *vi, const VCN start_vcn, - s64 count, const BOOL write_locked) +static inline s64 ntfs_cluster_free(ntfs_inode *ni, const VCN start_vcn, + s64 count) { - return __ntfs_cluster_free(vi, start_vcn, count, write_locked, FALSE); + return __ntfs_cluster_free(ni, start_vcn, count, FALSE); } extern int ntfs_cluster_free_from_rl_nolock(ntfs_volume *vol, diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c index 0173e95500d9..0fd70295cca6 100644 --- a/fs/ntfs/logfile.c +++ b/fs/ntfs/logfile.c @@ -51,7 +51,8 @@ static BOOL ntfs_check_restart_page_header(struct inode *vi, RESTART_PAGE_HEADER *rp, s64 pos) { u32 logfile_system_page_size, logfile_log_page_size; - u16 usa_count, usa_ofs, usa_end, ra_ofs; + u16 ra_ofs, usa_count, usa_ofs, usa_end = 0; + BOOL have_usa = TRUE; ntfs_debug("Entering."); /* @@ -86,6 +87,14 @@ static BOOL ntfs_check_restart_page_header(struct inode *vi, (int)sle16_to_cpu(rp->minor_ver)); return FALSE; } + /* + * If chkdsk has been run the restart page may not be protected by an + * update sequence array. 
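/*
 * Hedged usage sketch of the reworked ntfs_cluster_free() wrapper above
 * (the helper below is illustrative and assumes the inode's runlist lock is
 * the usual rw_semaphore at ni->runlist.lock): the caller now passes the
 * ntfs inode directly and is responsible for holding the runlist lock for
 * writing across the call.
 */
#include <linux/rwsem.h>
#include "lcnalloc.h"

static s64 example_free_run(ntfs_inode *ni, const VCN start_vcn)
{
	s64 nr;

	down_write(&ni->runlist.lock);
	nr = ntfs_cluster_free(ni, start_vcn, -1);	/* -1 = free to end of runlist */
	up_write(&ni->runlist.lock);
	return nr;	/* clusters freed, or -errno */
}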
+ */ + if (ntfs_is_chkd_record(rp->magic) && !le16_to_cpu(rp->usa_count)) { + have_usa = FALSE; + goto skip_usa_checks; + } /* Verify the size of the update sequence array. */ usa_count = 1 + (logfile_system_page_size >> NTFS_BLOCK_SIZE_BITS); if (usa_count != le16_to_cpu(rp->usa_count)) { @@ -102,6 +111,7 @@ static BOOL ntfs_check_restart_page_header(struct inode *vi, "inconsistent update sequence array offset."); return FALSE; } +skip_usa_checks: /* * Verify the position of the restart area. It must be: * - aligned to 8-byte boundary, @@ -109,7 +119,8 @@ static BOOL ntfs_check_restart_page_header(struct inode *vi, * - within the system page size. */ ra_ofs = le16_to_cpu(rp->restart_area_offset); - if (ra_ofs & 7 || ra_ofs < usa_end || + if (ra_ofs & 7 || (have_usa ? ra_ofs < usa_end : + ra_ofs < sizeof(RESTART_PAGE_HEADER)) || ra_ofs > logfile_system_page_size) { ntfs_error(vi->i_sb, "$LogFile restart page specifies " "inconsistent restart area offset."); @@ -402,8 +413,12 @@ static int ntfs_check_and_load_restart_page(struct inode *vi, idx++; } while (to_read > 0); } - /* Perform the multi sector transfer deprotection on the buffer. */ - if (post_read_mst_fixup((NTFS_RECORD*)trp, + /* + * Perform the multi sector transfer deprotection on the buffer if the + * restart page is protected. + */ + if ((!ntfs_is_chkd_record(trp->magic) || le16_to_cpu(trp->usa_count)) + && post_read_mst_fixup((NTFS_RECORD*)trp, le32_to_cpu(rp->system_page_size))) { /* * A multi sector tranfer error was detected. We only need to @@ -615,11 +630,16 @@ is_empty: * Otherwise just throw it away. */ if (rstr2_lsn > rstr1_lsn) { + ntfs_debug("Using second restart page as it is more " + "recent."); ntfs_free(rstr1_ph); rstr1_ph = rstr2_ph; /* rstr1_lsn = rstr2_lsn; */ - } else + } else { + ntfs_debug("Using first restart page as it is more " + "recent."); ntfs_free(rstr2_ph); + } rstr2_ph = NULL; } /* All consistency checks passed. */ diff --git a/fs/ntfs/logfile.h b/fs/ntfs/logfile.h index 42388f95ea6d..a51f3dd0e9eb 100644 --- a/fs/ntfs/logfile.h +++ b/fs/ntfs/logfile.h @@ -113,7 +113,7 @@ typedef struct { */ enum { RESTART_VOLUME_IS_CLEAN = const_cpu_to_le16(0x0002), - RESTART_SPACE_FILLER = 0xffff, /* gcc: Force enum bit width to 16. */ + RESTART_SPACE_FILLER = const_cpu_to_le16(0xffff), /* gcc: Force enum bit width to 16. */ } __attribute__ ((__packed__)); typedef le16 RESTART_AREA_FLAGS; diff --git a/fs/ntfs/malloc.h b/fs/ntfs/malloc.h index 006946efca8c..590887b943f5 100644 --- a/fs/ntfs/malloc.h +++ b/fs/ntfs/malloc.h @@ -40,7 +40,7 @@ * Depending on @gfp_mask the allocation may be guaranteed to succeed. */ static inline void *__ntfs_malloc(unsigned long size, - unsigned int __nocast gfp_mask) + gfp_t gfp_mask) { if (likely(size <= PAGE_SIZE)) { BUG_ON(!size); diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c index 2c32b84385a8..b011369b5956 100644 --- a/fs/ntfs/mft.c +++ b/fs/ntfs/mft.c @@ -58,7 +58,8 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni) * overflowing the unsigned long, but I don't think we would ever get * here if the volume was that big... 
*/ - index = ni->mft_no << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT; + index = (u64)ni->mft_no << vol->mft_record_size_bits >> + PAGE_CACHE_SHIFT; ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; i_size = i_size_read(mft_vi); @@ -1953,7 +1954,7 @@ restore_undo_alloc: a = ctx->attr; a->data.non_resident.highest_vcn = cpu_to_sle64(old_last_vcn - 1); undo_alloc: - if (ntfs_cluster_free(vol->mft_ino, old_last_vcn, -1, TRUE) < 0) { + if (ntfs_cluster_free(mft_ni, old_last_vcn, -1) < 0) { ntfs_error(vol->sb, "Failed to free clusters from mft data " "attribute.%s", es); NVolSetErrors(vol); diff --git a/fs/ntfs/unistr.c b/fs/ntfs/unistr.c index a389a5a16c84..0ea887fc859c 100644 --- a/fs/ntfs/unistr.c +++ b/fs/ntfs/unistr.c @@ -1,7 +1,7 @@ /* * unistr.c - NTFS Unicode string handling. Part of the Linux-NTFS project. * - * Copyright (c) 2001-2004 Anton Altaparmakov + * Copyright (c) 2001-2005 Anton Altaparmakov * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published diff --git a/fs/open.c b/fs/open.c index f0d90cf0495c..8d06ec911fd9 100644 --- a/fs/open.c +++ b/fs/open.c @@ -739,7 +739,8 @@ asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group) } static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt, - int flags, struct file *f) + int flags, struct file *f, + int (*open)(struct inode *, struct file *)) { struct inode *inode; int error; @@ -761,11 +762,14 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt, f->f_op = fops_get(inode->i_fop); file_move(f, &inode->i_sb->s_files); - if (f->f_op && f->f_op->open) { - error = f->f_op->open(inode,f); + if (!open && f->f_op) + open = f->f_op->open; + if (open) { + error = open(inode, f); if (error) goto cleanup_all; } + f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC); file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping); @@ -814,28 +818,75 @@ struct file *filp_open(const char * filename, int flags, int mode) { int namei_flags, error; struct nameidata nd; - struct file *f; namei_flags = flags; if ((namei_flags+1) & O_ACCMODE) namei_flags++; - if (namei_flags & O_TRUNC) - namei_flags |= 2; - - error = -ENFILE; - f = get_empty_filp(); - if (f == NULL) - return ERR_PTR(error); error = open_namei(filename, namei_flags, mode, &nd); if (!error) - return __dentry_open(nd.dentry, nd.mnt, flags, f); + return nameidata_to_filp(&nd, flags); - put_filp(f); return ERR_PTR(error); } EXPORT_SYMBOL(filp_open); +/** + * lookup_instantiate_filp - instantiates the open intent filp + * @nd: pointer to nameidata + * @dentry: pointer to dentry + * @open: open callback + * + * Helper for filesystems that want to use lookup open intents and pass back + * a fully instantiated struct file to the caller. + * This function is meant to be called from within a filesystem's + * lookup method. + * Note that in case of error, nd->intent.open.file is destroyed, but the + * path information remains valid. + * If the open callback is set to NULL, then the standard f_op->open() + * filesystem callback is substituted. 
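/*
 * Illustrative sketch of how a filesystem's ->lookup() can use the new
 * helper documented above (the myfs_* names and the LOOKUP_OPEN test are
 * assumptions, not part of this patch): when the VFS passes down an open
 * intent, the filesystem hands back a fully instantiated struct file here
 * instead of letting the caller open the dentry again later.
 */
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/err.h>

static int myfs_open_file(struct inode *inode, struct file *filp)
{
	return 0;	/* filesystem-specific open work would go here */
}

static struct dentry *myfs_lookup(struct inode *dir, struct dentry *dentry,
		struct nameidata *nd)
{
	/* ... resolve the name and d_add(dentry, inode) as usual ... */
	if (nd && (nd->flags & LOOKUP_OPEN)) {
		struct file *filp;

		filp = lookup_instantiate_filp(nd, dentry, myfs_open_file);
		if (IS_ERR(filp))
			/* intent filp is destroyed, path info remains valid */
			return ERR_PTR(PTR_ERR(filp));
	}
	return NULL;
}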
+ */ +struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry, + int (*open)(struct inode *, struct file *)) +{ + if (IS_ERR(nd->intent.open.file)) + goto out; + if (IS_ERR(dentry)) + goto out_err; + nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->mnt), + nd->intent.open.flags - 1, + nd->intent.open.file, + open); +out: + return nd->intent.open.file; +out_err: + release_open_intent(nd); + nd->intent.open.file = (struct file *)dentry; + goto out; +} +EXPORT_SYMBOL_GPL(lookup_instantiate_filp); + +/** + * nameidata_to_filp - convert a nameidata to an open filp. + * @nd: pointer to nameidata + * @flags: open flags + * + * Note that this function destroys the original nameidata + */ +struct file *nameidata_to_filp(struct nameidata *nd, int flags) +{ + struct file *filp; + + /* Pick up the filp from the open intent */ + filp = nd->intent.open.file; + /* Has the filesystem initialised the file for us? */ + if (filp->f_dentry == NULL) + filp = __dentry_open(nd->dentry, nd->mnt, flags, filp, NULL); + else + path_release(nd); + return filp; +} + struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags) { int error; @@ -846,7 +897,7 @@ struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags) if (f == NULL) return ERR_PTR(error); - return __dentry_open(dentry, mnt, flags, f); + return __dentry_open(dentry, mnt, flags, f, NULL); } EXPORT_SYMBOL(dentry_open); diff --git a/fs/partitions/check.c b/fs/partitions/check.c index 77e178f13162..1e848648a322 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c @@ -430,7 +430,7 @@ void del_gendisk(struct gendisk *disk) disk->flags &= ~GENHD_FL_UP; unlink_gendisk(disk); disk_stat_set_all(disk, 0); - disk->stamp = disk->stamp_idle = 0; + disk->stamp = 0; devfs_remove_disk(disk); diff --git a/fs/posix_acl.c b/fs/posix_acl.c index 296480e96dd5..6c8dcf7613fd 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c @@ -35,7 +35,7 @@ EXPORT_SYMBOL(posix_acl_permission); * Allocate a new ACL with the specified number of entries. */ struct posix_acl * -posix_acl_alloc(int count, unsigned int __nocast flags) +posix_acl_alloc(int count, gfp_t flags) { const size_t size = sizeof(struct posix_acl) + count * sizeof(struct posix_acl_entry); @@ -51,7 +51,7 @@ posix_acl_alloc(int count, unsigned int __nocast flags) * Clone an ACL. */ struct posix_acl * -posix_acl_clone(const struct posix_acl *acl, unsigned int __nocast flags) +posix_acl_clone(const struct posix_acl *acl, gfp_t flags) { struct posix_acl *clone = NULL; @@ -185,7 +185,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, mode_t *mode_p) * Create an ACL representing the file mode permission bits of an inode. 
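/*
 * The posix_acl_alloc()/posix_acl_clone() changes above, like the many
 * dma_alloc_coherent() and xfs kmem conversions elsewhere in this series,
 * all swap "unsigned int __nocast" for the dedicated gfp_t type.  Sketch of
 * the point (illustrative function, not from the patch): gfp_t is declared
 * __bitwise, so sparse can warn when a caller passes something that is not
 * an allocation mask where one is expected.
 */
#include <linux/gfp.h>
#include <linux/slab.h>

static void *alloc_blob(size_t size, gfp_t flags)
{
	return kmalloc(size, flags);	/* e.g. alloc_blob(len, GFP_KERNEL) */
}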
*/ struct posix_acl * -posix_acl_from_mode(mode_t mode, unsigned int __nocast flags) +posix_acl_from_mode(mode_t mode, gfp_t flags) { struct posix_acl *acl = posix_acl_alloc(3, flags); if (!acl) diff --git a/fs/proc/base.c b/fs/proc/base.c index fb34f88a4a74..a170450aadb1 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -103,7 +103,9 @@ enum pid_directory_inos { PROC_TGID_NUMA_MAPS, PROC_TGID_MOUNTS, PROC_TGID_WCHAN, +#ifdef CONFIG_MMU PROC_TGID_SMAPS, +#endif #ifdef CONFIG_SCHEDSTATS PROC_TGID_SCHEDSTAT, #endif @@ -141,7 +143,9 @@ enum pid_directory_inos { PROC_TID_NUMA_MAPS, PROC_TID_MOUNTS, PROC_TID_WCHAN, +#ifdef CONFIG_MMU PROC_TID_SMAPS, +#endif #ifdef CONFIG_SCHEDSTATS PROC_TID_SCHEDSTAT, #endif @@ -195,7 +199,9 @@ static struct pid_entry tgid_base_stuff[] = { E(PROC_TGID_ROOT, "root", S_IFLNK|S_IRWXUGO), E(PROC_TGID_EXE, "exe", S_IFLNK|S_IRWXUGO), E(PROC_TGID_MOUNTS, "mounts", S_IFREG|S_IRUGO), +#ifdef CONFIG_MMU E(PROC_TGID_SMAPS, "smaps", S_IFREG|S_IRUGO), +#endif #ifdef CONFIG_SECURITY E(PROC_TGID_ATTR, "attr", S_IFDIR|S_IRUGO|S_IXUGO), #endif @@ -235,7 +241,9 @@ static struct pid_entry tid_base_stuff[] = { E(PROC_TID_ROOT, "root", S_IFLNK|S_IRWXUGO), E(PROC_TID_EXE, "exe", S_IFLNK|S_IRWXUGO), E(PROC_TID_MOUNTS, "mounts", S_IFREG|S_IRUGO), +#ifdef CONFIG_MMU E(PROC_TID_SMAPS, "smaps", S_IFREG|S_IRUGO), +#endif #ifdef CONFIG_SECURITY E(PROC_TID_ATTR, "attr", S_IFDIR|S_IRUGO|S_IXUGO), #endif @@ -343,7 +351,8 @@ static int proc_root_link(struct inode *inode, struct dentry **dentry, struct vf /* Same as proc_root_link, but this addionally tries to get fs from other * threads in the group */ -static int proc_task_root_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt) +static int proc_task_root_link(struct inode *inode, struct dentry **dentry, + struct vfsmount **mnt) { struct fs_struct *fs; int result = -ENOENT; @@ -357,9 +366,10 @@ static int proc_task_root_link(struct inode *inode, struct dentry **dentry, stru } else { /* Try to get fs from other threads */ task_unlock(leader); - struct task_struct *task = leader; read_lock(&tasklist_lock); - if (pid_alive(task)) { + if (pid_alive(leader)) { + struct task_struct *task = leader; + while ((task = next_thread(task)) != leader) { task_lock(task); fs = task->fs; @@ -628,6 +638,7 @@ static struct file_operations proc_numa_maps_operations = { }; #endif +#ifdef CONFIG_MMU extern struct seq_operations proc_pid_smaps_op; static int smaps_open(struct inode *inode, struct file *file) { @@ -646,6 +657,7 @@ static struct file_operations proc_smaps_operations = { .llseek = seq_lseek, .release = seq_release, }; +#endif extern struct seq_operations mounts_op; static int mounts_open(struct inode *inode, struct file *file) @@ -1679,10 +1691,12 @@ static struct dentry *proc_pident_lookup(struct inode *dir, case PROC_TGID_MOUNTS: inode->i_fop = &proc_mounts_operations; break; +#ifdef CONFIG_MMU case PROC_TID_SMAPS: case PROC_TGID_SMAPS: inode->i_fop = &proc_smaps_operations; break; +#endif #ifdef CONFIG_SECURITY case PROC_TID_ATTR: inode->i_nlink = 2; diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c index f3bf016d5ee3..cff10ab1af63 100644 --- a/fs/proc/nommu.c +++ b/fs/proc/nommu.c @@ -91,6 +91,7 @@ static void *nommu_vma_list_start(struct seq_file *m, loff_t *_pos) next = _rb; break; } + pos--; } return next; diff --git a/fs/read_write.c b/fs/read_write.c index b60324aaa2b6..a091ee4f430d 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -497,6 +497,9 @@ static ssize_t do_readv_writev(int type, struct file *file, } ret = 
rw_verify_area(type, file, pos, tot_len); + if (ret) + goto out; + ret = security_file_permission(file, type == READ ? MAY_READ : MAY_WRITE); if (ret) goto out; diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c index 2706e2adffab..45829889dcdc 100644 --- a/fs/reiserfs/fix_node.c +++ b/fs/reiserfs/fix_node.c @@ -2022,7 +2022,7 @@ static int get_neighbors(struct tree_balance *p_s_tb, int n_h) } #ifdef CONFIG_REISERFS_CHECK -void *reiserfs_kmalloc(size_t size, int flags, struct super_block *s) +void *reiserfs_kmalloc(size_t size, gfp_t flags, struct super_block *s) { void *vp; static size_t malloced; diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index d76ee6c4f9b8..5f82352b97e1 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -2842,7 +2842,7 @@ static int reiserfs_set_page_dirty(struct page *page) * even in -o notail mode, we can't be sure an old mount without -o notail * didn't create files with tails. */ -static int reiserfs_releasepage(struct page *page, int unused_gfp_flags) +static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags) { struct inode *inode = page->mapping->host; struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb); diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 87ac9dc8b381..72e120798677 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -453,7 +453,7 @@ static struct page *reiserfs_get_page(struct inode *dir, unsigned long n) struct page *page; /* We can deadlock if we try to free dentries, and an unlink/rmdir has just occured - GFP_NOFS avoids this */ - mapping->flags = (mapping->flags & ~__GFP_BITS_MASK) | GFP_NOFS; + mapping_set_gfp_mask(mapping, GFP_NOFS); page = read_cache_page(mapping, n, (filler_t *) mapping->a_ops->readpage, NULL); if (!IS_ERR(page)) { diff --git a/fs/relayfs/buffers.c b/fs/relayfs/buffers.c index 2aa8e2719999..84e21ffa5ca8 100644 --- a/fs/relayfs/buffers.c +++ b/fs/relayfs/buffers.c @@ -109,7 +109,7 @@ static void *relay_alloc_buf(struct rchan_buf *buf, unsigned long size) if (unlikely(!buf->page_array[i])) goto depopulate; } - mem = vmap(buf->page_array, n_pages, GFP_KERNEL, PAGE_KERNEL); + mem = vmap(buf->page_array, n_pages, VM_MAP, PAGE_KERNEL); if (!mem) goto depopulate; diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c index 4b184559f231..3c92162dc728 100644 --- a/fs/xfs/linux-2.6/kmem.c +++ b/fs/xfs/linux-2.6/kmem.c @@ -47,9 +47,9 @@ void * kmem_alloc(size_t size, unsigned int __nocast flags) { - int retries = 0; - unsigned int lflags = kmem_flags_convert(flags); - void *ptr; + int retries = 0; + gfp_t lflags = kmem_flags_convert(flags); + void *ptr; do { if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS) @@ -107,9 +107,9 @@ kmem_realloc(void *ptr, size_t newsize, size_t oldsize, void * kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags) { - int retries = 0; - unsigned int lflags = kmem_flags_convert(flags); - void *ptr; + int retries = 0; + gfp_t lflags = kmem_flags_convert(flags); + void *ptr; do { ptr = kmem_cache_alloc(zone, lflags); diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h index 109fcf27e256..f4bb78c268c0 100644 --- a/fs/xfs/linux-2.6/kmem.h +++ b/fs/xfs/linux-2.6/kmem.h @@ -81,9 +81,9 @@ typedef unsigned long xfs_pflags_t; *(NSTATEP) = *(OSTATEP); \ } while (0) -static __inline unsigned int kmem_flags_convert(unsigned int __nocast flags) +static __inline gfp_t kmem_flags_convert(unsigned int __nocast flags) { - unsigned int lflags = __GFP_NOWARN; /* we'll report problems, if need be */ + gfp_t lflags = 
__GFP_NOWARN; /* we'll report problems, if need be */ #ifdef DEBUG if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL))) { @@ -129,13 +129,12 @@ extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast); extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast); extern void *kmem_alloc(size_t, unsigned int __nocast); -extern void *kmem_realloc(void *, size_t, size_t, - unsigned int __nocast); +extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast); extern void *kmem_zalloc(size_t, unsigned int __nocast); extern void kmem_free(void *, size_t); typedef struct shrinker *kmem_shaker_t; -typedef int (*kmem_shake_func_t)(int, unsigned int); +typedef int (*kmem_shake_func_t)(int, gfp_t); static __inline kmem_shaker_t kmem_shake_register(kmem_shake_func_t sfunc) @@ -150,7 +149,7 @@ kmem_shake_deregister(kmem_shaker_t shrinker) } static __inline int -kmem_shake_allow(unsigned int gfp_mask) +kmem_shake_allow(gfp_t gfp_mask) { return (gfp_mask & __GFP_WAIT); } diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index c6c077978fe3..7aa398724706 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -1296,7 +1296,7 @@ linvfs_invalidate_page( STATIC int linvfs_release_page( struct page *page, - int gfp_mask) + gfp_t gfp_mask) { struct inode *inode = page->mapping->host; int dirty, delalloc, unmapped, unwritten; diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index e82cf72ac599..ba4767c04adf 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -64,7 +64,7 @@ STATIC kmem_cache_t *pagebuf_zone; STATIC kmem_shaker_t pagebuf_shake; -STATIC int xfsbufd_wakeup(int, unsigned int); +STATIC int xfsbufd_wakeup(int, gfp_t); STATIC void pagebuf_delwri_queue(xfs_buf_t *, int); STATIC struct workqueue_struct *xfslogd_workqueue; @@ -383,7 +383,7 @@ _pagebuf_lookup_pages( size_t blocksize = bp->pb_target->pbr_bsize; size_t size = bp->pb_count_desired; size_t nbytes, offset; - int gfp_mask = pb_to_gfp(flags); + gfp_t gfp_mask = pb_to_gfp(flags); unsigned short page_count, i; pgoff_t first; loff_t end; @@ -1749,8 +1749,8 @@ STATIC int xfsbufd_force_sleep; STATIC int xfsbufd_wakeup( - int priority, - unsigned int mask) + int priority, + gfp_t mask) { if (xfsbufd_force_sleep) return 0; diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h index 1b383e3cb68c..20ac3d95ecd9 100644 --- a/include/asm-alpha/atomic.h +++ b/include/asm-alpha/atomic.h @@ -1,6 +1,8 @@ #ifndef _ALPHA_ATOMIC_H #define _ALPHA_ATOMIC_H +#include + /* * Atomic operations that C can't guarantee us. Useful for * resource counting etc... 
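/*
 * Usage sketch for the gfp_t-typed shaker hook above (the callback below is
 * illustrative and assumes the fs/xfs/linux-2.6/kmem.h context): the mask
 * the VM passes in says whether the allocation that triggered reclaim may
 * sleep, and kmem_shake_allow() is simply that test.
 */
#include "kmem.h"

static int example_shake(int nr_to_scan, gfp_t gfp_mask)
{
	if (!kmem_shake_allow(gfp_mask))	/* !(gfp_mask & __GFP_WAIT) */
		return 0;			/* atomic context: touch nothing */
	/* ... free reclaimable cache objects, possibly sleeping ... */
	return 0;
}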
@@ -100,18 +102,19 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v) static __inline__ long atomic_add_return(int i, atomic_t * v) { long temp, result; + smp_mb(); __asm__ __volatile__( "1: ldl_l %0,%1\n" " addl %0,%3,%2\n" " addl %0,%3,%0\n" " stl_c %0,%1\n" " beq %0,2f\n" - " mb\n" ".subsection 2\n" "2: br 1b\n" ".previous" :"=&r" (temp), "=m" (v->counter), "=&r" (result) :"Ir" (i), "m" (v->counter) : "memory"); + smp_mb(); return result; } @@ -120,54 +123,57 @@ static __inline__ long atomic_add_return(int i, atomic_t * v) static __inline__ long atomic64_add_return(long i, atomic64_t * v) { long temp, result; + smp_mb(); __asm__ __volatile__( "1: ldq_l %0,%1\n" " addq %0,%3,%2\n" " addq %0,%3,%0\n" " stq_c %0,%1\n" " beq %0,2f\n" - " mb\n" ".subsection 2\n" "2: br 1b\n" ".previous" :"=&r" (temp), "=m" (v->counter), "=&r" (result) :"Ir" (i), "m" (v->counter) : "memory"); + smp_mb(); return result; } static __inline__ long atomic_sub_return(int i, atomic_t * v) { long temp, result; + smp_mb(); __asm__ __volatile__( "1: ldl_l %0,%1\n" " subl %0,%3,%2\n" " subl %0,%3,%0\n" " stl_c %0,%1\n" " beq %0,2f\n" - " mb\n" ".subsection 2\n" "2: br 1b\n" ".previous" :"=&r" (temp), "=m" (v->counter), "=&r" (result) :"Ir" (i), "m" (v->counter) : "memory"); + smp_mb(); return result; } static __inline__ long atomic64_sub_return(long i, atomic64_t * v) { long temp, result; + smp_mb(); __asm__ __volatile__( "1: ldq_l %0,%1\n" " subq %0,%3,%2\n" " subq %0,%3,%0\n" " stq_c %0,%1\n" " beq %0,2f\n" - " mb\n" ".subsection 2\n" "2: br 1b\n" ".previous" :"=&r" (temp), "=m" (v->counter), "=&r" (result) :"Ir" (i), "m" (v->counter) : "memory"); + smp_mb(); return result; } diff --git a/include/asm-alpha/barrier.h b/include/asm-alpha/barrier.h new file mode 100644 index 000000000000..229c83fe77cb --- /dev/null +++ b/include/asm-alpha/barrier.h @@ -0,0 +1,34 @@ +#ifndef __BARRIER_H +#define __BARRIER_H + +#define mb() \ +__asm__ __volatile__("mb": : :"memory") + +#define rmb() \ +__asm__ __volatile__("mb": : :"memory") + +#define wmb() \ +__asm__ __volatile__("wmb": : :"memory") + +#define read_barrier_depends() \ +__asm__ __volatile__("mb": : :"memory") + +#ifdef CONFIG_SMP +#define smp_mb() mb() +#define smp_rmb() rmb() +#define smp_wmb() wmb() +#define smp_read_barrier_depends() read_barrier_depends() +#else +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#define smp_read_barrier_depends() barrier() +#endif + +#define set_mb(var, value) \ +do { var = value; mb(); } while (0) + +#define set_wmb(var, value) \ +do { var = value; wmb(); } while (0) + +#endif /* __BARRIER_H */ diff --git a/include/asm-alpha/compiler.h b/include/asm-alpha/compiler.h index 399c33b7be51..0a4a8b40dfcd 100644 --- a/include/asm-alpha/compiler.h +++ b/include/asm-alpha/compiler.h @@ -98,6 +98,9 @@ #undef inline #undef __inline__ #undef __inline - +#if __GNUC__ == 3 && __GNUC_MINOR__ >= 1 || __GNUC__ > 3 +#undef __always_inline +#define __always_inline inline __attribute__((always_inline)) +#endif #endif /* __ALPHA_COMPILER_H */ diff --git a/include/asm-alpha/dma-mapping.h b/include/asm-alpha/dma-mapping.h index c675f282d6ad..680f7ecbb28f 100644 --- a/include/asm-alpha/dma-mapping.h +++ b/include/asm-alpha/dma-mapping.h @@ -31,7 +31,7 @@ #else /* no PCI - no IOMMU. 
*/ void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, int gfp); + dma_addr_t *dma_handle, gfp_t gfp); int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction); diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h index bdb4d66418f1..050e86d12891 100644 --- a/include/asm-alpha/system.h +++ b/include/asm-alpha/system.h @@ -4,6 +4,7 @@ #include #include #include +#include /* * System defines.. Note that this is included both from .c and .S @@ -139,36 +140,6 @@ extern void halt(void) __attribute__((noreturn)); struct task_struct; extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*); -#define mb() \ -__asm__ __volatile__("mb": : :"memory") - -#define rmb() \ -__asm__ __volatile__("mb": : :"memory") - -#define wmb() \ -__asm__ __volatile__("wmb": : :"memory") - -#define read_barrier_depends() \ -__asm__ __volatile__("mb": : :"memory") - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define smp_read_barrier_depends() read_barrier_depends() -#else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() barrier() -#endif - -#define set_mb(var, value) \ -do { var = value; mb(); } while (0) - -#define set_wmb(var, value) \ -do { var = value; wmb(); } while (0) - #define imb() \ __asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory") diff --git a/include/asm-arm/arch-h720x/system.h b/include/asm-arm/arch-h720x/system.h index 0b025e227ec2..09eda84592ff 100644 --- a/include/asm-arm/arch-h720x/system.h +++ b/include/asm-arm/arch-h720x/system.h @@ -17,9 +17,11 @@ static void arch_idle(void) { CPU_REG (PMU_BASE, PMU_MODE) = PMU_MODE_IDLE; - __asm__ __volatile__( - "mov r0, r0\n\t" - "mov r0, r0"); + nop(); + nop(); + CPU_REG (PMU_BASE, PMU_MODE) = PMU_MODE_RUN; + nop(); + nop(); } diff --git a/include/asm-arm/arch-imx/imx-regs.h b/include/asm-arm/arch-imx/imx-regs.h index 93b840e8fa60..a6912b3d8671 100644 --- a/include/asm-arm/arch-imx/imx-regs.h +++ b/include/asm-arm/arch-imx/imx-regs.h @@ -76,6 +76,7 @@ #define GPIO_PIN_MASK 0x1f #define GPIO_PORT_MASK (0x3 << 5) +#define GPIO_PORT_SHIFT 5 #define GPIO_PORTA (0<<5) #define GPIO_PORTB (1<<5) #define GPIO_PORTC (2<<5) @@ -88,24 +89,37 @@ #define GPIO_PF (0<<9) #define GPIO_AF (1<<9) +#define GPIO_OCR_SHIFT 10 #define GPIO_OCR_MASK (3<<10) #define GPIO_AIN (0<<10) #define GPIO_BIN (1<<10) #define GPIO_CIN (2<<10) -#define GPIO_GPIO (3<<10) +#define GPIO_DR (3<<10) -#define GPIO_AOUT (1<<12) -#define GPIO_BOUT (1<<13) +#define GPIO_AOUT_SHIFT 12 +#define GPIO_AOUT_MASK (3<<12) +#define GPIO_AOUT (0<<12) +#define GPIO_AOUT_ISR (1<<12) +#define GPIO_AOUT_0 (2<<12) +#define GPIO_AOUT_1 (3<<12) + +#define GPIO_BOUT_SHIFT 14 +#define GPIO_BOUT_MASK (3<<14) +#define GPIO_BOUT (0<<14) +#define GPIO_BOUT_ISR (1<<14) +#define GPIO_BOUT_0 (2<<14) +#define GPIO_BOUT_1 (3<<14) + +#define GPIO_GIUS (1<<16) /* assignements for GPIO alternate/primary functions */ /* FIXME: This list is not completed. 
The correct directions are * missing on some (many) pins */ -#define PA0_PF_A24 ( GPIO_PORTA | GPIO_PF | 0 ) -#define PA0_AIN_SPI2_CLK ( GPIO_PORTA | GPIO_OUT | GPIO_AIN | 0 ) +#define PA0_AIN_SPI2_CLK ( GPIO_GIUS | GPIO_PORTA | GPIO_OUT | 0 ) #define PA0_AF_ETMTRACESYNC ( GPIO_PORTA | GPIO_AF | 0 ) -#define PA1_AOUT_SPI2_RXD ( GPIO_PORTA | GPIO_IN | GPIO_AOUT | 1 ) +#define PA1_AOUT_SPI2_RXD ( GPIO_GIUS | GPIO_PORTA | GPIO_IN | 1 ) #define PA1_PF_TIN ( GPIO_PORTA | GPIO_PF | 1 ) #define PA2_PF_PWM0 ( GPIO_PORTA | GPIO_OUT | GPIO_PF | 2 ) #define PA3_PF_CSI_MCLK ( GPIO_PORTA | GPIO_PF | 3 ) @@ -123,7 +137,7 @@ #define PA15_PF_I2C_SDA ( GPIO_PORTA | GPIO_OUT | GPIO_PF | 15 ) #define PA16_PF_I2C_SCL ( GPIO_PORTA | GPIO_OUT | GPIO_PF | 16 ) #define PA17_AF_ETMTRACEPKT4 ( GPIO_PORTA | GPIO_AF | 17 ) -#define PA17_AIN_SPI2_SS ( GPIO_PORTA | GPIO_AIN | 17 ) +#define PA17_AIN_SPI2_SS ( GPIO_GIUS | GPIO_PORTA | GPIO_OUT | 17 ) #define PA18_AF_ETMTRACEPKT5 ( GPIO_PORTA | GPIO_AF | 18 ) #define PA19_AF_ETMTRACEPKT6 ( GPIO_PORTA | GPIO_AF | 19 ) #define PA20_AF_ETMTRACEPKT7 ( GPIO_PORTA | GPIO_AF | 20 ) @@ -191,19 +205,27 @@ #define PC15_PF_SPI1_SS ( GPIO_PORTC | GPIO_PF | 15 ) #define PC16_PF_SPI1_MISO ( GPIO_PORTC | GPIO_PF | 16 ) #define PC17_PF_SPI1_MOSI ( GPIO_PORTC | GPIO_PF | 17 ) +#define PC24_BIN_UART3_RI ( GPIO_GIUS | GPIO_PORTC | GPIO_OUT | GPIO_BIN | 24 ) +#define PC25_BIN_UART3_DSR ( GPIO_GIUS | GPIO_PORTC | GPIO_OUT | GPIO_BIN | 25 ) +#define PC26_AOUT_UART3_DTR ( GPIO_GIUS | GPIO_PORTC | GPIO_IN | 26 ) +#define PC27_BIN_UART3_DCD ( GPIO_GIUS | GPIO_PORTC | GPIO_OUT | GPIO_BIN | 27 ) +#define PC28_BIN_UART3_CTS ( GPIO_GIUS | GPIO_PORTC | GPIO_OUT | GPIO_BIN | 28 ) +#define PC29_AOUT_UART3_RTS ( GPIO_GIUS | GPIO_PORTC | GPIO_IN | 29 ) +#define PC30_BIN_UART3_TX ( GPIO_GIUS | GPIO_PORTC | GPIO_BIN | 30 ) +#define PC31_AOUT_UART3_RX ( GPIO_GIUS | GPIO_PORTC | GPIO_IN | 31) #define PD6_PF_LSCLK ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 6 ) #define PD7_PF_REV ( GPIO_PORTD | GPIO_PF | 7 ) -#define PD7_AF_UART2_DTR ( GPIO_PORTD | GPIO_IN | GPIO_AF | 7 ) -#define PD7_AIN_SPI2_SCLK ( GPIO_PORTD | GPIO_AIN | 7 ) +#define PD7_AF_UART2_DTR ( GPIO_GIUS | GPIO_PORTD | GPIO_IN | GPIO_AF | 7 ) +#define PD7_AIN_SPI2_SCLK ( GPIO_GIUS | GPIO_PORTD | GPIO_AIN | 7 ) #define PD8_PF_CLS ( GPIO_PORTD | GPIO_PF | 8 ) #define PD8_AF_UART2_DCD ( GPIO_PORTD | GPIO_OUT | GPIO_AF | 8 ) -#define PD8_AIN_SPI2_SS ( GPIO_PORTD | GPIO_AIN | 8 ) +#define PD8_AIN_SPI2_SS ( GPIO_GIUS | GPIO_PORTD | GPIO_AIN | 8 ) #define PD9_PF_PS ( GPIO_PORTD | GPIO_PF | 9 ) #define PD9_AF_UART2_RI ( GPIO_PORTD | GPIO_OUT | GPIO_AF | 9 ) -#define PD9_AOUT_SPI2_RXD ( GPIO_PORTD | GPIO_IN | GPIO_AOUT | 9 ) +#define PD9_AOUT_SPI2_RXD ( GPIO_GIUS | GPIO_PORTD | GPIO_IN | 9 ) #define PD10_PF_SPL_SPR ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 10 ) #define PD10_AF_UART2_DSR ( GPIO_PORTD | GPIO_OUT | GPIO_AF | 10 ) -#define PD10_AIN_SPI2_TXD ( GPIO_PORTD | GPIO_OUT | GPIO_AIN | 10 ) +#define PD10_AIN_SPI2_TXD ( GPIO_GIUS | GPIO_PORTD | GPIO_OUT | 10 ) #define PD11_PF_CONTRAST ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 11 ) #define PD12_PF_ACD_OE ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 12 ) #define PD13_PF_LP_HSYNC ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 13 ) @@ -225,7 +247,7 @@ #define PD29_PF_LD14 ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 29 ) #define PD30_PF_LD15 ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 30 ) #define PD31_PF_TMR2OUT ( GPIO_PORTD | GPIO_PF | 31 ) -#define PD31_BIN_SPI2_TXD ( GPIO_PORTD | GPIO_BIN | 31 ) +#define PD31_BIN_SPI2_TXD ( GPIO_GIUS | GPIO_PORTD | GPIO_BIN | 
31 ) /* * PWM controller diff --git a/include/asm-arm/arch-ixp4xx/entry-macro.S b/include/asm-arm/arch-ixp4xx/entry-macro.S index 455da64832de..323b0bc4a39c 100644 --- a/include/asm-arm/arch-ixp4xx/entry-macro.S +++ b/include/asm-arm/arch-ixp4xx/entry-macro.S @@ -15,25 +15,26 @@ ldr \irqstat, =(IXP4XX_INTC_BASE_VIRT+IXP4XX_ICIP_OFFSET) ldr \irqstat, [\irqstat] @ get interrupts cmp \irqstat, #0 - beq 1001f + beq 1001f @ upper IRQ? clz \irqnr, \irqstat mov \base, #31 - subs \irqnr, \base, \irqnr + sub \irqnr, \base, \irqnr + b 1002f @ lower IRQ being + @ handled 1001: /* * IXP465 has an upper IRQ status register */ #if defined(CONFIG_CPU_IXP46X) - bne 1002f ldr \irqstat, =(IXP4XX_INTC_BASE_VIRT+IXP4XX_ICIP2_OFFSET) ldr \irqstat, [\irqstat] @ get upper interrupts mov \irqnr, #63 clz \irqstat, \irqstat cmp \irqstat, #32 subne \irqnr, \irqnr, \irqstat -1002: #endif +1002: .endm diff --git a/include/asm-arm/arch-ixp4xx/hardware.h b/include/asm-arm/arch-ixp4xx/hardware.h index 4ac964b9078a..55d85eea8c1a 100644 --- a/include/asm-arm/arch-ixp4xx/hardware.h +++ b/include/asm-arm/arch-ixp4xx/hardware.h @@ -27,7 +27,7 @@ #define pcibios_assign_all_busses() 1 -#if defined(CONFIG_CPU_IXP465) && !defined(__ASSEMBLY__) +#if defined(CONFIG_CPU_IXP46X) && !defined(__ASSEMBLY__) extern unsigned int processor_id; #define cpu_is_ixp465() ((processor_id & 0xffffffc0) == 0x69054200) #else diff --git a/include/asm-arm/arch-ixp4xx/platform.h b/include/asm-arm/arch-ixp4xx/platform.h index d13ee7f78c70..f14ed63590c3 100644 --- a/include/asm-arm/arch-ixp4xx/platform.h +++ b/include/asm-arm/arch-ixp4xx/platform.h @@ -93,7 +93,7 @@ extern struct pci_bus *ixp4xx_scan_bus(int nr, struct pci_sys_data *sys); static inline void gpio_line_config(u8 line, u32 direction) { - if (direction == IXP4XX_GPIO_OUT) + if (direction == IXP4XX_GPIO_IN) *IXP4XX_GPIO_GPOER |= (1 << line); else *IXP4XX_GPIO_GPOER &= ~(1 << line); diff --git a/include/asm-arm/arch-pxa/pxa-regs.h b/include/asm-arm/arch-pxa/pxa-regs.h index 939d9e5020a0..3af7165ab0d7 100644 --- a/include/asm-arm/arch-pxa/pxa-regs.h +++ b/include/asm-arm/arch-pxa/pxa-regs.h @@ -126,8 +126,8 @@ #define DRCMR12 __REG(0x40000130) /* Request to Channel Map Register for AC97 audio transmit Request */ #define DRCMR13 __REG(0x40000134) /* Request to Channel Map Register for SSP receive Request */ #define DRCMR14 __REG(0x40000138) /* Request to Channel Map Register for SSP transmit Request */ -#define DRCMR15 __REG(0x4000013c) /* Reserved */ -#define DRCMR16 __REG(0x40000140) /* Reserved */ +#define DRCMR15 __REG(0x4000013c) /* Request to Channel Map Register for SSP2 receive Request */ +#define DRCMR16 __REG(0x40000140) /* Request to Channel Map Register for SSP2 transmit Request */ #define DRCMR17 __REG(0x40000144) /* Request to Channel Map Register for ICP receive Request */ #define DRCMR18 __REG(0x40000148) /* Request to Channel Map Register for ICP transmit Request */ #define DRCMR19 __REG(0x4000014c) /* Request to Channel Map Register for STUART receive Request */ @@ -151,7 +151,8 @@ #define DRCMR37 __REG(0x40000194) /* Request to Channel Map Register for USB endpoint 13 Request */ #define DRCMR38 __REG(0x40000198) /* Request to Channel Map Register for USB endpoint 14 Request */ #define DRCMR39 __REG(0x4000019C) /* Reserved */ - +#define DRCMR66 __REG(0x40001108) /* Request to Channel Map Register for SSP3 receive Request */ +#define DRCMR67 __REG(0x4000110C) /* Request to Channel Map Register for SSP3 transmit Request */ #define DRCMR68 __REG(0x40001110) /* Request to Channel 
Map Register for Camera FIFO 0 Request */ #define DRCMR69 __REG(0x40001114) /* Request to Channel Map Register for Camera FIFO 1 Request */ #define DRCMR70 __REG(0x40001118) /* Request to Channel Map Register for Camera FIFO 2 Request */ @@ -652,7 +653,7 @@ #define UDCCS_IO_RFS (1 << 0) /* Receive FIFO service */ #define UDCCS_IO_RPC (1 << 1) /* Receive packet complete */ -#define UDCCS_IO_ROF (1 << 3) /* Receive overflow */ +#define UDCCS_IO_ROF (1 << 2) /* Receive overflow */ #define UDCCS_IO_DME (1 << 3) /* DMA enable */ #define UDCCS_IO_RNE (1 << 6) /* Receive FIFO not empty */ #define UDCCS_IO_RSP (1 << 7) /* Receive short packet */ diff --git a/include/asm-arm/arch-pxa/pxafb.h b/include/asm-arm/arch-pxa/pxafb.h index 21c0e16dce5f..aba9b30f4249 100644 --- a/include/asm-arm/arch-pxa/pxafb.h +++ b/include/asm-arm/arch-pxa/pxafb.h @@ -66,4 +66,5 @@ struct pxafb_mach_info { }; void set_pxa_fb_info(struct pxafb_mach_info *hard_pxa_fb_info); +void set_pxa_fb_parent(struct device *parent_dev); unsigned long pxafb_get_hsync_time(struct device *dev); diff --git a/include/asm-arm/arch-rpc/hardware.h b/include/asm-arm/arch-rpc/hardware.h index be9754a05c19..9d7f87375aa7 100644 --- a/include/asm-arm/arch-rpc/hardware.h +++ b/include/asm-arm/arch-rpc/hardware.h @@ -15,7 +15,7 @@ #include #ifndef __ASSEMBLY__ -#define IOMEM(x) ((void __iomem *)(x)) +#define IOMEM(x) ((void __iomem *)(unsigned long)(x)) #else #define IOMEM(x) x #endif /* __ASSEMBLY__ */ @@ -52,7 +52,7 @@ /* * IO Addresses */ -#define VIDC_BASE (void __iomem *)0xe0400000 +#define VIDC_BASE IOMEM(0xe0400000) #define EXPMASK_BASE 0xe0360000 #define IOMD_BASE IOMEM(0xe0200000) #define IOC_BASE IOMEM(0xe0200000) diff --git a/include/asm-arm/arch-s3c2410/anubis-map.h b/include/asm-arm/arch-s3c2410/anubis-map.h index 97741d6e506a..d529ffda8599 100644 --- a/include/asm-arm/arch-s3c2410/anubis-map.h +++ b/include/asm-arm/arch-s3c2410/anubis-map.h @@ -20,22 +20,22 @@ /* start peripherals off after the S3C2410 */ -#define ANUBIS_IOADDR(x) (S3C2410_ADDR((x) + 0x02000000)) +#define ANUBIS_IOADDR(x) (S3C2410_ADDR((x) + 0x01800000)) #define ANUBIS_PA_CPLD (S3C2410_CS1 | (1<<26)) /* we put the CPLD registers next, to get them out of the way */ -#define ANUBIS_VA_CTRL1 ANUBIS_IOADDR(0x00000000) /* 0x01300000 */ +#define ANUBIS_VA_CTRL1 ANUBIS_IOADDR(0x00000000) /* 0x01800000 */ #define ANUBIS_PA_CTRL1 (ANUBIS_PA_CPLD) -#define ANUBIS_VA_CTRL2 ANUBIS_IOADDR(0x00100000) /* 0x01400000 */ +#define ANUBIS_VA_CTRL2 ANUBIS_IOADDR(0x00100000) /* 0x01900000 */ #define ANUBIS_PA_CTRL2 (ANUBIS_PA_CPLD) -#define ANUBIS_VA_CTRL3 ANUBIS_IOADDR(0x00200000) /* 0x01500000 */ +#define ANUBIS_VA_CTRL3 ANUBIS_IOADDR(0x00200000) /* 0x01A00000 */ #define ANUBIS_PA_CTRL3 (ANUBIS_PA_CPLD) -#define ANUBIS_VA_CTRL4 ANUBIS_IOADDR(0x00300000) /* 0x01600000 */ +#define ANUBIS_VA_CTRL4 ANUBIS_IOADDR(0x00300000) /* 0x01B00000 */ #define ANUBIS_PA_CTRL4 (ANUBIS_PA_CPLD) #define ANUBIS_IDEPRI ANUBIS_IOADDR(0x01000000) diff --git a/include/asm-arm/arch-s3c2410/hardware.h b/include/asm-arm/arch-s3c2410/hardware.h index 48a39918a760..1c9de29cafef 100644 --- a/include/asm-arm/arch-s3c2410/hardware.h +++ b/include/asm-arm/arch-s3c2410/hardware.h @@ -92,6 +92,13 @@ extern unsigned int s3c2410_gpio_getpin(unsigned int pin); extern unsigned int s3c2410_modify_misccr(unsigned int clr, unsigned int chg); +#ifdef CONFIG_CPU_S3C2440 + +extern int s3c2440_set_dsc(unsigned int pin, unsigned int value); + +#endif /* CONFIG_CPU_S3C2440 */ + + #endif /* __ASSEMBLY__ */ #include diff --git 
a/include/asm-arm/arch-s3c2410/io.h b/include/asm-arm/arch-s3c2410/io.h index 418233a7ee6f..4bf272ed9add 100644 --- a/include/asm-arm/arch-s3c2410/io.h +++ b/include/asm-arm/arch-s3c2410/io.h @@ -9,7 +9,7 @@ * 06-Dec-1997 RMK Created. * 02-Sep-2003 BJD Modified for S3C2410 * 10-Mar-2005 LCVR Changed S3C2410_VA to S3C24XX_VA - * + * 13-Oct-2005 BJD Fixed problems with LDRH/STRH offset range */ #ifndef __ASM_ARM_ARCH_IO_H @@ -97,7 +97,7 @@ DECLARE_IO(int,l,"") else \ __asm__ __volatile__( \ "strb %0, [%1, #0] @ outbc" \ - : : "r" (value), "r" ((port))); \ + : : "r" (value), "r" ((port))); \ }) #define __inbc(port) \ @@ -110,35 +110,61 @@ DECLARE_IO(int,l,"") else \ __asm__ __volatile__( \ "ldrb %0, [%1, #0] @ inbc" \ - : "=r" (result) : "r" ((port))); \ + : "=r" (result) : "r" ((port))); \ result; \ }) #define __outwc(value,port) \ ({ \ unsigned long v = value; \ - if (__PORT_PCIO((port))) \ - __asm__ __volatile__( \ - "strh %0, [%1, %2] @ outwc" \ - : : "r" (v), "r" (PCIO_BASE), "Jr" ((port))); \ - else \ + if (__PORT_PCIO((port))) { \ + if ((port) < 256 && (port) > -256) \ + __asm__ __volatile__( \ + "strh %0, [%1, %2] @ outwc" \ + : : "r" (v), "r" (PCIO_BASE), "Jr" ((port))); \ + else if ((port) > 0) \ + __asm__ __volatile__( \ + "strh %0, [%1, %2] @ outwc" \ + : : "r" (v), \ + "r" (PCIO_BASE + ((port) & ~0xff)), \ + "Jr" (((port) & 0xff))); \ + else \ + __asm__ __volatile__( \ + "strh %0, [%1, #0] @ outwc" \ + : : "r" (v), \ + "r" (PCIO_BASE + (port))); \ + } else \ __asm__ __volatile__( \ "strh %0, [%1, #0] @ outwc" \ - : : "r" (v), "r" ((port))); \ + : : "r" (v), "r" ((port))); \ }) #define __inwc(port) \ ({ \ unsigned short result; \ - if (__PORT_PCIO((port))) \ - __asm__ __volatile__( \ - "ldrh %0, [%1, %2] @ inwc" \ - : "=r" (result) : "r" (PCIO_BASE), "Jr" ((port))); \ - else \ + if (__PORT_PCIO((port))) { \ + if ((port) < 256 && (port) > -256 ) \ + __asm__ __volatile__( \ + "ldrh %0, [%1, %2] @ inwc" \ + : "=r" (result) \ + : "r" (PCIO_BASE), \ + "Jr" ((port))); \ + else if ((port) > 0) \ + __asm__ __volatile__( \ + "ldrh %0, [%1, %2] @ inwc" \ + : "=r" (result) \ + : "r" (PCIO_BASE + ((port) & ~0xff)), \ + "Jr" (((port) & 0xff))); \ + else \ + __asm__ __volatile__( \ + "ldrh %0, [%1, #0] @ inwc" \ + : "=r" (result) \ + : "r" (PCIO_BASE + ((port)))); \ + } else \ __asm__ __volatile__( \ "ldrh %0, [%1, #0] @ inwc" \ - : "=r" (result) : "r" ((port))); \ - result; \ + : "=r" (result) : "r" ((port))); \ + result; \ }) #define __outlc(value,port) \ diff --git a/include/asm-arm/arch-s3c2410/regs-clock.h b/include/asm-arm/arch-s3c2410/regs-clock.h index 16f4c3cc1388..34360706e016 100644 --- a/include/asm-arm/arch-s3c2410/regs-clock.h +++ b/include/asm-arm/arch-s3c2410/regs-clock.h @@ -18,7 +18,9 @@ * 10-Feb-2005 Ben Dooks Fixed CAMDIVN address (Guillaume Gourat) * 10-Mar-2005 Lucas Villa Real Changed S3C2410_VA to S3C24XX_VA * 27-Aug-2005 Ben Dooks Add clock-slow info - */ + * 20-Oct-2005 Ben Dooks Fixed overflow in PLL (Guillaume Gourat) + * 20-Oct-2005 Ben Dooks Add masks for DCLK (Guillaume Gourat) +*/ #ifndef __ASM_ARM_REGS_CLOCK #define __ASM_ARM_REGS_CLOCK "$Id: clock.h,v 1.4 2003/04/30 14:50:51 ben Exp $" @@ -66,11 +68,16 @@ #define S3C2410_DCLKCON_DCLK0_UCLK (1<<1) #define S3C2410_DCLKCON_DCLK0_DIV(x) (((x) - 1 )<<4) #define S3C2410_DCLKCON_DCLK0_CMP(x) (((x) - 1 )<<8) +#define S3C2410_DCLKCON_DCLK0_DIV_MASK ((0xf)<<4) +#define S3C2410_DCLKCON_DCLK0_CMP_MASK ((0xf)<<8) #define S3C2410_DCLKCON_DCLK1EN (1<<16) #define S3C2410_DCLKCON_DCLK1_PCLK (0<<17) #define 
S3C2410_DCLKCON_DCLK1_UCLK (1<<17) #define S3C2410_DCLKCON_DCLK1_DIV(x) (((x) - 1) <<20) +#define S3C2410_DCLKCON_DCLK1_CMP(x) (((x) - 1) <<24) +#define S3C2410_DCLKCON_DCLK1_DIV_MASK ((0xf) <<20) +#define S3C2410_DCLKCON_DCLK1_CMP_MASK ((0xf) <<24) #define S3C2410_CLKDIVN_PDIVN (1<<0) #define S3C2410_CLKDIVN_HDIVN (1<<1) @@ -83,10 +90,13 @@ #ifndef __ASSEMBLY__ +#include + static inline unsigned int -s3c2410_get_pll(int pllval, int baseclk) +s3c2410_get_pll(unsigned int pllval, unsigned int baseclk) { - int mdiv, pdiv, sdiv; + unsigned int mdiv, pdiv, sdiv; + uint64_t fvco; mdiv = pllval >> S3C2410_PLLCON_MDIVSHIFT; pdiv = pllval >> S3C2410_PLLCON_PDIVSHIFT; @@ -96,7 +106,10 @@ s3c2410_get_pll(int pllval, int baseclk) pdiv &= S3C2410_PLLCON_PDIVMASK; sdiv &= S3C2410_PLLCON_SDIVMASK; - return (baseclk * (mdiv + 8)) / ((pdiv + 2) << sdiv); + fvco = (uint64_t)baseclk * (mdiv + 8); + do_div(fvco, (pdiv + 2) << sdiv); + + return (unsigned int)fvco; } #endif /* __ASSEMBLY__ */ diff --git a/include/asm-arm/arch-versatile/io.h b/include/asm-arm/arch-versatile/io.h index 9f895bf61494..47e904cf25c7 100644 --- a/include/asm-arm/arch-versatile/io.h +++ b/include/asm-arm/arch-versatile/io.h @@ -22,7 +22,11 @@ #define IO_SPACE_LIMIT 0xffffffff -#define __io(a) ((void __iomem *)(a)) +static inline void __iomem *__io(unsigned long addr) +{ + return (void __iomem *)addr; +} +#define __io(a) __io(a) #define __mem_pci(a) (a) #define __mem_isa(a) (a) diff --git a/include/asm-arm/bitops.h b/include/asm-arm/bitops.h index aad7aad026b3..e007dd990da5 100644 --- a/include/asm-arm/bitops.h +++ b/include/asm-arm/bitops.h @@ -347,7 +347,6 @@ static inline unsigned long __ffs(unsigned long word) * the clz instruction for much better code efficiency. */ -static __inline__ int generic_fls(int x); #define fls(x) \ ( __builtin_constant_p(x) ? generic_fls(x) : \ ({ int __r; asm("clz\t%0, %1" : "=r"(__r) : "r"(x) : "cc"); 32-__r; }) ) diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h index d62ade4e4cbb..e3e8541ee63b 100644 --- a/include/asm-arm/dma-mapping.h +++ b/include/asm-arm/dma-mapping.h @@ -70,7 +70,7 @@ static inline int dma_mapping_error(dma_addr_t dma_addr) * device-viewed address. */ extern void * -dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, int gfp); +dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp); /** * dma_free_coherent - free memory allocated by dma_alloc_coherent @@ -117,7 +117,7 @@ int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, * device-viewed address. 
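/*
 * Why s3c2410_get_pll() above now goes through a 64-bit intermediate
 * (numbers are illustrative): with a fast crystal and a large MDIV the
 * product baseclk * (mdiv + 8) no longer fits in 32 bits, so the old
 * expression silently wrapped.  do_div() divides the 64-bit value in place
 * and is the usual way to do 64/32 division in kernel code.
 */
#include <linux/types.h>
#include <asm/div64.h>

static unsigned int pll_rate(unsigned int baseclk, unsigned int mdiv,
			     unsigned int pdiv, unsigned int sdiv)
{
	uint64_t fvco = (uint64_t)baseclk * (mdiv + 8);	/* may exceed 2^32 */

	do_div(fvco, (pdiv + 2) << sdiv);	/* fvco /= divisor, remainder discarded */
	return (unsigned int)fvco;
}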
*/ extern void * -dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int gfp); +dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp); #define dma_free_writecombine(dev,size,cpu_addr,handle) \ dma_free_coherent(dev,size,cpu_addr,handle) diff --git a/include/asm-arm/elf.h b/include/asm-arm/elf.h index a1696ba238d3..7da97a937548 100644 --- a/include/asm-arm/elf.h +++ b/include/asm-arm/elf.h @@ -124,6 +124,8 @@ do { \ if (((ex).e_flags & EF_ARM_EABI_MASK) || \ ((ex).e_flags & EF_ARM_SOFT_FLOAT)) \ set_thread_flag(TIF_USING_IWMMXT); \ + else \ + clear_thread_flag(TIF_USING_IWMMXT); \ } while (0) #endif diff --git a/include/asm-arm/hardware/scoop.h b/include/asm-arm/hardware/scoop.h index 527404b5a8df..a8f1013930e3 100644 --- a/include/asm-arm/hardware/scoop.h +++ b/include/asm-arm/hardware/scoop.h @@ -38,6 +38,8 @@ struct scoop_config { unsigned short io_out; unsigned short io_dir; + unsigned short suspend_clr; + unsigned short suspend_set; }; /* Structure for linking scoop devices to PCMCIA sockets */ diff --git a/include/asm-arm/io.h b/include/asm-arm/io.h index cfa71a0dffb6..5c4ae8f5dbb0 100644 --- a/include/asm-arm/io.h +++ b/include/asm-arm/io.h @@ -136,9 +136,9 @@ extern void __readwrite_bug(const char *fn); /* * String version of IO memory access ops: */ -extern void _memcpy_fromio(void *, void __iomem *, size_t); -extern void _memcpy_toio(void __iomem *, const void *, size_t); -extern void _memset_io(void __iomem *, int, size_t); +extern void _memcpy_fromio(void *, const volatile void __iomem *, size_t); +extern void _memcpy_toio(volatile void __iomem *, const void *, size_t); +extern void _memset_io(volatile void __iomem *, int, size_t); #define mmiowb() diff --git a/include/asm-arm/locks.h b/include/asm-arm/locks.h index f08dc8447913..852220eecdbc 100644 --- a/include/asm-arm/locks.h +++ b/include/asm-arm/locks.h @@ -103,7 +103,7 @@ ({ \ smp_mb(); \ __asm__ __volatile__( \ - "@ up_op_read\n" \ + "@ up_op_write\n" \ "1: ldrex lr, [%0]\n" \ " adds lr, lr, %1\n" \ " strex ip, lr, [%0]\n" \ @@ -231,7 +231,7 @@ #define __up_op_write(ptr,wake) \ ({ \ __asm__ __volatile__( \ - "@ up_op_read\n" \ + "@ up_op_write\n" \ " mrs ip, cpsr\n" \ " orr lr, ip, #128\n" \ " msr cpsr_c, lr\n" \ diff --git a/include/asm-arm/signal.h b/include/asm-arm/signal.h index 760f6e65af05..ced69161917b 100644 --- a/include/asm-arm/signal.h +++ b/include/asm-arm/signal.h @@ -115,7 +115,6 @@ typedef unsigned long sigset_t; #ifdef __KERNEL__ #define SA_TIMER 0x40000000 -#define SA_IRQNOMASK 0x08000000 #endif #include diff --git a/include/asm-cris/dma-mapping.h b/include/asm-cris/dma-mapping.h index 0b5c3fdaefe1..8eff51349ae7 100644 --- a/include/asm-cris/dma-mapping.h +++ b/include/asm-cris/dma-mapping.h @@ -15,14 +15,14 @@ #ifdef CONFIG_PCI void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, int flag); + dma_addr_t *dma_handle, gfp_t flag); void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); #else static inline void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, - int flag) + gfp_t flag) { BUG(); return NULL; diff --git a/include/asm-frv/dma-mapping.h b/include/asm-frv/dma-mapping.h index 0206ab35eae0..5003e017fd1e 100644 --- a/include/asm-frv/dma-mapping.h +++ b/include/asm-frv/dma-mapping.h @@ -13,7 +13,7 @@ extern unsigned long __nongprelbss dma_coherent_mem_start; extern unsigned long __nongprelbss dma_coherent_mem_end; -void *dma_alloc_coherent(struct 
device *dev, size_t size, dma_addr_t *dma_handle, int gfp); +void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp); void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); /* diff --git a/include/asm-frv/pci.h b/include/asm-frv/pci.h index b4efe5e3591a..1168451c275f 100644 --- a/include/asm-frv/pci.h +++ b/include/asm-frv/pci.h @@ -32,7 +32,7 @@ extern void pcibios_set_master(struct pci_dev *dev); extern void pcibios_penalize_isa_irq(int irq); #ifdef CONFIG_MMU -extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle); +extern void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle); extern void consistent_free(void *vaddr); extern void consistent_sync(void *vaddr, size_t size, int direction); extern void consistent_sync_page(struct page *page, unsigned long offset, diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h index fd9de9502dff..a7f1a55ce6b0 100644 --- a/include/asm-generic/dma-mapping-broken.h +++ b/include/asm-generic/dma-mapping-broken.h @@ -6,7 +6,7 @@ static inline void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, - int flag) + gfp_t flag) { BUG(); return NULL; diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h index 8cef663c5cd9..747d790295f3 100644 --- a/include/asm-generic/dma-mapping.h +++ b/include/asm-generic/dma-mapping.h @@ -35,7 +35,7 @@ dma_set_mask(struct device *dev, u64 dma_mask) static inline void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, - unsigned int __nocast flag) + gfp_t flag) { BUG_ON(dev->bus != &pci_bus_type); @@ -168,7 +168,7 @@ dma_set_mask(struct device *dev, u64 dma_mask) static inline void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, - unsigned int __nocast flag) + gfp_t flag) { BUG(); return NULL; diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index f86c1e549466..ff28c8b31f58 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -158,6 +158,19 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres #define lazy_mmu_prot_update(pte) do { } while (0) #endif +#ifndef __HAVE_ARCH_MULTIPLE_ZERO_PAGE +#define move_pte(pte, prot, old_addr, new_addr) (pte) +#else +#define move_pte(pte, prot, old_addr, new_addr) \ +({ \ + pte_t newpte = (pte); \ + if (pte_present(pte) && pfn_valid(pte_pfn(pte)) && \ + pte_page(pte) == ZERO_PAGE(old_addr)) \ + newpte = mk_pte(ZERO_PAGE(new_addr), (prot)); \ + newpte; \ +}) +#endif + /* * When walking page tables, get the address of the next boundary, * or the end address of the range if that comes earlier. 
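/*
 * What the new move_pte() hook above is for, in sketch form (the helper
 * below is illustrative; the real user is the mremap page-table walk): on
 * an architecture with one zero page per cache colour, a PTE that mapped
 * ZERO_PAGE(old_addr) must be re-pointed at ZERO_PAGE(new_addr) when the
 * mapping moves, otherwise the wrong colour ends up mapped at the new
 * address.  Architectures without __HAVE_ARCH_MULTIPLE_ZERO_PAGE get a
 * no-op.
 */
#include <linux/mm.h>
#include <asm/pgtable.h>

static void example_move_one_pte(struct mm_struct *mm, pte_t pte,
		unsigned long old_addr, unsigned long new_addr,
		pte_t *new_ptep, pgprot_t prot)
{
	pte = move_pte(pte, prot, old_addr, new_addr);	/* retarget zero-page PTEs */
	set_pte_at(mm, new_addr, new_ptep, pte);
}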
Although no diff --git a/include/asm-i386/dma-mapping.h b/include/asm-i386/dma-mapping.h index 563964b2995b..e56c335f8ef9 100644 --- a/include/asm-i386/dma-mapping.h +++ b/include/asm-i386/dma-mapping.h @@ -11,7 +11,7 @@ #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, unsigned int __nocast flag); + dma_addr_t *dma_handle, gfp_t flag); void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); diff --git a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h index 4ac84cc6f01a..622815bf3243 100644 --- a/include/asm-i386/hw_irq.h +++ b/include/asm-i386/hw_irq.h @@ -18,6 +18,8 @@ #include #include +struct hw_interrupt_type; + /* * Various low-level irq details needed by irq.c, process.c, * time.c, io_apic.c and smp.c diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h index 79e89a7db566..a2f6ac5aef7d 100644 --- a/include/asm-ia64/machvec.h +++ b/include/asm-ia64/machvec.h @@ -37,7 +37,7 @@ typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val, /* DMA-mapping interface: */ typedef void ia64_mv_dma_init (void); -typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, int); +typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, gfp_t); typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t); typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int); typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int); diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h index 97a28b8b2ddd..c7d9c9ed38ba 100644 --- a/include/asm-ia64/mca.h +++ b/include/asm-ia64/mca.h @@ -80,7 +80,12 @@ struct ia64_sal_os_state { u64 sal_ra; /* Return address in SAL, physical */ u64 sal_gp; /* GP of the SAL - physical */ pal_min_state_area_t *pal_min_state; /* from R17. physical in asm, virtual in C */ + /* Previous values of IA64_KR(CURRENT) and IA64_KR(CURRENT_STACK). + * Note: if the MCA/INIT recovery code wants to resume to a new context + * then it must change these values to reflect the new kernel stack. + */ u64 prev_IA64_KR_CURRENT; /* previous value of IA64_KR(CURRENT) */ + u64 prev_IA64_KR_CURRENT_STACK; struct task_struct *prev_task; /* previous task, NULL if it is not useful */ /* Some interrupt registers are not saved in minstate, pt_regs or * switch_stack. 
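/*
 * The bare "struct hw_interrupt_type;" line added to asm-i386/hw_irq.h
 * above is a standard forward declaration: a header that only passes
 * pointers to a type around does not need the full definition, and
 * declaring the tag avoids depending on include order.  Generic sketch
 * (names are illustrative):
 */
struct widget;					/* opaque as far as this header cares */

extern void widget_register(struct widget *w);	/* pointers need no definition */
/* sizeof(struct widget) or w->field would still require the real header */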
Because MCA/INIT can occur when interrupts are diff --git a/include/asm-ia64/uaccess.h b/include/asm-ia64/uaccess.h index 3a7829bb5954..9adb51211c22 100644 --- a/include/asm-ia64/uaccess.h +++ b/include/asm-ia64/uaccess.h @@ -187,8 +187,8 @@ extern void __get_user_unknown (void); ({ \ const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ __typeof__ (size) __gu_size = (size); \ - long __gu_err = -EFAULT, __gu_val = 0; \ - \ + long __gu_err = -EFAULT; \ + unsigned long __gu_val = 0; \ if (!check || __access_ok(__gu_ptr, size, segment)) \ switch (__gu_size) { \ case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break; \ @@ -240,13 +240,13 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use static inline unsigned long __copy_to_user (void __user *to, const void *from, unsigned long count) { - return __copy_user(to, (void __user *) from, count); + return __copy_user(to, (__force void __user *) from, count); } static inline unsigned long __copy_from_user (void *to, const void __user *from, unsigned long count) { - return __copy_user((void __user *) to, from, count); + return __copy_user((__force void __user *) to, from, count); } #define __copy_to_user_inatomic __copy_to_user @@ -258,7 +258,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) long __cu_len = (n); \ \ if (__access_ok(__cu_to, __cu_len, get_fs())) \ - __cu_len = __copy_user(__cu_to, (void __user *) __cu_from, __cu_len); \ + __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \ __cu_len; \ }) @@ -270,7 +270,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) \ __chk_user_ptr(__cu_from); \ if (__access_ok(__cu_from, __cu_len, get_fs())) \ - __cu_len = __copy_user((void __user *) __cu_to, __cu_from, __cu_len); \ + __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \ __cu_len; \ }) diff --git a/include/asm-m32r/dma-mapping.h b/include/asm-m32r/dma-mapping.h index 3a2db28834b6..a7fa0302bda7 100644 --- a/include/asm-m32r/dma-mapping.h +++ b/include/asm-m32r/dma-mapping.h @@ -8,7 +8,7 @@ static inline void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, - int flag) + gfp_t flag) { return (void *)NULL; } diff --git a/include/asm-m32r/io.h b/include/asm-m32r/io.h index 8e9e481e6996..70ad1c949c2b 100644 --- a/include/asm-m32r/io.h +++ b/include/asm-m32r/io.h @@ -60,7 +60,7 @@ __ioremap(unsigned long offset, unsigned long size, unsigned long flags); * address. */ -static inline void * ioremap(unsigned long offset, unsigned long size) +static inline void __iomem *ioremap(unsigned long offset, unsigned long size) { return __ioremap(offset, size, 0); } diff --git a/include/asm-m32r/uaccess.h b/include/asm-m32r/uaccess.h index 93d863c455a1..0da7c47d2f01 100644 --- a/include/asm-m32r/uaccess.h +++ b/include/asm-m32r/uaccess.h @@ -208,7 +208,8 @@ extern void __get_user_4(void); * On error, the variable @x is set to zero. 
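/*
 * The __force casts added to the ia64 __copy_to_user()/__copy_from_user()
 * wrappers above follow the usual sparse idiom (the helper below is
 * illustrative): converting between plain kernel pointers and __user
 * pointers is normally reported as an address-space violation, and __force
 * marks the spots where the conversion is known to be intentional.
 */
#include <linux/compiler.h>

static void __user *as_user_ptr(void *kaddr)
{
	return (__force void __user *)kaddr;	/* deliberate address-space change */
}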
*/ #define get_user(x,ptr) \ -({ int __ret_gu,__val_gu; \ +({ int __ret_gu; \ + unsigned long __val_gu; \ __chk_user_ptr(ptr); \ switch(sizeof (*(ptr))) { \ case 1: __get_user_x(1,__ret_gu,__val_gu,ptr); break; \ @@ -403,7 +404,8 @@ struct __large_struct { unsigned long buf[100]; }; #define __get_user_nocheck(x,ptr,size) \ ({ \ - long __gu_err, __gu_val; \ + long __gu_err; \ + unsigned long __gu_val; \ __get_user_size(__gu_val,(ptr),(size),__gu_err); \ (x) = (__typeof__(*(ptr)))__gu_val; \ __gu_err; \ @@ -594,8 +596,8 @@ static inline unsigned long __generic_copy_to_user_nocheck(void __user *to, return n; } -unsigned long __generic_copy_to_user(void *, const void *, unsigned long); -unsigned long __generic_copy_from_user(void *, const void *, unsigned long); +unsigned long __generic_copy_to_user(void __user *, const void *, unsigned long); +unsigned long __generic_copy_from_user(void *, const void __user *, unsigned long); /** * __copy_to_user: - Copy a block of data into user space, with less checking. diff --git a/include/asm-mips/dma-mapping.h b/include/asm-mips/dma-mapping.h index af28dc88930b..43288634c38a 100644 --- a/include/asm-mips/dma-mapping.h +++ b/include/asm-mips/dma-mapping.h @@ -5,13 +5,13 @@ #include void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, int flag); + dma_addr_t *dma_handle, gfp_t flag); void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, int flag); + dma_addr_t *dma_handle, gfp_t flag); void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h index cbd1672c94cb..eaf5d9b3a0e1 100644 --- a/include/asm-mips/pgtable.h +++ b/include/asm-mips/pgtable.h @@ -68,6 +68,8 @@ extern unsigned long zero_page_mask; #define ZERO_PAGE(vaddr) \ (virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))) +#define __HAVE_ARCH_MULTIPLE_ZERO_PAGE + extern void paging_init(void); /* diff --git a/include/asm-mips/sgi/hpc3.h b/include/asm-mips/sgi/hpc3.h index ac3dfc7af5b0..fcec52bafb25 100644 --- a/include/asm-mips/sgi/hpc3.h +++ b/include/asm-mips/sgi/hpc3.h @@ -128,26 +128,26 @@ struct hpc3_ethregs { volatile u32 rx_gfptr; /* current GIO fifo ptr */ volatile u32 rx_dfptr; /* current device fifo ptr */ u32 _unused1; /* padding */ - volatile u32 rx_reset; /* reset register */ -#define HPC3_ERXRST_CRESET 0x1 /* Reset dma channel and external controller */ -#define HPC3_ERXRST_CLRIRQ 0x2 /* Clear channel interrupt */ -#define HPC3_ERXRST_LBACK 0x4 /* Enable diagnostic loopback mode of Seeq8003 */ + volatile u32 reset; /* reset register */ +#define HPC3_ERST_CRESET 0x1 /* Reset dma channel and external controller */ +#define HPC3_ERST_CLRIRQ 0x2 /* Clear channel interrupt */ +#define HPC3_ERST_LBACK 0x4 /* Enable diagnostic loopback mode of Seeq8003 */ - volatile u32 rx_dconfig; /* DMA configuration register */ -#define HPC3_ERXDCFG_D1 0x0000f /* Cycles to spend in D1 state for PIO */ -#define HPC3_ERXDCFG_D2 0x000f0 /* Cycles to spend in D2 state for PIO */ -#define HPC3_ERXDCFG_D3 0x00f00 /* Cycles to spend in D3 state for PIO */ -#define HPC3_ERXDCFG_WCTRL 0x01000 /* Enable writes of desc into ex ctrl port */ -#define HPC3_ERXDCFG_FRXDC 0x02000 /* Clear eop stat bits upon rxdc, hw seeq fix */ -#define HPC3_ERXDCFG_FEOP 0x04000 /* Bad packet marker timeout enable */ -#define HPC3_ERXDCFG_FIRQ 0x08000 /* 
Another bad packet timeout enable */ -#define HPC3_ERXDCFG_PTO 0x30000 /* Programmed timeout value for above two */ + volatile u32 dconfig; /* DMA configuration register */ +#define HPC3_EDCFG_D1 0x0000f /* Cycles to spend in D1 state for PIO */ +#define HPC3_EDCFG_D2 0x000f0 /* Cycles to spend in D2 state for PIO */ +#define HPC3_EDCFG_D3 0x00f00 /* Cycles to spend in D3 state for PIO */ +#define HPC3_EDCFG_WCTRL 0x01000 /* Enable writes of desc into ex ctrl port */ +#define HPC3_EDCFG_FRXDC 0x02000 /* Clear eop stat bits upon rxdc, hw seeq fix */ +#define HPC3_EDCFG_FEOP 0x04000 /* Bad packet marker timeout enable */ +#define HPC3_EDCFG_FIRQ 0x08000 /* Another bad packet timeout enable */ +#define HPC3_EDCFG_PTO 0x30000 /* Programmed timeout value for above two */ - volatile u32 rx_pconfig; /* PIO configuration register */ -#define HPC3_ERXPCFG_P1 0x000f /* Cycles to spend in P1 state for PIO */ -#define HPC3_ERXPCFG_P2 0x00f0 /* Cycles to spend in P2 state for PIO */ -#define HPC3_ERXPCFG_P3 0x0f00 /* Cycles to spend in P3 state for PIO */ -#define HPC3_ERXPCFG_TST 0x1000 /* Diagnistic ram test feature bit */ + volatile u32 pconfig; /* PIO configuration register */ +#define HPC3_EPCFG_P1 0x000f /* Cycles to spend in P1 state for PIO */ +#define HPC3_EPCFG_P2 0x00f0 /* Cycles to spend in P2 state for PIO */ +#define HPC3_EPCFG_P3 0x0f00 /* Cycles to spend in P3 state for PIO */ +#define HPC3_EPCFG_TST 0x1000 /* Diagnistic ram test feature bit */ u32 _unused2[0x1000/4 - 8]; /* padding */ diff --git a/include/asm-parisc/dma-mapping.h b/include/asm-parisc/dma-mapping.h index 4db84f969e9e..74d4ac6f2151 100644 --- a/include/asm-parisc/dma-mapping.h +++ b/include/asm-parisc/dma-mapping.h @@ -9,8 +9,8 @@ /* See Documentation/DMA-mapping.txt */ struct hppa_dma_ops { int (*dma_supported)(struct device *dev, u64 mask); - void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, int flag); - void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, int flag); + void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag); + void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag); void (*free_consistent)(struct device *dev, size_t size, void *vaddr, dma_addr_t iova); dma_addr_t (*map_single)(struct device *dev, void *addr, size_t size, enum dma_data_direction direction); void (*unmap_single)(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction direction); @@ -49,14 +49,14 @@ extern struct hppa_dma_ops *hppa_dma_ops; static inline void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, - int flag) + gfp_t flag) { return hppa_dma_ops->alloc_consistent(dev, size, dma_handle, flag); } static inline void * dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, - int flag) + gfp_t flag) { return hppa_dma_ops->alloc_noncoherent(dev, size, dma_handle, flag); } diff --git a/include/asm-powerpc/timex.h b/include/asm-powerpc/timex.h index 51c5b316be55..c02d15aced91 100644 --- a/include/asm-powerpc/timex.h +++ b/include/asm-powerpc/timex.h @@ -10,7 +10,7 @@ #include #include -#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ +#define CLOCK_TICK_RATE 1024000 /* Underlying HZ */ typedef unsigned long cycles_t; diff --git a/include/asm-ppc/cputable.h b/include/asm-ppc/cputable.h index 41d8f8425c04..e17c492c870b 100644 --- a/include/asm-ppc/cputable.h +++ b/include/asm-ppc/cputable.h @@ -24,6 +24,7 @@ #define PPC_FEATURE_HAS_SPE 0x00800000 
#define PPC_FEATURE_HAS_EFP_SINGLE 0x00400000 #define PPC_FEATURE_HAS_EFP_DOUBLE 0x00200000 +#define PPC_FEATURE_NO_TB 0x00100000 #ifdef __KERNEL__ diff --git a/include/asm-ppc/dma-mapping.h b/include/asm-ppc/dma-mapping.h index 92b8ee78dcc2..6e9635114433 100644 --- a/include/asm-ppc/dma-mapping.h +++ b/include/asm-ppc/dma-mapping.h @@ -19,7 +19,7 @@ * allocate the space "normally" and use the cache management functions * to ensure it is consistent. */ -extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, int gfp); +extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp); extern void __dma_free_coherent(size_t size, void *vaddr); extern void __dma_sync(void *vaddr, size_t size, int direction); extern void __dma_sync_page(struct page *page, unsigned long offset, @@ -61,7 +61,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask) static inline void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t * dma_handle, - unsigned int __nocast gfp) + gfp_t gfp) { #ifdef CONFIG_NOT_COHERENT_CACHE return __dma_alloc_coherent(size, dma_handle, gfp); diff --git a/include/asm-ppc/io.h b/include/asm-ppc/io.h index 7eb7cf6360bd..94d83998a759 100644 --- a/include/asm-ppc/io.h +++ b/include/asm-ppc/io.h @@ -56,7 +56,7 @@ extern unsigned long pci_dram_offset; * is actually performed (i.e. the data has come back) before we start * executing any following instructions. */ -extern inline int in_8(volatile unsigned char __iomem *addr) +extern inline int in_8(const volatile unsigned char __iomem *addr) { int ret; @@ -72,7 +72,7 @@ extern inline void out_8(volatile unsigned char __iomem *addr, int val) __asm__ __volatile__("stb%U0%X0 %1,%0; eieio" : "=m" (*addr) : "r" (val)); } -extern inline int in_le16(volatile unsigned short __iomem *addr) +extern inline int in_le16(const volatile unsigned short __iomem *addr) { int ret; @@ -83,7 +83,7 @@ extern inline int in_le16(volatile unsigned short __iomem *addr) return ret; } -extern inline int in_be16(volatile unsigned short __iomem *addr) +extern inline int in_be16(const volatile unsigned short __iomem *addr) { int ret; @@ -104,7 +104,7 @@ extern inline void out_be16(volatile unsigned short __iomem *addr, int val) __asm__ __volatile__("sth%U0%X0 %1,%0; eieio" : "=m" (*addr) : "r" (val)); } -extern inline unsigned in_le32(volatile unsigned __iomem *addr) +extern inline unsigned in_le32(const volatile unsigned __iomem *addr) { unsigned ret; @@ -115,7 +115,7 @@ extern inline unsigned in_le32(volatile unsigned __iomem *addr) return ret; } -extern inline unsigned in_be32(volatile unsigned __iomem *addr) +extern inline unsigned in_be32(const volatile unsigned __iomem *addr) { unsigned ret; @@ -139,7 +139,7 @@ extern inline void out_be32(volatile unsigned __iomem *addr, int val) #define readb(addr) in_8((volatile u8 *)(addr)) #define writeb(b,addr) out_8((volatile u8 *)(addr), (b)) #else -static inline __u8 readb(volatile void __iomem *addr) +static inline __u8 readb(const volatile void __iomem *addr) { return in_8(addr); } @@ -150,11 +150,11 @@ static inline void writeb(__u8 b, volatile void __iomem *addr) #endif #if defined(CONFIG_APUS) -static inline __u16 readw(volatile void __iomem *addr) +static inline __u16 readw(const volatile void __iomem *addr) { return *(__force volatile __u16 *)(addr); } -static inline __u32 readl(volatile void __iomem *addr) +static inline __u32 readl(const volatile void __iomem *addr) { return *(__force volatile __u32 *)(addr); } @@ -173,11 +173,11 @@ static inline void writel(__u32 
b, volatile void __iomem *addr) #define writew(b,addr) out_le16((volatile u16 *)(addr),(b)) #define writel(b,addr) out_le32((volatile u32 *)(addr),(b)) #else -static inline __u16 readw(volatile void __iomem *addr) +static inline __u16 readw(const volatile void __iomem *addr) { return in_le16(addr); } -static inline __u32 readl(volatile void __iomem *addr) +static inline __u32 readl(const volatile void __iomem *addr) { return in_le32(addr); } diff --git a/include/asm-ppc/macio.h b/include/asm-ppc/macio.h index a481b772d154..b553dd4b139e 100644 --- a/include/asm-ppc/macio.h +++ b/include/asm-ppc/macio.h @@ -1,7 +1,6 @@ #ifndef __MACIO_ASIC_H__ #define __MACIO_ASIC_H__ -#include #include extern struct bus_type macio_bus_type; diff --git a/include/asm-ppc/mv64x60.h b/include/asm-ppc/mv64x60.h index 75c2ffa26b26..ee2f9188cc64 100644 --- a/include/asm-ppc/mv64x60.h +++ b/include/asm-ppc/mv64x60.h @@ -233,7 +233,7 @@ struct mv64x60_chip_info { struct mv64x60_handle { u32 type; /* type of bridge */ u32 rev; /* revision of bridge */ - void *v_base; /* virtual base addr of bridge regs */ + void __iomem *v_base;/* virtual base addr of bridge regs */ phys_addr_t p_base; /* physical base addr of bridge regs */ u32 pci_mode_a; /* pci 0 mode: conventional pci, pci-x*/ @@ -303,7 +303,7 @@ void mv64x60_alloc_hose(struct mv64x60_handle *bh, u32 cfg_addr, u32 cfg_data, struct pci_controller **hose); int mv64x60_get_type(struct mv64x60_handle *bh); int mv64x60_setup_for_chip(struct mv64x60_handle *bh); -void *mv64x60_get_bridge_vbase(void); +void __iomem *mv64x60_get_bridge_vbase(void); u32 mv64x60_get_bridge_type(void); u32 mv64x60_get_bridge_rev(void); void mv64x60_get_mem_windows(struct mv64x60_handle *bh, diff --git a/include/asm-ppc/of_device.h b/include/asm-ppc/of_device.h index 4b264cfd3998..575bce418f80 100644 --- a/include/asm-ppc/of_device.h +++ b/include/asm-ppc/of_device.h @@ -2,6 +2,7 @@ #define __OF_DEVICE_H__ #include +#include #include /* @@ -55,7 +56,9 @@ extern int of_register_driver(struct of_platform_driver *drv); extern void of_unregister_driver(struct of_platform_driver *drv); extern int of_device_register(struct of_device *ofdev); extern void of_device_unregister(struct of_device *ofdev); -extern struct of_device *of_platform_device_create(struct device_node *np, const char *bus_id); +extern struct of_device *of_platform_device_create(struct device_node *np, + const char *bus_id, + struct device *parent); extern void of_release_dev(struct device *dev); #endif /* __OF_DEVICE_H__ */ diff --git a/include/asm-ppc64/dma-mapping.h b/include/asm-ppc64/dma-mapping.h index 9ad8adee0067..fb68fa23bea8 100644 --- a/include/asm-ppc64/dma-mapping.h +++ b/include/asm-ppc64/dma-mapping.h @@ -19,7 +19,7 @@ extern int dma_supported(struct device *dev, u64 mask); extern int dma_set_mask(struct device *dev, u64 dma_mask); extern void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, unsigned int __nocast flag); + dma_addr_t *dma_handle, gfp_t flag); extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle); extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, @@ -118,7 +118,7 @@ dma_cache_sync(void *vaddr, size_t size, */ struct dma_mapping_ops { void * (*alloc_coherent)(struct device *dev, size_t size, - dma_addr_t *dma_handle, unsigned int __nocast flag); + dma_addr_t *dma_handle, gfp_t flag); void (*free_coherent)(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); dma_addr_t (*map_single)(struct 
device *dev, void *ptr, diff --git a/include/asm-ppc64/iommu.h b/include/asm-ppc64/iommu.h index 72dcf8116b04..c2f3b6e8a42f 100644 --- a/include/asm-ppc64/iommu.h +++ b/include/asm-ppc64/iommu.h @@ -122,7 +122,7 @@ extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, int nelems, enum dma_data_direction direction); extern void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size, - dma_addr_t *dma_handle, unsigned int __nocast flag); + dma_addr_t *dma_handle, gfp_t flag); extern void iommu_free_coherent(struct iommu_table *tbl, size_t size, void *vaddr, dma_addr_t dma_handle); extern dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr, diff --git a/include/asm-ppc64/smu.h b/include/asm-ppc64/smu.h index 10b4397af9aa..dee8eefe47bc 100644 --- a/include/asm-ppc64/smu.h +++ b/include/asm-ppc64/smu.h @@ -1,22 +1,379 @@ +#ifndef _SMU_H +#define _SMU_H + /* * Definitions for talking to the SMU chip in newer G5 PowerMacs */ #include +#include /* - * Basic routines for use by architecture. To be extended as - * we understand more of the chip + * Known SMU commands + * + * Most of what is below comes from looking at the Open Firmware driver, + * though this is still incomplete and could use better documentation here + * or there... + */ + + +/* + * Partition info commands + * + * I do not know what those are for at this point + */ +#define SMU_CMD_PARTITION_COMMAND 0x3e + + +/* + * Fan control + * + * This is a "mux" for fan control commands, first byte is the + * "sub" command. + */ +#define SMU_CMD_FAN_COMMAND 0x4a + + +/* + * Battery access + * + * Same command number as the PMU, could it be same syntax ? + */ +#define SMU_CMD_BATTERY_COMMAND 0x6f +#define SMU_CMD_GET_BATTERY_INFO 0x00 + +/* + * Real time clock control + * + * This is a "mux", first data byte contains the "sub" command. + * The "RTC" part of the SMU controls the date, time, powerup + * timer, but also a PRAM + * + * Dates are in BCD format on 7 bytes: + * [sec] [min] [hour] [weekday] [month day] [month] [year] + * with month being 1 based and year minus 100 + */ +#define SMU_CMD_RTC_COMMAND 0x8e +#define SMU_CMD_RTC_SET_PWRUP_TIMER 0x00 /* i: 7 bytes date */ +#define SMU_CMD_RTC_GET_PWRUP_TIMER 0x01 /* o: 7 bytes date */ +#define SMU_CMD_RTC_STOP_PWRUP_TIMER 0x02 +#define SMU_CMD_RTC_SET_PRAM_BYTE_ACC 0x20 /* i: 1 byte (address?) */ +#define SMU_CMD_RTC_SET_PRAM_AUTOINC 0x21 /* i: 1 byte (data?) */ +#define SMU_CMD_RTC_SET_PRAM_LO_BYTES 0x22 /* i: 10 bytes */ +#define SMU_CMD_RTC_SET_PRAM_HI_BYTES 0x23 /* i: 10 bytes */ +#define SMU_CMD_RTC_GET_PRAM_BYTE 0x28 /* i: 1 bytes (address?) */ +#define SMU_CMD_RTC_GET_PRAM_LO_BYTES 0x29 /* o: 10 bytes */ +#define SMU_CMD_RTC_GET_PRAM_HI_BYTES 0x2a /* o: 10 bytes */ +#define SMU_CMD_RTC_SET_DATETIME 0x80 /* i: 7 bytes date */ +#define SMU_CMD_RTC_GET_DATETIME 0x81 /* o: 7 bytes date */ + + /* + * i2c commands + * + * To issue an i2c command, first is to send a parameter block to the + * the SMU. This is a command of type 0x9a with 9 bytes of header + * eventually followed by data for a write: + * + * 0: bus number (from device-tree usually, SMU has lots of busses !) + * 1: transfer type/format (see below) + * 2: device address. 
For combined and combined4 type transfers, this + * is the "write" version of the address (bit 0x01 cleared) + * 3: subaddress length (0..3) + * 4: subaddress byte 0 (or only byte for subaddress length 1) + * 5: subaddress byte 1 + * 6: subaddress byte 2 + * 7: combined address (device address for combined mode data phase) + * 8: data length + * + * The transfer types are the same good old Apple ones it seems, + * that is: + * - 0x00: Simple transfer + * - 0x01: Subaddress transfer (addr write + data tx, no restart) + * - 0x02: Combined transfer (addr write + restart + data tx) + * + * This is then followed by actual data for a write. + * + * At this point, the OF driver seems to have a limitation on transfer + * sizes of 0xd bytes on reads and 0x5 bytes on writes. I do not know + * wether this is just an OF limit due to some temporary buffer size + * or if this is an SMU imposed limit. This driver has the same limitation + * for now as I use a 0x10 bytes temporary buffer as well + * + * Once that is completed, a response is expected from the SMU. This is + * obtained via a command of type 0x9a with a length of 1 byte containing + * 0 as the data byte. OF also fills the rest of the data buffer with 0xff's + * though I can't tell yet if this is actually necessary. Once this command + * is complete, at this point, all I can tell is what OF does. OF tests + * byte 0 of the reply: + * - on read, 0xfe or 0xfc : bus is busy, wait (see below) or nak ? + * - on read, 0x00 or 0x01 : reply is in buffer (after the byte 0) + * - on write, < 0 -> failure (immediate exit) + * - else, OF just exists (without error, weird) + * + * So on read, there is this wait-for-busy thing when getting a 0xfc or + * 0xfe result. OF does a loop of up to 64 retries, waiting 20ms and + * doing the above again until either the retries expire or the result + * is no longer 0xfe or 0xfc + * + * The Darwin I2C driver is less subtle though. On any non-success status + * from the response command, it waits 5ms and tries again up to 20 times, + * it doesn't differenciate between fatal errors or "busy" status. + * + * This driver provides an asynchronous paramblock based i2c command + * interface to be used either directly by low level code or by a higher + * level driver interfacing to the linux i2c layer. The current + * implementation of this relies on working timers & timer interrupts + * though, so be careful of calling context for now. This may be "fixed" + * in the future by adding a polling facility. + */ +#define SMU_CMD_I2C_COMMAND 0x9a + /* transfer types */ +#define SMU_I2C_TRANSFER_SIMPLE 0x00 +#define SMU_I2C_TRANSFER_STDSUB 0x01 +#define SMU_I2C_TRANSFER_COMBINED 0x02 + +/* + * Power supply control + * + * The "sub" command is an ASCII string in the data, the + * data lenght is that of the string. + * + * The VSLEW command can be used to get or set the voltage slewing. + * - lenght 5 (only "VSLEW") : it returns "DONE" and 3 bytes of + * reply at data offset 6, 7 and 8. + * - lenght 8 ("VSLEWxyz") has 3 additional bytes appended, and is + * used to set the voltage slewing point. The SMU replies with "DONE" + * I yet have to figure out their exact meaning of those 3 bytes in + * both cases. 
+ * + */ +#define SMU_CMD_POWER_COMMAND 0xaa +#define SMU_CMD_POWER_RESTART "RESTART" +#define SMU_CMD_POWER_SHUTDOWN "SHUTDOWN" +#define SMU_CMD_POWER_VOLTAGE_SLEW "VSLEW" + +/* Misc commands + * + * This command seem to be a grab bag of various things + */ +#define SMU_CMD_MISC_df_COMMAND 0xdf +#define SMU_CMD_MISC_df_SET_DISPLAY_LIT 0x02 /* i: 1 byte */ +#define SMU_CMD_MISC_df_NMI_OPTION 0x04 + +/* + * Version info commands + * + * I haven't quite tried to figure out how these work + */ +#define SMU_CMD_VERSION_COMMAND 0xea + + +/* + * Misc commands + * + * This command seem to be a grab bag of various things + */ +#define SMU_CMD_MISC_ee_COMMAND 0xee +#define SMU_CMD_MISC_ee_GET_DATABLOCK_REC 0x02 +#define SMU_CMD_MISC_ee_LEDS_CTRL 0x04 /* i: 00 (00,01) [00] */ +#define SMU_CMD_MISC_ee_GET_DATA 0x05 /* i: 00 , o: ?? */ + + + +/* + * - Kernel side interface - + */ + +#ifdef __KERNEL__ + +/* + * Asynchronous SMU commands + * + * Fill up this structure and submit it via smu_queue_command(), + * and get notified by the optional done() callback, or because + * status becomes != 1 + */ + +struct smu_cmd; + +struct smu_cmd +{ + /* public */ + u8 cmd; /* command */ + int data_len; /* data len */ + int reply_len; /* reply len */ + void *data_buf; /* data buffer */ + void *reply_buf; /* reply buffer */ + int status; /* command status */ + void (*done)(struct smu_cmd *cmd, void *misc); + void *misc; + + /* private */ + struct list_head link; +}; + +/* + * Queues an SMU command, all fields have to be initialized + */ +extern int smu_queue_cmd(struct smu_cmd *cmd); + +/* + * Simple command wrapper. This structure embeds a small buffer + * to ease sending simple SMU commands from the stack + */ +struct smu_simple_cmd +{ + struct smu_cmd cmd; + u8 buffer[16]; +}; + +/* + * Queues a simple command. All fields will be initialized by that + * function + */ +extern int smu_queue_simple(struct smu_simple_cmd *scmd, u8 command, + unsigned int data_len, + void (*done)(struct smu_cmd *cmd, void *misc), + void *misc, + ...); + +/* + * Completion helper. Pass it to smu_queue_simple or as 'done' + * member to smu_queue_cmd, it will call complete() on the struct + * completion passed in the "misc" argument + */ +extern void smu_done_complete(struct smu_cmd *cmd, void *misc); + +/* + * Synchronous helpers. Will spin-wait for completion of a command + */ +extern void smu_spinwait_cmd(struct smu_cmd *cmd); + +static inline void smu_spinwait_simple(struct smu_simple_cmd *scmd) +{ + smu_spinwait_cmd(&scmd->cmd); +} + +/* + * Poll routine to call if blocked with irqs off + */ +extern void smu_poll(void); + + +/* + * Init routine, presence check.... */ extern int smu_init(void); extern int smu_present(void); +struct of_device; +extern struct of_device *smu_get_ofdev(void); + + +/* + * Common command wrappers + */ extern void smu_shutdown(void); extern void smu_restart(void); -extern int smu_get_rtc_time(struct rtc_time *time); -extern int smu_set_rtc_time(struct rtc_time *time); +struct rtc_time; +extern int smu_get_rtc_time(struct rtc_time *time, int spinwait); +extern int smu_set_rtc_time(struct rtc_time *time, int spinwait); /* * SMU command buffer absolute address, exported by pmac_setup, * this is allocated very early during boot. 
*/ extern unsigned long smu_cmdbuf_abs; + + +/* + * Kenrel asynchronous i2c interface + */ + +/* SMU i2c header, exactly matches i2c header on wire */ +struct smu_i2c_param +{ + u8 bus; /* SMU bus ID (from device tree) */ + u8 type; /* i2c transfer type */ + u8 devaddr; /* device address (includes direction) */ + u8 sublen; /* subaddress length */ + u8 subaddr[3]; /* subaddress */ + u8 caddr; /* combined address, filled by SMU driver */ + u8 datalen; /* length of transfer */ + u8 data[7]; /* data */ +}; + +#define SMU_I2C_READ_MAX 0x0d +#define SMU_I2C_WRITE_MAX 0x05 + +struct smu_i2c_cmd +{ + /* public */ + struct smu_i2c_param info; + void (*done)(struct smu_i2c_cmd *cmd, void *misc); + void *misc; + int status; /* 1 = pending, 0 = ok, <0 = fail */ + + /* private */ + struct smu_cmd scmd; + int read; + int stage; + int retries; + u8 pdata[0x10]; + struct list_head link; +}; + +/* + * Call this to queue an i2c command to the SMU. You must fill info, + * including info.data for a write, done and misc. + * For now, no polling interface is provided so you have to use completion + * callback. + */ +extern int smu_queue_i2c(struct smu_i2c_cmd *cmd); + + +#endif /* __KERNEL__ */ + +/* + * - Userland interface - + */ + +/* + * A given instance of the device can be configured for 2 different + * things at the moment: + * + * - sending SMU commands (default at open() time) + * - receiving SMU events (not yet implemented) + * + * Commands are written with write() of a command block. They can be + * "driver" commands (for example to switch to event reception mode) + * or real SMU commands. They are made of a header followed by command + * data if any. + * + * For SMU commands (not for driver commands), you can then read() back + * a reply. The reader will be blocked or not depending on how the device + * file is opened. poll() isn't implemented yet. The reply will consist + * of a header as well, followed by the reply data if any. You should + * always provide a buffer large enough for the maximum reply data, I + * recommand one page. 
+ * + * It is illegal to send SMU commands through a file descriptor configured + * for events reception + * + */ +struct smu_user_cmd_hdr +{ + __u32 cmdtype; +#define SMU_CMDTYPE_SMU 0 /* SMU command */ +#define SMU_CMDTYPE_WANTS_EVENTS 1 /* switch fd to events mode */ + + __u8 cmd; /* SMU command byte */ + __u32 data_len; /* Lenght of data following */ +}; + +struct smu_user_reply_hdr +{ + __u32 status; /* Command status */ + __u32 reply_len; /* Lenght of data follwing */ +}; + +#endif /* _SMU_H */ diff --git a/include/asm-ppc64/tlbflush.h b/include/asm-ppc64/tlbflush.h index 45411a67e082..74271d7c1d16 100644 --- a/include/asm-ppc64/tlbflush.h +++ b/include/asm-ppc64/tlbflush.h @@ -25,6 +25,7 @@ struct ppc64_tlb_batch { pte_t pte[PPC64_TLB_BATCH_NR]; unsigned long addr[PPC64_TLB_BATCH_NR]; unsigned long vaddr[PPC64_TLB_BATCH_NR]; + unsigned int large; }; DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); diff --git a/include/asm-ppc64/uaccess.h b/include/asm-ppc64/uaccess.h index c181a60d868c..132c1276547b 100644 --- a/include/asm-ppc64/uaccess.h +++ b/include/asm-ppc64/uaccess.h @@ -164,7 +164,8 @@ do { \ #define __get_user_nocheck(x,ptr,size) \ ({ \ - long __gu_err, __gu_val; \ + long __gu_err; \ + unsigned long __gu_val; \ might_sleep(); \ __get_user_size(__gu_val,(ptr),(size),__gu_err,-EFAULT);\ (x) = (__typeof__(*(ptr)))__gu_val; \ @@ -173,7 +174,8 @@ do { \ #define __get_user_check(x,ptr,size) \ ({ \ - long __gu_err = -EFAULT, __gu_val = 0; \ + long __gu_err = -EFAULT; \ + unsigned long __gu_val = 0; \ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ might_sleep(); \ if (access_ok(VERIFY_READ,__gu_addr,size)) \ diff --git a/include/asm-s390/sigcontext.h b/include/asm-s390/sigcontext.h index d57bc0cebdce..803545351dd8 100644 --- a/include/asm-s390/sigcontext.h +++ b/include/asm-s390/sigcontext.h @@ -61,7 +61,7 @@ typedef struct struct sigcontext { unsigned long oldmask[_SIGCONTEXT_NSIG_WORDS]; - _sigregs *sregs; + _sigregs __user *sregs; }; diff --git a/include/asm-s390/signal.h b/include/asm-s390/signal.h index 3d6e11c6c1fd..7084626de215 100644 --- a/include/asm-s390/signal.h +++ b/include/asm-s390/signal.h @@ -165,7 +165,7 @@ struct sigaction { #endif /* __KERNEL__ */ typedef struct sigaltstack { - void *ss_sp; + void __user *ss_sp; int ss_flags; size_t ss_size; } stack_t; diff --git a/include/asm-sh/dma-mapping.h b/include/asm-sh/dma-mapping.h index 80d164c1529e..d3fa5c2b889d 100644 --- a/include/asm-sh/dma-mapping.h +++ b/include/asm-sh/dma-mapping.h @@ -9,7 +9,7 @@ extern struct bus_type pci_bus_type; /* arch/sh/mm/consistent.c */ -extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle); +extern void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle); extern void consistent_free(void *vaddr, size_t size); extern void consistent_sync(void *vaddr, size_t size, int direction); @@ -26,7 +26,7 @@ static inline int dma_set_mask(struct device *dev, u64 mask) } static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, int flag) + dma_addr_t *dma_handle, gfp_t flag) { if (sh_mv.mv_consistent_alloc) { void *ret; diff --git a/include/asm-sh/machvec.h b/include/asm-sh/machvec.h index 5771f4baa478..3f18aa180516 100644 --- a/include/asm-sh/machvec.h +++ b/include/asm-sh/machvec.h @@ -64,7 +64,7 @@ struct sh_machine_vector void (*mv_heartbeat)(void); - void *(*mv_consistent_alloc)(struct device *, size_t, dma_addr_t *, int); + void *(*mv_consistent_alloc)(struct device *, size_t, dma_addr_t *, gfp_t); int 
(*mv_consistent_free)(struct device *, size_t, void *, dma_addr_t); }; diff --git a/include/asm-sh64/dma-mapping.h b/include/asm-sh64/dma-mapping.h index b8d26fe677f4..cc9a2e86f5b4 100644 --- a/include/asm-sh64/dma-mapping.h +++ b/include/asm-sh64/dma-mapping.h @@ -25,7 +25,7 @@ static inline int dma_set_mask(struct device *dev, u64 mask) } static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, int flag) + dma_addr_t *dma_handle, gfp_t flag) { return consistent_alloc(NULL, size, dma_handle); } diff --git a/include/asm-sparc/btfixup.h b/include/asm-sparc/btfixup.h index b84c96c89581..c2868d0f60b6 100644 --- a/include/asm-sparc/btfixup.h +++ b/include/asm-sparc/btfixup.h @@ -49,17 +49,17 @@ extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void); /* Put bottom 13bits into some register variable */ #define BTFIXUPDEF_SIMM13(__name) \ - extern unsigned int ___sf_##__name(void) __attribute_const__; \ + static inline unsigned int ___sf_##__name(void) __attribute_const__; \ extern unsigned ___ss_##__name[2]; \ - extern __inline__ unsigned int ___sf_##__name(void) { \ + static inline unsigned int ___sf_##__name(void) { \ unsigned int ret; \ __asm__ ("or %%g0, ___s_" #__name ", %0" : "=r"(ret)); \ return ret; \ } #define BTFIXUPDEF_SIMM13_INIT(__name,__val) \ - extern unsigned int ___sf_##__name(void) __attribute_const__; \ + static inline unsigned int ___sf_##__name(void) __attribute_const__; \ extern unsigned ___ss_##__name[2]; \ - extern __inline__ unsigned int ___sf_##__name(void) { \ + static inline unsigned int ___sf_##__name(void) { \ unsigned int ret; \ __asm__ ("or %%g0, ___s_" #__name "__btset_" #__val ", %0" : "=r"(ret));\ return ret; \ @@ -71,17 +71,17 @@ extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void); */ #define BTFIXUPDEF_HALF(__name) \ - extern unsigned int ___af_##__name(void) __attribute_const__; \ + static inline unsigned int ___af_##__name(void) __attribute_const__; \ extern unsigned ___as_##__name[2]; \ - extern __inline__ unsigned int ___af_##__name(void) { \ + static inline unsigned int ___af_##__name(void) { \ unsigned int ret; \ __asm__ ("or %%g0, ___a_" #__name ", %0" : "=r"(ret)); \ return ret; \ } #define BTFIXUPDEF_HALF_INIT(__name,__val) \ - extern unsigned int ___af_##__name(void) __attribute_const__; \ + static inline unsigned int ___af_##__name(void) __attribute_const__; \ extern unsigned ___as_##__name[2]; \ - extern __inline__ unsigned int ___af_##__name(void) { \ + static inline unsigned int ___af_##__name(void) { \ unsigned int ret; \ __asm__ ("or %%g0, ___a_" #__name "__btset_" #__val ", %0" : "=r"(ret));\ return ret; \ @@ -90,17 +90,17 @@ extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void); /* Put upper 22 bits into some register variable */ #define BTFIXUPDEF_SETHI(__name) \ - extern unsigned int ___hf_##__name(void) __attribute_const__; \ + static inline unsigned int ___hf_##__name(void) __attribute_const__; \ extern unsigned ___hs_##__name[2]; \ - extern __inline__ unsigned int ___hf_##__name(void) { \ + static inline unsigned int ___hf_##__name(void) { \ unsigned int ret; \ __asm__ ("sethi %%hi(___h_" #__name "), %0" : "=r"(ret)); \ return ret; \ } #define BTFIXUPDEF_SETHI_INIT(__name,__val) \ - extern unsigned int ___hf_##__name(void) __attribute_const__; \ + static inline unsigned int ___hf_##__name(void) __attribute_const__; \ extern unsigned ___hs_##__name[2]; \ - extern __inline__ unsigned int ___hf_##__name(void) { \ + static inline unsigned int ___hf_##__name(void) 
{ \ unsigned int ret; \ __asm__ ("sethi %%hi(___h_" #__name "__btset_" #__val "), %0" : \ "=r"(ret)); \ diff --git a/include/asm-sparc/cache.h b/include/asm-sparc/cache.h index e6316fd7e1a4..a10522cb21b7 100644 --- a/include/asm-sparc/cache.h +++ b/include/asm-sparc/cache.h @@ -27,7 +27,7 @@ */ /* First, cache-tag access. */ -extern __inline__ unsigned int get_icache_tag(int setnum, int tagnum) +static inline unsigned int get_icache_tag(int setnum, int tagnum) { unsigned int vaddr, retval; @@ -38,7 +38,7 @@ extern __inline__ unsigned int get_icache_tag(int setnum, int tagnum) return retval; } -extern __inline__ void put_icache_tag(int setnum, int tagnum, unsigned int entry) +static inline void put_icache_tag(int setnum, int tagnum, unsigned int entry) { unsigned int vaddr; @@ -51,7 +51,7 @@ extern __inline__ void put_icache_tag(int setnum, int tagnum, unsigned int entry /* Second cache-data access. The data is returned two-32bit quantities * at a time. */ -extern __inline__ void get_icache_data(int setnum, int tagnum, int subblock, +static inline void get_icache_data(int setnum, int tagnum, int subblock, unsigned int *data) { unsigned int value1, value2, vaddr; @@ -67,7 +67,7 @@ extern __inline__ void get_icache_data(int setnum, int tagnum, int subblock, data[0] = value1; data[1] = value2; } -extern __inline__ void put_icache_data(int setnum, int tagnum, int subblock, +static inline void put_icache_data(int setnum, int tagnum, int subblock, unsigned int *data) { unsigned int value1, value2, vaddr; @@ -92,35 +92,35 @@ extern __inline__ void put_icache_data(int setnum, int tagnum, int subblock, */ /* Flushes which clear out both the on-chip and external caches */ -extern __inline__ void flush_ei_page(unsigned int addr) +static inline void flush_ei_page(unsigned int addr) { __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r" (addr), "i" (ASI_M_FLUSH_PAGE) : "memory"); } -extern __inline__ void flush_ei_seg(unsigned int addr) +static inline void flush_ei_seg(unsigned int addr) { __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r" (addr), "i" (ASI_M_FLUSH_SEG) : "memory"); } -extern __inline__ void flush_ei_region(unsigned int addr) +static inline void flush_ei_region(unsigned int addr) { __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r" (addr), "i" (ASI_M_FLUSH_REGION) : "memory"); } -extern __inline__ void flush_ei_ctx(unsigned int addr) +static inline void flush_ei_ctx(unsigned int addr) { __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r" (addr), "i" (ASI_M_FLUSH_CTX) : "memory"); } -extern __inline__ void flush_ei_user(unsigned int addr) +static inline void flush_ei_user(unsigned int addr) { __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r" (addr), "i" (ASI_M_FLUSH_USER) : diff --git a/include/asm-sparc/cypress.h b/include/asm-sparc/cypress.h index fc92fc839c3f..99599533efbc 100644 --- a/include/asm-sparc/cypress.h +++ b/include/asm-sparc/cypress.h @@ -48,25 +48,25 @@ #define CYPRESS_NFAULT 0x00000002 #define CYPRESS_MENABLE 0x00000001 -extern __inline__ void cypress_flush_page(unsigned long page) +static inline void cypress_flush_page(unsigned long page) { __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r" (page), "i" (ASI_M_FLUSH_PAGE)); } -extern __inline__ void cypress_flush_segment(unsigned long addr) +static inline void cypress_flush_segment(unsigned long addr) { __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r" (addr), "i" (ASI_M_FLUSH_SEG)); } -extern __inline__ void cypress_flush_region(unsigned long addr) +static inline void cypress_flush_region(unsigned long addr) 
{ __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r" (addr), "i" (ASI_M_FLUSH_REGION)); } -extern __inline__ void cypress_flush_context(void) +static inline void cypress_flush_context(void) { __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : : "i" (ASI_M_FLUSH_CTX)); diff --git a/include/asm-sparc/delay.h b/include/asm-sparc/delay.h index 6edf2cbb246b..7ec8e9f7ad4f 100644 --- a/include/asm-sparc/delay.h +++ b/include/asm-sparc/delay.h @@ -10,7 +10,7 @@ #include #include -extern __inline__ void __delay(unsigned long loops) +static inline void __delay(unsigned long loops) { __asm__ __volatile__("cmp %0, 0\n\t" "1: bne 1b\n\t" diff --git a/include/asm-sparc/dma-mapping.h b/include/asm-sparc/dma-mapping.h index 2dc5bb8effa6..d7c3b0f0a901 100644 --- a/include/asm-sparc/dma-mapping.h +++ b/include/asm-sparc/dma-mapping.h @@ -8,7 +8,7 @@ #else static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, int flag) + dma_addr_t *dma_handle, gfp_t flag) { BUG(); return NULL; diff --git a/include/asm-sparc/dma.h b/include/asm-sparc/dma.h index 07e6368a2521..8ec206aa5f2e 100644 --- a/include/asm-sparc/dma.h +++ b/include/asm-sparc/dma.h @@ -198,7 +198,7 @@ extern void dvma_init(struct sbus_bus *); /* Pause until counter runs out or BIT isn't set in the DMA condition * register. */ -extern __inline__ void sparc_dma_pause(struct sparc_dma_registers *regs, +static inline void sparc_dma_pause(struct sparc_dma_registers *regs, unsigned long bit) { int ctr = 50000; /* Let's find some bugs ;) */ diff --git a/include/asm-sparc/iommu.h b/include/asm-sparc/iommu.h index 8171362d56b9..70c589c05a10 100644 --- a/include/asm-sparc/iommu.h +++ b/include/asm-sparc/iommu.h @@ -108,12 +108,12 @@ struct iommu_struct { struct bit_map usemap; }; -extern __inline__ void iommu_invalidate(struct iommu_regs *regs) +static inline void iommu_invalidate(struct iommu_regs *regs) { regs->tlbflush = 0; } -extern __inline__ void iommu_invalidate_page(struct iommu_regs *regs, unsigned long ba) +static inline void iommu_invalidate_page(struct iommu_regs *regs, unsigned long ba) { regs->pageflush = (ba & PAGE_MASK); } diff --git a/include/asm-sparc/kdebug.h b/include/asm-sparc/kdebug.h index 3ea4916635ee..fba92485fdba 100644 --- a/include/asm-sparc/kdebug.h +++ b/include/asm-sparc/kdebug.h @@ -46,7 +46,7 @@ struct kernel_debug { extern struct kernel_debug *linux_dbvec; /* Use this macro in C-code to enter the debugger. 
*/ -extern __inline__ void sp_enter_debugger(void) +static inline void sp_enter_debugger(void) { __asm__ __volatile__("jmpl %0, %%o7\n\t" "nop\n\t" : : diff --git a/include/asm-sparc/mbus.h b/include/asm-sparc/mbus.h index 5f2749015342..ecacdf4075d7 100644 --- a/include/asm-sparc/mbus.h +++ b/include/asm-sparc/mbus.h @@ -83,7 +83,7 @@ extern unsigned int hwbug_bitmask; */ #define TBR_ID_SHIFT 20 -extern __inline__ int get_cpuid(void) +static inline int get_cpuid(void) { register int retval; __asm__ __volatile__("rd %%tbr, %0\n\t" @@ -93,7 +93,7 @@ extern __inline__ int get_cpuid(void) return (retval & 3); } -extern __inline__ int get_modid(void) +static inline int get_modid(void) { return (get_cpuid() | 0x8); } diff --git a/include/asm-sparc/msi.h b/include/asm-sparc/msi.h index b69543dd3b46..ff72cbd946a4 100644 --- a/include/asm-sparc/msi.h +++ b/include/asm-sparc/msi.h @@ -19,7 +19,7 @@ #define MSI_ASYNC_MODE 0x80000000 /* Operate the MSI asynchronously */ -extern __inline__ void msi_set_sync(void) +static inline void msi_set_sync(void) { __asm__ __volatile__ ("lda [%0] %1, %%g3\n\t" "andn %%g3, %2, %%g3\n\t" diff --git a/include/asm-sparc/mxcc.h b/include/asm-sparc/mxcc.h index 60ef9d6fe7bc..128fe9708135 100644 --- a/include/asm-sparc/mxcc.h +++ b/include/asm-sparc/mxcc.h @@ -85,7 +85,7 @@ #ifndef __ASSEMBLY__ -extern __inline__ void mxcc_set_stream_src(unsigned long *paddr) +static inline void mxcc_set_stream_src(unsigned long *paddr) { unsigned long data0 = paddr[0]; unsigned long data1 = paddr[1]; @@ -98,7 +98,7 @@ extern __inline__ void mxcc_set_stream_src(unsigned long *paddr) "i" (ASI_M_MXCC) : "g2", "g3"); } -extern __inline__ void mxcc_set_stream_dst(unsigned long *paddr) +static inline void mxcc_set_stream_dst(unsigned long *paddr) { unsigned long data0 = paddr[0]; unsigned long data1 = paddr[1]; @@ -111,7 +111,7 @@ extern __inline__ void mxcc_set_stream_dst(unsigned long *paddr) "i" (ASI_M_MXCC) : "g2", "g3"); } -extern __inline__ unsigned long mxcc_get_creg(void) +static inline unsigned long mxcc_get_creg(void) { unsigned long mxcc_control; @@ -125,7 +125,7 @@ extern __inline__ unsigned long mxcc_get_creg(void) return mxcc_control; } -extern __inline__ void mxcc_set_creg(unsigned long mxcc_control) +static inline void mxcc_set_creg(unsigned long mxcc_control) { __asm__ __volatile__("sta %0, [%1] %2\n\t" : : "r" (mxcc_control), "r" (MXCC_CREG), diff --git a/include/asm-sparc/obio.h b/include/asm-sparc/obio.h index 62e1d77965f3..47854a2a12cf 100644 --- a/include/asm-sparc/obio.h +++ b/include/asm-sparc/obio.h @@ -98,7 +98,7 @@ #ifndef __ASSEMBLY__ -extern __inline__ int bw_get_intr_mask(int sbus_level) +static inline int bw_get_intr_mask(int sbus_level) { int mask; @@ -109,7 +109,7 @@ extern __inline__ int bw_get_intr_mask(int sbus_level) return mask; } -extern __inline__ void bw_clear_intr_mask(int sbus_level, int mask) +static inline void bw_clear_intr_mask(int sbus_level, int mask) { __asm__ __volatile__ ("stha %0, [%1] %2" : : "r" (mask), @@ -117,7 +117,7 @@ extern __inline__ void bw_clear_intr_mask(int sbus_level, int mask) "i" (ASI_M_CTL)); } -extern __inline__ unsigned bw_get_prof_limit(int cpu) +static inline unsigned bw_get_prof_limit(int cpu) { unsigned limit; @@ -128,7 +128,7 @@ extern __inline__ unsigned bw_get_prof_limit(int cpu) return limit; } -extern __inline__ void bw_set_prof_limit(int cpu, unsigned limit) +static inline void bw_set_prof_limit(int cpu, unsigned limit) { __asm__ __volatile__ ("sta %0, [%1] %2" : : "r" (limit), @@ -136,7 +136,7 @@ extern 
__inline__ void bw_set_prof_limit(int cpu, unsigned limit) "i" (ASI_M_CTL)); } -extern __inline__ unsigned bw_get_ctrl(int cpu) +static inline unsigned bw_get_ctrl(int cpu) { unsigned ctrl; @@ -147,7 +147,7 @@ extern __inline__ unsigned bw_get_ctrl(int cpu) return ctrl; } -extern __inline__ void bw_set_ctrl(int cpu, unsigned ctrl) +static inline void bw_set_ctrl(int cpu, unsigned ctrl) { __asm__ __volatile__ ("sta %0, [%1] %2" : : "r" (ctrl), @@ -157,7 +157,7 @@ extern __inline__ void bw_set_ctrl(int cpu, unsigned ctrl) extern unsigned char cpu_leds[32]; -extern __inline__ void show_leds(int cpuid) +static inline void show_leds(int cpuid) { cpuid &= 0x1e; __asm__ __volatile__ ("stba %0, [%1] %2" : : @@ -166,7 +166,7 @@ extern __inline__ void show_leds(int cpuid) "i" (ASI_M_CTL)); } -extern __inline__ unsigned cc_get_ipen(void) +static inline unsigned cc_get_ipen(void) { unsigned pending; @@ -177,7 +177,7 @@ extern __inline__ unsigned cc_get_ipen(void) return pending; } -extern __inline__ void cc_set_iclr(unsigned clear) +static inline void cc_set_iclr(unsigned clear) { __asm__ __volatile__ ("stha %0, [%1] %2" : : "r" (clear), @@ -185,7 +185,7 @@ extern __inline__ void cc_set_iclr(unsigned clear) "i" (ASI_M_MXCC)); } -extern __inline__ unsigned cc_get_imsk(void) +static inline unsigned cc_get_imsk(void) { unsigned mask; @@ -196,7 +196,7 @@ extern __inline__ unsigned cc_get_imsk(void) return mask; } -extern __inline__ void cc_set_imsk(unsigned mask) +static inline void cc_set_imsk(unsigned mask) { __asm__ __volatile__ ("stha %0, [%1] %2" : : "r" (mask), @@ -204,7 +204,7 @@ extern __inline__ void cc_set_imsk(unsigned mask) "i" (ASI_M_MXCC)); } -extern __inline__ unsigned cc_get_imsk_other(int cpuid) +static inline unsigned cc_get_imsk_other(int cpuid) { unsigned mask; @@ -215,7 +215,7 @@ extern __inline__ unsigned cc_get_imsk_other(int cpuid) return mask; } -extern __inline__ void cc_set_imsk_other(int cpuid, unsigned mask) +static inline void cc_set_imsk_other(int cpuid, unsigned mask) { __asm__ __volatile__ ("stha %0, [%1] %2" : : "r" (mask), @@ -223,7 +223,7 @@ extern __inline__ void cc_set_imsk_other(int cpuid, unsigned mask) "i" (ASI_M_CTL)); } -extern __inline__ void cc_set_igen(unsigned gen) +static inline void cc_set_igen(unsigned gen) { __asm__ __volatile__ ("sta %0, [%1] %2" : : "r" (gen), @@ -239,7 +239,7 @@ extern __inline__ void cc_set_igen(unsigned gen) #define IGEN_MESSAGE(bcast, devid, sid, levels) \ (((bcast) << 31) | ((devid) << 23) | ((sid) << 15) | (levels)) -extern __inline__ void sun4d_send_ipi(int cpu, int level) +static inline void sun4d_send_ipi(int cpu, int level) { cc_set_igen(IGEN_MESSAGE(0, cpu << 3, 6 + ((level >> 1) & 7), 1 << (level - 1))); } diff --git a/include/asm-sparc/pci.h b/include/asm-sparc/pci.h index 97052baf90c1..38644742f011 100644 --- a/include/asm-sparc/pci.h +++ b/include/asm-sparc/pci.h @@ -15,12 +15,12 @@ #define PCI_IRQ_NONE 0xffffffff -extern inline void pcibios_set_master(struct pci_dev *dev) +static inline void pcibios_set_master(struct pci_dev *dev) { /* No special bus mastering setup handling */ } -extern inline void pcibios_penalize_isa_irq(int irq, int active) +static inline void pcibios_penalize_isa_irq(int irq, int active) { /* We don't do dynamic PCI IRQ allocation */ } @@ -137,7 +137,7 @@ extern void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist * only drive the low 24-bits during PCI bus mastering, then * you would pass 0x00ffffff as the mask to this function. 
*/ -extern inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask) +static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask) { return 1; } diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h index 8f4f6a959651..a14e98677500 100644 --- a/include/asm-sparc/pgtable.h +++ b/include/asm-sparc/pgtable.h @@ -82,6 +82,8 @@ extern unsigned long page_kernel; /* Top-level page directory */ extern pgd_t swapper_pg_dir[1024]; +extern void paging_init(void); + /* Page table for 0-4MB for everybody, on the Sparc this * holds the same as on the i386. */ @@ -152,7 +154,7 @@ BTFIXUPDEF_CALL_CONST(int, pte_present, pte_t) BTFIXUPDEF_CALL(void, pte_clear, pte_t *) BTFIXUPDEF_CALL(int, pte_read, pte_t) -extern __inline__ int pte_none(pte_t pte) +static inline int pte_none(pte_t pte) { return !(pte_val(pte) & ~BTFIXUP_SETHI(none_mask)); } @@ -165,7 +167,7 @@ BTFIXUPDEF_CALL_CONST(int, pmd_bad, pmd_t) BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t) BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *) -extern __inline__ int pmd_none(pmd_t pmd) +static inline int pmd_none(pmd_t pmd) { return !(pmd_val(pmd) & ~BTFIXUP_SETHI(none_mask)); } @@ -192,20 +194,20 @@ BTFIXUPDEF_HALF(pte_writei) BTFIXUPDEF_HALF(pte_dirtyi) BTFIXUPDEF_HALF(pte_youngi) -extern int pte_write(pte_t pte) __attribute_const__; -extern __inline__ int pte_write(pte_t pte) +static int pte_write(pte_t pte) __attribute_const__; +static inline int pte_write(pte_t pte) { return pte_val(pte) & BTFIXUP_HALF(pte_writei); } -extern int pte_dirty(pte_t pte) __attribute_const__; -extern __inline__ int pte_dirty(pte_t pte) +static int pte_dirty(pte_t pte) __attribute_const__; +static inline int pte_dirty(pte_t pte) { return pte_val(pte) & BTFIXUP_HALF(pte_dirtyi); } -extern int pte_young(pte_t pte) __attribute_const__; -extern __inline__ int pte_young(pte_t pte) +static int pte_young(pte_t pte) __attribute_const__; +static inline int pte_young(pte_t pte) { return pte_val(pte) & BTFIXUP_HALF(pte_youngi); } @@ -215,8 +217,8 @@ extern __inline__ int pte_young(pte_t pte) */ BTFIXUPDEF_HALF(pte_filei) -extern int pte_file(pte_t pte) __attribute_const__; -extern __inline__ int pte_file(pte_t pte) +static int pte_file(pte_t pte) __attribute_const__; +static inline int pte_file(pte_t pte) { return pte_val(pte) & BTFIXUP_HALF(pte_filei); } @@ -227,20 +229,20 @@ BTFIXUPDEF_HALF(pte_wrprotecti) BTFIXUPDEF_HALF(pte_mkcleani) BTFIXUPDEF_HALF(pte_mkoldi) -extern pte_t pte_wrprotect(pte_t pte) __attribute_const__; -extern __inline__ pte_t pte_wrprotect(pte_t pte) +static pte_t pte_wrprotect(pte_t pte) __attribute_const__; +static inline pte_t pte_wrprotect(pte_t pte) { return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_wrprotecti)); } -extern pte_t pte_mkclean(pte_t pte) __attribute_const__; -extern __inline__ pte_t pte_mkclean(pte_t pte) +static pte_t pte_mkclean(pte_t pte) __attribute_const__; +static inline pte_t pte_mkclean(pte_t pte) { return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkcleani)); } -extern pte_t pte_mkold(pte_t pte) __attribute_const__; -extern __inline__ pte_t pte_mkold(pte_t pte) +static pte_t pte_mkold(pte_t pte) __attribute_const__; +static inline pte_t pte_mkold(pte_t pte) { return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkoldi)); } @@ -276,8 +278,8 @@ BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_io, unsigned long, pgprot_t, int) BTFIXUPDEF_INT(pte_modify_mask) -extern pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__; -extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot) +static pte_t pte_modify(pte_t pte, pgprot_t 
newprot) __attribute_const__; +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { return __pte((pte_val(pte) & BTFIXUP_INT(pte_modify_mask)) | pgprot_val(newprot)); @@ -384,13 +386,13 @@ extern struct ctx_list ctx_used; /* Head of used contexts list */ #define NO_CONTEXT -1 -extern __inline__ void remove_from_ctx_list(struct ctx_list *entry) +static inline void remove_from_ctx_list(struct ctx_list *entry) { entry->next->prev = entry->prev; entry->prev->next = entry->next; } -extern __inline__ void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry) +static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry) { entry->next = head; (entry->prev = head->prev)->next = entry; @@ -399,7 +401,7 @@ extern __inline__ void add_to_ctx_list(struct ctx_list *head, struct ctx_list *e #define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry) #define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry) -extern __inline__ unsigned long +static inline unsigned long __get_phys (unsigned long addr) { switch (sparc_cpu_model){ @@ -414,7 +416,7 @@ __get_phys (unsigned long addr) } } -extern __inline__ int +static inline int __get_iospace (unsigned long addr) { switch (sparc_cpu_model){ diff --git a/include/asm-sparc/pgtsrmmu.h b/include/asm-sparc/pgtsrmmu.h index ee3b9d93187c..edeb9811e728 100644 --- a/include/asm-sparc/pgtsrmmu.h +++ b/include/asm-sparc/pgtsrmmu.h @@ -148,7 +148,7 @@ extern void *srmmu_nocache_pool; #define __nocache_fix(VADDR) __va(__nocache_pa(VADDR)) /* Accessing the MMU control register. */ -extern __inline__ unsigned int srmmu_get_mmureg(void) +static inline unsigned int srmmu_get_mmureg(void) { unsigned int retval; __asm__ __volatile__("lda [%%g0] %1, %0\n\t" : @@ -157,14 +157,14 @@ extern __inline__ unsigned int srmmu_get_mmureg(void) return retval; } -extern __inline__ void srmmu_set_mmureg(unsigned long regval) +static inline void srmmu_set_mmureg(unsigned long regval) { __asm__ __volatile__("sta %0, [%%g0] %1\n\t" : : "r" (regval), "i" (ASI_M_MMUREGS) : "memory"); } -extern __inline__ void srmmu_set_ctable_ptr(unsigned long paddr) +static inline void srmmu_set_ctable_ptr(unsigned long paddr) { paddr = ((paddr >> 4) & SRMMU_CTX_PMASK); __asm__ __volatile__("sta %0, [%1] %2\n\t" : : @@ -173,7 +173,7 @@ extern __inline__ void srmmu_set_ctable_ptr(unsigned long paddr) "memory"); } -extern __inline__ unsigned long srmmu_get_ctable_ptr(void) +static inline unsigned long srmmu_get_ctable_ptr(void) { unsigned int retval; @@ -184,14 +184,14 @@ extern __inline__ unsigned long srmmu_get_ctable_ptr(void) return (retval & SRMMU_CTX_PMASK) << 4; } -extern __inline__ void srmmu_set_context(int context) +static inline void srmmu_set_context(int context) { __asm__ __volatile__("sta %0, [%1] %2\n\t" : : "r" (context), "r" (SRMMU_CTX_REG), "i" (ASI_M_MMUREGS) : "memory"); } -extern __inline__ int srmmu_get_context(void) +static inline int srmmu_get_context(void) { register int retval; __asm__ __volatile__("lda [%1] %2, %0\n\t" : @@ -201,7 +201,7 @@ extern __inline__ int srmmu_get_context(void) return retval; } -extern __inline__ unsigned int srmmu_get_fstatus(void) +static inline unsigned int srmmu_get_fstatus(void) { unsigned int retval; @@ -211,7 +211,7 @@ extern __inline__ unsigned int srmmu_get_fstatus(void) return retval; } -extern __inline__ unsigned int srmmu_get_faddr(void) +static inline unsigned int srmmu_get_faddr(void) { unsigned int retval; @@ -222,7 +222,7 @@ extern __inline__ unsigned int srmmu_get_faddr(void) } /* This is 
guaranteed on all SRMMU's. */ -extern __inline__ void srmmu_flush_whole_tlb(void) +static inline void srmmu_flush_whole_tlb(void) { __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : "r" (0x400), /* Flush entire TLB!! */ @@ -231,7 +231,7 @@ extern __inline__ void srmmu_flush_whole_tlb(void) } /* These flush types are not available on all chips... */ -extern __inline__ void srmmu_flush_tlb_ctx(void) +static inline void srmmu_flush_tlb_ctx(void) { __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : "r" (0x300), /* Flush TLB ctx.. */ @@ -239,7 +239,7 @@ extern __inline__ void srmmu_flush_tlb_ctx(void) } -extern __inline__ void srmmu_flush_tlb_region(unsigned long addr) +static inline void srmmu_flush_tlb_region(unsigned long addr) { addr &= SRMMU_PGDIR_MASK; __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : @@ -249,7 +249,7 @@ extern __inline__ void srmmu_flush_tlb_region(unsigned long addr) } -extern __inline__ void srmmu_flush_tlb_segment(unsigned long addr) +static inline void srmmu_flush_tlb_segment(unsigned long addr) { addr &= SRMMU_REAL_PMD_MASK; __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : @@ -258,7 +258,7 @@ extern __inline__ void srmmu_flush_tlb_segment(unsigned long addr) } -extern __inline__ void srmmu_flush_tlb_page(unsigned long page) +static inline void srmmu_flush_tlb_page(unsigned long page) { page &= PAGE_MASK; __asm__ __volatile__("sta %%g0, [%0] %1\n\t": : @@ -267,7 +267,7 @@ extern __inline__ void srmmu_flush_tlb_page(unsigned long page) } -extern __inline__ unsigned long srmmu_hwprobe(unsigned long vaddr) +static inline unsigned long srmmu_hwprobe(unsigned long vaddr) { unsigned long retval; @@ -279,7 +279,7 @@ extern __inline__ unsigned long srmmu_hwprobe(unsigned long vaddr) return retval; } -extern __inline__ int +static inline int srmmu_get_pte (unsigned long addr) { register unsigned long entry; diff --git a/include/asm-sparc/processor.h b/include/asm-sparc/processor.h index 5a7a1a8d29ac..6fbb3f0af8d8 100644 --- a/include/asm-sparc/processor.h +++ b/include/asm-sparc/processor.h @@ -79,7 +79,7 @@ struct thread_struct { extern unsigned long thread_saved_pc(struct task_struct *t); /* Do necessary setup to start up a newly executed thread. */ -extern __inline__ void start_thread(struct pt_regs * regs, unsigned long pc, +static inline void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) { register unsigned long zero asm("g1"); diff --git a/include/asm-sparc/psr.h b/include/asm-sparc/psr.h index 9778b8c8b15b..19c978051118 100644 --- a/include/asm-sparc/psr.h +++ b/include/asm-sparc/psr.h @@ -38,7 +38,7 @@ #ifndef __ASSEMBLY__ /* Get the %psr register. 
*/ -extern __inline__ unsigned int get_psr(void) +static inline unsigned int get_psr(void) { unsigned int psr; __asm__ __volatile__( @@ -53,7 +53,7 @@ extern __inline__ unsigned int get_psr(void) return psr; } -extern __inline__ void put_psr(unsigned int new_psr) +static inline void put_psr(unsigned int new_psr) { __asm__ __volatile__( "wr %0, 0x0, %%psr\n\t" @@ -72,7 +72,7 @@ extern __inline__ void put_psr(unsigned int new_psr) extern unsigned int fsr_storage; -extern __inline__ unsigned int get_fsr(void) +static inline unsigned int get_fsr(void) { unsigned int fsr = 0; diff --git a/include/asm-sparc/sbi.h b/include/asm-sparc/sbi.h index 739ccac5dcf2..86a603ac7b20 100644 --- a/include/asm-sparc/sbi.h +++ b/include/asm-sparc/sbi.h @@ -65,7 +65,7 @@ struct sbi_regs { #ifndef __ASSEMBLY__ -extern __inline__ int acquire_sbi(int devid, int mask) +static inline int acquire_sbi(int devid, int mask) { __asm__ __volatile__ ("swapa [%2] %3, %0" : "=r" (mask) : @@ -75,7 +75,7 @@ extern __inline__ int acquire_sbi(int devid, int mask) return mask; } -extern __inline__ void release_sbi(int devid, int mask) +static inline void release_sbi(int devid, int mask) { __asm__ __volatile__ ("sta %0, [%1] %2" : : "r" (mask), @@ -83,7 +83,7 @@ extern __inline__ void release_sbi(int devid, int mask) "i" (ASI_M_CTL)); } -extern __inline__ void set_sbi_tid(int devid, int targetid) +static inline void set_sbi_tid(int devid, int targetid) { __asm__ __volatile__ ("sta %0, [%1] %2" : : "r" (targetid), @@ -91,7 +91,7 @@ extern __inline__ void set_sbi_tid(int devid, int targetid) "i" (ASI_M_CTL)); } -extern __inline__ int get_sbi_ctl(int devid, int cfgno) +static inline int get_sbi_ctl(int devid, int cfgno) { int cfg; @@ -102,7 +102,7 @@ extern __inline__ int get_sbi_ctl(int devid, int cfgno) return cfg; } -extern __inline__ void set_sbi_ctl(int devid, int cfgno, int cfg) +static inline void set_sbi_ctl(int devid, int cfgno, int cfg) { __asm__ __volatile__ ("sta %0, [%1] %2" : : "r" (cfg), diff --git a/include/asm-sparc/sbus.h b/include/asm-sparc/sbus.h index 3a8b3908728a..a13cddcecec5 100644 --- a/include/asm-sparc/sbus.h +++ b/include/asm-sparc/sbus.h @@ -28,12 +28,12 @@ * numbers + offsets, and vice versa. 
*/ -extern __inline__ unsigned long sbus_devaddr(int slotnum, unsigned long offset) +static inline unsigned long sbus_devaddr(int slotnum, unsigned long offset) { return (unsigned long) (SUN_SBUS_BVADDR+((slotnum)<<25)+(offset)); } -extern __inline__ int sbus_dev_slot(unsigned long dev_addr) +static inline int sbus_dev_slot(unsigned long dev_addr) { return (int) (((dev_addr)-SUN_SBUS_BVADDR)>>25); } @@ -80,7 +80,7 @@ struct sbus_bus { extern struct sbus_bus *sbus_root; -extern __inline__ int +static inline int sbus_is_slave(struct sbus_dev *dev) { /* XXX Have to write this for sun4c's */ diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h index 4f96d8333a12..580c51d011df 100644 --- a/include/asm-sparc/smp.h +++ b/include/asm-sparc/smp.h @@ -60,22 +60,22 @@ BTFIXUPDEF_BLACKBOX(load_current) #define smp_cross_call(func,arg1,arg2,arg3,arg4,arg5) BTFIXUP_CALL(smp_cross_call)(func,arg1,arg2,arg3,arg4,arg5) #define smp_message_pass(target,msg,data,wait) BTFIXUP_CALL(smp_message_pass)(target,msg,data,wait) -extern __inline__ void xc0(smpfunc_t func) { smp_cross_call(func, 0, 0, 0, 0, 0); } -extern __inline__ void xc1(smpfunc_t func, unsigned long arg1) +static inline void xc0(smpfunc_t func) { smp_cross_call(func, 0, 0, 0, 0, 0); } +static inline void xc1(smpfunc_t func, unsigned long arg1) { smp_cross_call(func, arg1, 0, 0, 0, 0); } -extern __inline__ void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2) +static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2) { smp_cross_call(func, arg1, arg2, 0, 0, 0); } -extern __inline__ void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2, +static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2, unsigned long arg3) { smp_cross_call(func, arg1, arg2, arg3, 0, 0); } -extern __inline__ void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2, +static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2, unsigned long arg3, unsigned long arg4) { smp_cross_call(func, arg1, arg2, arg3, arg4, 0); } -extern __inline__ void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2, +static inline void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5) { smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); } -extern __inline__ int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait) +static inline int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait) { xc1((smpfunc_t)func, (unsigned long)info); return 0; @@ -84,16 +84,16 @@ extern __inline__ int smp_call_function(void (*func)(void *info), void *info, in extern __volatile__ int __cpu_number_map[NR_CPUS]; extern __volatile__ int __cpu_logical_map[NR_CPUS]; -extern __inline__ int cpu_logical_map(int cpu) +static inline int cpu_logical_map(int cpu) { return __cpu_logical_map[cpu]; } -extern __inline__ int cpu_number_map(int cpu) +static inline int cpu_number_map(int cpu) { return __cpu_number_map[cpu]; } -extern __inline__ int hard_smp4m_processor_id(void) +static inline int hard_smp4m_processor_id(void) { int cpuid; @@ -104,7 +104,7 @@ extern __inline__ int hard_smp4m_processor_id(void) return cpuid; } -extern __inline__ int hard_smp4d_processor_id(void) +static inline int hard_smp4d_processor_id(void) { int cpuid; @@ -114,7 +114,7 @@ extern __inline__ int hard_smp4d_processor_id(void) } #ifndef MODULE -extern __inline__ int hard_smp_processor_id(void) +static inline int 
hard_smp_processor_id(void) { int cpuid; @@ -136,7 +136,7 @@ extern __inline__ int hard_smp_processor_id(void) return cpuid; } #else -extern __inline__ int hard_smp_processor_id(void) +static inline int hard_smp_processor_id(void) { int cpuid; diff --git a/include/asm-sparc/smpprim.h b/include/asm-sparc/smpprim.h index 9b9c28ed748e..e7b6d346ae10 100644 --- a/include/asm-sparc/smpprim.h +++ b/include/asm-sparc/smpprim.h @@ -15,7 +15,7 @@ * atomic. */ -extern __inline__ __volatile__ char test_and_set(void *addr) +static inline __volatile__ char test_and_set(void *addr) { char state = 0; @@ -27,7 +27,7 @@ extern __inline__ __volatile__ char test_and_set(void *addr) } /* Initialize a spin-lock. */ -extern __inline__ __volatile__ smp_initlock(void *spinlock) +static inline __volatile__ smp_initlock(void *spinlock) { /* Unset the lock. */ *((unsigned char *) spinlock) = 0; @@ -36,7 +36,7 @@ extern __inline__ __volatile__ smp_initlock(void *spinlock) } /* This routine spins until it acquires the lock at ADDR. */ -extern __inline__ __volatile__ smp_lock(void *addr) +static inline __volatile__ smp_lock(void *addr) { while(test_and_set(addr) == 0xff) ; @@ -46,7 +46,7 @@ extern __inline__ __volatile__ smp_lock(void *addr) } /* This routine releases the lock at ADDR. */ -extern __inline__ __volatile__ smp_unlock(void *addr) +static inline __volatile__ smp_unlock(void *addr) { *((unsigned char *) addr) = 0; } diff --git a/include/asm-sparc/spinlock.h b/include/asm-sparc/spinlock.h index 111727a2bb4e..e344c98a6f5f 100644 --- a/include/asm-sparc/spinlock.h +++ b/include/asm-sparc/spinlock.h @@ -17,7 +17,7 @@ #define __raw_spin_unlock_wait(lock) \ do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) -extern __inline__ void __raw_spin_lock(raw_spinlock_t *lock) +static inline void __raw_spin_lock(raw_spinlock_t *lock) { __asm__ __volatile__( "\n1:\n\t" @@ -37,7 +37,7 @@ extern __inline__ void __raw_spin_lock(raw_spinlock_t *lock) : "g2", "memory", "cc"); } -extern __inline__ int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int __raw_spin_trylock(raw_spinlock_t *lock) { unsigned int result; __asm__ __volatile__("ldstub [%1], %0" @@ -47,7 +47,7 @@ extern __inline__ int __raw_spin_trylock(raw_spinlock_t *lock) return (result == 0); } -extern __inline__ void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void __raw_spin_unlock(raw_spinlock_t *lock) { __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); } @@ -78,7 +78,7 @@ extern __inline__ void __raw_spin_unlock(raw_spinlock_t *lock) * * Unfortunately this scheme limits us to ~16,000,000 cpus. 
*/ -extern __inline__ void __read_lock(raw_rwlock_t *rw) +static inline void __read_lock(raw_rwlock_t *rw) { register raw_rwlock_t *lp asm("g1"); lp = rw; @@ -98,7 +98,7 @@ do { unsigned long flags; \ local_irq_restore(flags); \ } while(0) -extern __inline__ void __read_unlock(raw_rwlock_t *rw) +static inline void __read_unlock(raw_rwlock_t *rw) { register raw_rwlock_t *lp asm("g1"); lp = rw; diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h index 3557781a4bfd..1f6b71f9e1b6 100644 --- a/include/asm-sparc/system.h +++ b/include/asm-sparc/system.h @@ -204,7 +204,7 @@ static inline unsigned long getipl(void) BTFIXUPDEF_CALL(void, ___xchg32, void) #endif -extern __inline__ unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val) +static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val) { #ifdef CONFIG_SMP __asm__ __volatile__("swap [%2], %0" diff --git a/include/asm-sparc/traps.h b/include/asm-sparc/traps.h index 6690ab956ea6..f62c7f878ee1 100644 --- a/include/asm-sparc/traps.h +++ b/include/asm-sparc/traps.h @@ -22,7 +22,7 @@ struct tt_entry { /* We set this to _start in system setup. */ extern struct tt_entry *sparc_ttable; -extern __inline__ unsigned long get_tbr(void) +static inline unsigned long get_tbr(void) { unsigned long tbr; diff --git a/include/asm-sparc64/cacheflush.h b/include/asm-sparc64/cacheflush.h index ededd2659eab..b3f61659ba81 100644 --- a/include/asm-sparc64/cacheflush.h +++ b/include/asm-sparc64/cacheflush.h @@ -66,6 +66,11 @@ extern void flush_ptrace_access(struct vm_area_struct *, struct page *, #define flush_cache_vmap(start, end) do { } while (0) #define flush_cache_vunmap(start, end) do { } while (0) +#ifdef CONFIG_DEBUG_PAGEALLOC +/* internal debugging function */ +void kernel_map_pages(struct page *page, int numpages, int enable); +#endif + #endif /* !__ASSEMBLY__ */ #endif /* _SPARC64_CACHEFLUSH_H */ diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index 9a3a81f1cc58..74de79dca915 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -22,6 +22,16 @@ typedef struct { unsigned int __pad1; unsigned long *pte_cache[2]; unsigned long *pgd_cache; + + /* Dcache line 3, rarely used */ + unsigned int dcache_size; + unsigned int dcache_line_size; + unsigned int icache_size; + unsigned int icache_line_size; + unsigned int ecache_size; + unsigned int ecache_line_size; + unsigned int __pad2; + unsigned int __pad3; } cpuinfo_sparc; DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h index 1c5da41653a4..c7d5804ba76d 100644 --- a/include/asm-sparc64/dma-mapping.h +++ b/include/asm-sparc64/dma-mapping.h @@ -10,7 +10,7 @@ struct device; static inline void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, int flag) + dma_addr_t *dma_handle, gfp_t flag) { BUG(); return NULL; diff --git a/include/asm-sparc64/head.h b/include/asm-sparc64/head.h index b63a33cf4971..0abd3a674e8f 100644 --- a/include/asm-sparc64/head.h +++ b/include/asm-sparc64/head.h @@ -12,9 +12,12 @@ #define __JALAPENO_ID 0x003e0016 #define CHEETAH_MANUF 0x003e -#define CHEETAH_IMPL 0x0014 -#define CHEETAH_PLUS_IMPL 0x0015 -#define JALAPENO_IMPL 0x0016 +#define CHEETAH_IMPL 0x0014 /* Ultra-III */ +#define CHEETAH_PLUS_IMPL 0x0015 /* Ultra-III+ */ +#define JALAPENO_IMPL 0x0016 /* Ultra-IIIi */ +#define JAGUAR_IMPL 0x0018 /* Ultra-IV */ +#define PANTHER_IMPL 0x0019 /* Ultra-IV+ */ +#define SERRANO_IMPL 
0x0022 /* Ultra-IIIi+ */ #define BRANCH_IF_CHEETAH_BASE(tmp1,tmp2,label) \ rdpr %ver, %tmp1; \ diff --git a/include/asm-sparc64/openprom.h b/include/asm-sparc64/openprom.h index 0a336901d585..b4959d2b0d99 100644 --- a/include/asm-sparc64/openprom.h +++ b/include/asm-sparc64/openprom.h @@ -186,8 +186,8 @@ struct linux_prom_registers { }; struct linux_prom64_registers { - long phys_addr; - long reg_size; + unsigned long phys_addr; + unsigned long reg_size; }; struct linux_prom_irqs { diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h index a432d9e7daaa..d02f1e8ae1a6 100644 --- a/include/asm-sparc64/oplib.h +++ b/include/asm-sparc64/oplib.h @@ -38,6 +38,20 @@ extern int prom_stdin, prom_stdout; */ extern int prom_chosen_node; +/* Helper values and strings in arch/sparc64/kernel/head.S */ +extern const char prom_finddev_name[]; +extern const char prom_chosen_path[]; +extern const char prom_getprop_name[]; +extern const char prom_mmu_name[]; +extern const char prom_callmethod_name[]; +extern const char prom_translate_name[]; +extern const char prom_map_name[]; +extern const char prom_unmap_name[]; +extern int prom_mmu_ihandle_cache; +extern unsigned int prom_boot_mapped_pc; +extern unsigned int prom_boot_mapping_mode; +extern unsigned long prom_boot_mapping_phys_high, prom_boot_mapping_phys_low; + struct linux_mlist_p1275 { struct linux_mlist_p1275 *theres_more; unsigned long start_adr; @@ -68,7 +82,7 @@ extern char *prom_getbootargs(void); * of the string is different on V0 vs. V2->higher proms. The caller must * know what he/she is doing! Returns the device descriptor, an int. */ -extern int prom_devopen(char *device_string); +extern int prom_devopen(const char *device_string); /* Close a previously opened device described by the passed integer * descriptor. @@ -81,27 +95,13 @@ extern int prom_devclose(int device_handle); extern void prom_seek(int device_handle, unsigned int seek_hival, unsigned int seek_lowval); -/* Machine memory configuration routine. */ - -/* This function returns a V0 format memory descriptor table, it has three - * entries. One for the total amount of physical ram on the machine, one - * for the amount of physical ram available, and one describing the virtual - * areas which are allocated by the prom. So, in a sense the physical - * available is a calculation of the total physical minus the physical mapped - * by the prom with virtual mappings. - * - * These lists are returned pre-sorted, this should make your life easier - * since the prom itself is way too lazy to do such nice things. - */ -extern struct linux_mem_p1275 *prom_meminfo(void); - /* Miscellaneous routines, don't really fit in any category per se. */ /* Reboot the machine with the command line passed. */ -extern void prom_reboot(char *boot_command); +extern void prom_reboot(const char *boot_command); /* Evaluate the forth string passed. */ -extern void prom_feval(char *forth_string); +extern void prom_feval(const char *forth_string); /* Enter the prom, with possibility of continuation with the 'go' * command in newer proms. @@ -154,7 +154,7 @@ extern char prom_getchar(void); extern void prom_putchar(char character); /* Prom's internal routines, don't use in kernel/boot code. 
*/ -extern void prom_printf(char *fmt, ...); +extern void prom_printf(const char *fmt, ...); extern void prom_write(const char *buf, unsigned int len); /* Query for input device type */ @@ -215,7 +215,7 @@ extern int prom_getunumber(int syndrome_code, char *buf, int buflen); /* Retain physical memory to the caller across soft resets. */ -extern unsigned long prom_retain(char *name, +extern unsigned long prom_retain(const char *name, unsigned long pa_low, unsigned long pa_high, long size, long align); @@ -269,28 +269,28 @@ extern int prom_getsibling(int node); /* Get the length, at the passed node, of the given property type. * Returns -1 on error (ie. no such property at this node). */ -extern int prom_getproplen(int thisnode, char *property); +extern int prom_getproplen(int thisnode, const char *property); /* Fetch the requested property using the given buffer. Returns * the number of bytes the prom put into your buffer or -1 on error. */ -extern int prom_getproperty(int thisnode, char *property, +extern int prom_getproperty(int thisnode, const char *property, char *prop_buffer, int propbuf_size); /* Acquire an integer property. */ -extern int prom_getint(int node, char *property); +extern int prom_getint(int node, const char *property); /* Acquire an integer property, with a default value. */ -extern int prom_getintdefault(int node, char *property, int defval); +extern int prom_getintdefault(int node, const char *property, int defval); /* Acquire a boolean property, 0=FALSE 1=TRUE. */ -extern int prom_getbool(int node, char *prop); +extern int prom_getbool(int node, const char *prop); /* Acquire a string property, null string on error. */ -extern void prom_getstring(int node, char *prop, char *buf, int bufsize); +extern void prom_getstring(int node, const char *prop, char *buf, int bufsize); /* Does the passed node have the given "name"? YES=1 NO=0 */ -extern int prom_nodematch(int thisnode, char *name); +extern int prom_nodematch(int thisnode, const char *name); /* Puts in buffer a prom name in the form name@x,y or name (x for which_io * and y for first regs phys address @@ -300,7 +300,7 @@ extern int prom_getname(int node, char *buf, int buflen); /* Search all siblings starting at the passed node for "name" matching * the given string. Returns the node on success, zero on failure. */ -extern int prom_searchsiblings(int node_start, char *name); +extern int prom_searchsiblings(int node_start, const char *name); /* Return the first property type, as a string, for the given node. * Returns a null string on error. Buffer should be at least 32B long. @@ -310,21 +310,21 @@ extern char *prom_firstprop(int node, char *buffer); /* Returns the next property after the passed property for the given * node. Returns null string on failure. Buffer should be at least 32B long. */ -extern char *prom_nextprop(int node, char *prev_property, char *buffer); +extern char *prom_nextprop(int node, const char *prev_property, char *buffer); /* Returns 1 if the specified node has given property. */ -extern int prom_node_has_property(int node, char *property); +extern int prom_node_has_property(int node, const char *property); /* Returns phandle of the path specified */ -extern int prom_finddevice(char *name); +extern int prom_finddevice(const char *name); /* Set the indicated property at the given node with the passed value. * Returns the number of bytes of your value that the prom took. 
*/ -extern int prom_setprop(int node, char *prop_name, char *prop_value, +extern int prom_setprop(int node, const char *prop_name, char *prop_value, int value_size); -extern int prom_pathtoinode(char *path); +extern int prom_pathtoinode(const char *path); extern int prom_inst2pkg(int); /* CPU probing helpers. */ @@ -334,7 +334,7 @@ int cpu_find_by_mid(int mid, int *prom_node); /* Client interface level routines. */ extern void prom_set_trap_table(unsigned long tba); -extern long p1275_cmd (char *, long, ...); +extern long p1275_cmd(const char *, long, ...); #if 0 diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h index 7f8d764abc47..5426bb28a993 100644 --- a/include/asm-sparc64/page.h +++ b/include/asm-sparc64/page.h @@ -140,23 +140,6 @@ extern unsigned long page_to_pfn(struct page *); #define virt_to_phys __pa #define phys_to_virt __va -/* The following structure is used to hold the physical - * memory configuration of the machine. This is filled in - * probe_memory() and is later used by mem_init() to set up - * mem_map[]. We statically allocate SPARC_PHYS_BANKS of - * these structs, this is arbitrary. The entry after the - * last valid one has num_bytes==0. - */ - -struct sparc_phys_banks { - unsigned long base_addr; - unsigned long num_bytes; -}; - -#define SPARC_PHYS_BANKS 32 - -extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS]; - #endif /* !(__ASSEMBLY__) */ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h index 38bbbccb4068..dd35a2c7798a 100644 --- a/include/asm-sparc64/pbm.h +++ b/include/asm-sparc64/pbm.h @@ -27,23 +27,27 @@ * PCI bus. */ -#define PBM_LOGCLUSTERS 3 -#define PBM_NCLUSTERS (1 << PBM_LOGCLUSTERS) - struct pci_controller_info; /* This contains the software state necessary to drive a PCI * controller's IOMMU. */ +struct pci_iommu_arena { + unsigned long *map; + unsigned int hint; + unsigned int limit; +}; + struct pci_iommu { /* This protects the controller's IOMMU and all * streaming buffers underneath. */ spinlock_t lock; + struct pci_iommu_arena arena; + /* IOMMU page table, a linear array of ioptes. */ iopte_t *page_table; /* The page table itself. */ - int page_table_sz_bits; /* log2 of ow many pages does it map? */ /* Base PCI memory space address where IOMMU mappings * begin. @@ -62,12 +66,6 @@ struct pci_iommu { */ unsigned long write_complete_reg; - /* The lowest used consistent mapping entry. Since - * we allocate consistent maps out of cluster 0 this - * is relative to the beginning of closter 0. - */ - u32 lowest_consistent_map; - /* In order to deal with some buggy third-party PCI bridges that * do wrong prefetching, we never mark valid mappings as invalid. * Instead we point them at this dummy page. @@ -75,16 +73,6 @@ struct pci_iommu { unsigned long dummy_page; unsigned long dummy_page_pa; - /* If PBM_NCLUSTERS is ever decreased to 4 or lower, - * or if largest supported page_table_sz * 8K goes above - * 2GB, you must increase the size of the type of - * these counters. You have been duly warned. -DaveM - */ - struct { - u16 next; - u16 flush; - } alloc_info[PBM_NCLUSTERS]; - /* CTX allocation. 
*/ unsigned long ctx_lowest_free; unsigned long ctx_bitmap[IOMMU_NUM_CTXS / (sizeof(unsigned long) * 8)]; @@ -102,7 +90,7 @@ struct pci_iommu { u32 dma_addr_mask; }; -extern void pci_iommu_table_init(struct pci_iommu *, int); +extern void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask); /* This describes a PCI bus module's streaming buffer. */ struct pci_strbuf { diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index a297f6144f0f..8c6dfc6c7af6 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -60,13 +60,13 @@ * table can map */ #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3)) -#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) #define PMD_BITS (PAGE_SHIFT - 2) /* PGDIR_SHIFT determines what a third-level page table entry can map */ #define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS) -#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) #define PGDIR_BITS (PAGE_SHIFT - 2) @@ -98,7 +98,9 @@ #define _PAGE_NFO _AC(0x1000000000000000,UL) /* No Fault Only */ #define _PAGE_IE _AC(0x0800000000000000,UL) /* Invert Endianness */ #define _PAGE_SOFT2 _AC(0x07FC000000000000,UL) /* Software bits, set 2 */ -#define _PAGE_RES1 _AC(0x0003000000000000,UL) /* Reserved */ +#define _PAGE_RES1 _AC(0x0002000000000000,UL) /* Reserved */ +#define _PAGE_SZ32MB _AC(0x0001000000000000,UL) /* (Panther) 32MB page */ +#define _PAGE_SZ256MB _AC(0x2001000000000000,UL) /* (Panther) 256MB page */ #define _PAGE_SN _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */ #define _PAGE_RES2 _AC(0x0000780000000000,UL) /* Reserved */ #define _PAGE_PADDR_SF _AC(0x000001FFFFFFE000,UL) /* (Spitfire) paddr[40:13]*/ @@ -336,7 +338,11 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *p #define pte_clear(mm,addr,ptep) \ set_pte_at((mm), (addr), (ptep), __pte(0UL)) -extern pgd_t swapper_pg_dir[1]; +extern pgd_t swapper_pg_dir[2048]; +extern pmd_t swapper_low_pmd_dir[2048]; + +extern void paging_init(void); +extern unsigned long find_ecache_flush_span(unsigned long size); /* These do nothing with the way I have things setup. */ #define mmu_lockarea(vaddr, len) (vaddr) diff --git a/include/asm-sparc64/uaccess.h b/include/asm-sparc64/uaccess.h index 80a65d7e3dbf..203e8eee6351 100644 --- a/include/asm-sparc64/uaccess.h +++ b/include/asm-sparc64/uaccess.h @@ -70,26 +70,14 @@ static inline int access_ok(int type, const void __user * addr, unsigned long si * with the main instruction path. This means when everything is well, * we don't even have to jump over them. Further, they do not intrude * on our cache or tlb entries. - * - * There is a special way how to put a range of potentially faulting - * insns (like twenty ldd/std's with now intervening other instructions) - * You specify address of first in insn and 0 in fixup and in the next - * exception_table_entry you specify last potentially faulting insn + 1 - * and in fixup the routine which should handle the fault. - * That fixup code will get - * (faulting_insn_address - first_insn_in_the_range_address)/4 - * in %g2 (ie. index of the faulting instruction in the range). */ -struct exception_table_entry -{ - unsigned insn, fixup; +struct exception_table_entry { + unsigned int insn, fixup; }; -/* Special exable search, which handles ranges. 
Returns fixup */ -unsigned long search_extables_range(unsigned long addr, unsigned long *g2); - extern void __ret_efault(void); +extern void __retl_efault(void); /* Uh, these should become the main single-value transfer routines.. * They automatically use the right size if we just have the right @@ -263,7 +251,7 @@ copy_from_user(void *to, const void __user *from, unsigned long size) { unsigned long ret = ___copy_from_user(to, from, size); - if (ret) + if (unlikely(ret)) ret = copy_from_user_fixup(to, from, size); return ret; } @@ -279,7 +267,7 @@ copy_to_user(void __user *to, const void *from, unsigned long size) { unsigned long ret = ___copy_to_user(to, from, size); - if (ret) + if (unlikely(ret)) ret = copy_to_user_fixup(to, from, size); return ret; } @@ -295,7 +283,7 @@ copy_in_user(void __user *to, void __user *from, unsigned long size) { unsigned long ret = ___copy_in_user(to, from, size); - if (ret) + if (unlikely(ret)) ret = copy_in_user_fixup(to, from, size); return ret; } diff --git a/include/asm-um/dma-mapping.h b/include/asm-um/dma-mapping.h index 13e6291f7151..babd29895114 100644 --- a/include/asm-um/dma-mapping.h +++ b/include/asm-um/dma-mapping.h @@ -19,7 +19,7 @@ dma_set_mask(struct device *dev, u64 dma_mask) static inline void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, - int flag) + gfp_t flag) { BUG(); return((void *) 0); diff --git a/include/asm-um/page.h b/include/asm-um/page.h index 2c192abe9aeb..0229814af31e 100644 --- a/include/asm-um/page.h +++ b/include/asm-um/page.h @@ -115,7 +115,7 @@ extern unsigned long uml_physmem; #define pfn_valid(pfn) ((pfn) < max_mapnr) #define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v))) -extern struct page *arch_validate(struct page *page, int mask, int order); +extern struct page *arch_validate(struct page *page, gfp_t mask, int order); #define HAVE_ARCH_VALIDATE extern void arch_free_page(struct page *page, int order); diff --git a/include/asm-um/pgtable.h b/include/asm-um/pgtable.h index ed06170e0edd..616d02b57ea9 100644 --- a/include/asm-um/pgtable.h +++ b/include/asm-um/pgtable.h @@ -346,7 +346,6 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot); - if(pte_present(pte)) pte = pte_mknewpage(pte_mknewprot(pte)); return pte; } diff --git a/include/asm-um/processor-generic.h b/include/asm-um/processor-generic.h index 2d242360c3d6..075771c371f6 100644 --- a/include/asm-um/processor-generic.h +++ b/include/asm-um/processor-generic.h @@ -13,6 +13,7 @@ struct task_struct; #include "linux/config.h" #include "asm/ptrace.h" #include "choose-mode.h" +#include "registers.h" struct mm_struct; @@ -136,19 +137,15 @@ extern struct cpuinfo_um cpu_data[]; #define current_cpu_data boot_cpu_data #endif -#define KSTK_EIP(tsk) (PT_REGS_IP(&tsk->thread.regs)) -#define KSTK_ESP(tsk) (PT_REGS_SP(&tsk->thread.regs)) + +#ifdef CONFIG_MODE_SKAS +#define KSTK_REG(tsk, reg) \ + ({ union uml_pt_regs regs; \ + get_thread_regs(®s, tsk->thread.mode.skas.switch_buf); \ + UPT_REG(®s, reg); }) +#else +#define KSTK_REG(tsk, reg) (0xbadbabe) +#endif #define get_wchan(p) (0) #endif - -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. 
- * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/include/asm-um/processor-i386.h b/include/asm-um/processor-i386.h index 431bad3ae9d7..4108a579eb92 100644 --- a/include/asm-um/processor-i386.h +++ b/include/asm-um/processor-i386.h @@ -43,17 +43,10 @@ static inline void rep_nop(void) #define ARCH_IS_STACKGROW(address) \ (address + 32 >= UPT_SP(¤t->thread.regs.regs)) +#define KSTK_EIP(tsk) KSTK_REG(tsk, EIP) +#define KSTK_ESP(tsk) KSTK_REG(tsk, UESP) +#define KSTK_EBP(tsk) KSTK_REG(tsk, EBP) + #include "asm/processor-generic.h" #endif - -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/include/asm-um/processor-x86_64.h b/include/asm-um/processor-x86_64.h index 0beb9a42ae05..e1e1255a1d36 100644 --- a/include/asm-um/processor-x86_64.h +++ b/include/asm-um/processor-x86_64.h @@ -36,17 +36,9 @@ extern inline void rep_nop(void) #define ARCH_IS_STACKGROW(address) \ (address + 128 >= UPT_SP(¤t->thread.regs.regs)) +#define KSTK_EIP(tsk) KSTK_REG(tsk, RIP) +#define KSTK_ESP(tsk) KSTK_REG(tsk, RSP) + #include "asm/processor-generic.h" #endif - -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only. This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-file-style: "linux" - * End: - */ diff --git a/include/asm-um/uaccess.h b/include/asm-um/uaccess.h index 801710d00a40..2ee028b8de9d 100644 --- a/include/asm-um/uaccess.h +++ b/include/asm-um/uaccess.h @@ -44,7 +44,7 @@ const __typeof__(ptr) __private_ptr = ptr; \ __typeof__(*(__private_ptr)) __private_val; \ int __private_ret = -EFAULT; \ - (x) = 0; \ + (x) = (__typeof__(*(__private_ptr)))0; \ if (__copy_from_user(&__private_val, (__private_ptr), \ sizeof(*(__private_ptr))) == 0) {\ (x) = (__typeof__(*(__private_ptr))) __private_val; \ diff --git a/include/asm-x86_64/dma-mapping.h b/include/asm-x86_64/dma-mapping.h index e784fdc524f1..54a380efed41 100644 --- a/include/asm-x86_64/dma-mapping.h +++ b/include/asm-x86_64/dma-mapping.h @@ -17,7 +17,7 @@ extern dma_addr_t bad_dma_address; (swiotlb ? 
swiotlb_dma_mapping_error(x) : ((x) == bad_dma_address)) void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, - unsigned gfp); + gfp_t gfp); void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h index 4d727f3f5550..5a7fe3c6c3d8 100644 --- a/include/asm-x86_64/msr.h +++ b/include/asm-x86_64/msr.h @@ -234,6 +234,7 @@ static inline unsigned int cpuid_edx(unsigned int op) #define MSR_K8_TOP_MEM1 0xC001001A #define MSR_K8_TOP_MEM2 0xC001001D #define MSR_K8_SYSCFG 0xC0010010 +#define MSR_K8_HWCR 0xC0010015 /* K6 MSRs */ #define MSR_K6_EFER 0xC0000080 diff --git a/include/asm-x86_64/pci.h b/include/asm-x86_64/pci.h index 5a82a6762c21..eeb3088a1c9e 100644 --- a/include/asm-x86_64/pci.h +++ b/include/asm-x86_64/pci.h @@ -50,10 +50,10 @@ extern int iommu_setup(char *opt); * address space. The networking and block device layers use * this boolean for bounce buffer decisions * - * On x86-64 it mostly equals, but we set it to zero to tell some subsystems - * that an hard or soft IOMMU is available. + * On AMD64 it mostly equals, but we set it to zero to tell some subsystems + * that an IOMMU is available. */ -#define PCI_DMA_BUS_IS_PHYS 0 +#define PCI_DMA_BUS_IS_PHYS (no_iommu ? 1 : 0) /* * x86-64 always supports DAC, but sometimes it is useful to force diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h index 2cb483516459..dd8711ecaf2f 100644 --- a/include/asm-x86_64/pgtable.h +++ b/include/asm-x86_64/pgtable.h @@ -384,7 +384,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) } #define pte_index(address) \ - ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) #define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \ pte_index(address)) diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h index 24e32611f0bf..c57ce4071342 100644 --- a/include/asm-x86_64/smp.h +++ b/include/asm-x86_64/smp.h @@ -81,6 +81,7 @@ static inline int hard_smp_processor_id(void) extern int safe_smp_processor_id(void); extern int __cpu_disable(void); extern void __cpu_die(unsigned int cpu); +extern void prefill_possible_map(void); #endif /* !ASSEMBLY */ diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h index 36293061f4ed..7cbfd10ecc3c 100644 --- a/include/asm-x86_64/swiotlb.h +++ b/include/asm-x86_64/swiotlb.h @@ -27,7 +27,7 @@ extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction); extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr); extern void *swiotlb_alloc_coherent (struct device *hwdev, size_t size, - dma_addr_t *dma_handle, int flags); + dma_addr_t *dma_handle, gfp_t flags); extern void swiotlb_free_coherent (struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle); diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h index 24f86f0e43cf..12b5732dc6e5 100644 --- a/include/asm-xtensa/atomic.h +++ b/include/asm-xtensa/atomic.h @@ -22,7 +22,7 @@ typedef struct { volatile int counter; } atomic_t; #include #include -#define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) +#define ATOMIC_INIT(i) { (i) } /* * This Xtensa implementation assumes that the right mechanism diff --git a/include/asm-xtensa/bitops.h b/include/asm-xtensa/bitops.h index d395ef226c32..e76ee889e21d 100644 --- a/include/asm-xtensa/bitops.h +++ b/include/asm-xtensa/bitops.h @@ -174,7 +174,7 @@ static __inline__ int 
test_bit(int nr, const volatile void *addr) return 1UL & (((const volatile unsigned int *)addr)[nr>>5] >> (nr&31)); } -#if XCHAL_HAVE_NSAU +#if XCHAL_HAVE_NSA static __inline__ int __cntlz (unsigned long x) { diff --git a/include/asm-xtensa/dma-mapping.h b/include/asm-xtensa/dma-mapping.h index e86a206f1209..c425f10d086a 100644 --- a/include/asm-xtensa/dma-mapping.h +++ b/include/asm-xtensa/dma-mapping.h @@ -28,7 +28,7 @@ extern void consistent_sync(void*, size_t, int); #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) void *dma_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, int flag); + dma_addr_t *dma_handle, gfp_t flag); void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); diff --git a/include/asm-xtensa/hardirq.h b/include/asm-xtensa/hardirq.h index e07c76c36b95..aa9c1adf68d7 100644 --- a/include/asm-xtensa/hardirq.h +++ b/include/asm-xtensa/hardirq.h @@ -23,6 +23,7 @@ typedef struct { unsigned int __nmi_count; /* arch dependent */ } ____cacheline_aligned irq_cpustat_t; +void ack_bad_irq(unsigned int irq); #include /* Standard mappings for irq_cpustat_t above */ #endif /* _XTENSA_HARDIRQ_H */ diff --git a/include/asm-xtensa/semaphore.h b/include/asm-xtensa/semaphore.h index db740b8bc6f0..09e89ab3eb61 100644 --- a/include/asm-xtensa/semaphore.h +++ b/include/asm-xtensa/semaphore.h @@ -20,28 +20,19 @@ struct semaphore { atomic_t count; int sleepers; wait_queue_head_t wait; -#if WAITQUEUE_DEBUG - long __magic; -#endif }; -#if WAITQUEUE_DEBUG -# define __SEM_DEBUG_INIT(name) \ - , (int)&(name).__magic -#else -# define __SEM_DEBUG_INIT(name) -#endif +#define __SEMAPHORE_INITIALIZER(name,n) \ +{ \ + .count = ATOMIC_INIT(n), \ + .sleepers = 0, \ + .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ +} -#define __SEMAPHORE_INITIALIZER(name,count) \ - { ATOMIC_INIT(count), \ - 0, \ - __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ - __SEM_DEBUG_INIT(name) } - -#define __MUTEX_INITIALIZER(name) \ +#define __MUTEX_INITIALIZER(name) \ __SEMAPHORE_INITIALIZER(name, 1) -#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ +#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) @@ -49,17 +40,8 @@ struct semaphore { static inline void sema_init (struct semaphore *sem, int val) { -/* - * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); - * - * i'd rather use the more flexible initialization above, but sadly - * GCC 2.7.2.3 emits a bogus warning. EGCS doesnt. Oh well. 
- */ atomic_set(&sem->count, val); init_waitqueue_head(&sem->wait); -#if WAITQUEUE_DEBUG - sem->__magic = (int)&sem->__magic; -#endif } static inline void init_MUTEX (struct semaphore *sem) @@ -81,9 +63,7 @@ extern spinlock_t semaphore_wake_lock; static inline void down(struct semaphore * sem) { -#if WAITQUEUE_DEBUG - CHECK_MAGIC(sem->__magic); -#endif + might_sleep(); if (atomic_sub_return(1, &sem->count) < 0) __down(sem); @@ -92,9 +72,8 @@ static inline void down(struct semaphore * sem) static inline int down_interruptible(struct semaphore * sem) { int ret = 0; -#if WAITQUEUE_DEBUG - CHECK_MAGIC(sem->__magic); -#endif + + might_sleep(); if (atomic_sub_return(1, &sem->count) < 0) ret = __down_interruptible(sem); @@ -104,9 +83,6 @@ static inline int down_interruptible(struct semaphore * sem) static inline int down_trylock(struct semaphore * sem) { int ret = 0; -#if WAITQUEUE_DEBUG - CHECK_MAGIC(sem->__magic); -#endif if (atomic_sub_return(1, &sem->count) < 0) ret = __down_trylock(sem); @@ -119,9 +95,6 @@ static inline int down_trylock(struct semaphore * sem) */ static inline void up(struct semaphore * sem) { -#if WAITQUEUE_DEBUG - CHECK_MAGIC(sem->__magic); -#endif if (atomic_add_return(1, &sem->count) <= 0) __up(sem); } diff --git a/include/asm-xtensa/system.h b/include/asm-xtensa/system.h index f09393232e5e..9284867f1cb9 100644 --- a/include/asm-xtensa/system.h +++ b/include/asm-xtensa/system.h @@ -189,20 +189,6 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val) #define tas(ptr) (xchg((ptr),1)) -#if ( __XCC__ == 1 ) - -/* xt-xcc processes __inline__ differently than xt-gcc and decides to - * insert an out-of-line copy of function __xchg. This presents the - * unresolved symbol at link time of __xchg_called_with_bad_pointer, - * even though such a function would never be called at run-time. - * xt-gcc always inlines __xchg, and optimizes away the undefined - * bad_pointer function. - */ - -#define xchg(ptr,x) xchg_u32(ptr,x) - -#else /* assume xt-gcc */ - #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) /* @@ -224,8 +210,6 @@ __xchg(unsigned long x, volatile void * ptr, int size) return x; } -#endif - extern void set_except_vector(int n, void *addr); static inline void spill_registers(void) diff --git a/include/linux/acct.h b/include/linux/acct.h index 1993a3691768..19f70462b3be 100644 --- a/include/linux/acct.h +++ b/include/linux/acct.h @@ -162,13 +162,13 @@ typedef struct acct acct_t; #ifdef __KERNEL__ /* * Yet another set of HZ to *HZ helper functions. - * See for the original. + * See for the original. */ static inline u32 jiffies_to_AHZ(unsigned long x) { #if (TICK_NSEC % (NSEC_PER_SEC / AHZ)) == 0 - return x / (HZ / USER_HZ); + return x / (HZ / AHZ); #else u64 tmp = (u64)x * TICK_NSEC; do_div(tmp, (NSEC_PER_SEC / AHZ)); diff --git a/include/linux/aio.h b/include/linux/aio.h index a4d5af907f90..0decf66117c1 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h @@ -24,7 +24,12 @@ struct kioctx; #define KIOCB_SYNC_KEY (~0U) /* ki_flags bits */ -#define KIF_LOCKED 0 +/* + * This may be used for cancel/retry serialization in the future, but + * for now it's unused and we probably don't want modules to even + * think they can use it. 
+ */ +/* #define KIF_LOCKED 0 */ #define KIF_KICKED 1 #define KIF_CANCELLED 2 @@ -43,6 +48,40 @@ struct kioctx; #define kiocbIsKicked(iocb) test_bit(KIF_KICKED, &(iocb)->ki_flags) #define kiocbIsCancelled(iocb) test_bit(KIF_CANCELLED, &(iocb)->ki_flags) +/* is there a better place to document function pointer methods? */ +/** + * ki_retry - iocb forward progress callback + * @kiocb: The kiocb struct to advance by performing an operation. + * + * This callback is called when the AIO core wants a given AIO operation + * to make forward progress. The kiocb argument describes the operation + * that is to be performed. As the operation proceeds, perhaps partially, + * ki_retry is expected to update the kiocb with progress made. Typically + * ki_retry is set in the AIO core and it itself calls file_operations + * helpers. + * + * ki_retry's return value determines when the AIO operation is completed + * and an event is generated in the AIO event ring. Except the special + * return values described below, the value that is returned from ki_retry + * is transferred directly into the completion ring as the operation's + * resulting status. Once this has happened ki_retry *MUST NOT* reference + * the kiocb pointer again. + * + * If ki_retry returns -EIOCBQUEUED it has made a promise that aio_complete() + * will be called on the kiocb pointer in the future. The AIO core will + * not ask the method again -- ki_retry must ensure forward progress. + * aio_complete() must be called once and only once in the future, multiple + * calls may result in undefined behaviour. + * + * If ki_retry returns -EIOCBRETRY it has made a promise that kick_iocb() + * will be called on the kiocb pointer in the future. This may happen + * through generic helpers that associate kiocb->ki_wait with a wait + * queue head that ki_retry uses via current->io_wait. It can also happen + * with custom tracking and manual calls to kick_iocb(), though that is + * discouraged. In either case, kick_iocb() must be called once and only + * once. ki_retry must ensure forward progress, the AIO core will wait + * indefinitely for kick_iocb() to be called. 
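 *
 * Purely as an illustrative sketch (not taken from this patch; the
 * example_try_io() helper is hypothetical), a ki_retry method has
 * roughly the following shape.  Here example_try_io() is assumed to
 * return bytes transferred, -EIOCBQUEUED when aio_complete() will be
 * called later, or -EAGAIN when the operation must wait:
 *
 *	static ssize_t example_retry(struct kiocb *iocb)
 *	{
 *		ssize_t ret = example_try_io(iocb);
 *
 *		if (ret == -EAGAIN)
 *			return -EIOCBRETRY;
 *		return ret;
 *	}
 *
 * Returning -EIOCBRETRY commits the method to arranging a later
 * kick_iocb() call; any other value becomes the operation's final
 * status in the completion ring, as described above.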
+ */ struct kiocb { struct list_head ki_run_list; long ki_flags; diff --git a/include/linux/ata.h b/include/linux/ata.h index a5b74efab067..d2873b732bb1 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -42,13 +42,18 @@ enum { ATA_SECT_SIZE = 512, ATA_ID_WORDS = 256, - ATA_ID_PROD_OFS = 27, - ATA_ID_FW_REV_OFS = 23, ATA_ID_SERNO_OFS = 10, - ATA_ID_MAJOR_VER = 80, - ATA_ID_PIO_MODES = 64, + ATA_ID_FW_REV_OFS = 23, + ATA_ID_PROD_OFS = 27, + ATA_ID_OLD_PIO_MODES = 51, + ATA_ID_FIELD_VALID = 53, ATA_ID_MWDMA_MODES = 63, + ATA_ID_PIO_MODES = 64, + ATA_ID_EIDE_DMA_MIN = 65, + ATA_ID_EIDE_PIO = 67, + ATA_ID_EIDE_PIO_IORDY = 68, ATA_ID_UDMA_MODES = 88, + ATA_ID_MAJOR_VER = 80, ATA_ID_PIO4 = (1 << 1), ATA_PCI_CTL_OFS = 2, @@ -128,10 +133,15 @@ enum { ATA_CMD_PIO_READ_EXT = 0x24, ATA_CMD_PIO_WRITE = 0x30, ATA_CMD_PIO_WRITE_EXT = 0x34, + ATA_CMD_READ_MULTI = 0xC4, + ATA_CMD_READ_MULTI_EXT = 0x29, + ATA_CMD_WRITE_MULTI = 0xC5, + ATA_CMD_WRITE_MULTI_EXT = 0x39, ATA_CMD_SET_FEATURES = 0xEF, ATA_CMD_PACKET = 0xA0, ATA_CMD_VERIFY = 0x40, ATA_CMD_VERIFY_EXT = 0x42, + ATA_CMD_INIT_DEV_PARAMS = 0x91, /* SETFEATURES stuff */ SETFEATURES_XFER = 0x03, @@ -146,14 +156,14 @@ enum { XFER_MW_DMA_2 = 0x22, XFER_MW_DMA_1 = 0x21, XFER_MW_DMA_0 = 0x20, + XFER_SW_DMA_2 = 0x12, + XFER_SW_DMA_1 = 0x11, + XFER_SW_DMA_0 = 0x10, XFER_PIO_4 = 0x0C, XFER_PIO_3 = 0x0B, XFER_PIO_2 = 0x0A, XFER_PIO_1 = 0x09, XFER_PIO_0 = 0x08, - XFER_SW_DMA_2 = 0x12, - XFER_SW_DMA_1 = 0x11, - XFER_SW_DMA_0 = 0x10, XFER_PIO_SLOW = 0x00, /* ATAPI stuff */ @@ -181,6 +191,7 @@ enum { ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */ ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */ ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */ + ATA_TFLAG_LBA = (1 << 4), /* enable LBA */ }; enum ata_tf_protocols { @@ -250,7 +261,19 @@ struct ata_taskfile { ((u64) (id)[(n) + 1] << 16) | \ ((u64) (id)[(n) + 0]) ) -static inline int atapi_cdb_len(u16 *dev_id) +static inline int ata_id_current_chs_valid(const u16 *id) +{ + /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command + has not been issued to the device then the values of + id[54] to id[56] are vendor specific. 
*/ + return (id[53] & 0x01) && /* Current translation valid */ + id[54] && /* cylinders in current translation */ + id[55] && /* heads in current translation */ + id[55] <= 16 && + id[56]; /* sectors in current translation */ +} + +static inline int atapi_cdb_len(const u16 *dev_id) { u16 tmp = dev_id[0] & 0x3; switch (tmp) { @@ -260,7 +283,7 @@ static inline int atapi_cdb_len(u16 *dev_id) } } -static inline int is_atapi_taskfile(struct ata_taskfile *tf) +static inline int is_atapi_taskfile(const struct ata_taskfile *tf) { return (tf->protocol == ATA_PROT_ATAPI) || (tf->protocol == ATA_PROT_ATAPI_NODATA) || diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index 9f374cfa1b05..e7d0593bb576 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h @@ -76,6 +76,13 @@ struct atm_dev_stats { /* set interface ESI */ #define ATM_SETESIF _IOW('a',ATMIOC_ITF+13,struct atmif_sioc) /* force interface ESI */ +#define ATM_ADDLECSADDR _IOW('a', ATMIOC_ITF+14, struct atmif_sioc) + /* register a LECS address */ +#define ATM_DELLECSADDR _IOW('a', ATMIOC_ITF+15, struct atmif_sioc) + /* unregister a LECS address */ +#define ATM_GETLECSADDR _IOW('a', ATMIOC_ITF+16, struct atmif_sioc) + /* retrieve LECS address(es) */ + #define ATM_GETSTAT _IOW('a',ATMIOC_SARCOM+0,struct atmif_sioc) /* get AAL layer statistics */ #define ATM_GETSTATZ _IOW('a',ATMIOC_SARCOM+1,struct atmif_sioc) @@ -328,6 +335,8 @@ struct atm_dev_addr { struct list_head entry; /* next address */ }; +enum atm_addr_type_t { ATM_ADDR_LOCAL, ATM_ADDR_LECS }; + struct atm_dev { const struct atmdev_ops *ops; /* device operations; NULL if unused */ const struct atmphy_ops *phy; /* PHY operations, may be undefined */ @@ -338,6 +347,7 @@ struct atm_dev { void *phy_data; /* private PHY date */ unsigned long flags; /* device flags (ATM_DF_*) */ struct list_head local; /* local ATM addresses */ + struct list_head lecs; /* LECS ATM addresses learned via ILMI */ unsigned char esi[ESI_LEN]; /* ESI ("MAC" addr) */ struct atm_cirange ci_range; /* VPI/VCI range */ struct k_atm_dev_stats stats; /* statistics */ @@ -457,7 +467,7 @@ static inline void atm_dev_put(struct atm_dev *dev) int atm_charge(struct atm_vcc *vcc,int truesize); struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, - int gfp_flags); + gfp_t gfp_flags); int atm_pcr_goal(struct atm_trafprm *tp); void vcc_release_async(struct atm_vcc *vcc, int reply); diff --git a/include/linux/audit.h b/include/linux/audit.h index b2a2509bd7ea..da3c01955f3d 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -260,11 +260,11 @@ extern int audit_filter_user(struct netlink_skb_parms *cb, int type); #ifdef CONFIG_AUDIT /* These are defined in audit.c */ /* Public API */ -extern void audit_log(struct audit_context *ctx, int gfp_mask, +extern void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, const char *fmt, ...) __attribute__((format(printf,4,5))); -extern struct audit_buffer *audit_log_start(struct audit_context *ctx, int gfp_mask, int type); +extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type); extern void audit_log_format(struct audit_buffer *ab, const char *fmt, ...) 
__attribute__((format(printf,2,3))); diff --git a/include/linux/bfs_fs.h b/include/linux/bfs_fs.h index c1237aa92e38..8ed6dfdcd783 100644 --- a/include/linux/bfs_fs.h +++ b/include/linux/bfs_fs.h @@ -20,19 +20,19 @@ /* BFS inode layout on disk */ struct bfs_inode { - __u16 i_ino; + __le16 i_ino; __u16 i_unused; - __u32 i_sblock; - __u32 i_eblock; - __u32 i_eoffset; - __u32 i_vtype; - __u32 i_mode; - __s32 i_uid; - __s32 i_gid; - __u32 i_nlink; - __u32 i_atime; - __u32 i_mtime; - __u32 i_ctime; + __le32 i_sblock; + __le32 i_eblock; + __le32 i_eoffset; + __le32 i_vtype; + __le32 i_mode; + __le32 i_uid; + __le32 i_gid; + __le32 i_nlink; + __le32 i_atime; + __le32 i_mtime; + __le32 i_ctime; __u32 i_padding[4]; }; @@ -41,17 +41,17 @@ struct bfs_inode { #define BFS_DIRS_PER_BLOCK 32 struct bfs_dirent { - __u16 ino; + __le16 ino; char name[BFS_NAMELEN]; }; /* BFS superblock layout on disk */ struct bfs_super_block { - __u32 s_magic; - __u32 s_start; - __u32 s_end; - __s32 s_from; - __s32 s_to; + __le32 s_magic; + __le32 s_start; + __le32 s_end; + __le32 s_from; + __le32 s_to; __s32 s_bfrom; __s32 s_bto; char s_fsname[6]; @@ -66,15 +66,15 @@ struct bfs_super_block { #define BFS_INO2OFF(ino) \ ((__u32)(((ino) - BFS_ROOT_INO) * sizeof(struct bfs_inode)) + BFS_BSIZE) #define BFS_NZFILESIZE(ip) \ - ((cpu_to_le32((ip)->i_eoffset) + 1) - cpu_to_le32((ip)->i_sblock) * BFS_BSIZE) + ((le32_to_cpu((ip)->i_eoffset) + 1) - le32_to_cpu((ip)->i_sblock) * BFS_BSIZE) #define BFS_FILESIZE(ip) \ ((ip)->i_sblock == 0 ? 0 : BFS_NZFILESIZE(ip)) #define BFS_FILEBLOCKS(ip) \ - ((ip)->i_sblock == 0 ? 0 : (cpu_to_le32((ip)->i_eblock) + 1) - cpu_to_le32((ip)->i_sblock)) + ((ip)->i_sblock == 0 ? 0 : (le32_to_cpu((ip)->i_eblock) + 1) - le32_to_cpu((ip)->i_sblock)) #define BFS_UNCLEAN(bfs_sb, sb) \ - ((cpu_to_le32(bfs_sb->s_from) != -1) && (cpu_to_le32(bfs_sb->s_to) != -1) && !(sb->s_flags & MS_RDONLY)) + ((le32_to_cpu(bfs_sb->s_from) != -1) && (le32_to_cpu(bfs_sb->s_to) != -1) && !(sb->s_flags & MS_RDONLY)) #endif /* _LINUX_BFS_FS_H */ diff --git a/include/linux/bio.h b/include/linux/bio.h index 6e1c79c8b6bf..685fd3720df5 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -276,8 +276,8 @@ extern void bio_pair_release(struct bio_pair *dbio); extern struct bio_set *bioset_create(int, int, int); extern void bioset_free(struct bio_set *); -extern struct bio *bio_alloc(unsigned int __nocast, int); -extern struct bio *bio_alloc_bioset(unsigned int __nocast, int, struct bio_set *); +extern struct bio *bio_alloc(gfp_t, int); +extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *); extern void bio_put(struct bio *); extern void bio_free(struct bio *, struct bio_set *); @@ -287,7 +287,7 @@ extern int bio_phys_segments(struct request_queue *, struct bio *); extern int bio_hw_segments(struct request_queue *, struct bio *); extern void __bio_clone(struct bio *, struct bio *); -extern struct bio *bio_clone(struct bio *, unsigned int __nocast); +extern struct bio *bio_clone(struct bio *, gfp_t); extern void bio_init(struct bio *); @@ -301,7 +301,7 @@ extern struct bio *bio_map_user_iov(struct request_queue *, struct sg_iovec *, int, int); extern void bio_unmap_user(struct bio *); extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int, - unsigned int); + gfp_t); extern void bio_set_pages_dirty(struct bio *bio); extern void bio_check_pages_dirty(struct bio *bio); extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int); diff --git a/include/linux/blkdev.h 
b/include/linux/blkdev.h index efdc9b5bc05c..025a7f084dbd 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -96,8 +96,8 @@ struct io_context { void put_io_context(struct io_context *ioc); void exit_io_context(void); -struct io_context *current_io_context(int gfp_flags); -struct io_context *get_io_context(int gfp_flags); +struct io_context *current_io_context(gfp_t gfp_flags); +struct io_context *get_io_context(gfp_t gfp_flags); void copy_io_context(struct io_context **pdst, struct io_context **psrc); void swap_io_context(struct io_context **ioc1, struct io_context **ioc2); @@ -107,9 +107,9 @@ typedef void (rq_end_io_fn)(struct request *); struct request_list { int count[2]; int starved[2]; + int elvpriv; mempool_t *rq_pool; wait_queue_head_t wait[2]; - wait_queue_head_t drain; }; #define BLK_MAX_CDB 16 @@ -203,6 +203,7 @@ struct request { enum rq_flag_bits { __REQ_RW, /* not set, read. set, write */ __REQ_FAILFAST, /* no low level driver retries */ + __REQ_SORTED, /* elevator knows about this request */ __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ __REQ_HARDBARRIER, /* may not be passed by drive either */ __REQ_CMD, /* is a regular fs rw request */ @@ -210,6 +211,7 @@ enum rq_flag_bits { __REQ_STARTED, /* drive already may have started this one */ __REQ_DONTPREP, /* don't call prep for this one */ __REQ_QUEUED, /* uses queueing */ + __REQ_ELVPRIV, /* elevator private data attached */ /* * for ATA/ATAPI devices */ @@ -235,6 +237,7 @@ enum rq_flag_bits { #define REQ_RW (1 << __REQ_RW) #define REQ_FAILFAST (1 << __REQ_FAILFAST) +#define REQ_SORTED (1 << __REQ_SORTED) #define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) #define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER) #define REQ_CMD (1 << __REQ_CMD) @@ -242,6 +245,7 @@ enum rq_flag_bits { #define REQ_STARTED (1 << __REQ_STARTED) #define REQ_DONTPREP (1 << __REQ_DONTPREP) #define REQ_QUEUED (1 << __REQ_QUEUED) +#define REQ_ELVPRIV (1 << __REQ_ELVPRIV) #define REQ_PC (1 << __REQ_PC) #define REQ_BLOCK_PC (1 << __REQ_BLOCK_PC) #define REQ_SENSE (1 << __REQ_SENSE) @@ -332,6 +336,12 @@ struct request_queue prepare_flush_fn *prepare_flush_fn; end_flush_fn *end_flush_fn; + /* + * Dispatch queue sorting + */ + sector_t end_sector; + struct request *boundary_rq; + /* * Auto-unplugging state */ @@ -354,7 +364,7 @@ struct request_queue * queue needs bounce pages for pages above this limit */ unsigned long bounce_pfn; - unsigned int bounce_gfp; + gfp_t bounce_gfp; /* * various queue flags, see QUEUE_* below @@ -405,8 +415,6 @@ struct request_queue unsigned int sg_reserved_size; int node; - struct list_head drain_list; - /* * reserved for flush operations */ @@ -434,7 +442,7 @@ enum { #define QUEUE_FLAG_DEAD 5 /* queue being torn down */ #define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */ #define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */ -#define QUEUE_FLAG_DRAIN 8 /* draining queue for sched switch */ +#define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */ #define QUEUE_FLAG_FLUSH 9 /* doing barrier flush sequence */ #define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags) @@ -454,6 +462,7 @@ enum { #define blk_pm_request(rq) \ ((rq)->flags & (REQ_PM_SUSPEND | REQ_PM_RESUME)) +#define blk_sorted_rq(rq) ((rq)->flags & REQ_SORTED) #define blk_barrier_rq(rq) ((rq)->flags & REQ_HARDBARRIER) #define blk_barrier_preflush(rq) ((rq)->flags & REQ_BAR_PREFLUSH) #define blk_barrier_postflush(rq) ((rq)->flags & REQ_BAR_POSTFLUSH) @@ -550,7 +559,7 @@ extern void generic_make_request(struct bio *bio); 
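/*
 * Editorial sketch, not part of this patch: with the dispatch-queue
 * model added here (REQ_SORTED, q->end_sector, q->boundary_rq), an
 * I/O scheduler's elevator_dispatch_fn moves requests from its own
 * lists onto q->queue_head, typically via the elv_dispatch_add_tail()
 * helper defined a few lines below.  The example_data structure and
 * names are hypothetical:
 */
struct example_data {
	struct list_head fifo;		/* hypothetical per-queue FIFO */
};

static int example_dispatch(request_queue_t *q, int force)
{
	struct example_data *ed = q->elevator->elevator_data;
	struct request *rq;

	if (list_empty(&ed->fifo))
		return 0;

	rq = list_entry(ed->fifo.next, struct request, queuelist);
	list_del_init(&rq->queuelist);
	elv_dispatch_add_tail(q, rq);
	return 1;			/* one request dispatched */
}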
extern void blk_put_request(struct request *); extern void blk_end_sync_rq(struct request *rq); extern void blk_attempt_remerge(request_queue_t *, struct request *); -extern struct request *blk_get_request(request_queue_t *, int, int); +extern struct request *blk_get_request(request_queue_t *, int, gfp_t); extern void blk_insert_request(request_queue_t *, struct request *, int, void *); extern void blk_requeue_request(request_queue_t *, struct request *); extern void blk_plug_device(request_queue_t *); @@ -565,7 +574,7 @@ extern void blk_run_queue(request_queue_t *); extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *); extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned int); extern int blk_rq_unmap_user(struct bio *, unsigned int); -extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, unsigned int); +extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t); extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_iovec *, int); extern int blk_execute_rq(request_queue_t *, struct gendisk *, struct request *, int); @@ -611,12 +620,21 @@ extern void end_request(struct request *req, int uptodate); static inline void blkdev_dequeue_request(struct request *req) { - BUG_ON(list_empty(&req->queuelist)); + elv_dequeue_request(req->q, req); +} - list_del_init(&req->queuelist); +/* + * This should be in elevator.h, but that requires pulling in rq and q + */ +static inline void elv_dispatch_add_tail(struct request_queue *q, + struct request *rq) +{ + if (q->last_merge == rq) + q->last_merge = NULL; - if (req->rl) - elv_remove_request(req->q, req); + q->end_sector = rq_end_sector(rq); + q->boundary_rq = rq; + list_add_tail(&rq->queuelist, &q->queue_head); } /* @@ -650,12 +668,10 @@ extern void blk_dump_rq_flags(struct request *, char *); extern void generic_unplug_device(request_queue_t *); extern void __generic_unplug_device(request_queue_t *); extern long nr_blockdev_pages(void); -extern void blk_wait_queue_drained(request_queue_t *, int); -extern void blk_finish_queue_drain(request_queue_t *); int blk_get_queue(request_queue_t *); -request_queue_t *blk_alloc_queue(int gfp_mask); -request_queue_t *blk_alloc_queue_node(int,int); +request_queue_t *blk_alloc_queue(gfp_t); +request_queue_t *blk_alloc_queue_node(gfp_t, int); #define blk_put_queue(q) blk_cleanup_queue((q)) /* diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 82bd8842d11c..3b03b0b868dd 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -43,7 +43,7 @@ typedef struct bootmem_data { extern unsigned long __init bootmem_bootmap_pages (unsigned long); extern unsigned long __init init_bootmem (unsigned long addr, unsigned long memend); extern void __init free_bootmem (unsigned long addr, unsigned long size); -extern void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal); +extern void * __init __alloc_bootmem_limit (unsigned long size, unsigned long align, unsigned long goal, unsigned long limit); #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE extern void __init reserve_bootmem (unsigned long addr, unsigned long size); #define alloc_bootmem(x) \ @@ -54,6 +54,16 @@ extern void __init reserve_bootmem (unsigned long addr, unsigned long size); __alloc_bootmem((x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low_pages(x) \ __alloc_bootmem((x), PAGE_SIZE, 0) + +#define alloc_bootmem_limit(x, limit) \ + __alloc_bootmem_limit((x), 
SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS), (limit)) +#define alloc_bootmem_low_limit(x, limit) \ + __alloc_bootmem_limit((x), SMP_CACHE_BYTES, 0, (limit)) +#define alloc_bootmem_pages_limit(x, limit) \ + __alloc_bootmem_limit((x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS), (limit)) +#define alloc_bootmem_low_pages_limit(x, limit) \ + __alloc_bootmem_limit((x), PAGE_SIZE, 0, (limit)) + #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */ extern unsigned long __init free_all_bootmem (void); @@ -61,7 +71,7 @@ extern unsigned long __init init_bootmem_node (pg_data_t *pgdat, unsigned long f extern void __init reserve_bootmem_node (pg_data_t *pgdat, unsigned long physaddr, unsigned long size); extern void __init free_bootmem_node (pg_data_t *pgdat, unsigned long addr, unsigned long size); extern unsigned long __init free_all_bootmem_node (pg_data_t *pgdat); -extern void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal); +extern void * __init __alloc_bootmem_node_limit (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal, unsigned long limit); #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE #define alloc_bootmem_node(pgdat, x) \ __alloc_bootmem_node((pgdat), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) @@ -69,6 +79,14 @@ extern void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, __alloc_bootmem_node((pgdat), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_low_pages_node(pgdat, x) \ __alloc_bootmem_node((pgdat), (x), PAGE_SIZE, 0) + +#define alloc_bootmem_node_limit(pgdat, x, limit) \ + __alloc_bootmem_node_limit((pgdat), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS), (limit)) +#define alloc_bootmem_pages_node_limit(pgdat, x, limit) \ + __alloc_bootmem_node_limit((pgdat), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS), (limit)) +#define alloc_bootmem_low_pages_node_limit(pgdat, x, limit) \ + __alloc_bootmem_node_limit((pgdat), (x), PAGE_SIZE, 0, (limit)) + #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */ #ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP @@ -105,5 +123,15 @@ extern void *__init alloc_large_system_hash(const char *tablename, #endif extern int __initdata hashdist; /* Distribute hashes across NUMA nodes? */ +static inline void *__alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal) +{ + return __alloc_bootmem_limit(size, align, goal, 0); +} + +static inline void *__alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align, + unsigned long goal) +{ + return __alloc_bootmem_node_limit(pgdat, size, align, goal, 0); +} #endif /* _LINUX_BOOTMEM_H */ diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 90828493791f..88af42f5e04a 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -172,7 +172,7 @@ void __brelse(struct buffer_head *); void __bforget(struct buffer_head *); void __breadahead(struct block_device *, sector_t block, int size); struct buffer_head *__bread(struct block_device *, sector_t block, int size); -struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags); +struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); void free_buffer_head(struct buffer_head * bh); void FASTCALL(unlock_buffer(struct buffer_head *bh)); void FASTCALL(__lock_buffer(struct buffer_head *bh)); @@ -188,7 +188,7 @@ extern int buffer_heads_over_limit; * Generic address_space_operations implementations for buffer_head-backed * address_spaces. 
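/*
 * Editorial sketch, not part of this patch, showing the intent of the
 * *_limit bootmem helpers added in include/linux/bootmem.h above.
 * The caller, size and limit below are hypothetical: on a 64-bit
 * configuration, request a page-aligned 64KB boot-time buffer that
 * must come from physical memory below 4GB.
 */
static void __init example_bootmem_limit_user(void)
{
	void *buf = alloc_bootmem_low_pages_limit(64 * 1024, 1UL << 32);

	memset(buf, 0, 64 * 1024);	/* use the buffer */
}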
*/ -int try_to_release_page(struct page * page, int gfp_mask); +int try_to_release_page(struct page * page, gfp_t gfp_mask); int block_invalidatepage(struct page *page, unsigned long offset); int block_write_full_page(struct page *page, get_block_t *get_block, struct writeback_control *wbc); diff --git a/include/linux/connector.h b/include/linux/connector.h index 96de26301f84..95952cc1f525 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h @@ -104,12 +104,19 @@ struct cn_queue_dev { struct sock *nls; }; -struct cn_callback { +struct cn_callback_id { unsigned char name[CN_CBQ_NAMELEN]; - struct cb_id id; +}; + +struct cn_callback_data { + void (*destruct_data) (void *); + void *ddata; + + void *callback_priv; void (*callback) (void *); - void *priv; + + void *free; }; struct cn_callback_entry { @@ -118,8 +125,8 @@ struct cn_callback_entry { struct work_struct work; struct cn_queue_dev *pdev; - void (*destruct_data) (void *); - void *ddata; + struct cn_callback_id id; + struct cn_callback_data data; int seq, group; struct sock *nls; @@ -142,9 +149,9 @@ struct cn_dev { int cn_add_callback(struct cb_id *, char *, void (*callback) (void *)); void cn_del_callback(struct cb_id *); -int cn_netlink_send(struct cn_msg *, u32, int); +int cn_netlink_send(struct cn_msg *, u32, gfp_t); -int cn_queue_add_callback(struct cn_queue_dev *dev, struct cn_callback *cb); +int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *)); void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id); struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *); @@ -152,6 +159,8 @@ void cn_queue_free_dev(struct cn_queue_dev *dev); int cn_cb_equal(struct cb_id *, struct cb_id *); +void cn_queue_wrapper(void *data); + extern int cn_already_initialized; #endif /* __KERNEL__ */ diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index b15826f6e3a2..9bdba8169b41 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -392,4 +392,14 @@ extern cpumask_t cpu_present_map; #define for_each_online_cpu(cpu) for_each_cpu_mask((cpu), cpu_online_map) #define for_each_present_cpu(cpu) for_each_cpu_mask((cpu), cpu_present_map) +/* Find the highest possible smp_processor_id() */ +#define highest_possible_processor_id() \ +({ \ + unsigned int cpu, highest = 0; \ + for_each_cpu_mask(cpu, cpu_possible_map) \ + highest = cpu; \ + highest; \ +}) + + #endif /* __LINUX_CPUMASK_H */ diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 24062a1dbf61..6e2deef96b34 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -23,7 +23,7 @@ void cpuset_init_current_mems_allowed(void); void cpuset_update_current_mems_allowed(void); void cpuset_restrict_to_mems_allowed(unsigned long *nodes); int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl); -extern int cpuset_zone_allowed(struct zone *z, unsigned int __nocast gfp_mask); +extern int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask); extern int cpuset_excl_nodes_overlap(const struct task_struct *p); extern struct file_operations proc_cpuset_operations; extern char *cpuset_task_status_allowed(struct task_struct *task, char *buffer); @@ -49,8 +49,7 @@ static inline int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl) return 1; } -static inline int cpuset_zone_allowed(struct zone *z, - unsigned int __nocast gfp_mask) +static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) { return 1; } diff --git a/include/linux/cyclomx.h 
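cpumask.h above gains highest_possible_processor_id(), which scans cpu_possible_map for the largest CPU number. A sketch of the kind of caller it is aimed at, sizing a flat per-CPU lookup table; struct example_stat and the function names are placeholders:

#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/string.h>

struct example_stat {
	unsigned long events;
};

static struct example_stat *example_stats;

static int __init example_stats_init(void)
{
	unsigned int slots = highest_possible_processor_id() + 1;

	example_stats = kmalloc(slots * sizeof(*example_stats), GFP_KERNEL);
	if (!example_stats)
		return -ENOMEM;
	memset(example_stats, 0, slots * sizeof(*example_stats));
	return 0;
}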
b/include/linux/cyclomx.h index 04fa7dff079c..300d704bdb9a 100644 --- a/include/linux/cyclomx.h +++ b/include/linux/cyclomx.h @@ -37,8 +37,6 @@ #include #endif -#define is_digit(ch) (((ch)>=(unsigned)'0'&&(ch)<=(unsigned)'9')?1:0) - /* Adapter Data Space. * This structure is needed because we handle multiple cards, otherwise * static data would do it. diff --git a/include/linux/cycx_drv.h b/include/linux/cycx_drv.h index 6621df86a748..12fe6b0bfcff 100644 --- a/include/linux/cycx_drv.h +++ b/include/linux/cycx_drv.h @@ -60,6 +60,5 @@ extern int cycx_peek(struct cycx_hw *hw, u32 addr, void *buf, u32 len); extern int cycx_poke(struct cycx_hw *hw, u32 addr, void *buf, u32 len); extern int cycx_exec(void __iomem *addr); -extern void cycx_inten(struct cycx_hw *hw); extern void cycx_intr(struct cycx_hw *hw); #endif /* _CYCX_DRV_H */ diff --git a/include/linux/device.h b/include/linux/device.h index 06e5d42f2c7b..95d607a48f06 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -317,6 +317,11 @@ dev_set_drvdata (struct device *dev, void *data) dev->driver_data = data; } +static inline int device_is_registered(struct device *dev) +{ + return klist_node_attached(&dev->knode_bus); +} + /* * High level routines for use by the bus drivers */ diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h index 4932ee5c77f0..76f12f46db7f 100644 --- a/include/linux/dmapool.h +++ b/include/linux/dmapool.h @@ -19,7 +19,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, void dma_pool_destroy(struct dma_pool *pool); -void *dma_pool_alloc(struct dma_pool *pool, unsigned int __nocast mem_flags, +void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle); void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr); diff --git a/include/linux/elevator.h b/include/linux/elevator.h index ea6bbc2d7407..a74c27e460ba 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -8,18 +8,17 @@ typedef void (elevator_merge_req_fn) (request_queue_t *, struct request *, struc typedef void (elevator_merged_fn) (request_queue_t *, struct request *); -typedef struct request *(elevator_next_req_fn) (request_queue_t *); +typedef int (elevator_dispatch_fn) (request_queue_t *, int); -typedef void (elevator_add_req_fn) (request_queue_t *, struct request *, int); +typedef void (elevator_add_req_fn) (request_queue_t *, struct request *); typedef int (elevator_queue_empty_fn) (request_queue_t *); -typedef void (elevator_remove_req_fn) (request_queue_t *, struct request *); -typedef void (elevator_requeue_req_fn) (request_queue_t *, struct request *); typedef struct request *(elevator_request_list_fn) (request_queue_t *, struct request *); typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *); typedef int (elevator_may_queue_fn) (request_queue_t *, int, struct bio *); -typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, struct bio *, int); +typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, struct bio *, gfp_t); typedef void (elevator_put_req_fn) (request_queue_t *, struct request *); +typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *); typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *); typedef int (elevator_init_fn) (request_queue_t *, elevator_t *); @@ -31,10 +30,9 @@ struct elevator_ops elevator_merged_fn *elevator_merged_fn; elevator_merge_req_fn *elevator_merge_req_fn; - elevator_next_req_fn *elevator_next_req_fn; + 
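Two small API points from the hunks above: device.h grows device_is_registered(), and dma_pool_alloc() now takes a gfp_t. A combined usage sketch; the function name and the error-handling policy are illustrative:

#include <linux/device.h>
#include <linux/dmapool.h>

static int example_grab_descriptor(struct device *dev, struct dma_pool *pool)
{
	dma_addr_t dma;
	void *desc;

	if (!device_is_registered(dev))		/* new helper, checks the bus klist node */
		return -ENODEV;

	desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);	/* mem_flags is gfp_t now */
	if (!desc)
		return -ENOMEM;

	dma_pool_free(pool, desc, dma);
	return 0;
}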
elevator_dispatch_fn *elevator_dispatch_fn; elevator_add_req_fn *elevator_add_req_fn; - elevator_remove_req_fn *elevator_remove_req_fn; - elevator_requeue_req_fn *elevator_requeue_req_fn; + elevator_activate_req_fn *elevator_activate_req_fn; elevator_deactivate_req_fn *elevator_deactivate_req_fn; elevator_queue_empty_fn *elevator_queue_empty_fn; @@ -81,15 +79,15 @@ struct elevator_queue /* * block elevator interface */ +extern void elv_dispatch_sort(request_queue_t *, struct request *); extern void elv_add_request(request_queue_t *, struct request *, int, int); extern void __elv_add_request(request_queue_t *, struct request *, int, int); extern int elv_merge(request_queue_t *, struct request **, struct bio *); extern void elv_merge_requests(request_queue_t *, struct request *, struct request *); extern void elv_merged_request(request_queue_t *, struct request *); -extern void elv_remove_request(request_queue_t *, struct request *); +extern void elv_dequeue_request(request_queue_t *, struct request *); extern void elv_requeue_request(request_queue_t *, struct request *); -extern void elv_deactivate_request(request_queue_t *, struct request *); extern int elv_queue_empty(request_queue_t *); extern struct request *elv_next_request(struct request_queue *q); extern struct request *elv_former_request(request_queue_t *, struct request *); @@ -98,7 +96,7 @@ extern int elv_register_queue(request_queue_t *q); extern void elv_unregister_queue(request_queue_t *q); extern int elv_may_queue(request_queue_t *, int, struct bio *); extern void elv_completed_request(request_queue_t *, struct request *); -extern int elv_set_request(request_queue_t *, struct request *, struct bio *, int); +extern int elv_set_request(request_queue_t *, struct request *, struct bio *, gfp_t); extern void elv_put_request(request_queue_t *, struct request *); /* @@ -142,4 +140,6 @@ enum { ELV_MQUEUE_MUST, }; +#define rq_end_sector(rq) ((rq)->sector + (rq)->nr_sectors) + #endif diff --git a/include/linux/fs.h b/include/linux/fs.h index e0b77c5af9a0..f83d997c5582 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -320,7 +320,7 @@ struct address_space_operations { /* Unfortunately this kludge is needed for FIBMAP. Don't use it */ sector_t (*bmap)(struct address_space *, sector_t); int (*invalidatepage) (struct page *, unsigned long); - int (*releasepage) (struct page *, int); + int (*releasepage) (struct page *, gfp_t); ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, loff_t offset, unsigned long nr_segs); struct page* (*get_xip_page)(struct address_space *, sector_t, diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 01796c41c951..142e1c1e0689 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -119,7 +119,7 @@ struct gendisk { int policy; atomic_t sync_io; /* RAID */ - unsigned long stamp, stamp_idle; + unsigned long stamp; int in_flight; #ifdef CONFIG_SMP struct disk_stats *dkstats; diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 4dc990f3b5cc..c3779432a723 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -12,8 +12,8 @@ struct vm_area_struct; * GFP bitmasks.. */ /* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low two bits) */ -#define __GFP_DMA 0x01u -#define __GFP_HIGHMEM 0x02u +#define __GFP_DMA ((__force gfp_t)0x01u) +#define __GFP_HIGHMEM ((__force gfp_t)0x02u) /* * Action modifiers - doesn't change the zoning @@ -26,24 +26,24 @@ struct vm_area_struct; * * __GFP_NORETRY: The VM implementation must not retry indefinitely. 
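The fs.h hunk above retypes the ->releasepage() hook's mask argument as gfp_t. A sketch of a buffer_head-backed implementation with the new signature; the example names are placeholders and the body simply defers to try_to_free_buffers():

#include <linux/fs.h>
#include <linux/buffer_head.h>

static int example_releasepage(struct page *page, gfp_t gfp_mask)
{
	/* nothing clever: drop the page's buffers if it still has any */
	if (PagePrivate(page))
		return try_to_free_buffers(page);
	return 1;
}

static struct address_space_operations example_aops = {
	.releasepage	= example_releasepage,
};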
*/ -#define __GFP_WAIT 0x10u /* Can wait and reschedule? */ -#define __GFP_HIGH 0x20u /* Should access emergency pools? */ -#define __GFP_IO 0x40u /* Can start physical IO? */ -#define __GFP_FS 0x80u /* Can call down to low-level FS? */ -#define __GFP_COLD 0x100u /* Cache-cold page required */ -#define __GFP_NOWARN 0x200u /* Suppress page allocation failure warning */ -#define __GFP_REPEAT 0x400u /* Retry the allocation. Might fail */ -#define __GFP_NOFAIL 0x800u /* Retry for ever. Cannot fail */ -#define __GFP_NORETRY 0x1000u /* Do not retry. Might fail */ -#define __GFP_NO_GROW 0x2000u /* Slab internal usage */ -#define __GFP_COMP 0x4000u /* Add compound page metadata */ -#define __GFP_ZERO 0x8000u /* Return zeroed page on success */ -#define __GFP_NOMEMALLOC 0x10000u /* Don't use emergency reserves */ -#define __GFP_NORECLAIM 0x20000u /* No realy zone reclaim during allocation */ -#define __GFP_HARDWALL 0x40000u /* Enforce hardwall cpuset memory allocs */ +#define __GFP_WAIT ((__force gfp_t)0x10u) /* Can wait and reschedule? */ +#define __GFP_HIGH ((__force gfp_t)0x20u) /* Should access emergency pools? */ +#define __GFP_IO ((__force gfp_t)0x40u) /* Can start physical IO? */ +#define __GFP_FS ((__force gfp_t)0x80u) /* Can call down to low-level FS? */ +#define __GFP_COLD ((__force gfp_t)0x100u) /* Cache-cold page required */ +#define __GFP_NOWARN ((__force gfp_t)0x200u) /* Suppress page allocation failure warning */ +#define __GFP_REPEAT ((__force gfp_t)0x400u) /* Retry the allocation. Might fail */ +#define __GFP_NOFAIL ((__force gfp_t)0x800u) /* Retry for ever. Cannot fail */ +#define __GFP_NORETRY ((__force gfp_t)0x1000u)/* Do not retry. Might fail */ +#define __GFP_NO_GROW ((__force gfp_t)0x2000u)/* Slab internal usage */ +#define __GFP_COMP ((__force gfp_t)0x4000u)/* Add compound page metadata */ +#define __GFP_ZERO ((__force gfp_t)0x8000u)/* Return zeroed page on success */ +#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */ +#define __GFP_NORECLAIM ((__force gfp_t)0x20000u) /* No realy zone reclaim during allocation */ +#define __GFP_HARDWALL ((__force gfp_t)0x40000u) /* Enforce hardwall cpuset memory allocs */ #define __GFP_BITS_SHIFT 20 /* Room for 20 __GFP_FOO bits */ -#define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1) +#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) /* if you forget to add the bitmask here kernel will crash, period */ #define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \ @@ -64,6 +64,7 @@ struct vm_area_struct; #define GFP_DMA __GFP_DMA +#define gfp_zone(mask) ((__force int)((mask) & (__force gfp_t)GFP_ZONEMASK)) /* * There is only one page-allocator function, and two main namespaces to @@ -85,30 +86,30 @@ static inline void arch_free_page(struct page *page, int order) { } #endif extern struct page * -FASTCALL(__alloc_pages(unsigned int, unsigned int, struct zonelist *)); +FASTCALL(__alloc_pages(gfp_t, unsigned int, struct zonelist *)); -static inline struct page *alloc_pages_node(int nid, unsigned int __nocast gfp_mask, +static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) { if (unlikely(order >= MAX_ORDER)) return NULL; return __alloc_pages(gfp_mask, order, - NODE_DATA(nid)->node_zonelists + (gfp_mask & GFP_ZONEMASK)); + NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_mask)); } #ifdef CONFIG_NUMA -extern struct page *alloc_pages_current(unsigned int __nocast gfp_mask, unsigned order); +extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned 
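gfp.h above turns the allocation flags into a distinct, sparse-checked gfp_t and adds gfp_zone() to fold the zone modifier bits back into a plain index, which alloc_pages_node() now uses to pick a zonelist. A small sketch of the same pattern; the function name is illustrative:

#include <linux/gfp.h>
#include <linux/mmzone.h>

static struct zonelist *example_zonelist_for(pg_data_t *pgdat, gfp_t gfp_mask)
{
	/* gfp_zone() masks down to GFP_ZONEMASK and casts back to an int index */
	return pgdat->node_zonelists + gfp_zone(gfp_mask);
}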
order); static inline struct page * -alloc_pages(unsigned int __nocast gfp_mask, unsigned int order) +alloc_pages(gfp_t gfp_mask, unsigned int order) { if (unlikely(order >= MAX_ORDER)) return NULL; return alloc_pages_current(gfp_mask, order); } -extern struct page *alloc_page_vma(unsigned __nocast gfp_mask, +extern struct page *alloc_page_vma(gfp_t gfp_mask, struct vm_area_struct *vma, unsigned long addr); #else #define alloc_pages(gfp_mask, order) \ @@ -117,8 +118,8 @@ extern struct page *alloc_page_vma(unsigned __nocast gfp_mask, #endif #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) -extern unsigned long FASTCALL(__get_free_pages(unsigned int __nocast gfp_mask, unsigned int order)); -extern unsigned long FASTCALL(get_zeroed_page(unsigned int __nocast gfp_mask)); +extern unsigned long FASTCALL(__get_free_pages(gfp_t gfp_mask, unsigned int order)); +extern unsigned long FASTCALL(get_zeroed_page(gfp_t gfp_mask)); #define __get_free_page(gfp_mask) \ __get_free_pages((gfp_mask),0) diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index e670b0d13fe0..d664330d900e 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -25,6 +25,8 @@ int is_hugepage_mem_enough(size_t); unsigned long hugetlb_total_pages(void); struct page *alloc_huge_page(void); void free_huge_page(struct page *); +int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, int write_access); extern unsigned long max_huge_pages; extern const unsigned long hugetlb_zero, hugetlb_infinity; @@ -99,6 +101,7 @@ static inline unsigned long hugetlb_total_pages(void) do { } while (0) #define alloc_huge_page() ({ NULL; }) #define free_huge_page(p) ({ (void)(p); BUG(); }) +#define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; }) #ifndef HPAGE_MASK #define HPAGE_MASK 0 /* Keep the compiler happy */ diff --git a/include/linux/i2o.h b/include/linux/i2o.h index bdc286ec947c..b4af45aad25d 100644 --- a/include/linux/i2o.h +++ b/include/linux/i2o.h @@ -492,7 +492,7 @@ static inline int i2o_dma_map_sg(struct i2o_controller *c, * Returns 0 on success or -ENOMEM on failure. */ static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, - size_t len, unsigned int gfp_mask) + size_t len, gfp_t gfp_mask) { struct pci_dev *pdev = to_pci_dev(dev); int dma_64 = 0; @@ -551,7 +551,7 @@ static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr) * Returns the 0 on success or negative error code on failure. 
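hugetlb.h above exports hugetlb_fault() (with a BUG()-ing stub when hugetlb is disabled). Roughly, the core fault path is expected to divert hugetlb VMAs to it along these lines; this is a simplified sketch, not the actual mm/memory.c code, and the fallback return value is a placeholder:

#include <linux/mm.h>
#include <linux/hugetlb.h>

static int example_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long address, int write_access)
{
	if (is_vm_hugetlb_page(vma))
		return hugetlb_fault(mm, vma, address, write_access);

	return VM_FAULT_SIGBUS;	/* placeholder for the ordinary fault path */
}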
*/ static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, - size_t len, unsigned int gfp_mask) + size_t len, gfp_t gfp_mask) { i2o_dma_free(dev, addr); diff --git a/include/linux/ibmtr.h b/include/linux/ibmtr.h index 2ef0b21517fb..1c7a0dd5536a 100644 --- a/include/linux/ibmtr.h +++ b/include/linux/ibmtr.h @@ -7,8 +7,8 @@ /* ported to the Alpha architecture 02/20/96 (just used the HZ macro) */ #define TR_RETRY_INTERVAL (30*HZ) /* 500 on PC = 5 s */ -#define TR_RST_TIME (HZ/20) /* 5 on PC = 50 ms */ -#define TR_BUSY_INTERVAL (HZ/5) /* 5 on PC = 200 ms */ +#define TR_RST_TIME (msecs_to_jiffies(50)) /* 5 on PC = 50 ms */ +#define TR_BUSY_INTERVAL (msecs_to_jiffies(200)) /* 5 on PC = 200 ms */ #define TR_SPIN_INTERVAL (3*HZ) /* 3 seconds before init timeout */ #define TR_ISA 1 diff --git a/include/linux/idr.h b/include/linux/idr.h index ca3b7e462576..7fb3ff9c7b0e 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -71,8 +71,9 @@ struct idr { */ void *idr_find(struct idr *idp, int id); -int idr_pre_get(struct idr *idp, unsigned gfp_mask); +int idr_pre_get(struct idr *idp, gfp_t gfp_mask); int idr_get_new(struct idr *idp, void *ptr, int *id); int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); void idr_remove(struct idr *idp, int id); +void idr_destroy(struct idr *idp); void idr_init(struct idr *idp); diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index 0856548a2a08..a8b1a2071838 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h @@ -84,6 +84,7 @@ #define ARPHRD_IEEE802_TR 800 /* Magic type ident for TR */ #define ARPHRD_IEEE80211 801 /* IEEE 802.11 */ #define ARPHRD_IEEE80211_PRISM 802 /* IEEE 802.11 + Prism2 header */ +#define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */ #define ARPHRD_VOID 0xFFFF /* Void type, nothing is known */ #define ARPHRD_NONE 0xFFFE /* zero header length */ diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index fc2d4c8225aa..d21c305c6c64 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h @@ -111,7 +111,9 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb) return (struct ethhdr *)skb->mac.raw; } +#ifdef CONFIG_SYSCTL extern struct ctl_table ether_table[]; #endif +#endif #endif /* _LINUX_IF_ETHER_H */ diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 7e1e15f934f3..fd7af86151b1 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -142,13 +142,21 @@ static __inline__ int bad_mask(u32 mask, u32 addr) #define endfor_ifa(in_dev) } +static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev) +{ + struct in_device *in_dev = dev->ip_ptr; + if (in_dev) + in_dev = rcu_dereference(in_dev); + return in_dev; +} + static __inline__ struct in_device * in_dev_get(const struct net_device *dev) { struct in_device *in_dev; rcu_read_lock(); - in_dev = dev->ip_ptr; + in_dev = __in_dev_get_rcu(dev); if (in_dev) atomic_inc(&in_dev->refcnt); rcu_read_unlock(); @@ -156,7 +164,7 @@ in_dev_get(const struct net_device *dev) } static __inline__ struct in_device * -__in_dev_get(const struct net_device *dev) +__in_dev_get_rtnl(const struct net_device *dev) { return (struct in_device*)dev->ip_ptr; } diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index bb6f88e14061..e0b922785d98 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -372,8 +372,9 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk) #define inet_v6_ipv6only(__sk) 0 #endif /* 
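inetdevice.h above splits the old __in_dev_get() into __in_dev_get_rcu() (for rcu_read_lock() sections) and __in_dev_get_rtnl() (for RTNL holders), and in_dev_get() now builds on the RCU variant. A reader-side sketch; the function name is illustrative:

#include <linux/inetdevice.h>
#include <linux/rcupdate.h>

static int example_dev_has_address(struct net_device *dev)
{
	struct in_device *in_dev;
	int ret = 0;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev && in_dev->ifa_list)
		ret = 1;
	rcu_read_unlock();

	return ret;
}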
defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ -#define INET6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \ - (((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \ +#define INET6_MATCH(__sk, __hash, __saddr, __daddr, __ports, __dif)\ + (((__sk)->sk_hash == (__hash)) && \ + ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \ ((__sk)->sk_family == AF_INET6) && \ ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr)) && \ ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \ diff --git a/include/linux/jbd.h b/include/linux/jbd.h index de097269bd7f..be197eb90077 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h @@ -69,7 +69,7 @@ extern int journal_enable_debug; #define jbd_debug(f, a...) /**/ #endif -extern void * __jbd_kmalloc (const char *where, size_t size, int flags, int retry); +extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry); #define jbd_kmalloc(size, flags) \ __jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry) #define jbd_rep_kmalloc(size, flags) \ @@ -890,7 +890,7 @@ extern int journal_forget (handle_t *, struct buffer_head *); extern void journal_sync_buffer (struct buffer_head *); extern int journal_invalidatepage(journal_t *, struct page *, unsigned long); -extern int journal_try_to_free_buffers(journal_t *, struct page *, int); +extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t); extern int journal_stop(handle_t *); extern int journal_flush (journal_t *); extern void journal_lock_updates (journal_t *); @@ -935,7 +935,7 @@ void journal_put_journal_head(struct journal_head *jh); */ extern kmem_cache_t *jbd_handle_cache; -static inline handle_t *jbd_alloc_handle(unsigned int __nocast gfp_flags) +static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags) { return kmem_cache_alloc(jbd_handle_cache, gfp_flags); } diff --git a/include/linux/key-ui.h b/include/linux/key-ui.h index cc326174a808..7a2e332067c3 100644 --- a/include/linux/key-ui.h +++ b/include/linux/key-ui.h @@ -38,92 +38,21 @@ struct keyring_list { struct key *keys[0]; }; - /* * check to see whether permission is granted to use a key in the desired way */ -static inline int key_permission(const struct key *key, key_perm_t perm) +extern int key_task_permission(const key_ref_t key_ref, + struct task_struct *context, + key_perm_t perm); + +static inline int key_permission(const key_ref_t key_ref, key_perm_t perm) { - key_perm_t kperm; - - if (key->uid == current->fsuid) - kperm = key->perm >> 16; - else if (key->gid != -1 && - key->perm & KEY_GRP_ALL && - in_group_p(key->gid) - ) - kperm = key->perm >> 8; - else - kperm = key->perm; - - kperm = kperm & perm & KEY_ALL; - - return kperm == perm; + return key_task_permission(key_ref, current, perm); } -/* - * check to see whether permission is granted to use a key in at least one of - * the desired ways - */ -static inline int key_any_permission(const struct key *key, key_perm_t perm) -{ - key_perm_t kperm; - - if (key->uid == current->fsuid) - kperm = key->perm >> 16; - else if (key->gid != -1 && - key->perm & KEY_GRP_ALL && - in_group_p(key->gid) - ) - kperm = key->perm >> 8; - else - kperm = key->perm; - - kperm = kperm & perm & KEY_ALL; - - return kperm != 0; -} - -static inline int key_task_groups_search(struct task_struct *tsk, gid_t gid) -{ - int ret; - - task_lock(tsk); - ret = groups_search(tsk->group_info, gid); - task_unlock(tsk); - return ret; -} - -static inline int key_task_permission(const struct key *key, - struct task_struct *context, - key_perm_t perm) -{ - 
key_perm_t kperm; - - if (key->uid == context->fsuid) { - kperm = key->perm >> 16; - } - else if (key->gid != -1 && - key->perm & KEY_GRP_ALL && ( - key->gid == context->fsgid || - key_task_groups_search(context, key->gid) - ) - ) { - kperm = key->perm >> 8; - } - else { - kperm = key->perm; - } - - kperm = kperm & perm & KEY_ALL; - - return kperm == perm; - -} - -extern struct key *lookup_user_key(struct task_struct *context, - key_serial_t id, int create, int partial, - key_perm_t perm); +extern key_ref_t lookup_user_key(struct task_struct *context, + key_serial_t id, int create, int partial, + key_perm_t perm); extern long join_session_keyring(const char *name); diff --git a/include/linux/key.h b/include/linux/key.h index 970bbd916cf4..f1efa016dbf3 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -35,11 +35,18 @@ struct key; #undef KEY_DEBUGGING -#define KEY_USR_VIEW 0x00010000 /* user can view a key's attributes */ -#define KEY_USR_READ 0x00020000 /* user can read key payload / view keyring */ -#define KEY_USR_WRITE 0x00040000 /* user can update key payload / add link to keyring */ -#define KEY_USR_SEARCH 0x00080000 /* user can find a key in search / search a keyring */ -#define KEY_USR_LINK 0x00100000 /* user can create a link to a key/keyring */ +#define KEY_POS_VIEW 0x01000000 /* possessor can view a key's attributes */ +#define KEY_POS_READ 0x02000000 /* possessor can read key payload / view keyring */ +#define KEY_POS_WRITE 0x04000000 /* possessor can update key payload / add link to keyring */ +#define KEY_POS_SEARCH 0x08000000 /* possessor can find a key in search / search a keyring */ +#define KEY_POS_LINK 0x10000000 /* possessor can create a link to a key/keyring */ +#define KEY_POS_ALL 0x1f000000 + +#define KEY_USR_VIEW 0x00010000 /* user permissions... */ +#define KEY_USR_READ 0x00020000 +#define KEY_USR_WRITE 0x00040000 +#define KEY_USR_SEARCH 0x00080000 +#define KEY_USR_LINK 0x00100000 #define KEY_USR_ALL 0x001f0000 #define KEY_GRP_VIEW 0x00000100 /* group permissions... */ @@ -65,6 +72,38 @@ struct key_owner; struct keyring_list; struct keyring_name; +/*****************************************************************************/ +/* + * key reference with possession attribute handling + * + * NOTE! key_ref_t is a typedef'd pointer to a type that is not actually + * defined. This is because we abuse the bottom bit of the reference to carry a + * flag to indicate whether the calling process possesses that key in one of + * its keyrings. + * + * the key_ref_t has been made a separate type so that the compiler can reject + * attempts to dereference it without proper conversion. 
+ * + * the three functions are used to assemble and disassemble references + */ +typedef struct __key_reference_with_attributes *key_ref_t; + +static inline key_ref_t make_key_ref(const struct key *key, + unsigned long possession) +{ + return (key_ref_t) ((unsigned long) key | possession); +} + +static inline struct key *key_ref_to_ptr(const key_ref_t key_ref) +{ + return (struct key *) ((unsigned long) key_ref & ~1UL); +} + +static inline unsigned long is_key_possessed(const key_ref_t key_ref) +{ + return (unsigned long) key_ref & 1UL; +} + /*****************************************************************************/ /* * authentication token / access credential / keyring @@ -215,20 +254,25 @@ static inline struct key *key_get(struct key *key) return key; } +static inline void key_ref_put(key_ref_t key_ref) +{ + key_put(key_ref_to_ptr(key_ref)); +} + extern struct key *request_key(struct key_type *type, const char *description, const char *callout_info); extern int key_validate(struct key *key); -extern struct key *key_create_or_update(struct key *keyring, - const char *type, - const char *description, - const void *payload, - size_t plen, - int not_in_quota); +extern key_ref_t key_create_or_update(key_ref_t keyring, + const char *type, + const char *description, + const void *payload, + size_t plen, + int not_in_quota); -extern int key_update(struct key *key, +extern int key_update(key_ref_t key, const void *payload, size_t plen); @@ -243,9 +287,9 @@ extern struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid, extern int keyring_clear(struct key *keyring); -extern struct key *keyring_search(struct key *keyring, - struct key_type *type, - const char *description); +extern key_ref_t keyring_search(key_ref_t keyring, + struct key_type *type, + const char *description); extern int keyring_add_key(struct key *keyring, struct key *key); @@ -285,6 +329,10 @@ extern void key_init(void); #define key_serial(k) 0 #define key_get(k) ({ NULL; }) #define key_put(k) do { } while(0) +#define key_ref_put(k) do { } while(0) +#define make_key_ref(k) ({ NULL; }) +#define key_ref_to_ptr(k) ({ NULL; }) +#define is_key_possessed(k) 0 #define alloc_uid_keyring(u) 0 #define switch_uid_keyring(u) do { } while(0) #define __install_session_keyring(t, k) ({ NULL; }) diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index c27cd428d269..48eccd865bd8 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -35,8 +35,8 @@ struct kfifo { }; extern struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size, - unsigned int __nocast gfp_mask, spinlock_t *lock); -extern struct kfifo *kfifo_alloc(unsigned int size, unsigned int __nocast gfp_mask, + gfp_t gfp_mask, spinlock_t *lock); +extern struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock); extern void kfifo_free(struct kfifo *fifo); extern unsigned int __kfifo_put(struct kfifo *fifo, diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 3b22304f12fd..7f7403aa4a41 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -65,7 +65,7 @@ extern void kobject_unregister(struct kobject *); extern struct kobject * kobject_get(struct kobject *); extern void kobject_put(struct kobject *); -extern char * kobject_get_path(struct kobject *, int); +extern char * kobject_get_path(struct kobject *, gfp_t); struct kobj_type { void (*release)(struct kobject *); diff --git a/include/linux/libata.h b/include/linux/libata.h index 022105c745fc..00a8a5738858 100644 --- a/include/linux/libata.h 
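key.h above introduces key_ref_t, which hides a possession flag in the low bit of the key pointer, plus make_key_ref()/key_ref_to_ptr()/is_key_possessed() to pack and unpack it, and keyring_search() now speaks key_ref_t. A lookup sketch; the wrapper name and the assumption that the caller possesses the keyring are illustrative:

#include <linux/key.h>
#include <linux/err.h>

static struct key *example_find_key(struct key *keyring, struct key_type *type,
				    const char *description)
{
	key_ref_t kref;

	/* mark the keyring as possessed so possessor permissions apply */
	kref = keyring_search(make_key_ref(keyring, 1), type, description);
	if (IS_ERR(kref))
		return ERR_PTR(PTR_ERR(kref));

	return key_ref_to_ptr(kref);	/* strip the possession bit again */
}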
+++ b/include/linux/libata.h @@ -91,12 +91,13 @@ enum { ATA_SHT_EMULATED = 1, ATA_SHT_CMD_PER_LUN = 1, ATA_SHT_THIS_ID = -1, - ATA_SHT_USE_CLUSTERING = 0, + ATA_SHT_USE_CLUSTERING = 1, /* struct ata_device stuff */ ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */ ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */ ATA_DFLAG_LOCK_SECTORS = (1 << 2), /* don't adjust max_sectors */ + ATA_DFLAG_LBA = (1 << 3), /* device supports LBA */ ATA_DEV_UNKNOWN = 0, /* unknown device */ ATA_DEV_ATA = 1, /* ATA device */ @@ -154,17 +155,21 @@ enum { ATA_SHIFT_UDMA = 0, ATA_SHIFT_MWDMA = 8, ATA_SHIFT_PIO = 11, + + /* Masks for port functions */ + ATA_PORT_PRIMARY = (1 << 0), + ATA_PORT_SECONDARY = (1 << 1), }; -enum pio_task_states { - PIO_ST_UNKNOWN, - PIO_ST_IDLE, - PIO_ST_POLL, - PIO_ST_TMOUT, - PIO_ST, - PIO_ST_LAST, - PIO_ST_LAST_POLL, - PIO_ST_ERR, +enum hsm_task_states { + HSM_ST_UNKNOWN, + HSM_ST_IDLE, + HSM_ST_POLL, + HSM_ST_TMOUT, + HSM_ST, + HSM_ST_LAST, + HSM_ST_LAST_POLL, + HSM_ST_ERR, }; /* forward declarations */ @@ -197,7 +202,7 @@ struct ata_ioports { struct ata_probe_ent { struct list_head node; struct device *dev; - struct ata_port_operations *port_ops; + const struct ata_port_operations *port_ops; Scsi_Host_Template *sht; struct ata_ioports port[ATA_MAX_PORTS]; unsigned int n_ports; @@ -220,7 +225,7 @@ struct ata_host_set { void __iomem *mmio_base; unsigned int n_ports; void *private_data; - struct ata_port_operations *ops; + const struct ata_port_operations *ops; struct ata_port * ports[0]; }; @@ -278,15 +283,18 @@ struct ata_device { u8 xfer_mode; unsigned int xfer_shift; /* ATA_SHIFT_xxx */ - /* cache info about current transfer mode */ - u8 xfer_protocol; /* taskfile xfer protocol */ - u8 read_cmd; /* opcode to use on read */ - u8 write_cmd; /* opcode to use on write */ + unsigned int multi_count; /* sectors count for + READ/WRITE MULTIPLE */ + + /* for CHS addressing */ + u16 cylinders; /* Number of cylinders */ + u16 heads; /* Number of heads */ + u16 sectors; /* Number of sectors per track */ }; struct ata_port { struct Scsi_Host *host; /* our co-allocated scsi host */ - struct ata_port_operations *ops; + const struct ata_port_operations *ops; unsigned long flags; /* ATA_FLAG_xxx */ unsigned int id; /* unique id req'd by scsi midlyr */ unsigned int port_no; /* unique port #; from zero */ @@ -319,7 +327,7 @@ struct ata_port { struct work_struct packet_task; struct work_struct pio_task; - unsigned int pio_task_state; + unsigned int hsm_task_state; unsigned long pio_task_timeout; void *private_data; @@ -333,10 +341,10 @@ struct ata_port_operations { void (*set_piomode) (struct ata_port *, struct ata_device *); void (*set_dmamode) (struct ata_port *, struct ata_device *); - void (*tf_load) (struct ata_port *ap, struct ata_taskfile *tf); + void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf); void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf); - void (*exec_command)(struct ata_port *ap, struct ata_taskfile *tf); + void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf); u8 (*check_status)(struct ata_port *ap); u8 (*check_altstatus)(struct ata_port *ap); u8 (*check_err)(struct ata_port *ap); @@ -377,9 +385,22 @@ struct ata_port_info { unsigned long pio_mask; unsigned long mwdma_mask; unsigned long udma_mask; - struct ata_port_operations *port_ops; + const struct ata_port_operations *port_ops; }; +struct ata_timing { + unsigned short mode; /* ATA mode */ + unsigned short setup; /* t1 */ + unsigned short act8b; /* t2 for 8-bit 
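A large part of the libata.h hunk above is const-correctness: ata_probe_ent, ata_host_set and ata_port now carry const struct ata_port_operations pointers, so drivers can keep their ops tables in read-only data. A trimmed sketch using hooks visible in this header; the table name is a placeholder and real drivers fill in many more callbacks:

#include <linux/libata.h>

static const struct ata_port_operations example_port_ops = {
	.tf_load	= ata_tf_load,		/* now takes a const taskfile */
	.tf_read	= ata_tf_read,
	.exec_command	= ata_exec_command,	/* const taskfile here as well */
	.check_status	= ata_check_status,
	.port_start	= ata_port_start,
	.port_stop	= ata_port_stop,
};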
I/O */ + unsigned short rec8b; /* t2i for 8-bit I/O */ + unsigned short cyc8b; /* t0 for 8-bit I/O */ + unsigned short active; /* t2 or tD */ + unsigned short recover; /* t2i or tK */ + unsigned short cycle; /* t0 */ + unsigned short udma; /* t2CYCTYP/2 */ +}; + +#define FIT(v,vmin,vmax) max_t(short,min_t(short,v,vmax),vmin) extern void ata_port_probe(struct ata_port *); extern void __sata_phy_reset(struct ata_port *ap); @@ -392,26 +413,29 @@ extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_i unsigned int n_ports); extern void ata_pci_remove_one (struct pci_dev *pdev); #endif /* CONFIG_PCI */ -extern int ata_device_add(struct ata_probe_ent *ent); +extern int ata_device_add(const struct ata_probe_ent *ent); +extern void ata_host_set_remove(struct ata_host_set *host_set); extern int ata_scsi_detect(Scsi_Host_Template *sht); extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); extern int ata_scsi_error(struct Scsi_Host *host); extern int ata_scsi_release(struct Scsi_Host *host); extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); +extern int ata_ratelimit(void); + /* * Default driver ops implementations */ -extern void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf); +extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf); extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf); -extern void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp); -extern void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf); +extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp); +extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf); extern void ata_noop_dev_select (struct ata_port *ap, unsigned int device); extern void ata_std_dev_select (struct ata_port *ap, unsigned int device); extern u8 ata_check_status(struct ata_port *ap); extern u8 ata_altstatus(struct ata_port *ap); extern u8 ata_chk_err(struct ata_port *ap); -extern void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf); +extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf); extern int ata_port_start (struct ata_port *ap); extern void ata_port_stop (struct ata_port *ap); extern void ata_host_stop (struct ata_host_set *host_set); @@ -422,8 +446,8 @@ extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen); extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, unsigned int n_elem); -extern unsigned int ata_dev_classify(struct ata_taskfile *tf); -extern void ata_dev_id_string(u16 *id, unsigned char *s, +extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); +extern void ata_dev_id_string(const u16 *id, unsigned char *s, unsigned int ofs, unsigned int len); extern void ata_dev_config(struct ata_port *ap, unsigned int i); extern void ata_bmdma_setup (struct ata_queued_cmd *qc); @@ -440,6 +464,32 @@ extern int ata_std_bios_param(struct scsi_device *sdev, sector_t capacity, int geom[]); extern int ata_scsi_slave_config(struct scsi_device *sdev); +/* + * Timing helpers + */ +extern int ata_timing_compute(struct ata_device *, unsigned short, + struct ata_timing *, int, int); +extern void ata_timing_merge(const struct ata_timing *, + const struct ata_timing *, struct ata_timing *, + unsigned int); + +enum { + ATA_TIMING_SETUP = (1 << 0), + ATA_TIMING_ACT8B = (1 << 1), + ATA_TIMING_REC8B = (1 << 2), + 
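The timing helpers declared above (struct ata_timing, ata_timing_compute() and ata_timing_merge()) let PATA drivers derive setup/active/recover counts from a transfer mode and the controller clock instead of hard-coding per-mode tables. A rough sketch of a set-mode style caller; the function name is a placeholder and the clock periods T/UT must come from the real controller:

#include <linux/libata.h>

static int example_shared_timing(struct ata_device *a, struct ata_device *b,
				 unsigned short mode_a, unsigned short mode_b,
				 int T, int UT, struct ata_timing *out)
{
	struct ata_timing ta, tb;

	if (ata_timing_compute(a, mode_a, &ta, T, UT))
		return -EINVAL;
	if (ata_timing_compute(b, mode_b, &tb, T, UT))
		return -EINVAL;

	/* devices sharing timing registers take the slower of the two */
	ata_timing_merge(&ta, &tb, out, ATA_TIMING_ALL);
	return 0;
}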
ATA_TIMING_CYC8B = (1 << 3), + ATA_TIMING_8BIT = ATA_TIMING_ACT8B | ATA_TIMING_REC8B | + ATA_TIMING_CYC8B, + ATA_TIMING_ACTIVE = (1 << 4), + ATA_TIMING_RECOVER = (1 << 5), + ATA_TIMING_CYCLE = (1 << 6), + ATA_TIMING_UDMA = (1 << 7), + ATA_TIMING_ALL = ATA_TIMING_SETUP | ATA_TIMING_ACT8B | + ATA_TIMING_REC8B | ATA_TIMING_CYC8B | + ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER | + ATA_TIMING_CYCLE | ATA_TIMING_UDMA, +}; + #ifdef CONFIG_PCI struct pci_bits { @@ -451,8 +501,8 @@ struct pci_bits { extern void ata_pci_host_stop (struct ata_host_set *host_set); extern struct ata_probe_ent * -ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port); -extern int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits); +ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask); +extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits); #endif /* CONFIG_PCI */ @@ -462,7 +512,7 @@ static inline unsigned int ata_tag_valid(unsigned int tag) return (tag < ATA_MAX_QUEUE) ? 1 : 0; } -static inline unsigned int ata_dev_present(struct ata_device *dev) +static inline unsigned int ata_dev_present(const struct ata_device *dev) { return ((dev->class == ATA_DEV_ATA) || (dev->class == ATA_DEV_ATAPI)); @@ -661,7 +711,7 @@ static inline unsigned int sata_dev_present(struct ata_port *ap) return ((scr_read(ap, SCR_STATUS) & 0xf) == 0x3) ? 1 : 0; } -static inline int ata_try_flush_cache(struct ata_device *dev) +static inline int ata_try_flush_cache(const struct ata_device *dev) { return ata_id_wcache_enabled(dev->id) || ata_id_has_flush(dev->id) || diff --git a/include/linux/list.h b/include/linux/list.h index e6ec59682274..084971f333fe 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -442,12 +442,14 @@ static inline void list_splice_init(struct list_head *list, * as long as the traversal is guarded by rcu_read_lock(). */ #define list_for_each_rcu(pos, head) \ - for (pos = (head)->next; prefetch(pos->next), pos != (head); \ - pos = rcu_dereference(pos->next)) + for (pos = (head)->next; \ + prefetch(rcu_dereference(pos)->next), pos != (head); \ + pos = pos->next) #define __list_for_each_rcu(pos, head) \ - for (pos = (head)->next; pos != (head); \ - pos = rcu_dereference(pos->next)) + for (pos = (head)->next; \ + rcu_dereference(pos) != (head); \ + pos = pos->next) /** * list_for_each_safe_rcu - iterate over an rcu-protected list safe @@ -461,8 +463,9 @@ static inline void list_splice_init(struct list_head *list, * as long as the traversal is guarded by rcu_read_lock(). */ #define list_for_each_safe_rcu(pos, n, head) \ - for (pos = (head)->next, n = pos->next; pos != (head); \ - pos = rcu_dereference(n), n = pos->next) + for (pos = (head)->next; \ + n = rcu_dereference(pos)->next, pos != (head); \ + pos = n) /** * list_for_each_entry_rcu - iterate over rcu list of given type @@ -474,11 +477,11 @@ static inline void list_splice_init(struct list_head *list, * the _rcu list-mutation primitives such as list_add_rcu() * as long as the traversal is guarded by rcu_read_lock(). 
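ata_pci_init_native_mode() above gains a port mask so a driver can bring up only the primary or secondary channel. A probe-time sketch using the new ATA_PORT_* flags; the wrapper name is illustrative:

#include <linux/pci.h>
#include <linux/libata.h>

static struct ata_probe_ent *example_init_ports(struct pci_dev *pdev,
						struct ata_port_info **port)
{
	/* request both legacy channels; pass only one flag for a single channel */
	return ata_pci_init_native_mode(pdev, port,
					ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
}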
*/ -#define list_for_each_entry_rcu(pos, head, member) \ - for (pos = list_entry((head)->next, typeof(*pos), member); \ - prefetch(pos->member.next), &pos->member != (head); \ - pos = rcu_dereference(list_entry(pos->member.next, \ - typeof(*pos), member))) +#define list_for_each_entry_rcu(pos, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member); \ + prefetch(rcu_dereference(pos)->member.next), \ + &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) /** @@ -492,8 +495,9 @@ static inline void list_splice_init(struct list_head *list, * as long as the traversal is guarded by rcu_read_lock(). */ #define list_for_each_continue_rcu(pos, head) \ - for ((pos) = (pos)->next; prefetch((pos)->next), (pos) != (head); \ - (pos) = rcu_dereference((pos)->next)) + for ((pos) = (pos)->next; \ + prefetch(rcu_dereference((pos))->next), (pos) != (head); \ + (pos) = (pos)->next) /* * Double linked lists with a single pointer list head. @@ -696,8 +700,9 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, pos = n) #define hlist_for_each_rcu(pos, head) \ - for ((pos) = (head)->first; pos && ({ prefetch((pos)->next); 1; }); \ - (pos) = rcu_dereference((pos)->next)) + for ((pos) = (head)->first; \ + rcu_dereference((pos)) && ({ prefetch((pos)->next); 1; }); \ + (pos) = (pos)->next) /** * hlist_for_each_entry - iterate over list of given type @@ -762,9 +767,9 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, */ #define hlist_for_each_entry_rcu(tpos, pos, head, member) \ for (pos = (head)->first; \ - pos && ({ prefetch(pos->next); 1;}) && \ + rcu_dereference(pos) && ({ prefetch(pos->next); 1;}) && \ ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ - pos = rcu_dereference(pos->next)) + pos = pos->next) #else #warning "don't include kernel headers in userspace" diff --git a/include/linux/loop.h b/include/linux/loop.h index 53fa51595443..40f63c9879d2 100644 --- a/include/linux/loop.h +++ b/include/linux/loop.h @@ -52,7 +52,7 @@ struct loop_device { unsigned lo_blocksize; void *key_data; - int old_gfp_mask; + gfp_t old_gfp_mask; spinlock_t lo_lock; struct bio *lo_bio; diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h index 9263d2db2d67..99e044b4efc6 100644 --- a/include/linux/mbcache.h +++ b/include/linux/mbcache.h @@ -22,7 +22,7 @@ struct mb_cache_entry { }; struct mb_cache_op { - int (*free)(struct mb_cache_entry *, int); + int (*free)(struct mb_cache_entry *, gfp_t); }; /* Functions on caches */ diff --git a/include/linux/mempool.h b/include/linux/mempool.h index 796220ce47cc..f2427d7394b0 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h @@ -6,7 +6,7 @@ #include -typedef void * (mempool_alloc_t)(unsigned int __nocast gfp_mask, void *pool_data); +typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data); typedef void (mempool_free_t)(void *element, void *pool_data); typedef struct mempool_s { @@ -26,17 +26,16 @@ extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data, int nid); -extern int mempool_resize(mempool_t *pool, int new_min_nr, - unsigned int __nocast gfp_mask); +extern int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask); extern void mempool_destroy(mempool_t *pool); -extern void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask); +extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask); extern void 
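The list.h changes above move the rcu_dereference() into the head/next fetch of the _rcu iterators, but the caller-visible pattern is unchanged: walk under rcu_read_lock(). A reader sketch; struct example_node and example_list are placeholders:

#include <linux/list.h>
#include <linux/rcupdate.h>

struct example_node {
	int			value;
	struct list_head	list;
};

static LIST_HEAD(example_list);

static int example_contains(int value)
{
	struct example_node *n;
	int found = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &example_list, list) {
		if (n->value == value) {
			found = 1;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}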
mempool_free(void *element, mempool_t *pool); /* * A mempool_alloc_t and mempool_free_t that get the memory from * a slab that is passed in through pool_data. */ -void *mempool_alloc_slab(unsigned int __nocast gfp_mask, void *pool_data); +void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data); void mempool_free_slab(void *element, void *pool_data); #endif /* _LINUX_MEMPOOL_H */ diff --git a/include/linux/mii.h b/include/linux/mii.h index 9b8d0476988a..68f5a0f392dd 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -158,6 +158,7 @@ extern int mii_link_ok (struct mii_if_info *mii); extern int mii_nway_restart (struct mii_if_info *mii); extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); +extern int mii_check_gmii_support(struct mii_if_info *mii); extern void mii_check_link (struct mii_if_info *mii); extern unsigned int mii_check_media (struct mii_if_info *mii, unsigned int ok_to_print, diff --git a/include/linux/mm.h b/include/linux/mm.h index 097b3a3c693d..e1649578fb0c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -747,7 +747,7 @@ extern unsigned long do_mremap(unsigned long addr, * The callback will be passed nr_to_scan == 0 when the VM is querying the * cache size, so a fastpath for that case is appropriate. */ -typedef int (*shrinker_t)(int nr_to_scan, unsigned int gfp_mask); +typedef int (*shrinker_t)(int nr_to_scan, gfp_t gfp_mask); /* * Add an aging callback. The int is the number of 'seeks' it takes diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 5ed471b58f4f..7519eb4191e7 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -302,7 +302,7 @@ void get_zone_counts(unsigned long *active, unsigned long *inactive, void build_all_zonelists(void); void wakeup_kswapd(struct zone *zone, int order); int zone_watermark_ok(struct zone *z, int order, unsigned long mark, - int alloc_type, int can_try_harder, int gfp_high); + int alloc_type, int can_try_harder, gfp_t gfp_high); #ifdef CONFIG_HAVE_MEMORY_PRESENT void memory_present(int nid, unsigned long start, unsigned long end); diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 47da39ba3f03..2f0299a448f6 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -183,7 +183,7 @@ struct of_device_id char name[32]; char type[32]; char compatible[128]; -#if __KERNEL__ +#ifdef __KERNEL__ void *data; #else kernel_ulong_t data; @@ -209,10 +209,11 @@ struct pcmcia_device_id { /* for real multi-function devices */ __u8 function; - /* for pseude multi-function devices */ + /* for pseudo multi-function devices */ __u8 device_no; - __u32 prod_id_hash[4]; + __u32 prod_id_hash[4] + __attribute__((aligned(sizeof(__u32)))); /* not matched against in kernelspace*/ #ifdef __KERNEL__ diff --git a/include/linux/namei.h b/include/linux/namei.h index 7db67b008cac..1c975d0d9e94 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -8,6 +8,7 @@ struct vfsmount; struct open_intent { int flags; int create_mode; + struct file *file; }; enum { MAX_NESTED_LINKS = 5 }; @@ -65,6 +66,13 @@ extern int FASTCALL(link_path_walk(const char *, struct nameidata *)); extern void path_release(struct nameidata *); extern void path_release_on_umount(struct nameidata *); +extern int __user_path_lookup_open(const char __user *, unsigned lookup_flags, struct nameidata *nd, int open_flags); +extern int path_lookup_open(const char *, unsigned 
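mm.h above retypes the shrinker callback's mask as gfp_t. A cache-shrinker sketch in the usual style (report the cache size when nr_to_scan is 0, refuse to recurse into the filesystem without __GFP_FS); every example_* name is a placeholder, and the callback would be registered with set_shrinker() as before:

#include <linux/mm.h>
#include <linux/gfp.h>

static int example_cached_objects;

static int example_trim(int nr)
{
	/* placeholder: drop up to nr cached objects, return what is left */
	return example_cached_objects;
}

static int example_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	if (!nr_to_scan)
		return example_cached_objects;

	if (!(gfp_mask & __GFP_FS))
		return -1;		/* cannot take FS locks from this context */

	return example_trim(nr_to_scan);
}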
lookup_flags, struct nameidata *, int open_flags); +extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry, + int (*open)(struct inode *, struct file *)); +extern struct file *nameidata_to_filp(struct nameidata *nd, int flags); +extern void release_open_intent(struct nameidata *); + extern struct dentry * lookup_one_len(const char *, struct dentry *, int); extern struct dentry * lookup_hash(struct qstr *, struct dentry *); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 5e90557715ab..deacea65273a 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -265,6 +265,8 @@ struct net_device * the interface. */ char name[IFNAMSIZ]; + /* device name hash chain */ + struct hlist_node name_hlist; /* * I/O specific fields @@ -292,6 +294,21 @@ struct net_device /* ------- Fields preinitialized in Space.c finish here ------- */ + /* Net device features */ + unsigned long features; +#define NETIF_F_SG 1 /* Scatter/gather IO. */ +#define NETIF_F_IP_CSUM 2 /* Can checksum only TCP/UDP over IPv4. */ +#define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */ +#define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */ +#define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */ +#define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */ +#define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */ +#define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */ +#define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */ +#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */ +#define NETIF_F_TSO 2048 /* Can offload TCP/IP segmentation */ +#define NETIF_F_LLTX 4096 /* LockLess TX */ + struct net_device *next_sched; /* Interface index. Unique device identifier */ @@ -316,9 +333,6 @@ struct net_device * will (read: may be cleaned up at will). */ - /* These may be needed for future network-power-down code. */ - unsigned long trans_start; /* Time (in jiffies) of last Tx */ - unsigned long last_rx; /* Time of last Rx */ unsigned short flags; /* interface flags (a la BSD) */ unsigned short gflags; @@ -328,15 +342,12 @@ struct net_device unsigned mtu; /* interface MTU value */ unsigned short type; /* interface hardware type */ unsigned short hard_header_len; /* hardware hdr length */ - void *priv; /* pointer to private data */ struct net_device *master; /* Pointer to master device of a group, * which this device is member of. */ /* Interface address info. 
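netdevice.h above moves the features word and the NETIF_F_* flags up into the structure's read-mostly area; the bits themselves are unchanged, so driver-side usage stays the same. A setup sketch with an illustrative function name:

#include <linux/netdevice.h>

static void example_setup(struct net_device *dev)
{
	/* advertise scatter/gather and IPv4 checksum offload to the stack */
	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
}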
*/ - unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ - unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address */ unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ unsigned char addr_len; /* hardware address length */ unsigned short dev_id; /* for shared network cards */ @@ -346,8 +357,6 @@ struct net_device int promiscuity; int allmulti; - int watchdog_timeo; - struct timer_list watchdog_timer; /* Protocol specific pointers */ @@ -358,32 +367,62 @@ struct net_device void *ec_ptr; /* Econet specific data */ void *ax25_ptr; /* AX.25 specific data */ - struct list_head poll_list; /* Link to poll list */ +/* + * Cache line mostly used on receive path (including eth_type_trans()) + */ + struct list_head poll_list ____cacheline_aligned_in_smp; + /* Link to poll list */ + + int (*poll) (struct net_device *dev, int *quota); int quota; int weight; + unsigned long last_rx; /* Time of last Rx */ + /* Interface address info used in eth_type_trans() */ + unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast + because most packets are unicast) */ + unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ + +/* + * Cache line mostly used on queue transmit path (qdisc) + */ + /* device queue lock */ + spinlock_t queue_lock ____cacheline_aligned_in_smp; struct Qdisc *qdisc; struct Qdisc *qdisc_sleeping; - struct Qdisc *qdisc_ingress; struct list_head qdisc_list; unsigned long tx_queue_len; /* Max frames per queue allowed */ /* ingress path synchronizer */ spinlock_t ingress_lock; + struct Qdisc *qdisc_ingress; + +/* + * One part is mostly used on xmit path (device) + */ /* hard_start_xmit synchronizer */ - spinlock_t xmit_lock; + spinlock_t xmit_lock ____cacheline_aligned_in_smp; /* cpu id of processor entered to hard_start_xmit or -1, if nobody entered there. */ int xmit_lock_owner; - /* device queue lock */ - spinlock_t queue_lock; + void *priv; /* pointer to private data */ + int (*hard_start_xmit) (struct sk_buff *skb, + struct net_device *dev); + /* These may be needed for future network-power-down code. */ + unsigned long trans_start; /* Time (in jiffies) of last Tx */ + + int watchdog_timeo; /* used by dev_watchdog() */ + struct timer_list watchdog_timer; + +/* + * refcnt is a very hot point, so align it on SMP + */ /* Number of references to this device */ - atomic_t refcnt; + atomic_t refcnt ____cacheline_aligned_in_smp; + /* delayed register/unregister */ struct list_head todo_list; - /* device name hash chain */ - struct hlist_node name_hlist; /* device index hash chain */ struct hlist_node index_hlist; @@ -396,21 +435,6 @@ struct net_device NETREG_RELEASED, /* called free_netdev */ } reg_state; - /* Net device features */ - unsigned long features; -#define NETIF_F_SG 1 /* Scatter/gather IO. */ -#define NETIF_F_IP_CSUM 2 /* Can checksum only TCP/UDP over IPv4. */ -#define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */ -#define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */ -#define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */ -#define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */ -#define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */ -#define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */ -#define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */ -#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */ -#define NETIF_F_TSO 2048 /* Can offload TCP/IP segmentation */ -#define NETIF_F_LLTX 4096 /* LockLess TX */ - /* Called after device is detached from network. 
*/ void (*uninit)(struct net_device *dev); /* Called after last user reference disappears. */ @@ -419,10 +443,7 @@ struct net_device /* Pointers to interface service routines. */ int (*open)(struct net_device *dev); int (*stop)(struct net_device *dev); - int (*hard_start_xmit) (struct sk_buff *skb, - struct net_device *dev); #define HAVE_NETDEV_POLL - int (*poll) (struct net_device *dev, int *quota); int (*hard_header) (struct sk_buff *skb, struct net_device *dev, unsigned short type, @@ -856,11 +877,9 @@ static inline void netif_rx_complete(struct net_device *dev) static inline void netif_poll_disable(struct net_device *dev) { - while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state)) { + while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state)) /* No hurry. */ - current->state = TASK_INTERRUPTIBLE; - schedule_timeout(1); - } + schedule_timeout_interruptible(1); } static inline void netif_poll_enable(struct net_device *dev) diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 1d5b10ae2399..f08e870100f4 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -41,11 +41,15 @@ enum nfnetlink_groups { struct nfattr { u_int16_t nfa_len; - u_int16_t nfa_type; + u_int16_t nfa_type; /* we use 15 bits for the type, and the highest + * bit to indicate whether the payload is nested */ } __attribute__ ((packed)); -/* FIXME: Shamelessly copy and pasted from rtnetlink.h, it's time - * to put this in a generic file */ +/* FIXME: Apart from NFNL_NFA_NESTED shamelessly copy and pasted from + * rtnetlink.h, it's time to put this in a generic file */ + +#define NFNL_NFA_NEST 0x8000 +#define NFA_TYPE(attr) ((attr)->nfa_type & 0x7fff) #define NFA_ALIGNTO 4 #define NFA_ALIGN(len) (((len) + NFA_ALIGNTO - 1) & ~(NFA_ALIGNTO - 1)) @@ -59,7 +63,7 @@ struct nfattr #define NFA_PAYLOAD(nfa) ((int)((nfa)->nfa_len) - NFA_LENGTH(0)) #define NFA_NEST(skb, type) \ ({ struct nfattr *__start = (struct nfattr *) (skb)->tail; \ - NFA_PUT(skb, type, 0, NULL); \ + NFA_PUT(skb, (NFNL_NFA_NEST | type), 0, NULL); \ __start; }) #define NFA_NEST_END(skb, start) \ ({ (start)->nfa_len = ((skb)->tail - (unsigned char *) (start)); \ diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h index 5c55751c78e4..116fcaced909 100644 --- a/include/linux/netfilter/nfnetlink_conntrack.h +++ b/include/linux/netfilter/nfnetlink_conntrack.h @@ -70,15 +70,24 @@ enum ctattr_l4proto { enum ctattr_protoinfo { CTA_PROTOINFO_UNSPEC, - CTA_PROTOINFO_TCP_STATE, + CTA_PROTOINFO_TCP, __CTA_PROTOINFO_MAX }; #define CTA_PROTOINFO_MAX (__CTA_PROTOINFO_MAX - 1) +enum ctattr_protoinfo_tcp { + CTA_PROTOINFO_TCP_UNSPEC, + CTA_PROTOINFO_TCP_STATE, + __CTA_PROTOINFO_TCP_MAX +}; +#define CTA_PROTOINFO_TCP_MAX (__CTA_PROTOINFO_TCP_MAX - 1) + enum ctattr_counters { CTA_COUNTERS_UNSPEC, - CTA_COUNTERS_PACKETS, - CTA_COUNTERS_BYTES, + CTA_COUNTERS_PACKETS, /* old 64bit counters */ + CTA_COUNTERS_BYTES, /* old 64bit counters */ + CTA_COUNTERS32_PACKETS, + CTA_COUNTERS32_BYTES, __CTA_COUNTERS_MAX }; #define CTA_COUNTERS_MAX (__CTA_COUNTERS_MAX - 1) diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h index bace72a76cc4..d078bb91d9e5 100644 --- a/include/linux/netfilter_ipv4/ip_conntrack.h +++ b/include/linux/netfilter_ipv4/ip_conntrack.h @@ -117,6 +117,10 @@ enum ip_conntrack_events /* NAT info */ IPCT_NATINFO_BIT = 10, IPCT_NATINFO = (1 << IPCT_NATINFO_BIT), + + /* Counter highest bit has 
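nfnetlink.h above reserves the top bit of nfa_type (NFNL_NFA_NEST) to mark nested attributes, and the conntrack attributes grow a nested CTA_PROTOINFO_TCP level. A dump-side sketch of nesting a TCP state attribute; the function name is a placeholder and error handling follows the NFA_PUT goto convention:

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static int example_dump_tcp_state(struct sk_buff *skb, u_int8_t state)
{
	struct nfattr *nest;

	nest = NFA_NEST(skb, CTA_PROTOINFO_TCP);	/* sets NFNL_NFA_NEST */
	NFA_PUT(skb, CTA_PROTOINFO_TCP_STATE, sizeof(state), &state);
	NFA_NEST_END(skb, nest);

	return 0;

nfattr_failure:
	return -1;
}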
been set */ + IPCT_COUNTER_FILLING_BIT = 11, + IPCT_COUNTER_FILLING = (1 << IPCT_COUNTER_FILLING_BIT), }; enum ip_conntrack_expect_events { @@ -192,8 +196,8 @@ do { \ struct ip_conntrack_counter { - u_int64_t packets; - u_int64_t bytes; + u_int32_t packets; + u_int32_t bytes; }; struct ip_conntrack_helper; @@ -332,11 +336,28 @@ extern void need_ip_conntrack(void); extern int invert_tuplepr(struct ip_conntrack_tuple *inverse, const struct ip_conntrack_tuple *orig); +extern void __ip_ct_refresh_acct(struct ip_conntrack *ct, + enum ip_conntrack_info ctinfo, + const struct sk_buff *skb, + unsigned long extra_jiffies, + int do_acct); + +/* Refresh conntrack for this many jiffies and do accounting */ +static inline void ip_ct_refresh_acct(struct ip_conntrack *ct, + enum ip_conntrack_info ctinfo, + const struct sk_buff *skb, + unsigned long extra_jiffies) +{ + __ip_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, 1); +} + /* Refresh conntrack for this many jiffies */ -extern void ip_ct_refresh_acct(struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - const struct sk_buff *skb, - unsigned long extra_jiffies); +static inline void ip_ct_refresh(struct ip_conntrack *ct, + const struct sk_buff *skb, + unsigned long extra_jiffies) +{ + __ip_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0); +} /* These are for NAT. Icky. */ /* Update TCP window tracking data when NAT mangles the packet */ diff --git a/include/linux/netfilter_ipv4/ip_conntrack_pptp.h b/include/linux/netfilter_ipv4/ip_conntrack_pptp.h index 389e3851d52f..816144c75de0 100644 --- a/include/linux/netfilter_ipv4/ip_conntrack_pptp.h +++ b/include/linux/netfilter_ipv4/ip_conntrack_pptp.h @@ -60,8 +60,8 @@ struct ip_ct_pptp_expect { struct pptp_pkt_hdr { __u16 packetLength; - __u16 packetType; - __u32 magicCookie; + __be16 packetType; + __be32 magicCookie; }; /* PptpControlMessageType values */ @@ -93,7 +93,7 @@ struct pptp_pkt_hdr { #define PPTP_REMOVE_DEVICE_ERROR 6 struct PptpControlHeader { - __u16 messageType; + __be16 messageType; __u16 reserved; }; @@ -106,13 +106,13 @@ struct PptpControlHeader { #define PPTP_BEARER_CAP_DIGITAL 0x2 struct PptpStartSessionRequest { - __u16 protocolVersion; + __be16 protocolVersion; __u8 reserved1; __u8 reserved2; - __u32 framingCapability; - __u32 bearerCapability; - __u16 maxChannels; - __u16 firmwareRevision; + __be32 framingCapability; + __be32 bearerCapability; + __be16 maxChannels; + __be16 firmwareRevision; __u8 hostName[64]; __u8 vendorString[64]; }; @@ -125,13 +125,13 @@ struct PptpStartSessionRequest { #define PPTP_START_UNKNOWN_PROTOCOL 5 struct PptpStartSessionReply { - __u16 protocolVersion; + __be16 protocolVersion; __u8 resultCode; __u8 generalErrorCode; - __u32 framingCapability; - __u32 bearerCapability; - __u16 maxChannels; - __u16 firmwareRevision; + __be32 framingCapability; + __be32 bearerCapability; + __be16 maxChannels; + __be16 firmwareRevision; __u8 hostName[64]; __u8 vendorString[64]; }; @@ -155,7 +155,7 @@ struct PptpStopSessionReply { }; struct PptpEchoRequest { - __u32 identNumber; + __be32 identNumber; }; /* PptpEchoReplyResultCode */ @@ -163,7 +163,7 @@ struct PptpEchoRequest { #define PPTP_ECHO_GENERAL_ERROR 2 struct PptpEchoReply { - __u32 identNumber; + __be32 identNumber; __u8 resultCode; __u8 generalErrorCode; __u16 reserved; @@ -180,16 +180,16 @@ struct PptpEchoReply { #define PPTP_DONT_CARE_BEARER_TYPE 3 struct PptpOutCallRequest { - __u16 callID; - __u16 callSerialNumber; - __u32 minBPS; - __u32 maxBPS; - __u32 bearerType; - __u32 framingType; - __u16 packetWindow; 
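ip_conntrack.h above splits timeout refresh into __ip_ct_refresh_acct() with an accounting flag, wrapped by ip_ct_refresh_acct() and ip_ct_refresh(). A sketch of a protocol helper's packet() hook using the accounting variant; the names and the timeout value are placeholders:

#include <linux/netfilter.h>
#include <linux/netfilter_ipv4/ip_conntrack.h>

static unsigned long example_timeout = 600 * HZ;	/* placeholder timeout */

static int example_packet(struct ip_conntrack *ct,
			  const struct sk_buff *skb,
			  enum ip_conntrack_info ctinfo)
{
	/* extend the connection's lifetime and account this packet's bytes */
	ip_ct_refresh_acct(ct, ctinfo, skb, example_timeout);
	return NF_ACCEPT;
}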
- __u16 packetProcDelay; + __be16 callID; + __be16 callSerialNumber; + __be32 minBPS; + __be32 maxBPS; + __be32 bearerType; + __be32 framingType; + __be16 packetWindow; + __be16 packetProcDelay; __u16 reserved1; - __u16 phoneNumberLength; + __be16 phoneNumberLength; __u16 reserved2; __u8 phoneNumber[64]; __u8 subAddress[64]; @@ -205,24 +205,24 @@ struct PptpOutCallRequest { #define PPTP_OUTCALL_DONT_ACCEPT 7 struct PptpOutCallReply { - __u16 callID; - __u16 peersCallID; + __be16 callID; + __be16 peersCallID; __u8 resultCode; __u8 generalErrorCode; - __u16 causeCode; - __u32 connectSpeed; - __u16 packetWindow; - __u16 packetProcDelay; - __u32 physChannelID; + __be16 causeCode; + __be32 connectSpeed; + __be16 packetWindow; + __be16 packetProcDelay; + __be32 physChannelID; }; struct PptpInCallRequest { - __u16 callID; - __u16 callSerialNumber; - __u32 callBearerType; - __u32 physChannelID; - __u16 dialedNumberLength; - __u16 dialingNumberLength; + __be16 callID; + __be16 callSerialNumber; + __be32 callBearerType; + __be32 physChannelID; + __be16 dialedNumberLength; + __be16 dialingNumberLength; __u8 dialedNumber[64]; __u8 dialingNumber[64]; __u8 subAddress[64]; @@ -234,61 +234,54 @@ struct PptpInCallRequest { #define PPTP_INCALL_DONT_ACCEPT 3 struct PptpInCallReply { - __u16 callID; - __u16 peersCallID; + __be16 callID; + __be16 peersCallID; __u8 resultCode; __u8 generalErrorCode; - __u16 packetWindow; - __u16 packetProcDelay; + __be16 packetWindow; + __be16 packetProcDelay; __u16 reserved; }; struct PptpInCallConnected { - __u16 peersCallID; + __be16 peersCallID; __u16 reserved; - __u32 connectSpeed; - __u16 packetWindow; - __u16 packetProcDelay; - __u32 callFramingType; + __be32 connectSpeed; + __be16 packetWindow; + __be16 packetProcDelay; + __be32 callFramingType; }; struct PptpClearCallRequest { - __u16 callID; + __be16 callID; __u16 reserved; }; struct PptpCallDisconnectNotify { - __u16 callID; + __be16 callID; __u8 resultCode; __u8 generalErrorCode; - __u16 causeCode; + __be16 causeCode; __u16 reserved; __u8 callStatistics[128]; }; struct PptpWanErrorNotify { - __u16 peersCallID; + __be16 peersCallID; __u16 reserved; - __u32 crcErrors; - __u32 framingErrors; - __u32 hardwareOverRuns; - __u32 bufferOverRuns; - __u32 timeoutErrors; - __u32 alignmentErrors; + __be32 crcErrors; + __be32 framingErrors; + __be32 hardwareOverRuns; + __be32 bufferOverRuns; + __be32 timeoutErrors; + __be32 alignmentErrors; }; struct PptpSetLinkInfo { - __u16 peersCallID; + __be16 peersCallID; __u16 reserved; - __u32 sendAccm; - __u32 recvAccm; -}; - - -struct pptp_priv_data { - __u16 call_id; - __u16 mcall_id; - __u16 pcall_id; + __be32 sendAccm; + __be32 recvAccm; }; union pptp_ctrl_union { diff --git a/include/linux/netfilter_ipv4/ip_conntrack_protocol.h b/include/linux/netfilter_ipv4/ip_conntrack_protocol.h index b6b99be8632a..2c76b879e3dc 100644 --- a/include/linux/netfilter_ipv4/ip_conntrack_protocol.h +++ b/include/linux/netfilter_ipv4/ip_conntrack_protocol.h @@ -52,6 +52,9 @@ struct ip_conntrack_protocol int (*to_nfattr)(struct sk_buff *skb, struct nfattr *nfa, const struct ip_conntrack *ct); + /* convert nfnetlink attributes to protoinfo */ + int (*from_nfattr)(struct nfattr *tb[], struct ip_conntrack *ct); + int (*tuple_to_nfattr)(struct sk_buff *skb, const struct ip_conntrack_tuple *t); int (*nfattr_to_tuple)(struct nfattr *tb[], diff --git a/include/linux/netfilter_ipv4/ip_conntrack_tuple.h b/include/linux/netfilter_ipv4/ip_conntrack_tuple.h index 14dc0f7b6556..3232db11a4e5 100644 --- 
a/include/linux/netfilter_ipv4/ip_conntrack_tuple.h +++ b/include/linux/netfilter_ipv4/ip_conntrack_tuple.h @@ -1,6 +1,8 @@ #ifndef _IP_CONNTRACK_TUPLE_H #define _IP_CONNTRACK_TUPLE_H +#include + /* A `tuple' is a structure containing the information to uniquely identify a connection. ie. if two packets have the same tuple, they are in the same connection; if not, they are not. @@ -17,7 +19,7 @@ union ip_conntrack_manip_proto u_int16_t all; struct { - u_int16_t port; + __be16 port; } tcp; struct { u_int16_t port; @@ -29,7 +31,7 @@ union ip_conntrack_manip_proto u_int16_t port; } sctp; struct { - u_int16_t key; /* key is 32bit, pptp only uses 16 */ + __be16 key; /* key is 32bit, pptp only uses 16 */ } gre; }; @@ -65,7 +67,7 @@ struct ip_conntrack_tuple u_int16_t port; } sctp; struct { - u_int16_t key; /* key is 32bit, + __be16 key; /* key is 32bit, * pptp only uses 16 */ } gre; } u; diff --git a/include/linux/netfilter_ipv4/ip_nat.h b/include/linux/netfilter_ipv4/ip_nat.h index e201ec6e9905..41a107de17cf 100644 --- a/include/linux/netfilter_ipv4/ip_nat.h +++ b/include/linux/netfilter_ipv4/ip_nat.h @@ -58,10 +58,6 @@ extern rwlock_t ip_nat_lock; struct ip_nat_info { struct list_head bysource; - - /* Helper (NULL if none). */ - struct ip_nat_helper *helper; - struct ip_nat_seq seq[IP_CT_DIR_MAX]; }; diff --git a/include/linux/netfilter_ipv4/ip_nat_core.h b/include/linux/netfilter_ipv4/ip_nat_core.h index 3b50eb91f007..30db23f06b03 100644 --- a/include/linux/netfilter_ipv4/ip_nat_core.h +++ b/include/linux/netfilter_ipv4/ip_nat_core.h @@ -5,16 +5,14 @@ /* This header used to share core functionality between the standalone NAT module, and the compatibility layer's use of NAT for masquerading. */ -extern int ip_nat_init(void); -extern void ip_nat_cleanup(void); -extern unsigned int nat_packet(struct ip_conntrack *ct, +extern unsigned int ip_nat_packet(struct ip_conntrack *ct, enum ip_conntrack_info conntrackinfo, unsigned int hooknum, struct sk_buff **pskb); -extern int icmp_reply_translation(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_nat_manip_type manip, - enum ip_conntrack_dir dir); +extern int ip_nat_icmp_reply_translation(struct sk_buff **pskb, + struct ip_conntrack *ct, + enum ip_nat_manip_type manip, + enum ip_conntrack_dir dir); #endif /* _IP_NAT_CORE_H */ diff --git a/include/linux/netlink.h b/include/linux/netlink.h index bdebdc564506..ba25ca874c20 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -131,7 +131,7 @@ extern struct sock *netlink_kernel_create(int unit, unsigned int groups, void (* extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err); extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock); extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid, - __u32 group, unsigned int __nocast allocation); + __u32 group, gfp_t allocation); extern void netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code); extern int netlink_register_notifier(struct notifier_block *nb); extern int netlink_unregister_notifier(struct notifier_block *nb); diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 5ade54a78dbb..ca5a8733000f 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -86,7 +86,7 @@ static inline void netpoll_poll_unlock(void *have) #else #define netpoll_rx(a) 0 -#define netpoll_poll_lock(a) 0 +#define netpoll_poll_lock(a) NULL #define netpoll_poll_unlock(a) #endif diff --git a/include/linux/nfs_fs.h 
b/include/linux/nfs_fs.h index 9a6047ff1b25..325fe7ae49bb 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -41,6 +41,10 @@ #define NFS_MAX_FILE_IO_BUFFER_SIZE 32768 #define NFS_DEF_FILE_IO_BUFFER_SIZE 4096 +/* Default timeout values */ +#define NFS_MAX_UDP_TIMEOUT (60*HZ) +#define NFS_MAX_TCP_TIMEOUT (600*HZ) + /* * superblock magic number for NFS */ @@ -137,6 +141,7 @@ struct nfs_inode { unsigned long attrtimeo_timestamp; __u64 change_attr; /* v4 only */ + unsigned long last_updated; /* "Generation counter" for the attribute cache. This is * bumped whenever we update the metadata on the * server. @@ -236,13 +241,17 @@ static inline int nfs_caches_unstable(struct inode *inode) return atomic_read(&NFS_I(inode)->data_updates) != 0; } +static inline void nfs_mark_for_revalidate(struct inode *inode) +{ + spin_lock(&inode->i_lock); + NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS; + spin_unlock(&inode->i_lock); +} + static inline void NFS_CACHEINV(struct inode *inode) { - if (!nfs_caches_unstable(inode)) { - spin_lock(&inode->i_lock); - NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS; - spin_unlock(&inode->i_lock); - } + if (!nfs_caches_unstable(inode)) + nfs_mark_for_revalidate(inode); } static inline int nfs_server_capable(struct inode *inode, int cap) @@ -276,7 +285,7 @@ static inline long nfs_save_change_attribute(struct inode *inode) static inline int nfs_verify_change_attribute(struct inode *inode, unsigned long chattr) { return !nfs_caches_unstable(inode) - && chattr == NFS_I(inode)->cache_change_attribute; + && time_after_eq(chattr, NFS_I(inode)->cache_change_attribute); } /* @@ -286,6 +295,7 @@ extern void nfs_zap_caches(struct inode *); extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *, struct nfs_fattr *); extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); +extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); extern int nfs_permission(struct inode *, int, struct nameidata *); extern int nfs_access_get_cached(struct inode *, struct rpc_cred *, struct nfs_access_entry *); @@ -312,6 +322,12 @@ extern void nfs_file_clear_open_context(struct file *filp); /* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. */ extern u32 root_nfs_parse_addr(char *name); /*__init*/ +static inline void nfs_fattr_init(struct nfs_fattr *fattr) +{ + fattr->valid = 0; + fattr->time_start = jiffies; +} + /* * linux/fs/nfs/file.c */ diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index a2bf6914ff1b..40718669b9c8 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -41,7 +41,7 @@ struct nfs_fattr { __u32 bitmap[2]; /* NFSv4 returned attribute bitmap */ __u64 change_attr; /* NFSv4 change attribute */ __u64 pre_change_attr;/* pre-op NFSv4 change attribute */ - unsigned long timestamp; + unsigned long time_start; }; #define NFS_ATTR_WCC 0x0001 /* pre-op WCC data */ @@ -96,12 +96,13 @@ struct nfs4_change_info { u64 after; }; +struct nfs_seqid; /* * Arguments to the open call. 
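Editorial aside: a short sketch, not part of the patch, of how the two new nfs_fs.h helpers above are intended to be used around an RPC call. The surrounding function is hypothetical; nfs_fattr_init() and nfs_mark_for_revalidate() are exactly the helpers added in that hunk.

#include <linux/nfs_xdr.h>
#include <linux/nfs_fs.h>

static void example_around_rpc(struct inode *inode, struct nfs_fattr *fattr)
{
	/* Reset the attribute buffer and stamp the start of the call,
	 * so the reply can later be aged against newer updates. */
	nfs_fattr_init(fattr);

	/* ... issue the NFS RPC and decode the reply here ... */

	/* The operation changed the file on the server: force attribute
	 * and access-cache revalidation on the next use of this inode. */
	nfs_mark_for_revalidate(inode);
}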
*/ struct nfs_openargs { const struct nfs_fh * fh; - __u32 seqid; + struct nfs_seqid * seqid; int open_flags; __u64 clientid; __u32 id; @@ -123,6 +124,7 @@ struct nfs_openres { struct nfs4_change_info cinfo; __u32 rflags; struct nfs_fattr * f_attr; + struct nfs_fattr * dir_attr; const struct nfs_server *server; int delegation_type; nfs4_stateid delegation; @@ -136,7 +138,7 @@ struct nfs_openres { struct nfs_open_confirmargs { const struct nfs_fh * fh; nfs4_stateid stateid; - __u32 seqid; + struct nfs_seqid * seqid; }; struct nfs_open_confirmres { @@ -148,13 +150,16 @@ struct nfs_open_confirmres { */ struct nfs_closeargs { struct nfs_fh * fh; - nfs4_stateid stateid; - __u32 seqid; + nfs4_stateid * stateid; + struct nfs_seqid * seqid; int open_flags; + const u32 * bitmask; }; struct nfs_closeres { nfs4_stateid stateid; + struct nfs_fattr * fattr; + const struct nfs_server *server; }; /* * * Arguments to the lock,lockt, and locku call. @@ -164,30 +169,19 @@ struct nfs_lowner { u32 id; }; -struct nfs_open_to_lock { - __u32 open_seqid; - nfs4_stateid open_stateid; - __u32 lock_seqid; - struct nfs_lowner lock_owner; -}; - -struct nfs_exist_lock { - nfs4_stateid stateid; - __u32 seqid; -}; - struct nfs_lock_opargs { + struct nfs_seqid * lock_seqid; + nfs4_stateid * lock_stateid; + struct nfs_seqid * open_seqid; + nfs4_stateid * open_stateid; + struct nfs_lowner lock_owner; __u32 reclaim; __u32 new_lock_owner; - union { - struct nfs_open_to_lock *open_lock; - struct nfs_exist_lock *exist_lock; - } u; }; struct nfs_locku_opargs { - __u32 seqid; - nfs4_stateid stateid; + struct nfs_seqid * seqid; + nfs4_stateid * stateid; }; struct nfs_lockargs { @@ -262,6 +256,7 @@ struct nfs_writeargs { enum nfs3_stable_how stable; unsigned int pgbase; struct page ** pages; + const u32 * bitmask; }; struct nfs_writeverf { @@ -273,6 +268,7 @@ struct nfs_writeres { struct nfs_fattr * fattr; struct nfs_writeverf * verf; __u32 count; + const struct nfs_server *server; }; /* @@ -550,6 +546,7 @@ struct nfs4_create_res { struct nfs_fh * fh; struct nfs_fattr * fattr; struct nfs4_change_info dir_cinfo; + struct nfs_fattr * dir_fattr; }; struct nfs4_fsinfo_arg { @@ -571,8 +568,17 @@ struct nfs4_link_arg { const struct nfs_fh * fh; const struct nfs_fh * dir_fh; const struct qstr * name; + const u32 * bitmask; }; +struct nfs4_link_res { + const struct nfs_server * server; + struct nfs_fattr * fattr; + struct nfs4_change_info cinfo; + struct nfs_fattr * dir_attr; +}; + + struct nfs4_lookup_arg { const struct nfs_fh * dir_fh; const struct qstr * name; @@ -619,6 +625,13 @@ struct nfs4_readlink { struct nfs4_remove_arg { const struct nfs_fh * fh; const struct qstr * name; + const u32 * bitmask; +}; + +struct nfs4_remove_res { + const struct nfs_server * server; + struct nfs4_change_info cinfo; + struct nfs_fattr * dir_attr; }; struct nfs4_rename_arg { @@ -626,11 +639,15 @@ struct nfs4_rename_arg { const struct nfs_fh * new_dir; const struct qstr * old_name; const struct qstr * new_name; + const u32 * bitmask; }; struct nfs4_rename_res { + const struct nfs_server * server; struct nfs4_change_info old_cinfo; + struct nfs_fattr * old_fattr; struct nfs4_change_info new_cinfo; + struct nfs_fattr * new_fattr; }; struct nfs4_setclientid { @@ -722,7 +739,7 @@ struct nfs_rpc_ops { int (*write) (struct nfs_write_data *); int (*commit) (struct nfs_write_data *); int (*create) (struct inode *, struct dentry *, - struct iattr *, int); + struct iattr *, int, struct nameidata *); int (*remove) (struct inode *, struct qstr *); int 
(*unlink_setup) (struct rpc_message *, struct dentry *, struct qstr *); diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index d9a25647a295..ba6c310a055f 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -19,18 +19,19 @@ #define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */ #define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */ -static inline unsigned int __nocast mapping_gfp_mask(struct address_space * mapping) +static inline gfp_t mapping_gfp_mask(struct address_space * mapping) { - return mapping->flags & __GFP_BITS_MASK; + return (__force gfp_t)mapping->flags & __GFP_BITS_MASK; } /* * This is non-atomic. Only to be used before the mapping is activated. * Probably needs a barrier... */ -static inline void mapping_set_gfp_mask(struct address_space *m, int mask) +static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) { - m->flags = (m->flags & ~__GFP_BITS_MASK) | mask; + m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) | + (__force unsigned long)mask; } /* @@ -69,7 +70,7 @@ extern struct page * find_lock_page(struct address_space *mapping, extern struct page * find_trylock_page(struct address_space *mapping, unsigned long index); extern struct page * find_or_create_page(struct address_space *mapping, - unsigned long index, unsigned int gfp_mask); + unsigned long index, gfp_t gfp_mask); unsigned find_get_pages(struct address_space *mapping, pgoff_t start, unsigned int nr_pages, struct page **pages); unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, @@ -92,9 +93,9 @@ extern int read_cache_pages(struct address_space *mapping, struct list_head *pages, filler_t *filler, void *data); int add_to_page_cache(struct page *page, struct address_space *mapping, - unsigned long index, int gfp_mask); + unsigned long index, gfp_t gfp_mask); int add_to_page_cache_lru(struct page *page, struct address_space *mapping, - unsigned long index, int gfp_mask); + unsigned long index, gfp_t gfp_mask); extern void remove_from_page_cache(struct page *page); extern void __remove_from_page_cache(struct page *page); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index c49d28eca561..71834f05504f 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -185,6 +185,7 @@ #define PCI_DEVICE_ID_LSI_61C102 0x0901 #define PCI_DEVICE_ID_LSI_63C815 0x1000 #define PCI_DEVICE_ID_LSI_SAS1064 0x0050 +#define PCI_DEVICE_ID_LSI_SAS1064R 0x0411 #define PCI_DEVICE_ID_LSI_SAS1066 0x005E #define PCI_DEVICE_ID_LSI_SAS1068 0x0054 #define PCI_DEVICE_ID_LSI_SAS1064A 0x005C @@ -392,6 +393,7 @@ #define PCI_DEVICE_ID_NS_87560_USB 0x0012 #define PCI_DEVICE_ID_NS_83815 0x0020 #define PCI_DEVICE_ID_NS_83820 0x0022 +#define PCI_DEVICE_ID_NS_SATURN 0x0035 #define PCI_DEVICE_ID_NS_SCx200_BRIDGE 0x0500 #define PCI_DEVICE_ID_NS_SCx200_SMI 0x0501 #define PCI_DEVICE_ID_NS_SCx200_IDE 0x0502 @@ -559,6 +561,7 @@ #define PCI_VENDOR_ID_DELL 0x1028 #define PCI_DEVICE_ID_DELL_RACIII 0x0008 #define PCI_DEVICE_ID_DELL_RAC4 0x0012 +#define PCI_DEVICE_ID_DELL_PERC5 0x0015 #define PCI_VENDOR_ID_MATROX 0x102B #define PCI_DEVICE_ID_MATROX_MGA_2 0x0518 @@ -720,6 +723,7 @@ #define PCI_DEVICE_ID_HP_DIVA_EVEREST 0x1282 #define PCI_DEVICE_ID_HP_DIVA_AUX 0x1290 #define PCI_DEVICE_ID_HP_DIVA_RMP3 0x1301 +#define PCI_DEVICE_ID_HP_DIVA_HURRICANE 0x132a #define PCI_DEVICE_ID_HP_CISS 0x3210 #define PCI_DEVICE_ID_HP_CISSA 0x3220 #define PCI_DEVICE_ID_HP_CISSB 0x3222 @@ -769,6 +773,8 @@ #define PCI_DEVICE_ID_TI_TVP4010 0x3d04 
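Editorial aside: with mapping_gfp_mask() and mapping_set_gfp_mask() above now typed as gfp_t, a caller that restricts a mapping's allocation flags looks like the sketch below. This is not part of the patch; the inode is a hypothetical stand-in for a filesystem call site.

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

static void example_limit_mapping_to_lowmem(struct inode *inode)
{
	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);

	/* Clear __GFP_HIGHMEM so page-cache pages for this mapping come
	 * from lowmem; sparse now checks that only gfp_t values are
	 * combined and stored here. */
	mapping_set_gfp_mask(inode->i_mapping, gfp & ~__GFP_HIGHMEM);
}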
#define PCI_DEVICE_ID_TI_TVP4020 0x3d07 #define PCI_DEVICE_ID_TI_4450 0x8011 +#define PCI_DEVICE_ID_TI_XX21_XX11 0x8031 +#define PCI_DEVICE_ID_TI_X515 0x8036 #define PCI_DEVICE_ID_TI_1130 0xac12 #define PCI_DEVICE_ID_TI_1031 0xac13 #define PCI_DEVICE_ID_TI_1131 0xac15 @@ -785,12 +791,17 @@ #define PCI_DEVICE_ID_TI_4451 0xac42 #define PCI_DEVICE_ID_TI_4510 0xac44 #define PCI_DEVICE_ID_TI_4520 0xac46 +#define PCI_DEVICE_ID_TI_7510 0xac47 +#define PCI_DEVICE_ID_TI_7610 0xac48 +#define PCI_DEVICE_ID_TI_7410 0xac49 #define PCI_DEVICE_ID_TI_1410 0xac50 #define PCI_DEVICE_ID_TI_1420 0xac51 #define PCI_DEVICE_ID_TI_1451A 0xac52 #define PCI_DEVICE_ID_TI_1620 0xac54 #define PCI_DEVICE_ID_TI_1520 0xac55 #define PCI_DEVICE_ID_TI_1510 0xac56 +#define PCI_DEVICE_ID_TI_X620 0xac8d +#define PCI_DEVICE_ID_TI_X420 0xac8e #define PCI_VENDOR_ID_SONY 0x104d #define PCI_DEVICE_ID_SONY_CXD3222 0x8039 @@ -976,6 +987,7 @@ #define PCI_DEVICE_ID_SUN_SABRE 0xa000 #define PCI_DEVICE_ID_SUN_HUMMINGBIRD 0xa001 #define PCI_DEVICE_ID_SUN_TOMATILLO 0xa801 +#define PCI_DEVICE_ID_SUN_CASSINI 0xabba #define PCI_VENDOR_ID_CMD 0x1095 #define PCI_DEVICE_ID_CMD_640 0x0640 @@ -1268,7 +1280,8 @@ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA 0x0266 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2 0x0267 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE 0x036E -#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x036F +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x037E +#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2 0x037F #define PCI_DEVICE_ID_NVIDIA_NVENET_12 0x0268 #define PCI_DEVICE_ID_NVIDIA_NVENET_13 0x0269 #define PCI_DEVICE_ID_NVIDIA_MCP51_AUDIO 0x026B @@ -2186,7 +2199,12 @@ #define PCI_DEVICE_ID_ENE_1211 0x1211 #define PCI_DEVICE_ID_ENE_1225 0x1225 #define PCI_DEVICE_ID_ENE_1410 0x1410 +#define PCI_DEVICE_ID_ENE_710 0x1411 +#define PCI_DEVICE_ID_ENE_712 0x1412 #define PCI_DEVICE_ID_ENE_1420 0x1420 +#define PCI_DEVICE_ID_ENE_720 0x1421 +#define PCI_DEVICE_ID_ENE_722 0x1422 + #define PCI_VENDOR_ID_CHELSIO 0x1425 #define PCI_VENDOR_ID_MIPS 0x153f @@ -2679,6 +2697,7 @@ #define PCI_SUBVENDOR_ID_EXSYS 0xd84d #define PCI_SUBDEVICE_ID_EXSYS_4014 0x4014 +#define PCI_SUBDEVICE_ID_EXSYS_4055 0x4055 #define PCI_VENDOR_ID_TIGERJET 0xe159 #define PCI_DEVICE_ID_TIGERJET_300 0x0001 diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h index 4caedddaa033..4bc241290c24 100644 --- a/include/linux/posix_acl.h +++ b/include/linux/posix_acl.h @@ -71,11 +71,11 @@ posix_acl_release(struct posix_acl *acl) /* posix_acl.c */ -extern struct posix_acl *posix_acl_alloc(int, unsigned int __nocast); -extern struct posix_acl *posix_acl_clone(const struct posix_acl *, unsigned int __nocast); +extern struct posix_acl *posix_acl_alloc(int, gfp_t); +extern struct posix_acl *posix_acl_clone(const struct posix_acl *, gfp_t); extern int posix_acl_valid(const struct posix_acl *); extern int posix_acl_permission(struct inode *, const struct posix_acl *, int); -extern struct posix_acl *posix_acl_from_mode(mode_t, unsigned int __nocast); +extern struct posix_acl *posix_acl_from_mode(mode_t, gfp_t); extern int posix_acl_equiv_mode(const struct posix_acl *, mode_t *); extern int posix_acl_create_masq(struct posix_acl *, mode_t *); extern int posix_acl_chmod_masq(struct posix_acl *, mode_t); diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 9c51917b1cce..9f0f9281f42a 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -24,7 +24,7 @@ struct radix_tree_root { unsigned int height; - unsigned int gfp_mask; + gfp_t 
gfp_mask; struct radix_tree_node *rnode; }; @@ -50,7 +50,7 @@ void *radix_tree_delete(struct radix_tree_root *, unsigned long); unsigned int radix_tree_gang_lookup(struct radix_tree_root *root, void **results, unsigned long first_index, unsigned int max_items); -int radix_tree_preload(unsigned int __nocast gfp_mask); +int radix_tree_preload(gfp_t gfp_mask); void radix_tree_init(void); void *radix_tree_tag_set(struct radix_tree_root *root, unsigned long index, int tag); diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 4e65eb44adfd..70191a5a148f 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -94,6 +94,7 @@ struct rcu_data { long batch; /* Batch # for current RCU batch */ struct rcu_head *nxtlist; struct rcu_head **nxttail; + long count; /* # of queued items */ struct rcu_head *curlist; struct rcu_head **curtail; struct rcu_head *donelist; diff --git a/include/linux/reboot.h b/include/linux/reboot.h index 3b3266ff1a95..7ab2cdb83ef0 100644 --- a/include/linux/reboot.h +++ b/include/linux/reboot.h @@ -59,6 +59,10 @@ extern void machine_crash_shutdown(struct pt_regs *); * Architecture independent implemenations of sys_reboot commands. */ +extern void kernel_restart_prepare(char *cmd); +extern void kernel_halt_prepare(void); +extern void kernel_power_off_prepare(void); + extern void kernel_restart(char *cmd); extern void kernel_halt(void); extern void kernel_power_off(void); diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index af00b10294cd..001ab82df051 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h @@ -1972,7 +1972,7 @@ extern struct address_space_operations reiserfs_address_space_operations; /* fix_nodes.c */ #ifdef CONFIG_REISERFS_CHECK -void *reiserfs_kmalloc(size_t size, int flags, struct super_block *s); +void *reiserfs_kmalloc(size_t size, gfp_t flags, struct super_block *s); void reiserfs_kfree(const void *vp, size_t size, struct super_block *s); #else static inline void *reiserfs_kmalloc(size_t size, int flags, diff --git a/include/linux/sched.h b/include/linux/sched.h index 49e617fa0f66..27519df0f987 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -107,13 +107,25 @@ extern unsigned long nr_iowait(void); #include +/* + * Task state bitmask. NOTE! These bits are also + * encoded in fs/proc/array.c: get_task_state(). + * + * We have two separate sets of flags: task->state + * is about runnability, while task->exit_state are + * about the task exiting. Confusing, but this way + * modifying one set can't modify the other one by + * mistake. 
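Editorial aside: radix_tree_preload() above now takes a gfp_t, and the usual preload/insert pattern is sketched below. This is not part of the patch; the tree, lock and insert helper are hypothetical.

#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static RADIX_TREE(example_tree, GFP_ATOMIC);
static DEFINE_SPINLOCK(example_lock);

static int example_insert(unsigned long index, void *item)
{
	int err;

	/* Preallocate tree nodes while sleeping is still allowed... */
	err = radix_tree_preload(GFP_KERNEL);
	if (err)
		return err;

	/* ...then insert under the lock without further allocation. */
	spin_lock(&example_lock);
	err = radix_tree_insert(&example_tree, index, item);
	spin_unlock(&example_lock);

	radix_tree_preload_end();
	return err;
}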
+ */ #define TASK_RUNNING 0 #define TASK_INTERRUPTIBLE 1 #define TASK_UNINTERRUPTIBLE 2 #define TASK_STOPPED 4 #define TASK_TRACED 8 +/* in tsk->exit_state */ #define EXIT_ZOMBIE 16 #define EXIT_DEAD 32 +/* in tsk->state again */ #define TASK_NONINTERACTIVE 64 #define __set_task_state(tsk, state_value) \ @@ -1006,6 +1018,7 @@ extern int force_sig_info(int, struct siginfo *, struct task_struct *); extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp); extern int kill_pg_info(int, struct siginfo *, pid_t); extern int kill_proc_info(int, struct siginfo *, pid_t); +extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t); extern void do_notify_parent(struct task_struct *, int); extern void force_sig(int, struct task_struct *); extern void force_sig_specific(int, struct task_struct *); diff --git a/include/linux/sdladrv.h b/include/linux/sdladrv.h index 78f634007fc6..c85e103d5e7b 100644 --- a/include/linux/sdladrv.h +++ b/include/linux/sdladrv.h @@ -52,12 +52,8 @@ typedef struct sdlahw extern int sdla_setup (sdlahw_t* hw, void* sfm, unsigned len); extern int sdla_down (sdlahw_t* hw); -extern int sdla_inten (sdlahw_t* hw); -extern int sdla_intde (sdlahw_t* hw); -extern int sdla_intack (sdlahw_t* hw); extern void S514_intack (sdlahw_t* hw, u32 int_status); extern void read_S514_int_stat (sdlahw_t* hw, u32* int_status); -extern int sdla_intr (sdlahw_t* hw); extern int sdla_mapmem (sdlahw_t* hw, unsigned long addr); extern int sdla_peek (sdlahw_t* hw, unsigned long addr, void* buf, unsigned len); diff --git a/include/linux/security.h b/include/linux/security.h index 0e43460d374e..dac956ed98f0 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1210,7 +1210,7 @@ struct security_operations { int (*socket_shutdown) (struct socket * sock, int how); int (*socket_sock_rcv_skb) (struct sock * sk, struct sk_buff * skb); int (*socket_getpeersec) (struct socket *sock, char __user *optval, int __user *optlen, unsigned len); - int (*sk_alloc_security) (struct sock *sk, int family, int priority); + int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority); void (*sk_free_security) (struct sock *sk); #endif /* CONFIG_SECURITY_NETWORK */ }; @@ -2634,8 +2634,7 @@ static inline int security_socket_getpeersec(struct socket *sock, char __user *o return security_ops->socket_getpeersec(sock, optval, optlen, len); } -static inline int security_sk_alloc(struct sock *sk, int family, - unsigned int __nocast priority) +static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority) { return security_ops->sk_alloc_security(sk, family, priority); } @@ -2752,8 +2751,7 @@ static inline int security_socket_getpeersec(struct socket *sock, char __user *o return -ENOPROTOOPT; } -static inline int security_sk_alloc(struct sock *sk, int family, - unsigned int __nocast priority) +static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority) { return 0; } diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2741c0c55e83..b756935da9c8 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -155,8 +155,6 @@ struct skb_shared_info { #define SKB_DATAREF_SHIFT 16 #define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1) -extern struct timeval skb_tv_base; - struct skb_timeval { u32 off_sec; u32 off_usec; @@ -173,9 +171,8 @@ enum { * struct sk_buff - socket buffer * @next: Next buffer in list * @prev: Previous buffer in list - * @list: List we are on * @sk: Socket we are owned by - * @tstamp: Time we arrived stored 
as offset to skb_tv_base + * @tstamp: Time we arrived * @dev: Device we arrived on/are leaving by * @input_dev: Device we arrived on * @h: Transport layer header @@ -192,6 +189,7 @@ enum { * @cloned: Head may be cloned (check refcnt to be sure) * @nohdr: Payload reference only, must not modify header * @pkt_type: Packet class + * @fclone: skbuff clone status * @ip_summed: Driver fed us an IP checksum * @priority: Packet queueing priority * @users: User count - see {datagram,tcp}.c @@ -204,6 +202,7 @@ enum { * @destructor: Destruct function * @nfmark: Can be used for communication between hooks * @nfct: Associated connection, if any + * @ipvs_property: skbuff is owned by ipvs * @nfctinfo: Relationship of this skb to the connection * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c * @tc_index: Traffic control index @@ -304,37 +303,37 @@ struct sk_buff { extern void __kfree_skb(struct sk_buff *skb); extern struct sk_buff *__alloc_skb(unsigned int size, - unsigned int __nocast priority, int fclone); + gfp_t priority, int fclone); static inline struct sk_buff *alloc_skb(unsigned int size, - unsigned int __nocast priority) + gfp_t priority) { return __alloc_skb(size, priority, 0); } static inline struct sk_buff *alloc_skb_fclone(unsigned int size, - unsigned int __nocast priority) + gfp_t priority) { return __alloc_skb(size, priority, 1); } extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, unsigned int size, - unsigned int __nocast priority); + gfp_t priority); extern void kfree_skbmem(struct sk_buff *skb); extern struct sk_buff *skb_clone(struct sk_buff *skb, - unsigned int __nocast priority); + gfp_t priority); extern struct sk_buff *skb_copy(const struct sk_buff *skb, - unsigned int __nocast priority); + gfp_t priority); extern struct sk_buff *pskb_copy(struct sk_buff *skb, - unsigned int __nocast gfp_mask); + gfp_t gfp_mask); extern int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, - unsigned int __nocast gfp_mask); + gfp_t gfp_mask); extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom); extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, int newtailroom, - unsigned int __nocast priority); + gfp_t priority); extern struct sk_buff * skb_pad(struct sk_buff *skb, int pad); #define dev_kfree_skb(a) kfree_skb(a) extern void skb_over_panic(struct sk_buff *skb, int len, @@ -486,7 +485,7 @@ static inline int skb_shared(const struct sk_buff *skb) * NULL is returned on a memory allocation failure. */ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, - unsigned int __nocast pri) + gfp_t pri) { might_sleep_if(pri & __GFP_WAIT); if (skb_shared(skb)) { @@ -518,7 +517,7 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, * %NULL is returned on a memory allocation failure. */ static inline struct sk_buff *skb_unshare(struct sk_buff *skb, - unsigned int __nocast pri) + gfp_t pri) { might_sleep_if(pri & __GFP_WAIT); if (skb_cloned(skb)) { @@ -1019,7 +1018,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list) * %NULL is returned in there is no free memory. */ static inline struct sk_buff *__dev_alloc_skb(unsigned int length, - unsigned int __nocast gfp_mask) + gfp_t gfp_mask) { struct sk_buff *skb = alloc_skb(length + 16, gfp_mask); if (likely(skb)) @@ -1132,8 +1131,8 @@ static inline int skb_can_coalesce(struct sk_buff *skb, int i, * If there is no free memory -ENOMEM is returned, otherwise zero * is returned and the old skb data released. 
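Editorial aside: a minimal sketch, not part of the patch, exercising the skbuff interfaces touched above with the typed gfp_t argument and the now-absolute timestamp. The length constant and the function are hypothetical.

#include <linux/skbuff.h>
#include <linux/time.h>
#include <linux/gfp.h>

#define EXAMPLE_LEN 128

static void example_skb_usage(void)
{
	struct sk_buff *skb;
	struct timeval tv;

	/* In interrupt or softirq context this would be GFP_ATOMIC. */
	skb = alloc_skb(EXAMPLE_LEN, GFP_KERNEL);
	if (!skb)
		return;

	/* With skb_tv_base removed, off_sec/off_usec hold the timestamp
	 * directly and skb_get_timestamp() simply copies them out. */
	skb_get_timestamp(skb, &tv);

	kfree_skb(skb);
}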
*/ -extern int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp); -static inline int skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp) +extern int __skb_linearize(struct sk_buff *skb, gfp_t gfp); +static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp) { return __skb_linearize(skb, gfp); } @@ -1255,10 +1254,6 @@ static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval * { stamp->tv_sec = skb->tstamp.off_sec; stamp->tv_usec = skb->tstamp.off_usec; - if (skb->tstamp.off_sec) { - stamp->tv_sec += skb_tv_base.tv_sec; - stamp->tv_usec += skb_tv_base.tv_usec; - } } /** @@ -1272,8 +1267,8 @@ static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval * */ static inline void skb_set_timestamp(struct sk_buff *skb, const struct timeval *stamp) { - skb->tstamp.off_sec = stamp->tv_sec - skb_tv_base.tv_sec; - skb->tstamp.off_usec = stamp->tv_usec - skb_tv_base.tv_usec; + skb->tstamp.off_sec = stamp->tv_sec; + skb->tstamp.off_usec = stamp->tv_usec; } extern void __net_timestamp(struct sk_buff *skb); diff --git a/include/linux/slab.h b/include/linux/slab.h index 1f356f3bbc64..09b9aa60063d 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -61,11 +61,11 @@ extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned lo void (*)(void *, kmem_cache_t *, unsigned long)); extern int kmem_cache_destroy(kmem_cache_t *); extern int kmem_cache_shrink(kmem_cache_t *); -extern void *kmem_cache_alloc(kmem_cache_t *, unsigned int __nocast); +extern void *kmem_cache_alloc(kmem_cache_t *, gfp_t); extern void kmem_cache_free(kmem_cache_t *, void *); extern unsigned int kmem_cache_size(kmem_cache_t *); extern const char *kmem_cache_name(kmem_cache_t *); -extern kmem_cache_t *kmem_find_general_cachep(size_t size, unsigned int __nocast gfpflags); +extern kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags); /* Size description struct for general caches. */ struct cache_sizes { @@ -74,9 +74,9 @@ struct cache_sizes { kmem_cache_t *cs_dmacachep; }; extern struct cache_sizes malloc_sizes[]; -extern void *__kmalloc(size_t, unsigned int __nocast); +extern void *__kmalloc(size_t, gfp_t); -static inline void *kmalloc(size_t size, unsigned int __nocast flags) +static inline void *kmalloc(size_t size, gfp_t flags) { if (__builtin_constant_p(size)) { int i = 0; @@ -99,7 +99,7 @@ found: return __kmalloc(size, flags); } -extern void *kzalloc(size_t, unsigned int __nocast); +extern void *kzalloc(size_t, gfp_t); /** * kcalloc - allocate memory for an array. The memory is set to zero. @@ -107,7 +107,7 @@ extern void *kzalloc(size_t, unsigned int __nocast); * @size: element size. * @flags: the type of memory to allocate. 
*/ -static inline void *kcalloc(size_t n, size_t size, unsigned int __nocast flags) +static inline void *kcalloc(size_t n, size_t size, gfp_t flags) { if (n != 0 && size > INT_MAX / n) return NULL; @@ -118,15 +118,14 @@ extern void kfree(const void *); extern unsigned int ksize(const void *); #ifdef CONFIG_NUMA -extern void *kmem_cache_alloc_node(kmem_cache_t *, - unsigned int __nocast flags, int node); -extern void *kmalloc_node(size_t size, unsigned int __nocast flags, int node); +extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node); +extern void *kmalloc_node(size_t size, gfp_t flags, int node); #else -static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node) +static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int node) { return kmem_cache_alloc(cachep, flags); } -static inline void *kmalloc_node(size_t size, unsigned int __nocast flags, int node) +static inline void *kmalloc_node(size_t size, gfp_t flags, int node) { return kmalloc(size, flags); } diff --git a/include/linux/string.h b/include/linux/string.h index dab2652acbd8..369be3264a55 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -88,7 +88,7 @@ extern int memcmp(const void *,const void *,__kernel_size_t); extern void * memchr(const void *,int,__kernel_size_t); #endif -extern char *kstrdup(const char *s, unsigned int __nocast gfp); +extern char *kstrdup(const char *s, gfp_t gfp); #ifdef __cplusplus } diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index 04ebc24db348..b68c11a2d6dd 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h @@ -66,7 +66,12 @@ struct rpc_cred_cache { struct rpc_auth { unsigned int au_cslack; /* call cred size estimate */ - unsigned int au_rslack; /* reply verf size guess */ + /* guess at number of u32's auth adds before + * reply data; normally the verifier size: */ + unsigned int au_rslack; + /* for gss, used to calculate au_rslack: */ + unsigned int au_verfsize; + unsigned int au_flags; /* various flags */ struct rpc_authops * au_ops; /* operations */ rpc_authflavor_t au_flavor; /* pseudoflavor (note may diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h index eadb31e3c198..1a42d902bc11 100644 --- a/include/linux/sunrpc/debug.h +++ b/include/linux/sunrpc/debug.h @@ -32,6 +32,7 @@ #define RPCDBG_AUTH 0x0010 #define RPCDBG_PMAP 0x0020 #define RPCDBG_SCHED 0x0040 +#define RPCDBG_TRANS 0x0080 #define RPCDBG_SVCSOCK 0x0100 #define RPCDBG_SVCDSP 0x0200 #define RPCDBG_MISC 0x0400 @@ -94,6 +95,8 @@ enum { CTL_NLMDEBUG, CTL_SLOTTABLE_UDP, CTL_SLOTTABLE_TCP, + CTL_MIN_RESVPORT, + CTL_MAX_RESVPORT, }; #endif /* _LINUX_SUNRPC_DEBUG_H_ */ diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h index 689262f63059..9b8bcf125c18 100644 --- a/include/linux/sunrpc/gss_api.h +++ b/include/linux/sunrpc/gss_api.h @@ -40,14 +40,21 @@ int gss_import_sec_context( struct gss_ctx **ctx_id); u32 gss_get_mic( struct gss_ctx *ctx_id, - u32 qop, struct xdr_buf *message, struct xdr_netobj *mic_token); u32 gss_verify_mic( struct gss_ctx *ctx_id, struct xdr_buf *message, - struct xdr_netobj *mic_token, - u32 *qstate); + struct xdr_netobj *mic_token); +u32 gss_wrap( + struct gss_ctx *ctx_id, + int offset, + struct xdr_buf *outbuf, + struct page **inpages); +u32 gss_unwrap( + struct gss_ctx *ctx_id, + int offset, + struct xdr_buf *inbuf); u32 gss_delete_sec_context( struct gss_ctx **ctx_id); @@ -56,7 +63,6 @@ char *gss_service_to_auth_domain_name(struct 
gss_api_mech *, u32 service); struct pf_desc { u32 pseudoflavor; - u32 qop; u32 service; char *name; char *auth_domain_name; @@ -85,14 +91,21 @@ struct gss_api_ops { struct gss_ctx *ctx_id); u32 (*gss_get_mic)( struct gss_ctx *ctx_id, - u32 qop, struct xdr_buf *message, struct xdr_netobj *mic_token); u32 (*gss_verify_mic)( struct gss_ctx *ctx_id, struct xdr_buf *message, - struct xdr_netobj *mic_token, - u32 *qstate); + struct xdr_netobj *mic_token); + u32 (*gss_wrap)( + struct gss_ctx *ctx_id, + int offset, + struct xdr_buf *outbuf, + struct page **inpages); + u32 (*gss_unwrap)( + struct gss_ctx *ctx_id, + int offset, + struct xdr_buf *buf); void (*gss_delete_sec_context)( void *internal_ctx_id); }; diff --git a/include/linux/sunrpc/gss_err.h b/include/linux/sunrpc/gss_err.h index 92608a2e574c..a6807867bd21 100644 --- a/include/linux/sunrpc/gss_err.h +++ b/include/linux/sunrpc/gss_err.h @@ -65,16 +65,6 @@ typedef unsigned int OM_uint32; #define GSS_C_MECH_CODE 2 -/* - * Define the default Quality of Protection for per-message services. Note - * that an implementation that offers multiple levels of QOP may either reserve - * a value (for example zero, as assumed here) to mean "default protection", or - * alternatively may simply equate GSS_C_QOP_DEFAULT to a specific explicit - * QOP value. However a value of 0 should always be interpreted by a GSSAPI - * implementation as a request for the default protection level. - */ -#define GSS_C_QOP_DEFAULT 0 - /* * Expiration time of 2^32-1 seconds means infinite lifetime for a * credential or security context diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h index ffe31d2eb9ec..2c3601d31045 100644 --- a/include/linux/sunrpc/gss_krb5.h +++ b/include/linux/sunrpc/gss_krb5.h @@ -116,18 +116,22 @@ enum seal_alg { s32 make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body, - struct xdr_netobj *cksum); + int body_offset, struct xdr_netobj *cksum); + +u32 gss_get_mic_kerberos(struct gss_ctx *, struct xdr_buf *, + struct xdr_netobj *); + +u32 gss_verify_mic_kerberos(struct gss_ctx *, struct xdr_buf *, + struct xdr_netobj *); u32 -krb5_make_token(struct krb5_ctx *context_handle, int qop_req, - struct xdr_buf *input_message_buffer, - struct xdr_netobj *output_message_buffer, int toktype); +gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset, + struct xdr_buf *outbuf, struct page **pages); u32 -krb5_read_token(struct krb5_ctx *context_handle, - struct xdr_netobj *input_token_buffer, - struct xdr_buf *message_buffer, - int *qop_state, int toktype); +gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, + struct xdr_buf *buf); + u32 krb5_encrypt(struct crypto_tfm * key, @@ -137,6 +141,13 @@ u32 krb5_decrypt(struct crypto_tfm * key, void *iv, void *in, void *out, int length); +int +gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *outbuf, int offset, + struct page **pages); + +int +gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *inbuf, int offset); + s32 krb5_make_seq_num(struct crypto_tfm * key, int direction, diff --git a/include/linux/sunrpc/gss_spkm3.h b/include/linux/sunrpc/gss_spkm3.h index b5c9968c3c17..0beb2cf00a84 100644 --- a/include/linux/sunrpc/gss_spkm3.h +++ b/include/linux/sunrpc/gss_spkm3.h @@ -41,9 +41,9 @@ struct spkm3_ctx { #define SPKM_WRAP_TOK 5 #define SPKM_DEL_TOK 6 -u32 spkm3_make_token(struct spkm3_ctx *ctx, int qop_req, struct xdr_buf * text, struct xdr_netobj * token, int toktype); +u32 spkm3_make_token(struct spkm3_ctx *ctx, struct xdr_buf * text, struct 
xdr_netobj * token, int toktype); -u32 spkm3_read_token(struct spkm3_ctx *ctx, struct xdr_netobj *read_token, struct xdr_buf *message_buffer, int *qop_state, int toktype); +u32 spkm3_read_token(struct spkm3_ctx *ctx, struct xdr_netobj *read_token, struct xdr_buf *message_buffer, int toktype); #define CKSUMTYPE_RSA_MD5 0x0007 diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h index 15f115332389..f43f237360ae 100644 --- a/include/linux/sunrpc/msg_prot.h +++ b/include/linux/sunrpc/msg_prot.h @@ -76,5 +76,30 @@ enum rpc_auth_stat { #define RPC_MAXNETNAMELEN 256 +/* + * From RFC 1831: + * + * "A record is composed of one or more record fragments. A record + * fragment is a four-byte header followed by 0 to (2**31) - 1 bytes of + * fragment data. The bytes encode an unsigned binary number; as with + * XDR integers, the byte order is from highest to lowest. The number + * encodes two values -- a boolean which indicates whether the fragment + * is the last fragment of the record (bit value 1 implies the fragment + * is the last fragment) and a 31-bit unsigned binary value which is the + * length in bytes of the fragment's data. The boolean value is the + * highest-order bit of the header; the length is the 31 low-order bits. + * (Note that this record specification is NOT in XDR standard form!)" + * + * The Linux RPC client always sends its requests in a single record + * fragment, limiting the maximum payload size for stream transports to + * 2GB. + */ + +typedef u32 rpc_fraghdr; + +#define RPC_LAST_STREAM_FRAGMENT (1U << 31) +#define RPC_FRAGMENT_SIZE_MASK (~RPC_LAST_STREAM_FRAGMENT) +#define RPC_MAX_FRAGMENT_SIZE ((1U << 31) - 1) + #endif /* __KERNEL__ */ #endif /* _LINUX_SUNRPC_MSGPROT_H_ */ diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 23448d0fb5bc..5da968729cf8 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -161,14 +161,10 @@ typedef struct { typedef size_t (*skb_read_actor_t)(skb_reader_t *desc, void *to, size_t len); +extern int csum_partial_copy_to_xdr(struct xdr_buf *, struct sk_buff *); extern ssize_t xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int, skb_reader_t *, skb_read_actor_t); -struct socket; -struct sockaddr; -extern int xdr_sendpages(struct socket *, struct sockaddr *, int, - struct xdr_buf *, unsigned int, int); - extern int xdr_encode_word(struct xdr_buf *, int, u32); extern int xdr_decode_word(struct xdr_buf *, int, u32 *); diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index e618c1649814..3b8b6e823c70 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -1,5 +1,5 @@ /* - * linux/include/linux/sunrpc/clnt_xprt.h + * linux/include/linux/sunrpc/xprt.h * * Declarations for the RPC transport interface. * @@ -15,20 +15,6 @@ #include #include -/* - * The transport code maintains an estimate on the maximum number of out- - * standing RPC requests, using a smoothed version of the congestion - * avoidance implemented in 44BSD. This is basically the Van Jacobson - * congestion algorithm: If a retransmit occurs, the congestion window is - * halved; otherwise, it is incremented by 1/cwnd when - * - * - a reply is received and - * - a full number of requests are outstanding and - * - the congestion window hasn't been updated recently. - * - * Upper procedures may check whether a request would block waiting for - * a free RPC slot by using the RPC_CONGESTED() macro. 
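Editorial aside: the record-marking macros added to msg_prot.h above can be exercised as in this sketch, which is not part of the patch. The two helpers and the length value are hypothetical; the byte-order handling follows the RFC 1831 text quoted in the new comment.

#include <linux/types.h>
#include <asm/byteorder.h>		/* htonl()/ntohl() */
#include <linux/sunrpc/msg_prot.h>

/* Build the 4-byte record marker for a stream fragment. */
static u32 example_make_marker(u32 len, int last_fragment)
{
	rpc_fraghdr marker = len & RPC_FRAGMENT_SIZE_MASK;

	if (last_fragment)
		marker |= RPC_LAST_STREAM_FRAGMENT;

	/* Like XDR integers, the marker goes on the wire big-endian. */
	return htonl(marker);
}

/* Recover the fragment length from a received marker. */
static u32 example_fragment_length(u32 wire_marker)
{
	return ntohl(wire_marker) & RPC_FRAGMENT_SIZE_MASK;
}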
- */ extern unsigned int xprt_udp_slot_table_entries; extern unsigned int xprt_tcp_slot_table_entries; @@ -36,35 +22,24 @@ extern unsigned int xprt_tcp_slot_table_entries; #define RPC_DEF_SLOT_TABLE (16U) #define RPC_MAX_SLOT_TABLE (128U) -#define RPC_CWNDSHIFT (8U) -#define RPC_CWNDSCALE (1U << RPC_CWNDSHIFT) -#define RPC_INITCWND RPC_CWNDSCALE -#define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT) -#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd) - -/* Default timeout values */ -#define RPC_MAX_UDP_TIMEOUT (60*HZ) -#define RPC_MAX_TCP_TIMEOUT (600*HZ) - /* - * Wait duration for an RPC TCP connection to be established. Solaris - * NFS over TCP uses 60 seconds, for example, which is in line with how - * long a server takes to reboot. - */ -#define RPC_CONNECT_TIMEOUT (60*HZ) - -/* - * Delay an arbitrary number of seconds before attempting to reconnect - * after an error. - */ -#define RPC_REESTABLISH_TIMEOUT (15*HZ) - -/* RPC call and reply header size as number of 32bit words (verifier + * RPC call and reply header size as number of 32bit words (verifier * size computed separately) */ #define RPC_CALLHDRSIZE 6 #define RPC_REPHDRSIZE 4 +/* + * Parameters for choosing a free port + */ +extern unsigned int xprt_min_resvport; +extern unsigned int xprt_max_resvport; + +#define RPC_MIN_RESVPORT (1U) +#define RPC_MAX_RESVPORT (65535U) +#define RPC_DEF_MIN_RESVPORT (650U) +#define RPC_DEF_MAX_RESVPORT (1023U) + /* * This describes a timeout strategy */ @@ -76,6 +51,9 @@ struct rpc_timeout { unsigned char to_exponential; }; +struct rpc_task; +struct rpc_xprt; + /* * This describes a complete RPC request */ @@ -95,7 +73,10 @@ struct rpc_rqst { int rq_cong; /* has incremented xprt->cong */ int rq_received; /* receive completed */ u32 rq_seqno; /* gss seq no. used on req. 
*/ - + int rq_enc_pages_num; + struct page **rq_enc_pages; /* scratch pages for use by + gss privacy code */ + void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */ struct list_head rq_list; struct xdr_buf rq_private_buf; /* The receive buffer @@ -121,12 +102,21 @@ struct rpc_rqst { #define rq_svec rq_snd_buf.head #define rq_slen rq_snd_buf.len -#define XPRT_LAST_FRAG (1 << 0) -#define XPRT_COPY_RECM (1 << 1) -#define XPRT_COPY_XID (1 << 2) -#define XPRT_COPY_DATA (1 << 3) +struct rpc_xprt_ops { + void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize); + int (*reserve_xprt)(struct rpc_task *task); + void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task); + void (*connect)(struct rpc_task *task); + int (*send_request)(struct rpc_task *task); + void (*set_retrans_timeout)(struct rpc_task *task); + void (*timer)(struct rpc_task *task); + void (*release_request)(struct rpc_task *task); + void (*close)(struct rpc_xprt *xprt); + void (*destroy)(struct rpc_xprt *xprt); +}; struct rpc_xprt { + struct rpc_xprt_ops * ops; /* transport methods */ struct socket * sock; /* BSD socket layer */ struct sock * inet; /* INET layer */ @@ -137,11 +127,13 @@ struct rpc_xprt { unsigned long cong; /* current congestion */ unsigned long cwnd; /* congestion window */ - unsigned int rcvsize, /* socket receive buffer size */ - sndsize; /* socket send buffer size */ + size_t rcvsize, /* transport rcv buffer size */ + sndsize; /* transport send buffer size */ size_t max_payload; /* largest RPC payload size, in bytes */ + unsigned int tsh_size; /* size of transport specific + header */ struct rpc_wait_queue sending; /* requests waiting to send */ struct rpc_wait_queue resend; /* requests waiting to resend */ @@ -150,11 +142,9 @@ struct rpc_xprt { struct list_head free; /* free slots */ struct rpc_rqst * slot; /* slot table storage */ unsigned int max_reqs; /* total slots */ - unsigned long sockstate; /* Socket state */ + unsigned long state; /* transport state */ unsigned char shutdown : 1, /* being shut down */ - nocong : 1, /* no congestion control */ - resvport : 1, /* use a reserved port */ - stream : 1; /* TCP */ + resvport : 1; /* use a reserved port */ /* * XID @@ -171,22 +161,27 @@ struct rpc_xprt { unsigned long tcp_copied, /* copied to request */ tcp_flags; /* - * Connection of sockets + * Connection of transports */ - struct work_struct sock_connect; + unsigned long connect_timeout, + bind_timeout, + reestablish_timeout; + struct work_struct connect_worker; unsigned short port; + /* - * Disconnection of idle sockets + * Disconnection of idle transports */ struct work_struct task_cleanup; struct timer_list timer; - unsigned long last_used; + unsigned long last_used, + idle_timeout; /* * Send stuff */ - spinlock_t sock_lock; /* lock socket info */ - spinlock_t xprt_lock; /* lock xprt info */ + spinlock_t transport_lock; /* lock transport info */ + spinlock_t reserve_lock; /* lock slot table */ struct rpc_task * snd_task; /* Task blocked in send */ struct list_head recv; @@ -195,37 +190,111 @@ struct rpc_xprt { void (*old_data_ready)(struct sock *, int); void (*old_state_change)(struct sock *); void (*old_write_space)(struct sock *); - - wait_queue_head_t cong_wait; }; +#define XPRT_LAST_FRAG (1 << 0) +#define XPRT_COPY_RECM (1 << 1) +#define XPRT_COPY_XID (1 << 2) +#define XPRT_COPY_DATA (1 << 3) + #ifdef __KERNEL__ -struct rpc_xprt * xprt_create_proto(int proto, struct sockaddr_in *addr, - struct rpc_timeout *toparms); -int xprt_destroy(struct rpc_xprt 
*); -void xprt_set_timeout(struct rpc_timeout *, unsigned int, - unsigned long); +/* + * Transport operations used by ULPs + */ +struct rpc_xprt * xprt_create_proto(int proto, struct sockaddr_in *addr, struct rpc_timeout *to); +void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr); -void xprt_reserve(struct rpc_task *); -int xprt_prepare_transmit(struct rpc_task *); -void xprt_transmit(struct rpc_task *); -void xprt_receive(struct rpc_task *); +/* + * Generic internal transport functions + */ +void xprt_connect(struct rpc_task *task); +void xprt_reserve(struct rpc_task *task); +int xprt_reserve_xprt(struct rpc_task *task); +int xprt_reserve_xprt_cong(struct rpc_task *task); +int xprt_prepare_transmit(struct rpc_task *task); +void xprt_transmit(struct rpc_task *task); +void xprt_abort_transmit(struct rpc_task *task); int xprt_adjust_timeout(struct rpc_rqst *req); -void xprt_release(struct rpc_task *); -void xprt_connect(struct rpc_task *); -void xprt_sock_setbufsize(struct rpc_xprt *); +void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task); +void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); +void xprt_release(struct rpc_task *task); +int xprt_destroy(struct rpc_xprt *xprt); -#define XPRT_LOCKED 0 -#define XPRT_CONNECT 1 -#define XPRT_CONNECTING 2 +static inline u32 *xprt_skip_transport_header(struct rpc_xprt *xprt, u32 *p) +{ + return p + xprt->tsh_size; +} -#define xprt_connected(xp) (test_bit(XPRT_CONNECT, &(xp)->sockstate)) -#define xprt_set_connected(xp) (set_bit(XPRT_CONNECT, &(xp)->sockstate)) -#define xprt_test_and_set_connected(xp) (test_and_set_bit(XPRT_CONNECT, &(xp)->sockstate)) -#define xprt_test_and_clear_connected(xp) \ - (test_and_clear_bit(XPRT_CONNECT, &(xp)->sockstate)) -#define xprt_clear_connected(xp) (clear_bit(XPRT_CONNECT, &(xp)->sockstate)) +/* + * Transport switch helper functions + */ +void xprt_set_retrans_timeout_def(struct rpc_task *task); +void xprt_set_retrans_timeout_rtt(struct rpc_task *task); +void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status); +void xprt_wait_for_buffer_space(struct rpc_task *task); +void xprt_write_space(struct rpc_xprt *xprt); +void xprt_update_rtt(struct rpc_task *task); +void xprt_adjust_cwnd(struct rpc_task *task, int result); +struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid); +void xprt_complete_rqst(struct rpc_task *task, int copied); +void xprt_release_rqst_cong(struct rpc_task *task); +void xprt_disconnect(struct rpc_xprt *xprt); + +/* + * Socket transport setup operations + */ +int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to); +int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to); + +/* + * Reserved bit positions in xprt->state + */ +#define XPRT_LOCKED (0) +#define XPRT_CONNECTED (1) +#define XPRT_CONNECTING (2) + +static inline void xprt_set_connected(struct rpc_xprt *xprt) +{ + set_bit(XPRT_CONNECTED, &xprt->state); +} + +static inline void xprt_clear_connected(struct rpc_xprt *xprt) +{ + clear_bit(XPRT_CONNECTED, &xprt->state); +} + +static inline int xprt_connected(struct rpc_xprt *xprt) +{ + return test_bit(XPRT_CONNECTED, &xprt->state); +} + +static inline int xprt_test_and_set_connected(struct rpc_xprt *xprt) +{ + return test_and_set_bit(XPRT_CONNECTED, &xprt->state); +} + +static inline int xprt_test_and_clear_connected(struct rpc_xprt *xprt) +{ + return test_and_clear_bit(XPRT_CONNECTED, &xprt->state); +} + +static inline void xprt_clear_connecting(struct rpc_xprt *xprt) +{ + 
smp_mb__before_clear_bit(); + clear_bit(XPRT_CONNECTING, &xprt->state); + smp_mb__after_clear_bit(); +} + +static inline int xprt_connecting(struct rpc_xprt *xprt) +{ + return test_bit(XPRT_CONNECTING, &xprt->state); +} + +static inline int xprt_test_and_set_connecting(struct rpc_xprt *xprt) +{ + return test_and_set_bit(XPRT_CONNECTING, &xprt->state); +} #endif /* __KERNEL__*/ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index f2e96fdfaae0..ba448c760168 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -71,5 +71,7 @@ void restore_processor_state(void); struct saved_context; void __save_processor_state(struct saved_context *ctxt); void __restore_processor_state(struct saved_context *ctxt); +extern unsigned long get_usable_page(gfp_t gfp_mask); +extern void free_eaten_memory(void); #endif /* _LINUX_SWSUSP_H */ diff --git a/include/linux/swap.h b/include/linux/swap.h index 3c9ff0048153..20c975642cab 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -147,7 +147,7 @@ struct swap_list_t { #define vm_swap_full() (nr_swap_pages*2 < total_swap_pages) /* linux/mm/oom_kill.c */ -extern void out_of_memory(unsigned int __nocast gfp_mask, int order); +extern void out_of_memory(gfp_t gfp_mask, int order); /* linux/mm/memory.c */ extern void swapin_readahead(swp_entry_t, unsigned long, struct vm_area_struct *); @@ -171,8 +171,8 @@ extern int rotate_reclaimable_page(struct page *page); extern void swap_setup(void); /* linux/mm/vmscan.c */ -extern int try_to_free_pages(struct zone **, unsigned int); -extern int zone_reclaim(struct zone *, unsigned int, unsigned int); +extern int try_to_free_pages(struct zone **, gfp_t); +extern int zone_reclaim(struct zone *, gfp_t, unsigned int); extern int shrink_all_memory(int); extern int vm_swappiness; diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 3a29a9f9b451..fc8e367f671e 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -202,7 +202,8 @@ enum NET_TR=14, NET_DECNET=15, NET_ECONET=16, - NET_SCTP=17, + NET_SCTP=17, + NET_LLC=18, }; /* /proc/sys/kernel/random */ @@ -522,6 +523,29 @@ enum { NET_IPX_FORWARDING=2 }; +/* /proc/sys/net/llc */ +enum { + NET_LLC2=1, + NET_LLC_STATION=2, +}; + +/* /proc/sys/net/llc/llc2 */ +enum { + NET_LLC2_TIMEOUT=1, +}; + +/* /proc/sys/net/llc/station */ +enum { + NET_LLC_STATION_ACK_TIMEOUT=1, +}; + +/* /proc/sys/net/llc/llc2/timeout */ +enum { + NET_LLC2_ACK_TIMEOUT=1, + NET_LLC2_P_TIMEOUT=2, + NET_LLC2_REJ_TIMEOUT=3, + NET_LLC2_BUSY_TIMEOUT=4, +}; /* /proc/sys/net/appletalk */ enum { diff --git a/include/linux/tc_ematch/tc_em_meta.h b/include/linux/tc_ematch/tc_em_meta.h index 081b1ee8516e..e21937cf91d0 100644 --- a/include/linux/tc_ematch/tc_em_meta.h +++ b/include/linux/tc_ematch/tc_em_meta.h @@ -71,7 +71,7 @@ enum TCF_META_ID_SK_SNDBUF, TCF_META_ID_SK_ALLOCS, TCF_META_ID_SK_ROUTE_CAPS, - TCF_META_ID_SK_HASHENT, + TCF_META_ID_SK_HASH, TCF_META_ID_SK_LINGERTIME, TCF_META_ID_SK_ACK_BACKLOG, TCF_META_ID_SK_MAX_ACK_BACKLOG, diff --git a/include/linux/textsearch.h b/include/linux/textsearch.h index 941f45ac117a..fc5bb4e91a58 100644 --- a/include/linux/textsearch.h +++ b/include/linux/textsearch.h @@ -40,7 +40,7 @@ struct ts_state struct ts_ops { const char *name; - struct ts_config * (*init)(const void *, unsigned int, int); + struct ts_config * (*init)(const void *, unsigned int, gfp_t); unsigned int (*find)(struct ts_config *, struct ts_state *); void (*destroy)(struct ts_config *); @@ -148,7 +148,7 @@ static inline unsigned int 
textsearch_get_pattern_len(struct ts_config *conf) extern int textsearch_register(struct ts_ops *); extern int textsearch_unregister(struct ts_ops *); extern struct ts_config *textsearch_prepare(const char *, const void *, - unsigned int, int, int); + unsigned int, gfp_t, int); extern void textsearch_destroy(struct ts_config *conf); extern unsigned int textsearch_find_continuous(struct ts_config *, struct ts_state *, @@ -158,7 +158,8 @@ extern unsigned int textsearch_find_continuous(struct ts_config *, #define TS_PRIV_ALIGNTO 8 #define TS_PRIV_ALIGN(len) (((len) + TS_PRIV_ALIGNTO-1) & ~(TS_PRIV_ALIGNTO-1)) -static inline struct ts_config *alloc_ts_config(size_t payload, int gfp_mask) +static inline struct ts_config *alloc_ts_config(size_t payload, + gfp_t gfp_mask) { struct ts_config *conf; diff --git a/include/linux/types.h b/include/linux/types.h index 2b678c22ca4a..21b9ce803644 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -151,7 +151,12 @@ typedef unsigned long sector_t; */ #ifdef __CHECKER__ -#define __bitwise __attribute__((bitwise)) +#define __bitwise__ __attribute__((bitwise)) +#else +#define __bitwise__ +#endif +#ifdef __CHECK_ENDIAN__ +#define __bitwise __bitwise__ #else #define __bitwise #endif @@ -165,6 +170,10 @@ typedef __u64 __bitwise __le64; typedef __u64 __bitwise __be64; #endif +#ifdef __KERNEL__ +typedef unsigned __bitwise__ gfp_t; +#endif + struct ustat { __kernel_daddr_t f_tfree; __kernel_ino_t f_tinode; diff --git a/include/linux/usb.h b/include/linux/usb.h index 4dbe580f9335..8f731e8f2821 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -933,17 +933,17 @@ static inline void usb_fill_int_urb (struct urb *urb, } extern void usb_init_urb(struct urb *urb); -extern struct urb *usb_alloc_urb(int iso_packets, unsigned mem_flags); +extern struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags); extern void usb_free_urb(struct urb *urb); #define usb_put_urb usb_free_urb extern struct urb *usb_get_urb(struct urb *urb); -extern int usb_submit_urb(struct urb *urb, unsigned mem_flags); +extern int usb_submit_urb(struct urb *urb, gfp_t mem_flags); extern int usb_unlink_urb(struct urb *urb); extern void usb_kill_urb(struct urb *urb); #define HAVE_USB_BUFFERS void *usb_buffer_alloc (struct usb_device *dev, size_t size, - unsigned mem_flags, dma_addr_t *dma); + gfp_t mem_flags, dma_addr_t *dma); void usb_buffer_free (struct usb_device *dev, size_t size, void *addr, dma_addr_t dma); @@ -1050,7 +1050,7 @@ int usb_sg_init ( struct scatterlist *sg, int nents, size_t length, - unsigned mem_flags + gfp_t mem_flags ); void usb_sg_cancel (struct usb_sg_request *io); void usb_sg_wait (struct usb_sg_request *io); diff --git a/include/linux/usb_gadget.h b/include/linux/usb_gadget.h index 71e608607324..ff81117eb733 100644 --- a/include/linux/usb_gadget.h +++ b/include/linux/usb_gadget.h @@ -107,18 +107,18 @@ struct usb_ep_ops { int (*disable) (struct usb_ep *ep); struct usb_request *(*alloc_request) (struct usb_ep *ep, - unsigned gfp_flags); + gfp_t gfp_flags); void (*free_request) (struct usb_ep *ep, struct usb_request *req); void *(*alloc_buffer) (struct usb_ep *ep, unsigned bytes, - dma_addr_t *dma, unsigned gfp_flags); + dma_addr_t *dma, gfp_t gfp_flags); void (*free_buffer) (struct usb_ep *ep, void *buf, dma_addr_t dma, unsigned bytes); // NOTE: on 2.6, drivers may also use dma_map() and // dma_sync_single_*() to directly manage dma overhead. 
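Editorial aside: with the USB URB helpers above now taking gfp_t, a typical bulk submission looks like the sketch below. This is not part of the patch; the device, pipe, buffer and completion handler are hypothetical arguments supplied by the caller.

#include <linux/usb.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int example_submit_bulk(struct usb_device *udev, unsigned int pipe,
			       void *buf, int len,
			       usb_complete_t complete_fn, void *context)
{
	struct urb *urb;
	int ret;

	/* Process context, so GFP_KERNEL is fine for both calls;
	 * interrupt context would use GFP_ATOMIC instead. */
	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb)
		return -ENOMEM;

	usb_fill_bulk_urb(urb, udev, pipe, buf, len, complete_fn, context);

	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret)
		usb_free_urb(urb);	/* submission failed, drop our reference */
	return ret;
}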
int (*queue) (struct usb_ep *ep, struct usb_request *req, - unsigned gfp_flags); + gfp_t gfp_flags); int (*dequeue) (struct usb_ep *ep, struct usb_request *req); int (*set_halt) (struct usb_ep *ep, int value); @@ -214,7 +214,7 @@ usb_ep_disable (struct usb_ep *ep) * Returns the request, or null if one could not be allocated. */ static inline struct usb_request * -usb_ep_alloc_request (struct usb_ep *ep, unsigned gfp_flags) +usb_ep_alloc_request (struct usb_ep *ep, gfp_t gfp_flags) { return ep->ops->alloc_request (ep, gfp_flags); } @@ -254,7 +254,7 @@ usb_ep_free_request (struct usb_ep *ep, struct usb_request *req) */ static inline void * usb_ep_alloc_buffer (struct usb_ep *ep, unsigned len, dma_addr_t *dma, - unsigned gfp_flags) + gfp_t gfp_flags) { return ep->ops->alloc_buffer (ep, len, dma, gfp_flags); } @@ -330,7 +330,7 @@ usb_ep_free_buffer (struct usb_ep *ep, void *buf, dma_addr_t dma, unsigned len) * reported when the usb peripheral is disconnected. */ static inline int -usb_ep_queue (struct usb_ep *ep, struct usb_request *req, unsigned gfp_flags) +usb_ep_queue (struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags) { return ep->ops->queue (ep, req, gfp_flags); } diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index b244f69ef682..3701a0673d2c 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -34,8 +34,8 @@ struct vm_struct { extern void *vmalloc(unsigned long size); extern void *vmalloc_exec(unsigned long size); extern void *vmalloc_32(unsigned long size); -extern void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, pgprot_t prot); -extern void *__vmalloc_area(struct vm_struct *area, unsigned int __nocast gfp_mask, pgprot_t prot); +extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); +extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot); extern void vfree(void *addr); extern void *vmap(struct page **pages, unsigned int count, diff --git a/include/linux/wanpipe.h b/include/linux/wanpipe.h index 167d956c492b..dae9860091dd 100644 --- a/include/linux/wanpipe.h +++ b/include/linux/wanpipe.h @@ -265,15 +265,6 @@ typedef struct { #include #include - -#define is_digit(ch) (((ch)>=(unsigned)'0'&&(ch)<=(unsigned)'9')?1:0) -#define is_alpha(ch) ((((ch)>=(unsigned)'a'&&(ch)<=(unsigned)'z')||\ - ((ch)>=(unsigned)'A'&&(ch)<=(unsigned)'Z'))?1:0) -#define is_hex_digit(ch) ((((ch)>=(unsigned)'0'&&(ch)<=(unsigned)'9')||\ - ((ch)>=(unsigned)'a'&&(ch)<=(unsigned)'f')||\ - ((ch)>=(unsigned)'A'&&(ch)<=(unsigned)'F'))?1:0) - - /****** Data Structures *****************************************************/ /* Adapter Data Space. 
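
The hunks above converge on one theme: allocation-flag parameters across textsearch, USB, the gadget API, vmalloc and friends stop being bare "unsigned int" / "int __nocast" and become the new gfp_t, which include/linux/types.h now defines as an "unsigned __bitwise__" type so that sparse can catch code mixing plain integers with allocation flags, while gcc still sees an ordinary unsigned int. The following stand-alone sketch (not the kernel headers; macros and flag values are placeholders so it is self-contained) shows roughly how the annotation behaves when checked with sparse and compiled normally with gcc:

/*
 * Sketch only: illustrates the __bitwise__/gfp_t pattern from the hunks
 * above.  The macro definitions and flag values below are placeholders,
 * not the real kernel ones; run "sparse example.c" to see the check fire.
 */
#include <stddef.h>

#ifdef __CHECKER__
#define __bitwise__ __attribute__((bitwise))
#define __force     __attribute__((force))
#else
#define __bitwise__
#define __force
#endif

typedef unsigned int __bitwise__ gfp_t;

/* Placeholder flag values -- illustrative, not the kernel's GFP bits. */
#define EX_GFP_ATOMIC	((__force gfp_t)0x01u)
#define EX_GFP_KERNEL	((__force gfp_t)0x02u)

/* An allocator whose flags argument is typed gfp_t, as in the diff. */
static void *example_alloc(size_t size, gfp_t flags)
{
	(void)size;
	(void)flags;
	return NULL;	/* allocation elided; only the typing matters here */
}

int main(void)
{
	void *p = example_alloc(128, EX_GFP_KERNEL);	/* OK: gfp_t passed */

	/* Passing a bare constant instead, e.g.
	 *	p = example_alloc(128, 0x02);
	 * compiles under gcc but makes sparse warn that argument 2 has the
	 * wrong base type -- which is exactly what this series relies on to
	 * find callers still handing plain ints to gfp_t parameters.
	 */
	(void)p;
	return 0;
}
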
diff --git a/include/net/ax25.h b/include/net/ax25.h index 9dbcd9e51c00..30bb4a893237 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -171,7 +171,7 @@ typedef struct { ax25_address calls[AX25_MAX_DIGIS]; unsigned char repeated[AX25_MAX_DIGIS]; unsigned char ndigi; - char lastrepeat; + signed char lastrepeat; } ax25_digi; typedef struct ax25_route { diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 6dfa4a61ffd0..210458624840 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -136,7 +136,7 @@ struct bt_skb_cb { }; #define bt_cb(skb) ((struct bt_skb_cb *)(skb->cb)) -static inline struct sk_buff *bt_skb_alloc(unsigned int len, unsigned int __nocast how) +static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how) { struct sk_buff *skb; diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h index ffea9d54071f..fbe557f7ea1d 100644 --- a/include/net/bluetooth/rfcomm.h +++ b/include/net/bluetooth/rfcomm.h @@ -230,7 +230,7 @@ int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci, u8 xon_char, u8 xoff_char, u16 param_mask); /* ---- RFCOMM DLCs (channels) ---- */ -struct rfcomm_dlc *rfcomm_dlc_alloc(unsigned int __nocast prio); +struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio); void rfcomm_dlc_free(struct rfcomm_dlc *d); int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel); int rfcomm_dlc_close(struct rfcomm_dlc *d, int reason); diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h index 6bbeafa73e8b..1ba03be0af3a 100644 --- a/include/net/dn_nsp.h +++ b/include/net/dn_nsp.h @@ -19,9 +19,9 @@ extern void dn_nsp_send_data_ack(struct sock *sk); extern void dn_nsp_send_oth_ack(struct sock *sk); extern void dn_nsp_delayed_ack(struct sock *sk); extern void dn_send_conn_ack(struct sock *sk); -extern void dn_send_conn_conf(struct sock *sk, int gfp); +extern void dn_send_conn_conf(struct sock *sk, gfp_t gfp); extern void dn_nsp_send_disc(struct sock *sk, unsigned char type, - unsigned short reason, int gfp); + unsigned short reason, gfp_t gfp); extern void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type, unsigned short reason); extern void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval); @@ -29,14 +29,14 @@ extern void dn_nsp_send_conninit(struct sock *sk, unsigned char flags); extern void dn_nsp_output(struct sock *sk); extern int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum); -extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, int gfp, int oob); +extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp, int oob); extern unsigned long dn_nsp_persist(struct sock *sk); extern int dn_nsp_xmit_timeout(struct sock *sk); extern int dn_nsp_rx(struct sk_buff *); extern int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb); -extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, int pri); +extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri); extern struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, long timeo, int *err); #define NSP_REASON_OK 0 /* No error */ diff --git a/include/net/dn_route.h b/include/net/dn_route.h index d084721db198..5122da3f2eb3 100644 --- a/include/net/dn_route.h +++ b/include/net/dn_route.h @@ -15,7 +15,7 @@ GNU General Public License for more details. 
*******************************************************************************/ -extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, int pri); +extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri); extern int dn_route_output_sock(struct dst_entry **pprt, struct flowi *, struct sock *sk, int flags); extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb); extern int dn_cache_getroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); diff --git a/include/net/dst.h b/include/net/dst.h index 4a056a682435..6c196a5baf24 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -94,7 +94,6 @@ struct dst_ops struct dst_entry * (*negative_advice)(struct dst_entry *); void (*link_failure)(struct sk_buff *); void (*update_pmtu)(struct dst_entry *dst, u32 mtu); - int (*get_mss)(struct dst_entry *dst, u32 mtu); int entry_size; atomic_t entries; diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h index dc36b1be6745..5e38dca1d082 100644 --- a/include/net/ieee80211.h +++ b/include/net/ieee80211.h @@ -11,19 +11,26 @@ * * Adaption to a generic IEEE 802.11 stack by James Ketrenos * - * Copyright (c) 2004, Intel Corporation + * Copyright (c) 2004-2005, Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. See README and COPYING for * more details. + * + * API Version History + * 1.0.x -- Initial version + * 1.1.x -- Added radiotap, QoS, TIM, ieee80211_geo APIs, + * various structure changes, and crypto API init method */ #ifndef IEEE80211_H #define IEEE80211_H -#include /* ETH_ALEN */ -#include /* ARRAY_SIZE */ +#include /* ETH_ALEN */ +#include /* ARRAY_SIZE */ #include +#define IEEE80211_VERSION "git-1.1.6" + #define IEEE80211_DATA_LEN 2304 /* Maximum size for the MA-UNITDATA primitive, 802.11 standard section 6.2.1.1.2. @@ -33,34 +40,13 @@ represents the 2304 bytes of real data, plus a possible 8 bytes of WEP IV and ICV. (this interpretation suggested by Ramiro Barreiro) */ - -#define IEEE80211_HLEN 30 -#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) - -struct ieee80211_hdr { - __le16 frame_ctl; - __le16 duration_id; - u8 addr1[ETH_ALEN]; - u8 addr2[ETH_ALEN]; - u8 addr3[ETH_ALEN]; - __le16 seq_ctl; - u8 addr4[ETH_ALEN]; -} __attribute__ ((packed)); - -struct ieee80211_hdr_3addr { - __le16 frame_ctl; - __le16 duration_id; - u8 addr1[ETH_ALEN]; - u8 addr2[ETH_ALEN]; - u8 addr3[ETH_ALEN]; - __le16 seq_ctl; -} __attribute__ ((packed)); - #define IEEE80211_1ADDR_LEN 10 #define IEEE80211_2ADDR_LEN 16 #define IEEE80211_3ADDR_LEN 24 #define IEEE80211_4ADDR_LEN 30 #define IEEE80211_FCS_LEN 4 +#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) +#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) #define MIN_FRAG_THRESHOLD 256U #define MAX_FRAG_THRESHOLD 2346U @@ -113,11 +99,11 @@ struct ieee80211_hdr_3addr { #define IEEE80211_STYPE_CFACK 0x0050 #define IEEE80211_STYPE_CFPOLL 0x0060 #define IEEE80211_STYPE_CFACKPOLL 0x0070 +#define IEEE80211_STYPE_QOS_DATA 0x0080 #define IEEE80211_SCTL_FRAG 0x000F #define IEEE80211_SCTL_SEQ 0xFFF0 - /* debug macros */ #ifdef CONFIG_IEEE80211_DEBUG @@ -128,8 +114,7 @@ do { if (ieee80211_debug_level & (level)) \ in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) #else #define IEEE80211_DEBUG(level, fmt, args...) 
do {} while (0) -#endif /* CONFIG_IEEE80211_DEBUG */ - +#endif /* CONFIG_IEEE80211_DEBUG */ /* debug macros not dependent on CONFIG_IEEE80211_DEBUG */ @@ -140,7 +125,6 @@ do { if (ieee80211_debug_level & (level)) \ * messages. It should never be used for passing essid to user space. */ const char *escape_essid(const char *essid, u8 essid_len); - /* * To use the debug system: * @@ -177,6 +161,7 @@ const char *escape_essid(const char *essid, u8 essid_len); #define IEEE80211_DL_TX (1<<8) #define IEEE80211_DL_RX (1<<9) +#define IEEE80211_DL_QOS (1<<31) #define IEEE80211_ERROR(f, a...) printk(KERN_ERR "ieee80211: " f, ## a) #define IEEE80211_WARNING(f, a...) printk(KERN_WARNING "ieee80211: " f, ## a) @@ -190,9 +175,10 @@ const char *escape_essid(const char *essid, u8 essid_len); #define IEEE80211_DEBUG_DROP(f, a...) IEEE80211_DEBUG(IEEE80211_DL_DROP, f, ## a) #define IEEE80211_DEBUG_TX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_TX, f, ## a) #define IEEE80211_DEBUG_RX(f, a...) IEEE80211_DEBUG(IEEE80211_DL_RX, f, ## a) +#define IEEE80211_DEBUG_QOS(f, a...) IEEE80211_DEBUG(IEEE80211_DL_QOS, f, ## a) #include #include -#include /* ARPHRD_ETHER */ +#include /* ARPHRD_ETHER */ #ifndef WIRELESS_SPY #define WIRELESS_SPY /* enable iwspy support */ @@ -200,10 +186,10 @@ const char *escape_essid(const char *essid, u8 essid_len); #include /* new driver API */ #ifndef ETH_P_PAE -#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */ -#endif /* ETH_P_PAE */ +#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */ +#endif /* ETH_P_PAE */ -#define ETH_P_PREAUTH 0x88C7 /* IEEE 802.11i pre-authentication */ +#define ETH_P_PREAUTH 0x88C7 /* IEEE 802.11i pre-authentication */ #ifndef ETH_P_80211_RAW #define ETH_P_80211_RAW (ETH_P_ECONET + 1) @@ -215,10 +201,10 @@ const char *escape_essid(const char *essid, u8 essid_len); struct ieee80211_snap_hdr { - u8 dsap; /* always 0xAA */ - u8 ssap; /* always 0xAA */ - u8 ctrl; /* always 0x03 */ - u8 oui[P80211_OUI_LEN]; /* organizational universal id */ + u8 dsap; /* always 0xAA */ + u8 ssap; /* always 0xAA */ + u8 ctrl; /* always 0x03 */ + u8 oui[P80211_OUI_LEN]; /* organizational universal id */ } __attribute__ ((packed)); @@ -246,8 +232,9 @@ struct ieee80211_snap_hdr { #define WLAN_CAPABILITY_PBCC (1<<6) #define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7) #define WLAN_CAPABILITY_SPECTRUM_MGMT (1<<8) +#define WLAN_CAPABILITY_QOS (1<<9) #define WLAN_CAPABILITY_SHORT_SLOT_TIME (1<<10) -#define WLAN_CAPABILITY_OSSS_OFDM (1<<13) +#define WLAN_CAPABILITY_DSSS_OFDM (1<<13) /* Status codes */ enum ieee80211_statuscode { @@ -312,14 +299,12 @@ enum ieee80211_reasoncode { WLAN_REASON_CIPHER_SUITE_REJECTED = 24, }; - #define IEEE80211_STATMASK_SIGNAL (1<<0) #define IEEE80211_STATMASK_RSSI (1<<1) #define IEEE80211_STATMASK_NOISE (1<<2) #define IEEE80211_STATMASK_RATE (1<<3) #define IEEE80211_STATMASK_WEMASK 0x7 - #define IEEE80211_CCK_MODULATION (1<<0) #define IEEE80211_OFDM_MODULATION (1<<1) @@ -377,9 +362,6 @@ enum ieee80211_reasoncode { #define IEEE80211_NUM_CCK_RATES 4 #define IEEE80211_OFDM_SHIFT_MASK_A 4 - - - /* NOTE: This data is for statistical purposes; not all hardware provides this * information for frames received. Not setting these will not cause * any adverse affects. 
*/ @@ -388,7 +370,7 @@ struct ieee80211_rx_stats { s8 rssi; u8 signal; u8 noise; - u16 rate; /* in 100 kbps */ + u16 rate; /* in 100 kbps */ u8 received_channel; u8 control; u8 mask; @@ -439,38 +421,44 @@ struct ieee80211_device; #include "ieee80211_crypt.h" -#define SEC_KEY_1 (1<<0) -#define SEC_KEY_2 (1<<1) -#define SEC_KEY_3 (1<<2) -#define SEC_KEY_4 (1<<3) -#define SEC_ACTIVE_KEY (1<<4) -#define SEC_AUTH_MODE (1<<5) -#define SEC_UNICAST_GROUP (1<<6) -#define SEC_LEVEL (1<<7) -#define SEC_ENABLED (1<<8) +#define SEC_KEY_1 (1<<0) +#define SEC_KEY_2 (1<<1) +#define SEC_KEY_3 (1<<2) +#define SEC_KEY_4 (1<<3) +#define SEC_ACTIVE_KEY (1<<4) +#define SEC_AUTH_MODE (1<<5) +#define SEC_UNICAST_GROUP (1<<6) +#define SEC_LEVEL (1<<7) +#define SEC_ENABLED (1<<8) +#define SEC_ENCRYPT (1<<9) -#define SEC_LEVEL_0 0 /* None */ -#define SEC_LEVEL_1 1 /* WEP 40 and 104 bit */ -#define SEC_LEVEL_2 2 /* Level 1 + TKIP */ -#define SEC_LEVEL_2_CKIP 3 /* Level 1 + CKIP */ -#define SEC_LEVEL_3 4 /* Level 2 + CCMP */ +#define SEC_LEVEL_0 0 /* None */ +#define SEC_LEVEL_1 1 /* WEP 40 and 104 bit */ +#define SEC_LEVEL_2 2 /* Level 1 + TKIP */ +#define SEC_LEVEL_2_CKIP 3 /* Level 1 + CKIP */ +#define SEC_LEVEL_3 4 /* Level 2 + CCMP */ -#define WEP_KEYS 4 -#define WEP_KEY_LEN 13 +#define SEC_ALG_NONE 0 +#define SEC_ALG_WEP 1 +#define SEC_ALG_TKIP 2 +#define SEC_ALG_CCMP 3 + +#define WEP_KEYS 4 +#define WEP_KEY_LEN 13 +#define SCM_KEY_LEN 32 +#define SCM_TEMPORAL_KEY_LENGTH 16 struct ieee80211_security { u16 active_key:2, - enabled:1, - auth_mode:2, - auth_algo:4, - unicast_uses_group:1; + enabled:1, + auth_mode:2, auth_algo:4, unicast_uses_group:1, encrypt:1; + u8 encode_alg[WEP_KEYS]; u8 key_sizes[WEP_KEYS]; - u8 keys[WEP_KEYS][WEP_KEY_LEN]; + u8 keys[WEP_KEYS][SCM_KEY_LEN]; u8 level; u16 flags; } __attribute__ ((packed)); - /* 802.11 data frame from AP @@ -494,7 +482,7 @@ enum ieee80211_mfie { MFIE_TYPE_RATES = 1, MFIE_TYPE_FH_SET = 2, MFIE_TYPE_DS_SET = 3, - MFIE_TYPE_CF_SET = 4, + MFIE_TYPE_CF_SET = 4, MFIE_TYPE_TIM = 5, MFIE_TYPE_IBSS_SET = 6, MFIE_TYPE_COUNTRY = 7, @@ -516,11 +504,75 @@ enum ieee80211_mfie { MFIE_TYPE_RSN = 48, MFIE_TYPE_RATES_EX = 50, MFIE_TYPE_GENERIC = 221, + MFIE_TYPE_QOS_PARAMETER = 222, }; -struct ieee80211_info_element_hdr { - u8 id; - u8 len; +/* Minimal header; can be used for passing 802.11 frames with sufficient + * information to determine what type of underlying data type is actually + * stored in the data. 
*/ +struct ieee80211_hdr { + __le16 frame_ctl; + __le16 duration_id; + u8 payload[0]; +} __attribute__ ((packed)); + +struct ieee80211_hdr_1addr { + __le16 frame_ctl; + __le16 duration_id; + u8 addr1[ETH_ALEN]; + u8 payload[0]; +} __attribute__ ((packed)); + +struct ieee80211_hdr_2addr { + __le16 frame_ctl; + __le16 duration_id; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 payload[0]; +} __attribute__ ((packed)); + +struct ieee80211_hdr_3addr { + __le16 frame_ctl; + __le16 duration_id; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; + __le16 seq_ctl; + u8 payload[0]; +} __attribute__ ((packed)); + +struct ieee80211_hdr_4addr { + __le16 frame_ctl; + __le16 duration_id; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; + __le16 seq_ctl; + u8 addr4[ETH_ALEN]; + u8 payload[0]; +} __attribute__ ((packed)); + +struct ieee80211_hdr_3addrqos { + __le16 frame_ctl; + __le16 duration_id; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; + __le16 seq_ctl; + u8 payload[0]; + __le16 qos_ctl; +} __attribute__ ((packed)); + +struct ieee80211_hdr_4addrqos { + __le16 frame_ctl; + __le16 duration_id; + u8 addr1[ETH_ALEN]; + u8 addr2[ETH_ALEN]; + u8 addr3[ETH_ALEN]; + __le16 seq_ctl; + u8 addr4[ETH_ALEN]; + u8 payload[0]; + __le16 qos_ctl; } __attribute__ ((packed)); struct ieee80211_info_element { @@ -546,49 +598,77 @@ struct ieee80211_info_element { u16 status; */ -struct ieee80211_authentication { +struct ieee80211_auth { struct ieee80211_hdr_3addr header; __le16 algorithm; __le16 transaction; __le16 status; - struct ieee80211_info_element info_element; + /* challenge */ + struct ieee80211_info_element info_element[0]; } __attribute__ ((packed)); +struct ieee80211_disassoc { + struct ieee80211_hdr_3addr header; + __le16 reason; +} __attribute__ ((packed)); + +/* Alias deauth for disassoc */ +#define ieee80211_deauth ieee80211_disassoc + +struct ieee80211_probe_request { + struct ieee80211_hdr_3addr header; + /* SSID, supported rates */ + struct ieee80211_info_element info_element[0]; +} __attribute__ ((packed)); struct ieee80211_probe_response { struct ieee80211_hdr_3addr header; u32 time_stamp[2]; __le16 beacon_interval; __le16 capability; - struct ieee80211_info_element info_element; + /* SSID, supported rates, FH params, DS params, + * CF params, IBSS params, TIM (if beacon), RSN */ + struct ieee80211_info_element info_element[0]; } __attribute__ ((packed)); -struct ieee80211_assoc_request_frame { +/* Alias beacon for probe_response */ +#define ieee80211_beacon ieee80211_probe_response + +struct ieee80211_assoc_request { + struct ieee80211_hdr_3addr header; + __le16 capability; + __le16 listen_interval; + /* SSID, supported rates, RSN */ + struct ieee80211_info_element info_element[0]; +} __attribute__ ((packed)); + +struct ieee80211_reassoc_request { + struct ieee80211_hdr_3addr header; __le16 capability; __le16 listen_interval; u8 current_ap[ETH_ALEN]; - struct ieee80211_info_element info_element; + struct ieee80211_info_element info_element[0]; } __attribute__ ((packed)); -struct ieee80211_assoc_response_frame { +struct ieee80211_assoc_response { struct ieee80211_hdr_3addr header; __le16 capability; __le16 status; __le16 aid; - struct ieee80211_info_element info_element; /* supported rates */ + /* supported rates */ + struct ieee80211_info_element info_element[0]; } __attribute__ ((packed)); - struct ieee80211_txb { u8 nr_frags; u8 encrypted; - u16 reserved; - u16 frag_size; - u16 payload_size; + u8 rts_included; + u8 reserved; + __le16 frag_size; 
+ __le16 payload_size; struct sk_buff *fragments[0]; }; - /* SWEEP TABLE ENTRIES NUMBER */ #define MAX_SWEEP_TAB_ENTRIES 42 #define MAX_SWEEP_TAB_ENTRIES_PER_PACKET 7 @@ -604,9 +684,68 @@ struct ieee80211_txb { #define MAX_WPA_IE_LEN 64 -#define NETWORK_EMPTY_ESSID (1<<0) -#define NETWORK_HAS_OFDM (1<<1) -#define NETWORK_HAS_CCK (1<<2) +#define NETWORK_EMPTY_ESSID (1<<0) +#define NETWORK_HAS_OFDM (1<<1) +#define NETWORK_HAS_CCK (1<<2) + +/* QoS structure */ +#define NETWORK_HAS_QOS_PARAMETERS (1<<3) +#define NETWORK_HAS_QOS_INFORMATION (1<<4) +#define NETWORK_HAS_QOS_MASK (NETWORK_HAS_QOS_PARAMETERS | NETWORK_HAS_QOS_INFORMATION) + +#define QOS_QUEUE_NUM 4 +#define QOS_OUI_LEN 3 +#define QOS_OUI_TYPE 2 +#define QOS_ELEMENT_ID 221 +#define QOS_OUI_INFO_SUB_TYPE 0 +#define QOS_OUI_PARAM_SUB_TYPE 1 +#define QOS_VERSION_1 1 +#define QOS_AIFSN_MIN_VALUE 2 + +struct ieee80211_qos_information_element { + u8 elementID; + u8 length; + u8 qui[QOS_OUI_LEN]; + u8 qui_type; + u8 qui_subtype; + u8 version; + u8 ac_info; +} __attribute__ ((packed)); + +struct ieee80211_qos_ac_parameter { + u8 aci_aifsn; + u8 ecw_min_max; + __le16 tx_op_limit; +} __attribute__ ((packed)); + +struct ieee80211_qos_parameter_info { + struct ieee80211_qos_information_element info_element; + u8 reserved; + struct ieee80211_qos_ac_parameter ac_params_record[QOS_QUEUE_NUM]; +} __attribute__ ((packed)); + +struct ieee80211_qos_parameters { + __le16 cw_min[QOS_QUEUE_NUM]; + __le16 cw_max[QOS_QUEUE_NUM]; + u8 aifs[QOS_QUEUE_NUM]; + u8 flag[QOS_QUEUE_NUM]; + __le16 tx_op_limit[QOS_QUEUE_NUM]; +} __attribute__ ((packed)); + +struct ieee80211_qos_data { + struct ieee80211_qos_parameters parameters; + int active; + int supported; + u8 param_count; + u8 old_param_count; +}; + +struct ieee80211_tim_parameters { + u8 tim_count; + u8 tim_period; +} __attribute__ ((packed)); + +/*******************************************************/ struct ieee80211_network { /* These entries are used to identify a unique network */ @@ -616,6 +755,8 @@ struct ieee80211_network { u8 ssid[IW_ESSID_MAX_SIZE + 1]; u8 ssid_len; + struct ieee80211_qos_data qos_data; + /* These are network statistics */ struct ieee80211_rx_stats stats; u16 capability; @@ -631,10 +772,12 @@ struct ieee80211_network { u16 beacon_interval; u16 listen_interval; u16 atim_window; + u8 erp_value; u8 wpa_ie[MAX_WPA_IE_LEN]; size_t wpa_ie_len; u8 rsn_ie[MAX_WPA_IE_LEN]; size_t rsn_ie_len; + struct ieee80211_tim_parameters tim; struct list_head list; }; @@ -651,17 +794,52 @@ enum ieee80211_state { #define DEFAULT_MAX_SCAN_AGE (15 * HZ) #define DEFAULT_FTS 2346 - #define CFG_IEEE80211_RESERVE_FCS (1<<0) #define CFG_IEEE80211_COMPUTE_FCS (1<<1) +#define CFG_IEEE80211_RTS (1<<2) + +#define IEEE80211_24GHZ_MIN_CHANNEL 1 +#define IEEE80211_24GHZ_MAX_CHANNEL 14 +#define IEEE80211_24GHZ_CHANNELS 14 + +#define IEEE80211_52GHZ_MIN_CHANNEL 36 +#define IEEE80211_52GHZ_MAX_CHANNEL 165 +#define IEEE80211_52GHZ_CHANNELS 32 + +enum { + IEEE80211_CH_PASSIVE_ONLY = (1 << 0), + IEEE80211_CH_B_ONLY = (1 << 2), + IEEE80211_CH_NO_IBSS = (1 << 3), + IEEE80211_CH_UNIFORM_SPREADING = (1 << 4), + IEEE80211_CH_RADAR_DETECT = (1 << 5), + IEEE80211_CH_INVALID = (1 << 6), +}; + +struct ieee80211_channel { + u32 freq; + u8 channel; + u8 flags; + u8 max_power; +}; + +struct ieee80211_geo { + u8 name[4]; + u8 bg_channels; + u8 a_channels; + struct ieee80211_channel bg[IEEE80211_24GHZ_CHANNELS]; + struct ieee80211_channel a[IEEE80211_52GHZ_CHANNELS]; +}; struct ieee80211_device { struct net_device *dev; + struct 
ieee80211_security sec; /* Bookkeeping structures */ struct net_device_stats stats; struct ieee80211_stats ieee_stats; + struct ieee80211_geo geo; + /* Probe / Beacon management */ struct list_head network_free_list; struct list_head network_list; @@ -669,62 +847,102 @@ struct ieee80211_device { int scans; int scan_age; - int iw_mode; /* operating mode (IW_MODE_*) */ + int iw_mode; /* operating mode (IW_MODE_*) */ + struct iw_spy_data spy_data; /* iwspy support */ spinlock_t lock; - int tx_headroom; /* Set to size of any additional room needed at front - * of allocated Tx SKBs */ + int tx_headroom; /* Set to size of any additional room needed at front + * of allocated Tx SKBs */ u32 config; /* WEP and other encryption related settings at the device level */ - int open_wep; /* Set to 1 to allow unencrypted frames */ + int open_wep; /* Set to 1 to allow unencrypted frames */ - int reset_on_keychange; /* Set to 1 if the HW needs to be reset on + int reset_on_keychange; /* Set to 1 if the HW needs to be reset on * WEP key changes */ /* If the host performs {en,de}cryption, then set to 1 */ int host_encrypt; + int host_encrypt_msdu; int host_decrypt; - int ieee802_1x; /* is IEEE 802.1X used */ + /* host performs multicast decryption */ + int host_mc_decrypt; + + int host_open_frag; + int host_build_iv; + int ieee802_1x; /* is IEEE 802.1X used */ /* WPA data */ int wpa_enabled; int drop_unencrypted; - int tkip_countermeasures; int privacy_invoked; size_t wpa_ie_len; u8 *wpa_ie; struct list_head crypt_deinit_list; struct ieee80211_crypt_data *crypt[WEP_KEYS]; - int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */ + int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */ struct timer_list crypt_deinit_timer; + int crypt_quiesced; - int bcrx_sta_key; /* use individual keys to override default keys even - * with RX of broad/multicast frames */ + int bcrx_sta_key; /* use individual keys to override default keys even + * with RX of broad/multicast frames */ /* Fragmentation structures */ struct ieee80211_frag_entry frag_cache[IEEE80211_FRAG_CACHE_LEN]; unsigned int frag_next_idx; - u16 fts; /* Fragmentation Threshold */ + u16 fts; /* Fragmentation Threshold */ + u16 rts; /* RTS threshold */ /* Association info */ u8 bssid[ETH_ALEN]; enum ieee80211_state state; - int mode; /* A, B, G */ - int modulation; /* CCK, OFDM */ - int freq_band; /* 2.4Ghz, 5.2Ghz, Mixed */ - int abg_ture; /* ABG flag */ + int mode; /* A, B, G */ + int modulation; /* CCK, OFDM */ + int freq_band; /* 2.4Ghz, 5.2Ghz, Mixed */ + int abg_true; /* ABG flag */ + + int perfect_rssi; + int worst_rssi; /* Callback functions */ - void (*set_security)(struct net_device *dev, - struct ieee80211_security *sec); - int (*hard_start_xmit)(struct ieee80211_txb *txb, - struct net_device *dev); - int (*reset_port)(struct net_device *dev); + void (*set_security) (struct net_device * dev, + struct ieee80211_security * sec); + int (*hard_start_xmit) (struct ieee80211_txb * txb, + struct net_device * dev, int pri); + int (*reset_port) (struct net_device * dev); + int (*is_queue_full) (struct net_device * dev, int pri); + + int (*handle_management) (struct net_device * dev, + struct ieee80211_network * network, u16 type); + + /* Typical STA methods */ + int (*handle_auth) (struct net_device * dev, + struct ieee80211_auth * auth); + int (*handle_deauth) (struct net_device * dev, + struct ieee80211_auth * auth); + int (*handle_disassoc) (struct net_device * dev, + struct ieee80211_disassoc * assoc); + int (*handle_beacon) (struct net_device 
* dev, + struct ieee80211_beacon * beacon, + struct ieee80211_network * network); + int (*handle_probe_response) (struct net_device * dev, + struct ieee80211_probe_response * resp, + struct ieee80211_network * network); + int (*handle_probe_request) (struct net_device * dev, + struct ieee80211_probe_request * req, + struct ieee80211_rx_stats * stats); + int (*handle_assoc_response) (struct net_device * dev, + struct ieee80211_assoc_response * resp, + struct ieee80211_network * network); + + /* Typical AP methods */ + int (*handle_assoc_request) (struct net_device * dev); + int (*handle_reassoc_request) (struct net_device * dev, + struct ieee80211_reassoc_request * req); /* This must be the last item so that it points to the data * allocated beyond this structure by alloc_ieee80211 */ @@ -736,12 +954,12 @@ struct ieee80211_device { #define IEEE_G (1<<2) #define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G) -extern inline void *ieee80211_priv(struct net_device *dev) +static inline void *ieee80211_priv(struct net_device *dev) { return ((struct ieee80211_device *)netdev_priv(dev))->priv; } -extern inline int ieee80211_is_empty_essid(const char *essid, int essid_len) +static inline int ieee80211_is_empty_essid(const char *essid, int essid_len) { /* Single white space is for Linksys APs */ if (essid_len == 1 && essid[0] == ' ') @@ -757,7 +975,8 @@ extern inline int ieee80211_is_empty_essid(const char *essid, int essid_len) return 1; } -extern inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee, int mode) +static inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee, + int mode) { /* * It is possible for both access points and our device to support @@ -783,14 +1002,17 @@ extern inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee, int mod return 0; } -extern inline int ieee80211_get_hdrlen(u16 fc) +static inline int ieee80211_get_hdrlen(u16 fc) { int hdrlen = IEEE80211_3ADDR_LEN; + u16 stype = WLAN_FC_GET_STYPE(fc); switch (WLAN_FC_GET_TYPE(fc)) { case IEEE80211_FTYPE_DATA: if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS)) hdrlen = IEEE80211_4ADDR_LEN; + if (stype & IEEE80211_STYPE_QOS_DATA) + hdrlen += 2; break; case IEEE80211_FTYPE_CTL: switch (WLAN_FC_GET_STYPE(fc)) { @@ -808,7 +1030,48 @@ extern inline int ieee80211_get_hdrlen(u16 fc) return hdrlen; } +static inline u8 *ieee80211_get_payload(struct ieee80211_hdr *hdr) +{ + switch (ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl))) { + case IEEE80211_1ADDR_LEN: + return ((struct ieee80211_hdr_1addr *)hdr)->payload; + case IEEE80211_2ADDR_LEN: + return ((struct ieee80211_hdr_2addr *)hdr)->payload; + case IEEE80211_3ADDR_LEN: + return ((struct ieee80211_hdr_3addr *)hdr)->payload; + case IEEE80211_4ADDR_LEN: + return ((struct ieee80211_hdr_4addr *)hdr)->payload; + } +} + +static inline int ieee80211_is_ofdm_rate(u8 rate) +{ + switch (rate & ~IEEE80211_BASIC_RATE_MASK) { + case IEEE80211_OFDM_RATE_6MB: + case IEEE80211_OFDM_RATE_9MB: + case IEEE80211_OFDM_RATE_12MB: + case IEEE80211_OFDM_RATE_18MB: + case IEEE80211_OFDM_RATE_24MB: + case IEEE80211_OFDM_RATE_36MB: + case IEEE80211_OFDM_RATE_48MB: + case IEEE80211_OFDM_RATE_54MB: + return 1; + } + return 0; +} + +static inline int ieee80211_is_cck_rate(u8 rate) +{ + switch (rate & ~IEEE80211_BASIC_RATE_MASK) { + case IEEE80211_CCK_RATE_1MB: + case IEEE80211_CCK_RATE_2MB: + case IEEE80211_CCK_RATE_5MB: + case IEEE80211_CCK_RATE_11MB: + return 1; + } + return 0; +} /* ieee80211.c */ extern void free_ieee80211(struct net_device *dev); @@ -817,18 +1080,30 @@ 
extern struct net_device *alloc_ieee80211(int sizeof_priv); extern int ieee80211_set_encryption(struct ieee80211_device *ieee); /* ieee80211_tx.c */ -extern int ieee80211_xmit(struct sk_buff *skb, - struct net_device *dev); +extern int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev); extern void ieee80211_txb_free(struct ieee80211_txb *); - +extern int ieee80211_tx_frame(struct ieee80211_device *ieee, + struct ieee80211_hdr *frame, int len); /* ieee80211_rx.c */ extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, struct ieee80211_rx_stats *rx_stats); extern void ieee80211_rx_mgt(struct ieee80211_device *ieee, - struct ieee80211_hdr *header, + struct ieee80211_hdr_4addr *header, struct ieee80211_rx_stats *stats); +/* ieee80211_geo.c */ +extern const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device + *ieee); +extern int ieee80211_set_geo(struct ieee80211_device *ieee, + const struct ieee80211_geo *geo); + +extern int ieee80211_is_valid_channel(struct ieee80211_device *ieee, + u8 channel); +extern int ieee80211_channel_to_index(struct ieee80211_device *ieee, + u8 channel); +extern u8 ieee80211_freq_to_channel(struct ieee80211_device *ieee, u32 freq); + /* ieee80211_wx.c */ extern int ieee80211_wx_get_scan(struct ieee80211_device *ieee, struct iw_request_info *info, @@ -839,17 +1114,21 @@ extern int ieee80211_wx_set_encode(struct ieee80211_device *ieee, extern int ieee80211_wx_get_encode(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *key); +extern int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra); +extern int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra); - -extern inline void ieee80211_increment_scans(struct ieee80211_device *ieee) +static inline void ieee80211_increment_scans(struct ieee80211_device *ieee) { ieee->scans++; } -extern inline int ieee80211_get_scans(struct ieee80211_device *ieee) +static inline int ieee80211_get_scans(struct ieee80211_device *ieee) { return ieee->scans; } - -#endif /* IEEE80211_H */ +#endif /* IEEE80211_H */ diff --git a/include/net/ieee80211_crypt.h b/include/net/ieee80211_crypt.h index b58a3bcc0dc0..0a1c2d82ca4b 100644 --- a/include/net/ieee80211_crypt.h +++ b/include/net/ieee80211_crypt.h @@ -25,16 +25,22 @@ #include +enum { + IEEE80211_CRYPTO_TKIP_COUNTERMEASURES = (1 << 0), +}; + struct ieee80211_crypto_ops { const char *name; /* init new crypto context (e.g., allocate private data space, * select IV, etc.); returns NULL on failure or pointer to allocated * private data on success */ - void * (*init)(int keyidx); + void *(*init) (int keyidx); /* deinitialize crypto context and free allocated private data */ - void (*deinit)(void *priv); + void (*deinit) (void *priv); + + int (*build_iv) (struct sk_buff * skb, int hdr_len, void *priv); /* encrypt/decrypt return < 0 on error or >= 0 on success. The return * value from decrypt_mpdu is passed as the keyidx value for @@ -42,34 +48,39 @@ struct ieee80211_crypto_ops { * encryption; if not, error will be returned; these functions are * called for all MPDUs (i.e., fragments). 
*/ - int (*encrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv); - int (*decrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv); + int (*encrypt_mpdu) (struct sk_buff * skb, int hdr_len, void *priv); + int (*decrypt_mpdu) (struct sk_buff * skb, int hdr_len, void *priv); /* These functions are called for full MSDUs, i.e. full frames. * These can be NULL if full MSDU operations are not needed. */ - int (*encrypt_msdu)(struct sk_buff *skb, int hdr_len, void *priv); - int (*decrypt_msdu)(struct sk_buff *skb, int keyidx, int hdr_len, - void *priv); + int (*encrypt_msdu) (struct sk_buff * skb, int hdr_len, void *priv); + int (*decrypt_msdu) (struct sk_buff * skb, int keyidx, int hdr_len, + void *priv); - int (*set_key)(void *key, int len, u8 *seq, void *priv); - int (*get_key)(void *key, int len, u8 *seq, void *priv); + int (*set_key) (void *key, int len, u8 * seq, void *priv); + int (*get_key) (void *key, int len, u8 * seq, void *priv); /* procfs handler for printing out key information and possible * statistics */ - char * (*print_stats)(char *p, void *priv); + char *(*print_stats) (char *p, void *priv); + + /* Crypto specific flag get/set for configuration settings */ + unsigned long (*get_flags) (void *priv); + unsigned long (*set_flags) (unsigned long flags, void *priv); /* maximum number of bytes added by encryption; encrypt buf is * allocated with extra_prefix_len bytes, copy of in_buf, and * extra_postfix_len; encrypt need not use all this space, but * the result must start at the beginning of the buffer and correct * length must be returned */ - int extra_prefix_len, extra_postfix_len; + int extra_mpdu_prefix_len, extra_mpdu_postfix_len; + int extra_msdu_prefix_len, extra_msdu_postfix_len; struct module *owner; }; struct ieee80211_crypt_data { - struct list_head list; /* delayed deletion list */ + struct list_head list; /* delayed deletion list */ struct ieee80211_crypto_ops *ops; void *priv; atomic_t refcnt; @@ -77,10 +88,11 @@ struct ieee80211_crypt_data { int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops); int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops); -struct ieee80211_crypto_ops * ieee80211_get_crypto_ops(const char *name); +struct ieee80211_crypto_ops *ieee80211_get_crypto_ops(const char *name); void ieee80211_crypt_deinit_entries(struct ieee80211_device *, int); void ieee80211_crypt_deinit_handler(unsigned long); void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee, struct ieee80211_crypt_data **crypt); +void ieee80211_crypt_quiescing(struct ieee80211_device *ieee); #endif diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h new file mode 100644 index 000000000000..429b73892a5f --- /dev/null +++ b/include/net/ieee80211_radiotap.h @@ -0,0 +1,231 @@ +/* $FreeBSD: src/sys/net80211/ieee80211_radiotap.h,v 1.5 2005/01/22 20:12:05 sam Exp $ */ +/* $NetBSD: ieee80211_radiotap.h,v 1.11 2005/06/22 06:16:02 dyoung Exp $ */ + +/*- + * Copyright (c) 2003, 2004 David Young. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * 3. The name of David Young may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY DAVID YOUNG ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DAVID + * YOUNG BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + */ + +/* + * Modifications to fit into the linux IEEE 802.11 stack, + * Mike Kershaw (dragorn@kismetwireless.net) + */ + +#ifndef IEEE80211RADIOTAP_H +#define IEEE80211RADIOTAP_H + +#include +#include + +/* Radiotap header version (from official NetBSD feed) */ +#define IEEE80211RADIOTAP_VERSION "1.5" +/* Base version of the radiotap packet header data */ +#define PKTHDR_RADIOTAP_VERSION 0 + +/* A generic radio capture format is desirable. There is one for + * Linux, but it is neither rigidly defined (there were not even + * units given for some fields) nor easily extensible. + * + * I suggest the following extensible radio capture format. It is + * based on a bitmap indicating which fields are present. + * + * I am trying to describe precisely what the application programmer + * should expect in the following, and for that reason I tell the + * units and origin of each measurement (where it applies), or else I + * use sufficiently weaselly language ("is a monotonically nondecreasing + * function of...") that I cannot set false expectations for lawyerly + * readers. + */ + +/* XXX tcpdump/libpcap do not tolerate variable-length headers, + * yet, so we pad every radiotap header to 64 bytes. Ugh. + */ +#define IEEE80211_RADIOTAP_HDRLEN 64 + +/* The radio capture header precedes the 802.11 header. */ +struct ieee80211_radiotap_header { + u8 it_version; /* Version 0. Only increases + * for drastic changes, + * introduction of compatible + * new fields does not count. + */ + u8 it_pad; + u16 it_len; /* length of the whole + * header in bytes, including + * it_version, it_pad, + * it_len, and data fields. + */ + u32 it_present; /* A bitmap telling which + * fields are present. Set bit 31 + * (0x80000000) to extend the + * bitmap by another 32 bits. + * Additional extensions are made + * by setting bit 31. + */ +}; + +/* Name Data type Units + * ---- --------- ----- + * + * IEEE80211_RADIOTAP_TSFT u64 microseconds + * + * Value in microseconds of the MAC's 64-bit 802.11 Time + * Synchronization Function timer when the first bit of the + * MPDU arrived at the MAC. For received frames, only. + * + * IEEE80211_RADIOTAP_CHANNEL 2 x u16 MHz, bitmap + * + * Tx/Rx frequency in MHz, followed by flags (see below). + * + * IEEE80211_RADIOTAP_FHSS u16 see below + * + * For frequency-hopping radios, the hop set (first byte) + * and pattern (second byte). + * + * IEEE80211_RADIOTAP_RATE u8 500kb/s + * + * Tx/Rx data rate + * + * IEEE80211_RADIOTAP_DBM_ANTSIGNAL int8_t decibels from + * one milliwatt (dBm) + * + * RF signal power at the antenna, decibel difference from + * one milliwatt. 
+ * + * IEEE80211_RADIOTAP_DBM_ANTNOISE int8_t decibels from + * one milliwatt (dBm) + * + * RF noise power at the antenna, decibel difference from one + * milliwatt. + * + * IEEE80211_RADIOTAP_DB_ANTSIGNAL u8 decibel (dB) + * + * RF signal power at the antenna, decibel difference from an + * arbitrary, fixed reference. + * + * IEEE80211_RADIOTAP_DB_ANTNOISE u8 decibel (dB) + * + * RF noise power at the antenna, decibel difference from an + * arbitrary, fixed reference point. + * + * IEEE80211_RADIOTAP_LOCK_QUALITY u16 unitless + * + * Quality of Barker code lock. Unitless. Monotonically + * nondecreasing with "better" lock strength. Called "Signal + * Quality" in datasheets. (Is there a standard way to measure + * this?) + * + * IEEE80211_RADIOTAP_TX_ATTENUATION u16 unitless + * + * Transmit power expressed as unitless distance from max + * power set at factory calibration. 0 is max power. + * Monotonically nondecreasing with lower power levels. + * + * IEEE80211_RADIOTAP_DB_TX_ATTENUATION u16 decibels (dB) + * + * Transmit power expressed as decibel distance from max power + * set at factory calibration. 0 is max power. Monotonically + * nondecreasing with lower power levels. + * + * IEEE80211_RADIOTAP_DBM_TX_POWER int8_t decibels from + * one milliwatt (dBm) + * + * Transmit power expressed as dBm (decibels from a 1 milliwatt + * reference). This is the absolute power level measured at + * the antenna port. + * + * IEEE80211_RADIOTAP_FLAGS u8 bitmap + * + * Properties of transmitted and received frames. See flags + * defined below. + * + * IEEE80211_RADIOTAP_ANTENNA u8 antenna index + * + * Unitless indication of the Rx/Tx antenna for this packet. + * The first antenna is antenna 0. + * + * IEEE80211_RADIOTAP_FCS u32 data + * + * FCS from frame in network byte order. + */ +enum ieee80211_radiotap_type { + IEEE80211_RADIOTAP_TSFT = 0, + IEEE80211_RADIOTAP_FLAGS = 1, + IEEE80211_RADIOTAP_RATE = 2, + IEEE80211_RADIOTAP_CHANNEL = 3, + IEEE80211_RADIOTAP_FHSS = 4, + IEEE80211_RADIOTAP_DBM_ANTSIGNAL = 5, + IEEE80211_RADIOTAP_DBM_ANTNOISE = 6, + IEEE80211_RADIOTAP_LOCK_QUALITY = 7, + IEEE80211_RADIOTAP_TX_ATTENUATION = 8, + IEEE80211_RADIOTAP_DB_TX_ATTENUATION = 9, + IEEE80211_RADIOTAP_DBM_TX_POWER = 10, + IEEE80211_RADIOTAP_ANTENNA = 11, + IEEE80211_RADIOTAP_DB_ANTSIGNAL = 12, + IEEE80211_RADIOTAP_DB_ANTNOISE = 13, + IEEE80211_RADIOTAP_EXT = 31, +}; + +/* Channel flags. */ +#define IEEE80211_CHAN_TURBO 0x0010 /* Turbo channel */ +#define IEEE80211_CHAN_CCK 0x0020 /* CCK channel */ +#define IEEE80211_CHAN_OFDM 0x0040 /* OFDM channel */ +#define IEEE80211_CHAN_2GHZ 0x0080 /* 2 GHz spectrum channel. 
*/ +#define IEEE80211_CHAN_5GHZ 0x0100 /* 5 GHz spectrum channel */ +#define IEEE80211_CHAN_PASSIVE 0x0200 /* Only passive scan allowed */ +#define IEEE80211_CHAN_DYN 0x0400 /* Dynamic CCK-OFDM channel */ +#define IEEE80211_CHAN_GFSK 0x0800 /* GFSK channel (FHSS PHY) */ + +/* For IEEE80211_RADIOTAP_FLAGS */ +#define IEEE80211_RADIOTAP_F_CFP 0x01 /* sent/received + * during CFP + */ +#define IEEE80211_RADIOTAP_F_SHORTPRE 0x02 /* sent/received + * with short + * preamble + */ +#define IEEE80211_RADIOTAP_F_WEP 0x04 /* sent/received + * with WEP encryption + */ +#define IEEE80211_RADIOTAP_F_FRAG 0x08 /* sent/received + * with fragmentation + */ +#define IEEE80211_RADIOTAP_F_FCS 0x10 /* frame includes FCS */ +#define IEEE80211_RADIOTAP_F_DATAPAD 0x20 /* frame has padding between + * 802.11 header and payload + * (to 32-bit boundary) + */ + +/* Ugly macro to convert literal channel numbers into their mhz equivalents + * There are certianly some conditions that will break this (like feeding it '30') + * but they shouldn't arise since nothing talks on channel 30. */ +#define ieee80211chan2mhz(x) \ + (((x) <= 14) ? \ + (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \ + ((x) + 1000) * 5) + +#endif /* IEEE80211_RADIOTAP_H */ diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index 03df3b157960..5a2beed5a770 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -26,19 +26,18 @@ struct inet_hashinfo; /* I have no idea if this is a good hash for v6 or not. -DaveM */ -static inline int inet6_ehashfn(const struct in6_addr *laddr, const u16 lport, - const struct in6_addr *faddr, const u16 fport, - const int ehash_size) +static inline unsigned int inet6_ehashfn(const struct in6_addr *laddr, const u16 lport, + const struct in6_addr *faddr, const u16 fport) { - int hashent = (lport ^ fport); + unsigned int hashent = (lport ^ fport); hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]); hashent ^= hashent >> 16; hashent ^= hashent >> 8; - return (hashent & (ehash_size - 1)); + return hashent; } -static inline int inet6_sk_ehashfn(const struct sock *sk, const int ehash_size) +static inline int inet6_sk_ehashfn(const struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); const struct ipv6_pinfo *np = inet6_sk(sk); @@ -46,7 +45,7 @@ static inline int inet6_sk_ehashfn(const struct sock *sk, const int ehash_size) const struct in6_addr *faddr = &np->daddr; const __u16 lport = inet->num; const __u16 fport = inet->dport; - return inet6_ehashfn(laddr, lport, faddr, fport, ehash_size); + return inet6_ehashfn(laddr, lport, faddr, fport); } /* @@ -69,14 +68,14 @@ static inline struct sock * /* Optimize here for direct hit, only listening connections can * have wildcards anyways. */ - const int hash = inet6_ehashfn(daddr, hnum, saddr, sport, - hashinfo->ehash_size); - struct inet_ehash_bucket *head = &hashinfo->ehash[hash]; + unsigned int hash = inet6_ehashfn(daddr, hnum, saddr, sport); + struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash); + prefetch(head->chain.first); read_lock(&head->lock); sk_for_each(sk, node, &head->chain) { /* For IPV6 do the cheaper port and family tests first. */ - if (INET6_MATCH(sk, saddr, daddr, ports, dif)) + if (INET6_MATCH(sk, hash, saddr, daddr, ports, dif)) goto hit; /* You sunk my battleship! */ } /* Must check for a TIME_WAIT'er before going to listener hash. 
*/ diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 651f824c1008..b0c99060b78d 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -94,7 +94,7 @@ static inline void *inet_csk_ca(const struct sock *sk) extern struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req, - const unsigned int __nocast priority); + const gfp_t priority); enum inet_csk_ack_state_t { ICSK_ACK_SCHED = 1, diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 646b6ea7fe26..f50f95968340 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -40,7 +40,7 @@ struct inet_ehash_bucket { rwlock_t lock; struct hlist_head chain; -} __attribute__((__aligned__(8))); +}; /* There are a few simple rules, which allow for local port reuse by * an application. In essence: @@ -108,7 +108,7 @@ struct inet_hashinfo { struct inet_bind_hashbucket *bhash; int bhash_size; - int ehash_size; + unsigned int ehash_size; /* All sockets in TCP_LISTEN state will be in here. This is the only * table where wildcard'd TCP sockets can exist. Hash function here @@ -130,17 +130,16 @@ struct inet_hashinfo { int port_rover; }; -static inline int inet_ehashfn(const __u32 laddr, const __u16 lport, - const __u32 faddr, const __u16 fport, - const int ehash_size) +static inline unsigned int inet_ehashfn(const __u32 laddr, const __u16 lport, + const __u32 faddr, const __u16 fport) { - int h = (laddr ^ lport) ^ (faddr ^ fport); + unsigned int h = (laddr ^ lport) ^ (faddr ^ fport); h ^= h >> 16; h ^= h >> 8; - return h & (ehash_size - 1); + return h; } -static inline int inet_sk_ehashfn(const struct sock *sk, const int ehash_size) +static inline int inet_sk_ehashfn(const struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); const __u32 laddr = inet->rcv_saddr; @@ -148,7 +147,14 @@ static inline int inet_sk_ehashfn(const struct sock *sk, const int ehash_size) const __u32 faddr = inet->daddr; const __u16 fport = inet->dport; - return inet_ehashfn(laddr, lport, faddr, fport, ehash_size); + return inet_ehashfn(laddr, lport, faddr, fport); +} + +static inline struct inet_ehash_bucket *inet_ehash_bucket( + struct inet_hashinfo *hashinfo, + unsigned int hash) +{ + return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)]; } extern struct inet_bind_bucket * @@ -235,9 +241,11 @@ static inline void __inet_hash(struct inet_hashinfo *hashinfo, lock = &hashinfo->lhash_lock; inet_listen_wlock(hashinfo); } else { - sk->sk_hashent = inet_sk_ehashfn(sk, hashinfo->ehash_size); - list = &hashinfo->ehash[sk->sk_hashent].chain; - lock = &hashinfo->ehash[sk->sk_hashent].lock; + struct inet_ehash_bucket *head; + sk->sk_hash = inet_sk_ehashfn(sk); + head = inet_ehash_bucket(hashinfo, sk->sk_hash); + list = &head->chain; + lock = &head->lock; write_lock(lock); } __sk_add_node(sk, list); @@ -268,9 +276,8 @@ static inline void inet_unhash(struct inet_hashinfo *hashinfo, struct sock *sk) inet_listen_wlock(hashinfo); lock = &hashinfo->lhash_lock; } else { - struct inet_ehash_bucket *head = &hashinfo->ehash[sk->sk_hashent]; - lock = &head->lock; - write_lock_bh(&head->lock); + lock = &inet_ehash_bucket(hashinfo, sk->sk_hash)->lock; + write_lock_bh(lock); } if (__sk_del_node_init(sk)) @@ -337,23 +344,27 @@ sherry_cache: #define INET_ADDR_COOKIE(__name, __saddr, __daddr) \ const __u64 __name = (((__u64)(__daddr)) << 32) | ((__u64)(__saddr)); #endif /* __BIG_ENDIAN */ -#define INET_MATCH(__sk, __cookie, __saddr, __daddr, __ports, 
__dif)\ - (((*((__u64 *)&(inet_sk(__sk)->daddr))) == (__cookie)) && \ +#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)\ + (((__sk)->sk_hash == (__hash)) && \ + ((*((__u64 *)&(inet_sk(__sk)->daddr))) == (__cookie)) && \ ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) -#define INET_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\ - (((*((__u64 *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) && \ +#define INET_TW_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)\ + (((__sk)->sk_hash == (__hash)) && \ + ((*((__u64 *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) && \ ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) #else /* 32-bit arch */ #define INET_ADDR_COOKIE(__name, __saddr, __daddr) -#define INET_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif) \ - ((inet_sk(__sk)->daddr == (__saddr)) && \ +#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif) \ + (((__sk)->sk_hash == (__hash)) && \ + (inet_sk(__sk)->daddr == (__saddr)) && \ (inet_sk(__sk)->rcv_saddr == (__daddr)) && \ ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports)) && \ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) -#define INET_TW_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif) \ - ((inet_twsk(__sk)->tw_daddr == (__saddr)) && \ +#define INET_TW_MATCH(__sk, __hash,__cookie, __saddr, __daddr, __ports, __dif) \ + (((__sk)->sk_hash == (__hash)) && \ + (inet_twsk(__sk)->tw_daddr == (__saddr)) && \ (inet_twsk(__sk)->tw_rcv_saddr == (__daddr)) && \ ((*((__u32 *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) && \ (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) @@ -378,18 +389,19 @@ static inline struct sock * /* Optimize here for direct hit, only listening connections can * have wildcards anyways. */ - const int hash = inet_ehashfn(daddr, hnum, saddr, sport, hashinfo->ehash_size); - struct inet_ehash_bucket *head = &hashinfo->ehash[hash]; + unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport); + struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash); + prefetch(head->chain.first); read_lock(&head->lock); sk_for_each(sk, node, &head->chain) { - if (INET_MATCH(sk, acookie, saddr, daddr, ports, dif)) + if (INET_MATCH(sk, hash, acookie, saddr, daddr, ports, dif)) goto hit; /* You sunk my battleship! */ } /* Must check for a TIME_WAIT'er before going to listener hash. */ sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) { - if (INET_TW_MATCH(sk, acookie, saddr, daddr, ports, dif)) + if (INET_TW_MATCH(sk, hash, acookie, saddr, daddr, ports, dif)) goto hit; } sk = NULL; diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 3b070352e869..28f7b2103505 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -112,6 +113,7 @@ struct inet_timewait_sock { #define tw_node __tw_common.skc_node #define tw_bind_node __tw_common.skc_bind_node #define tw_refcnt __tw_common.skc_refcnt +#define tw_hash __tw_common.skc_hash #define tw_prot __tw_common.skc_prot volatile unsigned char tw_substate; /* 3 bits hole, try to pack */ @@ -126,7 +128,6 @@ struct inet_timewait_sock { /* And these are ours. 
*/ __u8 tw_ipv6only:1; /* 31 bits hole, try to pack */ - int tw_hashent; int tw_timeout; unsigned long tw_ttd; struct inet_bind_bucket *tw_tb; @@ -193,11 +194,13 @@ static inline u32 inet_rcv_saddr(const struct sock *sk) static inline void inet_twsk_put(struct inet_timewait_sock *tw) { if (atomic_dec_and_test(&tw->tw_refcnt)) { + struct module *owner = tw->tw_prot->owner; #ifdef SOCK_REFCNT_DEBUG printk(KERN_DEBUG "%s timewait_sock %p released\n", tw->tw_prot->name, tw); #endif kmem_cache_free(tw->tw_prot->twsk_slab, tw); + module_put(owner); } } diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 06b4235aa016..3b5559a023a4 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -832,7 +832,7 @@ extern void ip_vs_app_inc_put(struct ip_vs_app *inc); extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff **pskb); extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff **pskb); -extern int ip_vs_skb_replace(struct sk_buff *skb, int pri, +extern int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri, char *o_buf, int o_len, char *n_buf, int n_len); extern int ip_vs_app_init(void); extern void ip_vs_app_cleanup(void); diff --git a/include/net/llc.h b/include/net/llc.h index 71769a5aeef3..1adb2ef3f6f7 100644 --- a/include/net/llc.h +++ b/include/net/llc.h @@ -17,6 +17,8 @@ #include #include +#include + struct net_device; struct packet_type; struct sk_buff; @@ -44,6 +46,7 @@ struct llc_sap { unsigned char state; unsigned char p_bit; unsigned char f_bit; + atomic_t refcnt; int (*rcv_func)(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, @@ -81,13 +84,27 @@ extern struct llc_sap *llc_sap_open(unsigned char lsap, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)); +static inline void llc_sap_hold(struct llc_sap *sap) +{ + atomic_inc(&sap->refcnt); +} + extern void llc_sap_close(struct llc_sap *sap); +static inline void llc_sap_put(struct llc_sap *sap) +{ + if (atomic_dec_and_test(&sap->refcnt)) + llc_sap_close(sap); +} + extern struct llc_sap *llc_sap_find(unsigned char sap_value); extern int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb, unsigned char *dmac, unsigned char dsap); +extern void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb); +extern void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb); + extern int llc_station_init(void); extern void llc_station_exit(void); @@ -98,4 +115,17 @@ extern void llc_proc_exit(void); #define llc_proc_init() (0) #define llc_proc_exit() do { } while(0) #endif /* CONFIG_PROC_FS */ +#ifdef CONFIG_SYSCTL +extern int llc_sysctl_init(void); +extern void llc_sysctl_exit(void); + +extern int sysctl_llc2_ack_timeout; +extern int sysctl_llc2_busy_timeout; +extern int sysctl_llc2_p_timeout; +extern int sysctl_llc2_rej_timeout; +extern int sysctl_llc_station_ack_timeout; +#else +#define llc_sysctl_init() (0) +#define llc_sysctl_exit() do { } while(0) +#endif /* CONFIG_SYSCTL */ #endif /* LLC_H */ diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h index 8ad3bc2c23d7..00730d21b522 100644 --- a/include/net/llc_conn.h +++ b/include/net/llc_conn.h @@ -19,14 +19,14 @@ #define LLC_EVENT 1 #define LLC_PACKET 2 -#define LLC_P_TIME 2 -#define LLC_ACK_TIME 1 -#define LLC_REJ_TIME 3 -#define LLC_BUSY_TIME 3 +#define LLC2_P_TIME 2 +#define LLC2_ACK_TIME 1 +#define LLC2_REJ_TIME 3 +#define LLC2_BUSY_TIME 3 struct llc_timer { struct timer_list timer; - u16 expire; /* timer expire time */ + unsigned long expire; /* timer expire time */ }; struct llc_sock { @@ 
-38,6 +38,7 @@ struct llc_sock { struct llc_addr laddr; /* lsap/mac pair */ struct llc_addr daddr; /* dsap/mac pair */ struct net_device *dev; /* device to send to remote */ + u32 copied_seq; /* head of yet unread data */ u8 retry_count; /* number of retries */ u8 ack_must_be_send; u8 first_pdu_Ns; @@ -92,7 +93,8 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb) return skb->cb[sizeof(skb->cb) - 1]; } -extern struct sock *llc_sk_alloc(int family, int priority, struct proto *prot); +extern struct sock *llc_sk_alloc(int family, gfp_t priority, + struct proto *prot); extern void llc_sk_free(struct sock *sk); extern void llc_sk_reset(struct sock *sk); @@ -115,5 +117,4 @@ extern void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk); extern u8 llc_data_accept_state(u8 state); extern void llc_build_offset_table(void); -extern int llc_release_sockets(struct llc_sap *sap); #endif /* LLC_CONN_H */ diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h index f45c37d89cf7..c7a959428b4f 100644 --- a/include/net/llc_pdu.h +++ b/include/net/llc_pdu.h @@ -254,8 +254,10 @@ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa) { if (skb->protocol == ntohs(ETH_P_802_2)) memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN); - else if (skb->protocol == ntohs(ETH_P_TR_802_2)) + else if (skb->protocol == ntohs(ETH_P_TR_802_2)) { memcpy(sa, tr_hdr(skb)->saddr, ETH_ALEN); + *sa &= 0x7F; + } } /** diff --git a/include/net/llc_sap.h b/include/net/llc_sap.h index 353baaa627f3..2c56dbece729 100644 --- a/include/net/llc_sap.h +++ b/include/net/llc_sap.h @@ -12,11 +12,15 @@ * See the GNU General Public License for more details. */ struct llc_sap; +struct net_device; struct sk_buff; +struct sock; extern void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb); -extern void llc_save_primitive(struct sk_buff* skb, unsigned char prim); -extern struct sk_buff *llc_alloc_frame(void); +extern void llc_save_primitive(struct sock *sk, struct sk_buff* skb, + unsigned char prim); +extern struct sk_buff *llc_alloc_frame(struct sock *sk, + struct net_device *dev); extern void llc_build_and_send_test_pkt(struct llc_sap *sap, struct sk_buff *skb, diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index e1d5ec1c23c0..8f241216f46b 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -125,7 +125,7 @@ */ extern struct sock *sctp_get_ctl_sock(void); extern int sctp_copy_local_addr_list(struct sctp_bind_addr *, - sctp_scope_t, unsigned int __nocast gfp, + sctp_scope_t, gfp_t gfp, int flags); extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family); extern int sctp_register_pf(struct sctp_pf *, sa_family_t); diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 58462164d960..1eac3d0eb7a9 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -181,17 +181,17 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t, int sctp_chunk_iif(const struct sctp_chunk *); struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *, struct sctp_chunk *, - unsigned int __nocast gfp); + gfp_t gfp); __u32 sctp_generate_verification_tag(void); void sctp_populate_tie_tags(__u8 *cookie, __u32 curTag, __u32 hisTag); /* Prototypes for chunk-building functions. 
*/ struct sctp_chunk *sctp_make_init(const struct sctp_association *, const struct sctp_bind_addr *, - unsigned int __nocast gfp, int vparam_len); + gfp_t gfp, int vparam_len); struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *, const struct sctp_chunk *, - const unsigned int __nocast gfp, + const gfp_t gfp, const int unkparam_len); struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *, const struct sctp_chunk *); @@ -265,7 +265,7 @@ int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype, struct sctp_endpoint *, struct sctp_association *asoc, void *event_arg, - unsigned int __nocast gfp); + gfp_t gfp); /* 2nd level prototypes */ void sctp_generate_t3_rtx_event(unsigned long peer); @@ -276,7 +276,7 @@ void sctp_ootb_pkt_free(struct sctp_packet *); struct sctp_association *sctp_unpack_cookie(const struct sctp_endpoint *, const struct sctp_association *, struct sctp_chunk *, - unsigned int __nocast gfp, int *err, + gfp_t gfp, int *err, struct sctp_chunk **err_chk_p); int sctp_addip_addr_config(struct sctp_association *, sctp_param_t, struct sockaddr_storage*, int); diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 994009bbe3b4..9c385b6417c7 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -446,7 +446,7 @@ struct sctp_ssnmap { }; struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, - unsigned int __nocast gfp); + gfp_t gfp); void sctp_ssnmap_free(struct sctp_ssnmap *map); void sctp_ssnmap_clear(struct sctp_ssnmap *map); @@ -947,7 +947,7 @@ struct sctp_transport { }; struct sctp_transport *sctp_transport_new(const union sctp_addr *, - unsigned int __nocast); + gfp_t); void sctp_transport_set_owner(struct sctp_transport *, struct sctp_association *); void sctp_transport_route(struct sctp_transport *, union sctp_addr *, @@ -1095,10 +1095,10 @@ void sctp_bind_addr_init(struct sctp_bind_addr *, __u16 port); void sctp_bind_addr_free(struct sctp_bind_addr *); int sctp_bind_addr_copy(struct sctp_bind_addr *dest, const struct sctp_bind_addr *src, - sctp_scope_t scope, unsigned int __nocast gfp, + sctp_scope_t scope, gfp_t gfp, int flags); int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *, - unsigned int __nocast gfp); + gfp_t gfp); int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *); int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *, struct sctp_sock *); @@ -1108,9 +1108,9 @@ union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp, struct sctp_sock *opt); union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp, int *addrs_len, - unsigned int __nocast gfp); + gfp_t gfp); int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len, - __u16 port, unsigned int __nocast gfp); + __u16 port, gfp_t gfp); sctp_scope_t sctp_scope(const union sctp_addr *); int sctp_in_scope(const union sctp_addr *addr, const sctp_scope_t scope); @@ -1239,7 +1239,7 @@ static inline struct sctp_endpoint *sctp_ep(struct sctp_ep_common *base) } /* These are function signatures for manipulating endpoints. 
*/ -struct sctp_endpoint *sctp_endpoint_new(struct sock *, unsigned int __nocast); +struct sctp_endpoint *sctp_endpoint_new(struct sock *, gfp_t); void sctp_endpoint_free(struct sctp_endpoint *); void sctp_endpoint_put(struct sctp_endpoint *); void sctp_endpoint_hold(struct sctp_endpoint *); @@ -1260,7 +1260,7 @@ int sctp_verify_init(const struct sctp_association *asoc, sctp_cid_t, struct sctp_chunk **err_chunk); int sctp_process_init(struct sctp_association *, sctp_cid_t cid, const union sctp_addr *peer, - sctp_init_chunk_t *init, unsigned int __nocast gfp); + sctp_init_chunk_t *init, gfp_t gfp); __u32 sctp_generate_tag(const struct sctp_endpoint *); __u32 sctp_generate_tsn(const struct sctp_endpoint *); @@ -1723,7 +1723,7 @@ static inline struct sctp_association *sctp_assoc(struct sctp_ep_common *base) struct sctp_association * sctp_association_new(const struct sctp_endpoint *, const struct sock *, - sctp_scope_t scope, unsigned int __nocast gfp); + sctp_scope_t scope, gfp_t gfp); void sctp_association_free(struct sctp_association *); void sctp_association_put(struct sctp_association *); void sctp_association_hold(struct sctp_association *); @@ -1739,7 +1739,7 @@ int sctp_assoc_lookup_laddr(struct sctp_association *asoc, const union sctp_addr *laddr); struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *, const union sctp_addr *address, - const unsigned int __nocast gfp, + const gfp_t gfp, const int peer_state); void sctp_assoc_del_peer(struct sctp_association *asoc, const union sctp_addr *addr); @@ -1764,10 +1764,10 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned); void sctp_assoc_set_primary(struct sctp_association *, struct sctp_transport *); int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *, - unsigned int __nocast); + gfp_t); int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *, struct sctp_cookie*, - unsigned int __nocast gfp); + gfp_t gfp); int sctp_cmp_addr_exact(const union sctp_addr *ss1, const union sctp_addr *ss2); diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h index 90fe4bf6754f..6c40cfc4832d 100644 --- a/include/net/sctp/ulpevent.h +++ b/include/net/sctp/ulpevent.h @@ -88,7 +88,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_assoc_change( __u16 error, __u16 outbound, __u16 inbound, - unsigned int __nocast gfp); + gfp_t gfp); struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change( const struct sctp_association *asoc, @@ -96,35 +96,35 @@ struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change( int flags, int state, int error, - unsigned int __nocast gfp); + gfp_t gfp); struct sctp_ulpevent *sctp_ulpevent_make_remote_error( const struct sctp_association *asoc, struct sctp_chunk *chunk, __u16 flags, - unsigned int __nocast gfp); + gfp_t gfp); struct sctp_ulpevent *sctp_ulpevent_make_send_failed( const struct sctp_association *asoc, struct sctp_chunk *chunk, __u16 flags, __u32 error, - unsigned int __nocast gfp); + gfp_t gfp); struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event( const struct sctp_association *asoc, __u16 flags, - unsigned int __nocast gfp); + gfp_t gfp); struct sctp_ulpevent *sctp_ulpevent_make_pdapi( const struct sctp_association *asoc, - __u32 indication, unsigned int __nocast gfp); + __u32 indication, gfp_t gfp); struct sctp_ulpevent *sctp_ulpevent_make_adaption_indication( - const struct sctp_association *asoc, unsigned int __nocast gfp); + const struct sctp_association *asoc, gfp_t gfp); struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc, 
struct sctp_chunk *chunk, - unsigned int __nocast gfp); + gfp_t gfp); void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, struct msghdr *); diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h index 1a60c6d943c1..a43c8788b650 100644 --- a/include/net/sctp/ulpqueue.h +++ b/include/net/sctp/ulpqueue.h @@ -62,22 +62,19 @@ struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *, void sctp_ulpq_free(struct sctp_ulpq *); /* Add a new DATA chunk for processing. */ -int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, - unsigned int __nocast); +int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, gfp_t); /* Add a new event for propagation to the ULP. */ int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sctp_ulpevent *ev); /* Renege previously received chunks. */ -void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, - unsigned int __nocast); +void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, gfp_t); /* Perform partial delivery. */ -void sctp_ulpq_partial_delivery(struct sctp_ulpq *, struct sctp_chunk *, - unsigned int __nocast); +void sctp_ulpq_partial_delivery(struct sctp_ulpq *, struct sctp_chunk *, gfp_t); /* Abort the partial delivery. */ -void sctp_ulpq_abort_pd(struct sctp_ulpq *, unsigned int __nocast); +void sctp_ulpq_abort_pd(struct sctp_ulpq *, gfp_t); /* Clear the partial data delivery condition on this socket. */ int sctp_clear_pd(struct sock *sk); diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h index f6328aeddcce..1c5f19f995ad 100644 --- a/include/net/sctp/user.h +++ b/include/net/sctp/user.h @@ -103,16 +103,20 @@ enum sctp_optname { #define SCTP_SOCKOPT_BINDX_REM SCTP_SOCKOPT_BINDX_REM SCTP_SOCKOPT_PEELOFF, /* peel off association. */ #define SCTP_SOCKOPT_PEELOFF SCTP_SOCKOPT_PEELOFF - SCTP_GET_PEER_ADDRS_NUM, /* Get number of peer addresss. */ -#define SCTP_GET_PEER_ADDRS_NUM SCTP_GET_PEER_ADDRS_NUM - SCTP_GET_PEER_ADDRS, /* Get all peer addresss. */ -#define SCTP_GET_PEER_ADDRS SCTP_GET_PEER_ADDRS - SCTP_GET_LOCAL_ADDRS_NUM, /* Get number of local addresss. */ -#define SCTP_GET_LOCAL_ADDRS_NUM SCTP_GET_LOCAL_ADDRS_NUM - SCTP_GET_LOCAL_ADDRS, /* Get all local addresss. */ -#define SCTP_GET_LOCAL_ADDRS SCTP_GET_LOCAL_ADDRS + SCTP_GET_PEER_ADDRS_NUM_OLD, /* Get number of peer addresss. */ +#define SCTP_GET_PEER_ADDRS_NUM_OLD SCTP_GET_PEER_ADDRS_NUM_OLD + SCTP_GET_PEER_ADDRS_OLD, /* Get all peer addresss. */ +#define SCTP_GET_PEER_ADDRS_OLD SCTP_GET_PEER_ADDRS_OLD + SCTP_GET_LOCAL_ADDRS_NUM_OLD, /* Get number of local addresss. */ +#define SCTP_GET_LOCAL_ADDRS_NUM_OLD SCTP_GET_LOCAL_ADDRS_NUM_OLD + SCTP_GET_LOCAL_ADDRS_OLD, /* Get all local addresss. */ +#define SCTP_GET_LOCAL_ADDRS_OLD SCTP_GET_LOCAL_ADDRS_OLD SCTP_SOCKOPT_CONNECTX, /* CONNECTX requests. */ #define SCTP_SOCKOPT_CONNECTX SCTP_SOCKOPT_CONNECTX + SCTP_GET_PEER_ADDRS, /* Get all peer addresss. */ +#define SCTP_GET_PEER_ADDRS SCTP_GET_PEER_ADDRS + SCTP_GET_LOCAL_ADDRS, /* Get all local addresss. 
*/ +#define SCTP_GET_LOCAL_ADDRS SCTP_GET_LOCAL_ADDRS }; /* @@ -239,7 +243,7 @@ struct sctp_paddr_change { int spc_state; int spc_error; sctp_assoc_t spc_assoc_id; -}; +} __attribute__((packed, aligned(4))); /* * spc_state: 32 bits (signed integer) @@ -464,7 +468,7 @@ struct sctp_assocparams { struct sctp_setpeerprim { sctp_assoc_t sspp_assoc_id; struct sockaddr_storage sspp_addr; -}; +} __attribute__((packed, aligned(4))); /* * 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) @@ -477,7 +481,7 @@ struct sctp_setpeerprim { struct sctp_prim { sctp_assoc_t ssp_assoc_id; struct sockaddr_storage ssp_addr; -}; +} __attribute__((packed, aligned(4))); /* * 7.1.11 Set Adaption Layer Indicator (SCTP_ADAPTION_LAYER) @@ -504,7 +508,7 @@ struct sctp_paddrparams { struct sockaddr_storage spp_address; __u32 spp_hbinterval; __u16 spp_pathmaxrxt; -}; +} __attribute__((packed, aligned(4))); /* * 7.2.2 Peer Address Information @@ -523,7 +527,7 @@ struct sctp_paddrinfo { __u32 spinfo_srtt; __u32 spinfo_rto; __u32 spinfo_mtu; -}; +} __attribute__((packed, aligned(4))); /* Peer addresses's state. */ enum sctp_spinfo_state { @@ -559,11 +563,16 @@ struct sctp_status { * SCTP_GET_LOCAL_ADDRS socket options used internally to implement * sctp_getpaddrs() and sctp_getladdrs() API. */ -struct sctp_getaddrs { +struct sctp_getaddrs_old { sctp_assoc_t assoc_id; int addr_num; struct sockaddr __user *addrs; }; +struct sctp_getaddrs { + sctp_assoc_t assoc_id; /*input*/ + __u32 addr_num; /*output*/ + __u8 addrs[0]; /*output, variable size*/ +}; /* These are bit fields for msghdr->msg_flags. See section 5.1. */ /* On user space Linux, these live in as an enum. */ diff --git a/include/net/sock.h b/include/net/sock.h index 8c48fbecb7cf..e0498bd36004 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -99,6 +99,7 @@ struct proto; * @skc_node: main hash linkage for various protocol lookup tables * @skc_bind_node: bind hash linkage for various protocol lookup tables * @skc_refcnt: reference count + * @skc_hash: hash value used with various protocol lookup tables * @skc_prot: protocol handlers inside a network family * * This is the minimal network layer representation of sockets, the header @@ -112,6 +113,7 @@ struct sock_common { struct hlist_node skc_node; struct hlist_node skc_bind_node; atomic_t skc_refcnt; + unsigned int skc_hash; struct proto *skc_prot; }; @@ -139,7 +141,6 @@ struct sock_common { * @sk_no_check: %SO_NO_CHECK setting, wether or not checkup packets * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO) * @sk_lingertime: %SO_LINGER l_linger setting - * @sk_hashent: hash entry in several tables (e.g. 
inet_hashinfo.ehash) * @sk_backlog: always used with the per-socket spinlock held * @sk_callback_lock: used with the callbacks in the end of this struct * @sk_error_queue: rarely used @@ -186,6 +187,7 @@ struct sock { #define sk_node __sk_common.skc_node #define sk_bind_node __sk_common.skc_bind_node #define sk_refcnt __sk_common.skc_refcnt +#define sk_hash __sk_common.skc_hash #define sk_prot __sk_common.skc_prot unsigned char sk_shutdown : 2, sk_no_check : 2, @@ -205,10 +207,9 @@ struct sock { struct sk_buff_head sk_write_queue; int sk_wmem_queued; int sk_forward_alloc; - unsigned int sk_allocation; + gfp_t sk_allocation; int sk_sndbuf; int sk_route_caps; - int sk_hashent; unsigned long sk_flags; unsigned long sk_lingertime; /* @@ -738,18 +739,18 @@ extern void FASTCALL(release_sock(struct sock *sk)); #define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock)) extern struct sock *sk_alloc(int family, - unsigned int __nocast priority, + gfp_t priority, struct proto *prot, int zero_it); extern void sk_free(struct sock *sk); extern struct sock *sk_clone(const struct sock *sk, - const unsigned int __nocast priority); + const gfp_t priority); extern struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, - unsigned int __nocast priority); + gfp_t priority); extern struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, - unsigned int __nocast priority); + gfp_t priority); extern void sock_wfree(struct sk_buff *skb); extern void sock_rfree(struct sk_buff *skb); @@ -765,7 +766,7 @@ extern struct sk_buff *sock_alloc_send_skb(struct sock *sk, int noblock, int *errcode); extern void *sock_kmalloc(struct sock *sk, int size, - unsigned int __nocast priority); + gfp_t priority); extern void sock_kfree_s(struct sock *sk, void *mem, int size); extern void sk_send_sigurg(struct sock *sk); @@ -1200,7 +1201,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk) static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk, int size, int mem, - unsigned int __nocast gfp) + gfp_t gfp) { struct sk_buff *skb; int hdr_len; @@ -1223,7 +1224,7 @@ static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk, static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, - unsigned int __nocast gfp) + gfp_t gfp) { return sk_stream_alloc_pskb(sk, size, 0, gfp); } @@ -1254,7 +1255,7 @@ static inline int sock_writeable(const struct sock *sk) return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2); } -static inline unsigned int __nocast gfp_any(void) +static inline gfp_t gfp_any(void) { return in_softirq() ? 
GFP_ATOMIC : GFP_KERNEL; } diff --git a/include/net/syncppp.h b/include/net/syncppp.h index 614cb6ba564e..877efa434700 100644 --- a/include/net/syncppp.h +++ b/include/net/syncppp.h @@ -86,7 +86,6 @@ static inline struct sppp *sppp_of(struct net_device *dev) void sppp_attach (struct ppp_device *pd); void sppp_detach (struct net_device *dev); -void sppp_input (struct net_device *dev, struct sk_buff *m); int sppp_do_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd); struct sk_buff *sppp_dequeue (struct net_device *dev); int sppp_isempty (struct net_device *dev); diff --git a/include/net/tcp.h b/include/net/tcp.h index 97af77c4d096..c24339c4e310 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -460,8 +460,7 @@ extern void tcp_send_probe0(struct sock *); extern void tcp_send_partial(struct sock *); extern int tcp_write_wakeup(struct sock *); extern void tcp_send_fin(struct sock *sk); -extern void tcp_send_active_reset(struct sock *sk, - unsigned int __nocast priority); +extern void tcp_send_active_reset(struct sock *sk, gfp_t priority); extern int tcp_send_synack(struct sock *); extern void tcp_push_one(struct sock *, unsigned int mss_now); extern void tcp_send_ack(struct sock *sk); diff --git a/include/net/xfrm.h b/include/net/xfrm.h index a9d0d8c5dfbf..5beae1ccd574 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -875,7 +875,7 @@ static inline int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl, unsig } #endif -struct xfrm_policy *xfrm_policy_alloc(int gfp); +struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp); extern int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*), void *); int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl); struct xfrm_policy *xfrm_policy_bysel(int dir, struct xfrm_selector *sel, @@ -931,4 +931,9 @@ static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b, } } +static inline int xfrm_policy_id2dir(u32 index) +{ + return index & 7; +} + #endif /* _NET_XFRM_H */ diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h index 0f7aacc33fe9..c8592c7e8eaa 100644 --- a/include/pcmcia/ss.h +++ b/include/pcmcia/ss.h @@ -21,6 +21,9 @@ #include #include #include +#ifdef CONFIG_CARDBUS +#include +#endif /* Definitions for card status flags for GetStatus */ #define SS_WRPROT 0x0001 @@ -233,7 +236,11 @@ struct pcmcia_socket { /* so is power hook */ int (*power_hook)(struct pcmcia_socket *sock, int operation); - +#ifdef CONFIG_CARDBUS + /* allows tuning the CB bridge before loading driver for the CB card */ + void (*tune_bridge)(struct pcmcia_socket *sock, struct pci_bus *bus); +#endif + /* state thread */ struct semaphore skt_sem; /* protects socket h/w state */ diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index 0e293fe733b0..4172e6841e3d 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h @@ -596,7 +596,7 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, u32 remote_qpn, u16 pkey_index, struct ib_ah *ah, int rmpp_active, int hdr_len, int data_len, - unsigned int __nocast gfp_mask); + gfp_t gfp_mask); /** * ib_free_send_mad - Returns data buffers used to send a MAD. 
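The hunks above and below all make the same kind of change: allocation-flag parameters that were declared as plain int or unsigned int __nocast become gfp_t, so sparse can type-check the flags from the caller all the way down to the allocator. A minimal sketch of the resulting calling convention follows; the names example_buf and example_buf_alloc are illustrative only and are not part of this series.

#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hardirq.h>

/*
 * Illustrative only -- these helpers are not in the tree; they just show
 * gfp_t flowing through an allocation path untouched, as the conversions
 * in this series arrange.
 */
struct example_buf {
	size_t len;
	void *data;
};

/* Allocation flags stay typed gfp_t all the way down, never int. */
static struct example_buf *example_buf_alloc(size_t len, gfp_t gfp_mask)
{
	struct example_buf *b = kmalloc(sizeof(*b), gfp_mask);

	if (!b)
		return NULL;
	b->data = kmalloc(len, gfp_mask);
	if (!b->data) {
		kfree(b);
		return NULL;
	}
	b->len = len;
	return b;
}

/* Callers pick context-appropriate flags, much as gfp_any() does above. */
static struct example_buf *example_buf_alloc_any(size_t len)
{
	return example_buf_alloc(len, in_softirq() ? GFP_ATOMIC : GFP_KERNEL);
}

With the parameter typed gfp_t, a caller that passes a bare integer where flags are expected is flagged by sparse instead of being silently accepted.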
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h index a7555c800ecf..f404fe21cc21 100644 --- a/include/rdma/ib_sa.h +++ b/include/rdma/ib_sa.h @@ -285,7 +285,7 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query); int ib_sa_path_rec_get(struct ib_device *device, u8 port_num, struct ib_sa_path_rec *rec, ib_sa_comp_mask comp_mask, - int timeout_ms, unsigned int __nocast gfp_mask, + int timeout_ms, gfp_t gfp_mask, void (*callback)(int status, struct ib_sa_path_rec *resp, void *context), @@ -296,7 +296,7 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num, u8 method, struct ib_sa_mcmember_rec *rec, ib_sa_comp_mask comp_mask, - int timeout_ms, unsigned int __nocast gfp_mask, + int timeout_ms, gfp_t gfp_mask, void (*callback)(int status, struct ib_sa_mcmember_rec *resp, void *context), @@ -307,7 +307,7 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method, struct ib_sa_service_rec *rec, ib_sa_comp_mask comp_mask, - int timeout_ms, unsigned int __nocast gfp_mask, + int timeout_ms, gfp_t gfp_mask, void (*callback)(int status, struct ib_sa_service_rec *resp, void *context), @@ -342,7 +342,7 @@ static inline int ib_sa_mcmember_rec_set(struct ib_device *device, u8 port_num, struct ib_sa_mcmember_rec *rec, ib_sa_comp_mask comp_mask, - int timeout_ms, unsigned int __nocast gfp_mask, + int timeout_ms, gfp_t gfp_mask, void (*callback)(int status, struct ib_sa_mcmember_rec *resp, void *context), @@ -384,7 +384,7 @@ static inline int ib_sa_mcmember_rec_delete(struct ib_device *device, u8 port_num, struct ib_sa_mcmember_rec *rec, ib_sa_comp_mask comp_mask, - int timeout_ms, unsigned int __nocast gfp_mask, + int timeout_ms, gfp_t gfp_mask, void (*callback)(int status, struct ib_sa_mcmember_rec *resp, void *context), diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index e16cf94870f2..e6f4c9e55df7 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -665,7 +665,6 @@ struct ib_ucontext { struct list_head qp_list; struct list_head srq_list; struct list_head ah_list; - spinlock_t lock; }; struct ib_uobject { diff --git a/include/rxrpc/call.h b/include/rxrpc/call.h index f48f27e9e0ab..b86f83743510 100644 --- a/include/rxrpc/call.h +++ b/include/rxrpc/call.h @@ -203,7 +203,7 @@ extern int rxrpc_call_write_data(struct rxrpc_call *call, size_t sioc, struct kvec *siov, uint8_t rxhdr_flags, - int alloc_flags, + gfp_t alloc_flags, int dup_data, size_t *size_sent); diff --git a/include/rxrpc/message.h b/include/rxrpc/message.h index 3a59df6870b2..b318f273d4f2 100644 --- a/include/rxrpc/message.h +++ b/include/rxrpc/message.h @@ -63,7 +63,7 @@ extern int rxrpc_conn_newmsg(struct rxrpc_connection *conn, uint8_t type, int count, struct kvec *diov, - int alloc_flags, + gfp_t alloc_flags, struct rxrpc_message **_msg); extern int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg); diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index bed4b7c9be99..e6b61fab66dd 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -146,7 +146,7 @@ struct scsi_cmnd { #define SCSI_STATE_MLQUEUE 0x100b -extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, int); +extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t); extern void scsi_put_command(struct scsi_cmnd *); extern void scsi_io_completion(struct scsi_cmnd *, unsigned int, unsigned int); extern void scsi_finish_command(struct scsi_cmnd *cmd); diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h 
index c0e4c67d836f..7ece05666feb 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -163,6 +163,7 @@ struct scsi_target { unsigned int id; /* target id ... replace * scsi_device.id eventually */ unsigned long create:1; /* signal that it needs to be added */ + char scsi_level; void *hostdata; /* available to low-level driver */ unsigned long starget_data[0]; /* for the transport */ /* starget_data must be the last element!!!! */ diff --git a/include/scsi/scsi_request.h b/include/scsi/scsi_request.h index 6a140020d7cb..2539debb7993 100644 --- a/include/scsi/scsi_request.h +++ b/include/scsi/scsi_request.h @@ -45,7 +45,7 @@ struct scsi_request { level driver) of this request */ }; -extern struct scsi_request *scsi_allocate_request(struct scsi_device *, int); +extern struct scsi_request *scsi_allocate_request(struct scsi_device *, gfp_t); extern void scsi_release_request(struct scsi_request *); extern void scsi_wait_req(struct scsi_request *, const void *cmnd, void *buffer, unsigned bufflen, diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h index 2857cf0472df..d11f34832a97 100644 --- a/include/sound/ac97_codec.h +++ b/include/sound/ac97_codec.h @@ -527,6 +527,8 @@ struct _snd_ac97 { struct device dev; }; +#define to_ac97_t(d) container_of(d, struct _snd_ac97, dev) + /* conditions */ static inline int ac97_is_audio(ac97_t * ac97) { diff --git a/include/sound/core.h b/include/sound/core.h index 26160adcdffc..6d971a4c4ca0 100644 --- a/include/sound/core.h +++ b/include/sound/core.h @@ -290,13 +290,13 @@ void snd_memory_init(void); void snd_memory_done(void); int snd_memory_info_init(void); int snd_memory_info_done(void); -void *snd_hidden_kmalloc(size_t size, unsigned int __nocast flags); -void *snd_hidden_kzalloc(size_t size, unsigned int __nocast flags); -void *snd_hidden_kcalloc(size_t n, size_t size, unsigned int __nocast flags); +void *snd_hidden_kmalloc(size_t size, gfp_t flags); +void *snd_hidden_kzalloc(size_t size, gfp_t flags); +void *snd_hidden_kcalloc(size_t n, size_t size, gfp_t flags); void snd_hidden_kfree(const void *obj); void *snd_hidden_vmalloc(unsigned long size); void snd_hidden_vfree(void *obj); -char *snd_hidden_kstrdup(const char *s, unsigned int __nocast flags); +char *snd_hidden_kstrdup(const char *s, gfp_t flags); #define kmalloc(size, flags) snd_hidden_kmalloc(size, flags) #define kzalloc(size, flags) snd_hidden_kzalloc(size, flags) #define kcalloc(n, size, flags) snd_hidden_kcalloc(n, size, flags) diff --git a/include/sound/driver.h b/include/sound/driver.h index 0d12456ec3ae..1ec2fae050a6 100644 --- a/include/sound/driver.h +++ b/include/sound/driver.h @@ -51,7 +51,7 @@ #ifdef CONFIG_SND_DEBUG_MEMORY #include #include -void *snd_wrapper_kmalloc(size_t, unsigned int __nocast); +void *snd_wrapper_kmalloc(size_t, gfp_t); #undef kmalloc void snd_wrapper_kfree(const void *); #undef kfree diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h index 67bf3f18e96a..14cb2718cb77 100644 --- a/include/sound/emu10k1.h +++ b/include/sound/emu10k1.h @@ -1059,7 +1059,7 @@ typedef struct { unsigned char spk71; /* Has 7.1 speakers */ unsigned char sblive51; /* SBLive! 
5.1 - extout 0x11 -> center, 0x12 -> lfe */ unsigned char spdif_bug; /* Has Spdif phasing bug */ - unsigned char ac97_chip; /* Has an AC97 chip */ + unsigned char ac97_chip; /* Has an AC97 chip: 1 = mandatory, 2 = optional */ unsigned char ecard; /* APS EEPROM */ const char *driver; const char *name; diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h index 3a2fd2cc9f19..83489c3abbaf 100644 --- a/include/sound/memalloc.h +++ b/include/sound/memalloc.h @@ -111,7 +111,7 @@ size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id); int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id); /* basic memory allocation functions */ -void *snd_malloc_pages(size_t size, unsigned int gfp_flags); +void *snd_malloc_pages(size_t size, gfp_t gfp_flags); void snd_free_pages(void *ptr, size_t size); #endif /* __SOUND_MEMALLOC_H */ diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 3a926011507b..a0f18c9cc89d 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -611,6 +611,7 @@ static struct file *do_create(struct dentry *dir, struct dentry *dentry, dentry->d_fsdata = &attr; } + mode &= ~current->fs->umask; ret = vfs_create(dir->d_inode, dentry, mode, NULL); dentry->d_fsdata = NULL; if (ret) diff --git a/kernel/audit.c b/kernel/audit.c index 83096b67510a..0c56320d38dc 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -133,7 +133,7 @@ struct audit_buffer { struct list_head list; struct sk_buff *skb; /* formatted skb ready to send */ struct audit_context *ctx; /* NULL or associated context */ - int gfp_mask; + gfp_t gfp_mask; }; static void audit_set_pid(struct audit_buffer *ab, pid_t pid) @@ -560,7 +560,7 @@ static void audit_buffer_free(struct audit_buffer *ab) } static struct audit_buffer * audit_buffer_alloc(struct audit_context *ctx, - unsigned int __nocast gfp_mask, int type) + gfp_t gfp_mask, int type) { unsigned long flags; struct audit_buffer *ab = NULL; @@ -647,7 +647,7 @@ static inline void audit_get_stamp(struct audit_context *ctx, * will be written at syscall exit. If there is no associated task, tsk * should be NULL. */ -struct audit_buffer *audit_log_start(struct audit_context *ctx, int gfp_mask, +struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type) { struct audit_buffer *ab = NULL; @@ -879,7 +879,7 @@ void audit_log_end(struct audit_buffer *ab) /* Log an audit record. This is a convenience function that calls * audit_log_start, audit_log_vformat, and audit_log_end. It may be * called in any context. */ -void audit_log(struct audit_context *ctx, int gfp_mask, int type, +void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, const char *fmt, ...) 
{ struct audit_buffer *ab; diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 88696f639aab..d8a68509e729 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -803,7 +803,7 @@ static void audit_log_task_info(struct audit_buffer *ab) up_read(&mm->mmap_sem); } -static void audit_log_exit(struct audit_context *context, unsigned int gfp_mask) +static void audit_log_exit(struct audit_context *context, gfp_t gfp_mask) { int i; struct audit_buffer *ab; diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 79866bc6b3a1..28176d083f7b 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -968,8 +968,6 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf, char *page; ssize_t retval = 0; char *s; - char *start; - size_t n; if (!(page = (char *)__get_free_page(GFP_KERNEL))) return -ENOMEM; @@ -999,14 +997,7 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf, *s++ = '\n'; *s = '\0'; - /* Do nothing if *ppos is at the eof or beyond the eof. */ - if (s - page <= *ppos) - return 0; - - start = page + *ppos; - n = s - start; - retval = n - copy_to_user(buf, start, min(n, nbytes)); - *ppos += retval; + retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page); out: free_page((unsigned long)page); return retval; @@ -1679,7 +1670,7 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs) * GFP_USER - only nodes in current tasks mems allowed ok. **/ -int cpuset_zone_allowed(struct zone *z, unsigned int __nocast gfp_mask) +int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) { int node; /* node that zone z is on */ const struct cpuset *cs; /* current cpuset ancestors */ diff --git a/kernel/exit.c b/kernel/exit.c index ee6d8b8abef5..3b25b182d2be 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -843,6 +843,7 @@ fastcall NORET_TYPE void do_exit(long code) group_dead = atomic_dec_and_test(&tsk->signal->live); if (group_dead) { del_timer_sync(&tsk->signal->real_timer); + exit_itimers(tsk->signal); acct_process(code); } exit_mm(tsk); @@ -1203,7 +1204,7 @@ static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap, exit_code = p->exit_code; if (unlikely(!exit_code) || - unlikely(p->state > TASK_STOPPED)) + unlikely(p->state & TASK_TRACED)) goto bail_ref; return wait_noreap_copyout(p, pid, uid, why, (exit_code << 8) | 0x7f, diff --git a/kernel/fork.c b/kernel/fork.c index 533ce27f4b2c..280bd44ac441 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -848,7 +848,7 @@ static inline void copy_flags(unsigned long clone_flags, struct task_struct *p) { unsigned long new_flags = p->flags; - new_flags &= ~PF_SUPERPRIV; + new_flags &= ~(PF_SUPERPRIV | PF_NOFREEZE); new_flags |= PF_FORKNOEXEC; if (!(clone_flags & CLONE_PTRACE)) p->ptrace = 0; diff --git a/kernel/kexec.c b/kernel/kexec.c index cdd4dcd8fb63..36c5d9cd4cc1 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -90,7 +90,7 @@ int kexec_should_crash(struct task_struct *p) static int kimage_is_destination_range(struct kimage *image, unsigned long start, unsigned long end); static struct page *kimage_alloc_page(struct kimage *image, - unsigned int gfp_mask, + gfp_t gfp_mask, unsigned long dest); static int do_kimage_alloc(struct kimage **rimage, unsigned long entry, @@ -326,8 +326,7 @@ static int kimage_is_destination_range(struct kimage *image, return 0; } -static struct page *kimage_alloc_pages(unsigned int gfp_mask, - unsigned int order) +static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order) { struct page *pages; @@ -654,7 +653,7 @@ static 
kimage_entry_t *kimage_dst_used(struct kimage *image, } static struct page *kimage_alloc_page(struct kimage *image, - unsigned int gfp_mask, + gfp_t gfp_mask, unsigned long destination) { /* diff --git a/kernel/kfifo.c b/kernel/kfifo.c index 179baafcdd96..64ab045c3d9d 100644 --- a/kernel/kfifo.c +++ b/kernel/kfifo.c @@ -36,7 +36,7 @@ * struct kfifo with kfree(). */ struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size, - unsigned int __nocast gfp_mask, spinlock_t *lock) + gfp_t gfp_mask, spinlock_t *lock) { struct kfifo *fifo; @@ -64,7 +64,7 @@ EXPORT_SYMBOL(kfifo_init); * * The size will be rounded-up to a power of 2. */ -struct kfifo *kfifo_alloc(unsigned int size, unsigned int __nocast gfp_mask, spinlock_t *lock) +struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock) { unsigned char *buffer; struct kfifo *ret; diff --git a/kernel/params.c b/kernel/params.c index fbf173215fd2..1a8614bac5d5 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -80,8 +80,6 @@ static char *next_arg(char *args, char **param, char **val) int in_quote = 0, quoted = 0; char *next; - /* Chew any extra spaces */ - while (*args == ' ') args++; if (*args == '"') { args++; in_quote = 1; @@ -121,6 +119,10 @@ static char *next_arg(char *args, char **param, char **val) next = args + i + 1; } else next = args + i; + + /* Chew up trailing spaces. */ + while (*next == ' ') + next++; return next; } @@ -135,6 +137,10 @@ int parse_args(const char *name, DEBUGP("Parsing ARGS: %s\n", args); + /* Chew leading spaces */ + while (*args == ' ') + args++; + while (*args) { int ret; diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index ad85d3f0dcc4..bf374fceb39c 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -91,7 +91,7 @@ static inline union cpu_time_count cpu_time_sub(clockid_t which_clock, * Update expiry time from increment, and increase overrun count, * given the current clock sample. */ -static inline void bump_cpu_timer(struct k_itimer *timer, +static void bump_cpu_timer(struct k_itimer *timer, union cpu_time_count now) { int i; @@ -110,7 +110,7 @@ static inline void bump_cpu_timer(struct k_itimer *timer, for (i = 0; incr < delta - incr; i++) incr = incr << 1; for (; i >= 0; incr >>= 1, i--) { - if (delta <= incr) + if (delta < incr) continue; timer->it.cpu.expires.sched += incr; timer->it_overrun += 1 << i; @@ -128,7 +128,7 @@ static inline void bump_cpu_timer(struct k_itimer *timer, for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++) incr = cputime_add(incr, incr); for (; i >= 0; incr = cputime_halve(incr), i--) { - if (cputime_le(delta, incr)) + if (cputime_lt(delta, incr)) continue; timer->it.cpu.expires.cpu = cputime_add(timer->it.cpu.expires.cpu, incr); @@ -380,14 +380,9 @@ int posix_cpu_timer_create(struct k_itimer *new_timer) int posix_cpu_timer_del(struct k_itimer *timer) { struct task_struct *p = timer->it.cpu.task; + int ret = 0; - if (timer->it.cpu.firing) - return TIMER_RETRY; - - if (unlikely(p == NULL)) - return 0; - - if (!list_empty(&timer->it.cpu.entry)) { + if (likely(p != NULL)) { read_lock(&tasklist_lock); if (unlikely(p->signal == NULL)) { /* @@ -396,18 +391,20 @@ int posix_cpu_timer_del(struct k_itimer *timer) */ BUG_ON(!list_empty(&timer->it.cpu.entry)); } else { - /* - * Take us off the task's timer list. 
- */ spin_lock(&p->sighand->siglock); - list_del(&timer->it.cpu.entry); + if (timer->it.cpu.firing) + ret = TIMER_RETRY; + else + list_del(&timer->it.cpu.entry); spin_unlock(&p->sighand->siglock); } read_unlock(&tasklist_lock); - } - put_task_struct(p); - return 0; + if (!ret) + put_task_struct(p); + } + + return ret; } /* @@ -424,7 +421,6 @@ static void cleanup_timers(struct list_head *head, cputime_t ptime = cputime_add(utime, stime); list_for_each_entry_safe(timer, next, head, entry) { - timer->task = NULL; list_del_init(&timer->entry); if (cputime_lt(timer->expires.cpu, ptime)) { timer->expires.cpu = cputime_zero; @@ -436,7 +432,6 @@ static void cleanup_timers(struct list_head *head, ++head; list_for_each_entry_safe(timer, next, head, entry) { - timer->task = NULL; list_del_init(&timer->entry); if (cputime_lt(timer->expires.cpu, utime)) { timer->expires.cpu = cputime_zero; @@ -448,7 +443,6 @@ static void cleanup_timers(struct list_head *head, ++head; list_for_each_entry_safe(timer, next, head, entry) { - timer->task = NULL; list_del_init(&timer->entry); if (timer->expires.sched < sched_time) { timer->expires.sched = 0; @@ -492,6 +486,9 @@ static void process_timer_rebalance(struct task_struct *p, struct task_struct *t = p; unsigned int nthreads = atomic_read(&p->signal->live); + if (!nthreads) + return; + switch (clock_idx) { default: BUG(); @@ -500,7 +497,7 @@ static void process_timer_rebalance(struct task_struct *p, left = cputime_div(cputime_sub(expires.cpu, val.cpu), nthreads); do { - if (!unlikely(t->exit_state)) { + if (!unlikely(t->flags & PF_EXITING)) { ticks = cputime_add(prof_ticks(t), left); if (cputime_eq(t->it_prof_expires, cputime_zero) || @@ -515,7 +512,7 @@ static void process_timer_rebalance(struct task_struct *p, left = cputime_div(cputime_sub(expires.cpu, val.cpu), nthreads); do { - if (!unlikely(t->exit_state)) { + if (!unlikely(t->flags & PF_EXITING)) { ticks = cputime_add(virt_ticks(t), left); if (cputime_eq(t->it_virt_expires, cputime_zero) || @@ -530,7 +527,7 @@ static void process_timer_rebalance(struct task_struct *p, nsleft = expires.sched - val.sched; do_div(nsleft, nthreads); do { - if (!unlikely(t->exit_state)) { + if (!unlikely(t->flags & PF_EXITING)) { ns = t->sched_time + nsleft; if (t->it_sched_expires == 0 || t->it_sched_expires > ns) { @@ -569,6 +566,9 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now) struct cpu_timer_list *next; unsigned long i; + if (CPUCLOCK_PERTHREAD(timer->it_clock) && (p->flags & PF_EXITING)) + return; + head = (CPUCLOCK_PERTHREAD(timer->it_clock) ? p->cpu_timers : p->signal->cpu_timers); head += CPUCLOCK_WHICH(timer->it_clock); @@ -579,17 +579,15 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now) listpos = head; if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) { list_for_each_entry(next, head, entry) { - if (next->expires.sched > nt->expires.sched) { - listpos = &next->entry; + if (next->expires.sched > nt->expires.sched) break; - } + listpos = &next->entry; } } else { list_for_each_entry(next, head, entry) { - if (cputime_gt(next->expires.cpu, nt->expires.cpu)) { - listpos = &next->entry; + if (cputime_gt(next->expires.cpu, nt->expires.cpu)) break; - } + listpos = &next->entry; } } list_add(&nt->entry, listpos); @@ -733,9 +731,15 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags, * Disarm any old timer after extracting its expiry time. 
*/ BUG_ON(!irqs_disabled()); + + ret = 0; spin_lock(&p->sighand->siglock); old_expires = timer->it.cpu.expires; - list_del_init(&timer->it.cpu.entry); + if (unlikely(timer->it.cpu.firing)) { + timer->it.cpu.firing = -1; + ret = TIMER_RETRY; + } else + list_del_init(&timer->it.cpu.entry); spin_unlock(&p->sighand->siglock); /* @@ -783,7 +787,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags, } } - if (unlikely(timer->it.cpu.firing)) { + if (unlikely(ret)) { /* * We are colliding with the timer actually firing. * Punt after filling in the timer's old value, and @@ -791,8 +795,6 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags, * it as an overrun (thanks to bump_cpu_timer above). */ read_unlock(&tasklist_lock); - timer->it.cpu.firing = -1; - ret = TIMER_RETRY; goto out; } @@ -958,14 +960,16 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp) static void check_thread_timers(struct task_struct *tsk, struct list_head *firing) { + int maxfire; struct list_head *timers = tsk->cpu_timers; + maxfire = 20; tsk->it_prof_expires = cputime_zero; while (!list_empty(timers)) { struct cpu_timer_list *t = list_entry(timers->next, struct cpu_timer_list, entry); - if (cputime_lt(prof_ticks(tsk), t->expires.cpu)) { + if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) { tsk->it_prof_expires = t->expires.cpu; break; } @@ -974,12 +978,13 @@ static void check_thread_timers(struct task_struct *tsk, } ++timers; + maxfire = 20; tsk->it_virt_expires = cputime_zero; while (!list_empty(timers)) { struct cpu_timer_list *t = list_entry(timers->next, struct cpu_timer_list, entry); - if (cputime_lt(virt_ticks(tsk), t->expires.cpu)) { + if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) { tsk->it_virt_expires = t->expires.cpu; break; } @@ -988,12 +993,13 @@ static void check_thread_timers(struct task_struct *tsk, } ++timers; + maxfire = 20; tsk->it_sched_expires = 0; while (!list_empty(timers)) { struct cpu_timer_list *t = list_entry(timers->next, struct cpu_timer_list, entry); - if (tsk->sched_time < t->expires.sched) { + if (!--maxfire || tsk->sched_time < t->expires.sched) { tsk->it_sched_expires = t->expires.sched; break; } @@ -1010,6 +1016,7 @@ static void check_thread_timers(struct task_struct *tsk, static void check_process_timers(struct task_struct *tsk, struct list_head *firing) { + int maxfire; struct signal_struct *const sig = tsk->signal; cputime_t utime, stime, ptime, virt_expires, prof_expires; unsigned long long sched_time, sched_expires; @@ -1042,12 +1049,13 @@ static void check_process_timers(struct task_struct *tsk, } while (t != tsk); ptime = cputime_add(utime, stime); + maxfire = 20; prof_expires = cputime_zero; while (!list_empty(timers)) { struct cpu_timer_list *t = list_entry(timers->next, struct cpu_timer_list, entry); - if (cputime_lt(ptime, t->expires.cpu)) { + if (!--maxfire || cputime_lt(ptime, t->expires.cpu)) { prof_expires = t->expires.cpu; break; } @@ -1056,12 +1064,13 @@ static void check_process_timers(struct task_struct *tsk, } ++timers; + maxfire = 20; virt_expires = cputime_zero; while (!list_empty(timers)) { struct cpu_timer_list *t = list_entry(timers->next, struct cpu_timer_list, entry); - if (cputime_lt(utime, t->expires.cpu)) { + if (!--maxfire || cputime_lt(utime, t->expires.cpu)) { virt_expires = t->expires.cpu; break; } @@ -1070,12 +1079,13 @@ static void check_process_timers(struct task_struct *tsk, } ++timers; + maxfire = 20; sched_expires = 0; while (!list_empty(timers)) { struct cpu_timer_list *t = 
list_entry(timers->next, struct cpu_timer_list, entry); - if (sched_time < t->expires.sched) { + if (!--maxfire || sched_time < t->expires.sched) { sched_expires = t->expires.sched; break; } @@ -1158,6 +1168,9 @@ static void check_process_timers(struct task_struct *tsk, unsigned long long sched_left, sched; const unsigned int nthreads = atomic_read(&sig->live); + if (!nthreads) + return; + prof_left = cputime_sub(prof_expires, utime); prof_left = cputime_sub(prof_left, stime); prof_left = cputime_div(prof_left, nthreads); @@ -1194,7 +1207,7 @@ static void check_process_timers(struct task_struct *tsk, do { t = next_thread(t); - } while (unlikely(t->exit_state)); + } while (unlikely(t->flags & PF_EXITING)); } while (t != tsk); } } diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index b7b532acd9fc..dda3cda73c77 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -1157,7 +1157,7 @@ retry_delete: } /* - * This is called by __exit_signal, only when there are no more + * This is called by do_exit or de_thread, only when there are no more * references to the shared signal_struct. */ void exit_itimers(struct signal_struct *sig) diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 396c7873e804..46a5e5acff97 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -29,7 +29,7 @@ config PM_DEBUG config SOFTWARE_SUSPEND bool "Software Suspend" - depends on PM && SWAP && (X86 || ((FVR || PPC32) && !SMP)) + depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FVR || PPC32) && !SMP) ---help--- Enable the possibility of suspending the machine. It doesn't need APM. diff --git a/kernel/power/disk.c b/kernel/power/disk.c index 2d8bf054d036..761956e813f5 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c @@ -17,12 +17,12 @@ #include #include #include +#include #include "power.h" extern suspend_disk_method_t pm_disk_mode; -extern struct pm_ops * pm_ops; extern int swsusp_suspend(void); extern int swsusp_write(void); @@ -49,13 +49,11 @@ dev_t swsusp_resume_device; static void power_down(suspend_disk_method_t mode) { - unsigned long flags; int error = 0; - local_irq_save(flags); switch(mode) { case PM_DISK_PLATFORM: - device_shutdown(); + kernel_power_off_prepare(); error = pm_ops->enter(PM_SUSPEND_DISK); break; case PM_DISK_SHUTDOWN: diff --git a/kernel/power/power.h b/kernel/power/power.h index cd6a3493cc0d..6748de23e83c 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -1,7 +1,7 @@ #include #include -/* With SUSPEND_CONSOLE defined, it suspend looks *really* cool, but +/* With SUSPEND_CONSOLE defined suspend looks *really* cool, but we probably do not take enough locks for switching consoles, etc, so bad things might happen. */ @@ -9,6 +9,9 @@ #define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1) #endif +#define MAX_PBES ((PAGE_SIZE - sizeof(struct new_utsname) \ + - 4 - 3*sizeof(unsigned long) - sizeof(int) \ + - sizeof(void *)) / sizeof(swp_entry_t)) struct swsusp_info { struct new_utsname uts; @@ -18,7 +21,7 @@ struct swsusp_info { unsigned long image_pages; unsigned long pagedir_pages; suspend_pagedir_t * suspend_pagedir; - swp_entry_t pagedir[768]; + swp_entry_t pagedir[MAX_PBES]; } __attribute__((aligned(PAGE_SIZE))); diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index d967e875ee82..10bc5ec496d7 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c @@ -363,7 +363,7 @@ static void lock_swapdevices(void) } /** - * write_swap_page - Write one page to a fresh swap location. 
+ * write_page - Write one page to a fresh swap location. * @addr: Address we're writing. * @loc: Place to store the entry we used. * @@ -402,15 +402,14 @@ static int write_page(unsigned long addr, swp_entry_t * loc) static void data_free(void) { swp_entry_t entry; - int i; + struct pbe * p; - for (i = 0; i < nr_copy_pages; i++) { - entry = (pagedir_nosave + i)->swap_address; + for_each_pbe(p, pagedir_nosave) { + entry = p->swap_address; if (entry.val) swap_free(entry); else break; - (pagedir_nosave + i)->swap_address = (swp_entry_t){0}; } } @@ -863,6 +862,9 @@ static int alloc_image_pages(void) return 0; } +/* Free pages we allocated for suspend. Suspend pages are alocated + * before atomic copy, so we need to free them after resume. + */ void swsusp_free(void) { BUG_ON(PageNosave(virt_to_page(pagedir_save))); @@ -918,6 +920,7 @@ static int swsusp_alloc(void) pagedir_nosave = NULL; nr_copy_pages = calc_nr(nr_copy_pages); + nr_copy_pages_check = nr_copy_pages; pr_debug("suspend: (pages needed: %d + %d free: %d)\n", nr_copy_pages, PAGES_FOR_IO, nr_free_pages()); @@ -928,6 +931,10 @@ static int swsusp_alloc(void) if (!enough_swap()) return -ENOSPC; + if (MAX_PBES < nr_copy_pages / PBES_PER_PAGE + + !!(nr_copy_pages % PBES_PER_PAGE)) + return -ENOSPC; + if (!(pagedir_save = alloc_pagedir(nr_copy_pages))) { printk(KERN_ERR "suspend: Allocating pagedir failed.\n"); return -ENOMEM; @@ -940,7 +947,6 @@ static int swsusp_alloc(void) return error; } - nr_copy_pages_check = nr_copy_pages; return 0; } @@ -1089,7 +1095,7 @@ static inline void eat_page(void *page) *eaten_memory = c; } -static unsigned long get_usable_page(unsigned gfp_mask) +unsigned long get_usable_page(gfp_t gfp_mask) { unsigned long m; @@ -1103,7 +1109,7 @@ static unsigned long get_usable_page(unsigned gfp_mask) return m; } -static void free_eaten_memory(void) +void free_eaten_memory(void) { unsigned long m; void **c; @@ -1213,8 +1219,9 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist) free_pagedir(pblist); free_eaten_memory(); pblist = NULL; - } - else + /* Is this even worth handling? It should never ever happen, and we + have just lost user's state, anyway... 
*/ + } else printk("swsusp: Relocated %d pages\n", rel); return pblist; @@ -1434,9 +1441,9 @@ static int read_pagedir(struct pbe *pblist) } if (error) - free_page((unsigned long)pblist); - - BUG_ON(i != swsusp_info.pagedir_pages); + free_pagedir(pblist); + else + BUG_ON(i != swsusp_info.pagedir_pages); return error; } @@ -1474,11 +1481,12 @@ static int read_suspend_image(void) /* Allocate memory for the image and read the data from swap */ error = check_pagedir(pagedir_nosave); - free_eaten_memory(); + if (!error) error = data_read(pagedir_nosave); if (error) { /* We fail cleanly */ + free_eaten_memory(); for_each_pbe (p, pagedir_nosave) if (p->address) { free_page(p->address); diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index bef3b6901b76..2559d4b8f23f 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -71,7 +71,7 @@ DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L }; /* Fake initialization required by compiler */ static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL}; -static int maxbatch = 10; +static int maxbatch = 10000; #ifndef __HAVE_ARCH_CMPXCHG /* @@ -109,6 +109,10 @@ void fastcall call_rcu(struct rcu_head *head, rdp = &__get_cpu_var(rcu_data); *rdp->nxttail = head; rdp->nxttail = &head->next; + + if (unlikely(++rdp->count > 10000)) + set_need_resched(); + local_irq_restore(flags); } @@ -140,6 +144,12 @@ void fastcall call_rcu_bh(struct rcu_head *head, rdp = &__get_cpu_var(rcu_bh_data); *rdp->nxttail = head; rdp->nxttail = &head->next; + rdp->count++; +/* + * Should we directly call rcu_do_batch() here ? + * if (unlikely(rdp->count > 10000)) + * rcu_do_batch(rdp); + */ local_irq_restore(flags); } @@ -157,6 +167,7 @@ static void rcu_do_batch(struct rcu_data *rdp) next = rdp->donelist = list->next; list->func(list); list = next; + rdp->count--; if (++count >= maxbatch) break; } diff --git a/kernel/sched.c b/kernel/sched.c index 1f31a528fdba..1e5cafdf4e27 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -3879,6 +3879,7 @@ EXPORT_SYMBOL(cpu_present_map); #ifndef CONFIG_SMP cpumask_t cpu_online_map = CPU_MASK_ALL; +EXPORT_SYMBOL_GPL(cpu_online_map); cpumask_t cpu_possible_map = CPU_MASK_ALL; #endif diff --git a/kernel/signal.c b/kernel/signal.c index b92c3c9f8b9a..f2b96b08fb44 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -262,7 +262,7 @@ next_signal(struct sigpending *pending, sigset_t *mask) return sig; } -static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __nocast flags, +static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, int override_rlimit) { struct sigqueue *q = NULL; @@ -397,20 +397,8 @@ void __exit_signal(struct task_struct *tsk) flush_sigqueue(&tsk->pending); if (sig) { /* - * We are cleaning up the signal_struct here. We delayed - * calling exit_itimers until after flush_sigqueue, just in - * case our thread-local pending queue contained a queued - * timer signal that would have been cleared in - * exit_itimers. When that called sigqueue_free, it would - * attempt to re-take the tasklist_lock and deadlock. This - * can never happen if we ensure that all queues the - * timer's signal might be queued on have been flushed - * first. The shared_pending queue, and our own pending - * queue are the only queues the timer could be on, since - * there are no other threads left in the group and timer - * signals are constrained to threads inside the group. + * We are cleaning up the signal_struct here. 
*/ - exit_itimers(sig); exit_thread_group_keys(sig); kmem_cache_free(signal_cachep, sig); } @@ -578,7 +566,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) * is to alert stop-signal processing code when another * processor has come along and cleared the flag. */ - tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; + if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) + tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; } if ( signr && ((info->si_code & __SI_MASK) == __SI_TIMER) && @@ -936,34 +925,31 @@ force_sig_specific(int sig, struct task_struct *t) * as soon as they're available, so putting the signal on the shared queue * will be equivalent to sending it to one such thread. */ -#define wants_signal(sig, p, mask) \ - (!sigismember(&(p)->blocked, sig) \ - && !((p)->state & mask) \ - && !((p)->flags & PF_EXITING) \ - && (task_curr(p) || !signal_pending(p))) - +static inline int wants_signal(int sig, struct task_struct *p) +{ + if (sigismember(&p->blocked, sig)) + return 0; + if (p->flags & PF_EXITING) + return 0; + if (sig == SIGKILL) + return 1; + if (p->state & (TASK_STOPPED | TASK_TRACED)) + return 0; + return task_curr(p) || !signal_pending(p); +} static void __group_complete_signal(int sig, struct task_struct *p) { - unsigned int mask; struct task_struct *t; - /* - * Don't bother traced and stopped tasks (but - * SIGKILL will punch through that). - */ - mask = TASK_STOPPED | TASK_TRACED; - if (sig == SIGKILL) - mask = 0; - /* * Now find a thread we can wake up to take the signal off the queue. * * If the main thread wants the signal, it gets first crack. * Probably the least surprising to the average bear. */ - if (wants_signal(sig, p, mask)) + if (wants_signal(sig, p)) t = p; else if (thread_group_empty(p)) /* @@ -981,7 +967,7 @@ __group_complete_signal(int sig, struct task_struct *p) t = p->signal->curr_target = p; BUG_ON(t->tgid != p->tgid); - while (!wants_signal(sig, t, mask)) { + while (!wants_signal(sig, t)) { t = next_thread(t); if (t == p->signal->curr_target) /* @@ -1195,6 +1181,40 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid) return error; } +/* like kill_proc_info(), but doesn't use uid/euid of "current" */ +int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid, + uid_t uid, uid_t euid) +{ + int ret = -EINVAL; + struct task_struct *p; + + if (!valid_signal(sig)) + return ret; + + read_lock(&tasklist_lock); + p = find_task_by_pid(pid); + if (!p) { + ret = -ESRCH; + goto out_unlock; + } + if ((!info || ((unsigned long)info != 1 && + (unsigned long)info != 2 && SI_FROMUSER(info))) + && (euid != p->suid) && (euid != p->uid) + && (uid != p->suid) && (uid != p->uid)) { + ret = -EPERM; + goto out_unlock; + } + if (sig && p->sighand) { + unsigned long flags; + spin_lock_irqsave(&p->sighand->siglock, flags); + ret = __group_send_sig_info(sig, info, p); + spin_unlock_irqrestore(&p->sighand->siglock, flags); + } +out_unlock: + read_unlock(&tasklist_lock); + return ret; +} +EXPORT_SYMBOL_GPL(kill_proc_info_as_uid); /* * kill_something_info() interprets pid in interesting ways just like kill(2). @@ -1766,7 +1786,8 @@ do_signal_stop(int signr) * stop is always done with the siglock held, * so this check has no races. 
*/ - if (t->state < TASK_STOPPED) { + if (!t->exit_state && + !(t->state & (TASK_STOPPED|TASK_TRACED))) { stop_count++; signal_wake_up(t, 0); } diff --git a/kernel/sys.c b/kernel/sys.c index f723522e6986..2fa1ed18123c 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -361,17 +361,35 @@ out_unlock: return retval; } +/** + * emergency_restart - reboot the system + * + * Without shutting down any hardware or taking any locks + * reboot the system. This is called when we know we are in + * trouble so this is our best effort to reboot. This is + * safe to call in interrupt context. + */ void emergency_restart(void) { machine_emergency_restart(); } EXPORT_SYMBOL_GPL(emergency_restart); -void kernel_restart(char *cmd) +/** + * kernel_restart - reboot the system + * + * Shutdown everything and perform a clean reboot. + * This is not safe to call in interrupt context. + */ +void kernel_restart_prepare(char *cmd) { notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd); system_state = SYSTEM_RESTART; device_shutdown(); +} +void kernel_restart(char *cmd) +{ + kernel_restart_prepare(cmd); if (!cmd) { printk(KERN_EMERG "Restarting system.\n"); } else { @@ -382,6 +400,12 @@ void kernel_restart(char *cmd) } EXPORT_SYMBOL_GPL(kernel_restart); +/** + * kernel_kexec - reboot the system + * + * Move into place and start executing a preloaded standalone + * executable. If nothing was preloaded return an error. + */ void kernel_kexec(void) { #ifdef CONFIG_KEXEC @@ -390,9 +414,7 @@ void kernel_kexec(void) if (!image) { return; } - notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL); - system_state = SYSTEM_RESTART; - device_shutdown(); + kernel_restart_prepare(NULL); printk(KERN_EMERG "Starting new kernel\n"); machine_shutdown(); machine_kexec(image); @@ -400,21 +422,39 @@ void kernel_kexec(void) } EXPORT_SYMBOL_GPL(kernel_kexec); -void kernel_halt(void) +/** + * kernel_halt - halt the system + * + * Shutdown everything and perform a clean system halt. + */ +void kernel_halt_prepare(void) { notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL); system_state = SYSTEM_HALT; device_shutdown(); +} +void kernel_halt(void) +{ + kernel_halt_prepare(); printk(KERN_EMERG "System halted.\n"); machine_halt(); } EXPORT_SYMBOL_GPL(kernel_halt); -void kernel_power_off(void) +/** + * kernel_power_off - power_off the system + * + * Shutdown everything and perform a clean system power_off. + */ +void kernel_power_off_prepare(void) { notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL); system_state = SYSTEM_POWER_OFF; device_shutdown(); +} +void kernel_power_off(void) +{ + kernel_power_off_prepare(); printk(KERN_EMERG "Power down.\n"); machine_power_off(); } diff --git a/kernel/time.c b/kernel/time.c index dd5ae1162a8f..40c2410ac99a 100644 --- a/kernel/time.c +++ b/kernel/time.c @@ -570,6 +570,7 @@ void getnstimeofday(struct timespec *tv) tv->tv_sec = x.tv_sec; tv->tv_nsec = x.tv_usec * NSEC_PER_USEC; } +EXPORT_SYMBOL_GPL(getnstimeofday); #endif #if (BITS_PER_LONG < 64) diff --git a/lib/.gitignore b/lib/.gitignore new file mode 100644 index 000000000000..3bef1ea94c99 --- /dev/null +++ b/lib/.gitignore @@ -0,0 +1,6 @@ +# +# Generated files +# +gen_crc32table +crc32table.h + diff --git a/lib/idr.c b/lib/idr.c index 6415d053e2bf..6414b2fb482d 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -72,7 +72,7 @@ static void free_layer(struct idr *idp, struct idr_layer *p) * If the system is REALLY out of memory this function returns 0, * otherwise 1. 
*/ -int idr_pre_get(struct idr *idp, unsigned gfp_mask) +int idr_pre_get(struct idr *idp, gfp_t gfp_mask) { while (idp->id_free_cnt < IDR_FREE_MAX) { struct idr_layer *new; @@ -345,6 +345,19 @@ void idr_remove(struct idr *idp, int id) } EXPORT_SYMBOL(idr_remove); +/** + * idr_destroy - release all cached layers within an idr tree + * idp: idr handle + */ +void idr_destroy(struct idr *idp) +{ + while (idp->id_free_cnt) { + struct idr_layer *p = alloc_layer(idp); + kmem_cache_free(idr_layer_cache, p); + } +} +EXPORT_SYMBOL(idr_destroy); + /** * idr_find - return pointer for given id * @idp: idr handle diff --git a/lib/kobject.c b/lib/kobject.c index dd0917dd9fa9..253d3004ace9 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -100,7 +100,7 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length) * @kobj: kobject in question, with which to build the path * @gfp_mask: the allocation type used to allocate the path */ -char *kobject_get_path(struct kobject *kobj, int gfp_mask) +char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask) { char *path; int len; diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 04ca4429ddfa..7ef6f6a17aa6 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -62,7 +62,7 @@ static struct sock *uevent_sock; * @gfp_mask: */ static int send_uevent(const char *signal, const char *obj, - char **envp, int gfp_mask) + char **envp, gfp_t gfp_mask) { struct sk_buff *skb; char *pos; @@ -98,7 +98,7 @@ static int send_uevent(const char *signal, const char *obj, } static int do_kobject_uevent(struct kobject *kobj, enum kobject_action action, - struct attribute *attr, int gfp_mask) + struct attribute *attr, gfp_t gfp_mask) { char *path; char *attrpath; diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 6a8bc6e06431..d1c057e71b68 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -110,7 +110,7 @@ radix_tree_node_free(struct radix_tree_node *node) * success, return zero, with preemption disabled. On error, return -ENOMEM * with preemption not disabled. */ -int radix_tree_preload(unsigned int __nocast gfp_mask) +int radix_tree_preload(gfp_t gfp_mask) { struct radix_tree_preload *rtp; struct radix_tree_node *node; diff --git a/lib/textsearch.c b/lib/textsearch.c index 1e934c196f0f..6f3093efbd7b 100644 --- a/lib/textsearch.c +++ b/lib/textsearch.c @@ -254,7 +254,7 @@ unsigned int textsearch_find_continuous(struct ts_config *conf, * parameters or a ERR_PTR(). 
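The conversions above (idr_pre_get, kobject_get_path, send_uevent, radix_tree_preload, textsearch_prepare) and most of the remaining hunks in this series change allocation-flag parameters from plain int/unsigned int, often annotated __nocast, to gfp_t. The point of a dedicated type is that sparse can then flag accidental mixing of gfp flags with ordinary integers, while gcc still sees a plain unsigned int. A minimal sketch of that __bitwise pattern with invented flag names; the real gfp_t definition lives in the gfp headers, not here.

/* Sketch of a sparse-checked flag type.  Under plain gcc the annotations
 * compile away; under sparse (__CHECKER__) passing a bare integer where a
 * flags_t is expected is reported. */
#ifdef __CHECKER__
#define __bitwise       __attribute__((bitwise))
#define __force         __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise flags_t;

#define FLAG_WAIT       ((__force flags_t)0x01u)
#define FLAG_IO         ((__force flags_t)0x02u)

static inline int can_sleep(flags_t flags)
{
        return (flags & FLAG_WAIT) != 0;
}

int main(void)
{
        return can_sleep(FLAG_WAIT | FLAG_IO) ? 0 : 1;
}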
*/ struct ts_config *textsearch_prepare(const char *algo, const void *pattern, - unsigned int len, int gfp_mask, int flags) + unsigned int len, gfp_t gfp_mask, int flags) { int err = -ENOENT; struct ts_config *conf; diff --git a/lib/ts_bm.c b/lib/ts_bm.c index 2cc79112ecc3..8a8b3a16133e 100644 --- a/lib/ts_bm.c +++ b/lib/ts_bm.c @@ -127,7 +127,7 @@ static void compute_prefix_tbl(struct ts_bm *bm, const u8 *pattern, } static struct ts_config *bm_init(const void *pattern, unsigned int len, - int gfp_mask) + gfp_t gfp_mask) { struct ts_config *conf; struct ts_bm *bm; diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c index d27c0a072940..ca3211206eef 100644 --- a/lib/ts_fsm.c +++ b/lib/ts_fsm.c @@ -258,7 +258,7 @@ found_match: } static struct ts_config *fsm_init(const void *pattern, unsigned int len, - int gfp_mask) + gfp_t gfp_mask) { int i, err = -EINVAL; struct ts_config *conf; diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c index 73266b975585..7fd45451b44a 100644 --- a/lib/ts_kmp.c +++ b/lib/ts_kmp.c @@ -87,7 +87,7 @@ static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len, } static struct ts_config *kmp_init(const void *pattern, unsigned int len, - int gfp_mask) + gfp_t gfp_mask) { struct ts_config *conf; struct ts_kmp *kmp; diff --git a/mm/bootmem.c b/mm/bootmem.c index 8ec4e4c2a179..a58699b6579e 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -61,17 +61,9 @@ static unsigned long __init init_bootmem_core (pg_data_t *pgdat, { bootmem_data_t *bdata = pgdat->bdata; unsigned long mapsize = ((end - start)+7)/8; - static struct pglist_data *pgdat_last; - pgdat->pgdat_next = NULL; - /* Add new nodes last so that bootmem always starts - searching in the first nodes, not the last ones */ - if (pgdat_last) - pgdat_last->pgdat_next = pgdat; - else { - pgdat_list = pgdat; - pgdat_last = pgdat; - } + pgdat->pgdat_next = pgdat_list; + pgdat_list = pgdat; mapsize = ALIGN(mapsize, sizeof(long)); bdata->node_bootmem_map = phys_to_virt(mapstart << PAGE_SHIFT); @@ -162,10 +154,10 @@ static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr, */ static void * __init __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size, - unsigned long align, unsigned long goal) + unsigned long align, unsigned long goal, unsigned long limit) { unsigned long offset, remaining_size, areasize, preferred; - unsigned long i, start = 0, incr, eidx; + unsigned long i, start = 0, incr, eidx, end_pfn = bdata->node_low_pfn; void *ret; if(!size) { @@ -174,7 +166,14 @@ __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size, } BUG_ON(align & (align-1)); - eidx = bdata->node_low_pfn - (bdata->node_boot_start >> PAGE_SHIFT); + if (limit && bdata->node_boot_start >= limit) + return NULL; + + limit >>=PAGE_SHIFT; + if (limit && end_pfn > limit) + end_pfn = limit; + + eidx = end_pfn - (bdata->node_boot_start >> PAGE_SHIFT); offset = 0; if (align && (bdata->node_boot_start & (align - 1UL)) != 0) @@ -186,11 +185,12 @@ __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size, * first, then we try to allocate lower pages. 
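The mm/bootmem.c changes above thread a new limit argument into __alloc_bootmem_core(), which returns NULL outright if the node starts above the limit and otherwise converts the byte limit to a page frame number to cap the end of the scan range. A small standalone sketch of that clamping arithmetic, with made-up values:

/* Illustrative arithmetic only: convert a byte limit to a PFN and clamp
 * the end of the search window. */
#include <stdio.h>

#define PAGE_SHIFT      12

int main(void)
{
        unsigned long node_low_pfn = 0x40000;   /* node ends at 1 GiB */
        unsigned long limit = 16UL << 20;       /* caller wants memory below 16 MiB */
        unsigned long end_pfn = node_low_pfn;

        if (limit) {
                unsigned long limit_pfn = limit >> PAGE_SHIFT;

                if (end_pfn > limit_pfn)
                        end_pfn = limit_pfn;
        }
        printf("scan ends at pfn %#lx\n", end_pfn);     /* 0x1000 */
        return 0;
}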
*/ if (goal && (goal >= bdata->node_boot_start) && - ((goal >> PAGE_SHIFT) < bdata->node_low_pfn)) { + ((goal >> PAGE_SHIFT) < end_pfn)) { preferred = goal - bdata->node_boot_start; if (bdata->last_success >= preferred) - preferred = bdata->last_success; + if (!limit || (limit && limit > bdata->last_success)) + preferred = bdata->last_success; } else preferred = 0; @@ -390,14 +390,15 @@ unsigned long __init free_all_bootmem (void) return(free_all_bootmem_core(NODE_DATA(0))); } -void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal) +void * __init __alloc_bootmem_limit (unsigned long size, unsigned long align, unsigned long goal, + unsigned long limit) { pg_data_t *pgdat = pgdat_list; void *ptr; for_each_pgdat(pgdat) if ((ptr = __alloc_bootmem_core(pgdat->bdata, size, - align, goal))) + align, goal, limit))) return(ptr); /* @@ -408,14 +409,16 @@ void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned return NULL; } -void * __init __alloc_bootmem_node (pg_data_t *pgdat, unsigned long size, unsigned long align, unsigned long goal) + +void * __init __alloc_bootmem_node_limit (pg_data_t *pgdat, unsigned long size, unsigned long align, + unsigned long goal, unsigned long limit) { void *ptr; - ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal); + ptr = __alloc_bootmem_core(pgdat->bdata, size, align, goal, limit); if (ptr) return (ptr); - return __alloc_bootmem(size, align, goal); + return __alloc_bootmem_limit(size, align, goal, limit); } diff --git a/mm/filemap.c b/mm/filemap.c index b5346576e58d..1c31b2fd2ca5 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -377,7 +377,7 @@ int filemap_write_and_wait_range(struct address_space *mapping, * This function does not add the page to the LRU. The caller must do that. */ int add_to_page_cache(struct page *page, struct address_space *mapping, - pgoff_t offset, int gfp_mask) + pgoff_t offset, gfp_t gfp_mask) { int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); @@ -401,7 +401,7 @@ int add_to_page_cache(struct page *page, struct address_space *mapping, EXPORT_SYMBOL(add_to_page_cache); int add_to_page_cache_lru(struct page *page, struct address_space *mapping, - pgoff_t offset, int gfp_mask) + pgoff_t offset, gfp_t gfp_mask) { int ret = add_to_page_cache(page, mapping, offset, gfp_mask); if (ret == 0) @@ -591,7 +591,7 @@ EXPORT_SYMBOL(find_lock_page); * memory exhaustion. 
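In the hunk above, __alloc_bootmem_node_limit() first tries the caller's own node and only falls back to __alloc_bootmem_limit(), which walks every node, if that fails. A userspace sketch of the same preferred-node-then-anywhere pattern; the allocator and the node numbering are stand-ins, not kernel calls.

/* Illustration only: try the preferred node first, then scan all nodes. */
#include <stdio.h>
#include <stdlib.h>

#define NR_NODES        4

static void *alloc_on_node(int node, size_t size)
{
        return node == 2 ? malloc(size) : NULL; /* pretend only node 2 has room */
}

static void *alloc_node_then_any(int preferred, size_t size)
{
        void *p = alloc_on_node(preferred, size);
        int node;

        if (p)
                return p;
        for (node = 0; node < NR_NODES; node++)
                if ((p = alloc_on_node(node, size)) != NULL)
                        return p;
        return NULL;
}

int main(void)
{
        void *p = alloc_node_then_any(0, 64);

        printf("%s\n", p ? "satisfied from a fallback node" : "allocation failed");
        free(p);
        return 0;
}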
*/ struct page *find_or_create_page(struct address_space *mapping, - unsigned long index, unsigned int gfp_mask) + unsigned long index, gfp_t gfp_mask) { struct page *page, *cached_page = NULL; int err; @@ -683,7 +683,7 @@ struct page * grab_cache_page_nowait(struct address_space *mapping, unsigned long index) { struct page *page = find_get_page(mapping, index); - unsigned int gfp_mask; + gfp_t gfp_mask; if (page) { if (!TestSetPageLocked(page)) diff --git a/mm/fremap.c b/mm/fremap.c index 3235fb77c133..ab23a0673c35 100644 --- a/mm/fremap.c +++ b/mm/fremap.c @@ -89,6 +89,9 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma, size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; if (!page->mapping || page->index >= size) goto err_unlock; + err = -ENOMEM; + if (page_mapcount(page) > INT_MAX/2) + goto err_unlock; zap_pte(mm, vma, addr, pte); diff --git a/mm/highmem.c b/mm/highmem.c index 400911599468..ce2e7e8bbfa7 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -30,11 +30,9 @@ static mempool_t *page_pool, *isa_page_pool; -static void *page_pool_alloc(unsigned int __nocast gfp_mask, void *data) +static void *page_pool_alloc_isa(gfp_t gfp_mask, void *data) { - unsigned int gfp = gfp_mask | (unsigned int) (long) data; - - return alloc_page(gfp); + return alloc_page(gfp_mask | GFP_DMA); } static void page_pool_free(void *page, void *data) @@ -51,6 +49,12 @@ static void page_pool_free(void *page, void *data) * n means that there are (n-1) current users of it. */ #ifdef CONFIG_HIGHMEM + +static void *page_pool_alloc(gfp_t gfp_mask, void *data) +{ + return alloc_page(gfp_mask); +} + static int pkmap_count[LAST_PKMAP]; static unsigned int last_pkmap_nr; static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock); @@ -267,7 +271,7 @@ int init_emergency_isa_pool(void) if (isa_page_pool) return 0; - isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc, page_pool_free, (void *) __GFP_DMA); + isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc_isa, page_pool_free, NULL); if (!isa_page_pool) BUG(); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 901ac523a1c3..61d380678030 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -274,21 +274,22 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, { pte_t *src_pte, *dst_pte, entry; struct page *ptepage; - unsigned long addr = vma->vm_start; - unsigned long end = vma->vm_end; + unsigned long addr; - while (addr < end) { + for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) { dst_pte = huge_pte_alloc(dst, addr); if (!dst_pte) goto nomem; + spin_lock(&src->page_table_lock); src_pte = huge_pte_offset(src, addr); - BUG_ON(!src_pte || pte_none(*src_pte)); /* prefaulted */ - entry = *src_pte; - ptepage = pte_page(entry); - get_page(ptepage); - add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE); - set_huge_pte_at(dst, addr, dst_pte, entry); - addr += HPAGE_SIZE; + if (src_pte && !pte_none(*src_pte)) { + entry = *src_pte; + ptepage = pte_page(entry); + get_page(ptepage); + add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE); + set_huge_pte_at(dst, addr, dst_pte, entry); + } + spin_unlock(&src->page_table_lock); } return 0; @@ -323,8 +324,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, page = pte_page(pte); put_page(page); + add_mm_counter(mm, rss, - (HPAGE_SIZE / PAGE_SIZE)); } - add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT)); flush_tlb_range(vma, start, end); } @@ -393,6 +394,28 @@ out: return ret; } +/* + * On ia64 at least, it is possible to 
receive a hugetlb fault from a + * stale zero entry left in the TLB from earlier hardware prefetching. + * Low-level arch code should already have flushed the stale entry as + * part of its fault handling, but we do need to accept this minor fault + * and return successfully. Whereas the "normal" case is that this is + * an access to a hugetlb page which has been truncated off since mmap. + */ +int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, int write_access) +{ + int ret = VM_FAULT_SIGBUS; + pte_t *pte; + + spin_lock(&mm->page_table_lock); + pte = huge_pte_offset(mm, address); + if (pte && !pte_none(*pte)) + ret = VM_FAULT_MINOR; + spin_unlock(&mm->page_table_lock); + return ret; +} + int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, struct page **pages, struct vm_area_struct **vmas, unsigned long *position, int *length, int i) @@ -403,6 +426,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, BUG_ON(!is_vm_hugetlb_page(vma)); vpfn = vaddr/PAGE_SIZE; + spin_lock(&mm->page_table_lock); while (vaddr < vma->vm_end && remainder) { if (pages) { @@ -415,8 +439,13 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, * indexing below to work. */ pte = huge_pte_offset(mm, vaddr & HPAGE_MASK); - /* hugetlb should be locked, and hence, prefaulted */ - WARN_ON(!pte || pte_none(*pte)); + /* the hugetlb file might have been truncated */ + if (!pte || pte_none(*pte)) { + remainder = 0; + if (!i) + i = -EFAULT; + break; + } page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)]; @@ -434,7 +463,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, --remainder; ++i; } - + spin_unlock(&mm->page_table_lock); *length = remainder; *position = vaddr; diff --git a/mm/madvise.c b/mm/madvise.c index 4454936f87d1..20e075d1c64c 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -83,6 +83,9 @@ static long madvise_willneed(struct vm_area_struct * vma, { struct file *file = vma->vm_file; + if (!file) + return -EBADF; + if (file->f_mapping->a_ops->get_xip_page) { /* no bad return value, but ignore advice */ return 0; @@ -141,11 +144,7 @@ static long madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, int behavior) { - struct file *filp = vma->vm_file; - long error = -EBADF; - - if (!filp) - goto out; + long error; switch (behavior) { case MADV_NORMAL: @@ -166,8 +165,6 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, error = -EINVAL; break; } - -out: return error; } diff --git a/mm/memory.c b/mm/memory.c index ae8161f1f459..1db40e935e55 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2045,8 +2045,8 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma, inc_page_state(pgfault); - if (is_vm_hugetlb_page(vma)) - return VM_FAULT_SIGBUS; /* mapping truncation does this. 
*/ + if (unlikely(is_vm_hugetlb_page(vma))) + return hugetlb_fault(mm, vma, address, write_access); /* * We need the page table lock to synchronize with kswapd diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 9033f0859aa8..1d5c64df1653 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -687,7 +687,7 @@ get_vma_policy(struct task_struct *task, struct vm_area_struct *vma, unsigned lo } /* Return a zonelist representing a mempolicy */ -static struct zonelist *zonelist_policy(unsigned int __nocast gfp, struct mempolicy *policy) +static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy) { int nd; @@ -700,7 +700,7 @@ static struct zonelist *zonelist_policy(unsigned int __nocast gfp, struct mempol case MPOL_BIND: /* Lower zones don't get a policy applied */ /* Careful: current->mems_allowed might have moved */ - if ((gfp & GFP_ZONEMASK) >= policy_zone) + if (gfp_zone(gfp) >= policy_zone) if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist)) return policy->v.zonelist; /*FALL THROUGH*/ @@ -712,7 +712,7 @@ static struct zonelist *zonelist_policy(unsigned int __nocast gfp, struct mempol nd = 0; BUG(); } - return NODE_DATA(nd)->node_zonelists + (gfp & GFP_ZONEMASK); + return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp); } /* Do dynamic interleaving for a process */ @@ -751,13 +751,13 @@ static unsigned offset_il_node(struct mempolicy *pol, /* Allocate a page in interleaved policy. Own path because it needs to do special accounting. */ -static struct page *alloc_page_interleave(unsigned int __nocast gfp, unsigned order, unsigned nid) +static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, unsigned nid) { struct zonelist *zl; struct page *page; BUG_ON(!node_online(nid)); - zl = NODE_DATA(nid)->node_zonelists + (gfp & GFP_ZONEMASK); + zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp); page = __alloc_pages(gfp, order, zl); if (page && page_zone(page) == zl->zones[0]) { zone_pcp(zl->zones[0],get_cpu())->interleave_hit++; @@ -789,7 +789,7 @@ static struct page *alloc_page_interleave(unsigned int __nocast gfp, unsigned or * Should be called with the mm_sem of the vma hold. */ struct page * -alloc_page_vma(unsigned int __nocast gfp, struct vm_area_struct *vma, unsigned long addr) +alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr) { struct mempolicy *pol = get_vma_policy(current, vma, addr); @@ -832,7 +832,7 @@ alloc_page_vma(unsigned int __nocast gfp, struct vm_area_struct *vma, unsigned l * 1) it's ok to take cpuset_sem (can WAIT), and * 2) allocating for current task (not interrupt). */ -struct page *alloc_pages_current(unsigned int __nocast gfp, unsigned order) +struct page *alloc_pages_current(gfp_t gfp, unsigned order) { struct mempolicy *pol = current->mempolicy; diff --git a/mm/mempool.c b/mm/mempool.c index 65f2957b8d51..1a99b80480d3 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -112,7 +112,7 @@ EXPORT_SYMBOL(mempool_create_node); * while this function is running. mempool_alloc() & mempool_free() * might be called (eg. from IRQ contexts) while this function executes. */ -int mempool_resize(mempool_t *pool, int new_min_nr, unsigned int __nocast gfp_mask) +int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask) { void *element; void **new_elements; @@ -200,12 +200,12 @@ EXPORT_SYMBOL(mempool_destroy); * *never* fails when called from process contexts. (it might * fail if called from an IRQ context.) 
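The mm/mempolicy.c hunks above replace open-coded "gfp & GFP_ZONEMASK" arithmetic with the gfp_zone() helper, and a later page_alloc.c hunk adds the similar highest_zone() helper for zonelist construction, so the flag-to-zone mapping is written in one place instead of at every call site. A sketch of that idea with invented flag bits and zone numbering (not the kernel's):

/* Illustration only: centralise the flags-to-zone decision in one helper
 * instead of masking bits at each call site. */
#include <stdio.h>

enum zone { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM };

#define F_DMA           0x01u
#define F_HIGHMEM       0x02u

static enum zone pick_zone(unsigned int flags)
{
        enum zone z = ZONE_NORMAL;

        if (flags & F_HIGHMEM)
                z = ZONE_HIGHMEM;
        if (flags & F_DMA)      /* DMA is the most constrained, so it wins */
                z = ZONE_DMA;
        return z;
}

int main(void)
{
        printf("%d %d %d\n", pick_zone(0), pick_zone(F_HIGHMEM),
               pick_zone(F_DMA | F_HIGHMEM));   /* 1 2 0 */
        return 0;
}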
*/ -void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask) +void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask) { void *element; unsigned long flags; wait_queue_t wait; - unsigned int gfp_temp; + gfp_t gfp_temp; might_sleep_if(gfp_mask & __GFP_WAIT); @@ -276,7 +276,7 @@ EXPORT_SYMBOL(mempool_free); /* * A commonly used alloc and free fn. */ -void *mempool_alloc_slab(unsigned int __nocast gfp_mask, void *pool_data) +void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data) { kmem_cache_t *mem = (kmem_cache_t *) pool_data; return kmem_cache_alloc(mem, gfp_mask); diff --git a/mm/mremap.c b/mm/mremap.c index a32fed454bd7..f343fc73a8bd 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -141,10 +141,10 @@ move_one_page(struct vm_area_struct *vma, unsigned long old_addr, if (dst) { pte_t pte; pte = ptep_clear_flush(vma, old_addr, src); + /* ZERO_PAGE can be dependant on virtual addr */ - if (pfn_valid(pte_pfn(pte)) && - pte_page(pte) == ZERO_PAGE(old_addr)) - pte = pte_wrprotect(mk_pte(ZERO_PAGE(new_addr), new_vma->vm_page_prot)); + pte = move_pte(pte, new_vma->vm_page_prot, + old_addr, new_addr); set_pte_at(mm, new_addr, dst, pte); } else error = -ENOMEM; diff --git a/mm/nommu.c b/mm/nommu.c index 064d70442895..0ef241ae3763 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -157,8 +157,7 @@ void vfree(void *addr) kfree(addr); } -void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, - pgprot_t prot) +void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) { /* * kmalloc doesn't like __GFP_HIGHMEM for some reason diff --git a/mm/oom_kill.c b/mm/oom_kill.c index ac3bf33e5370..d348b9035955 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -263,7 +263,7 @@ static struct mm_struct *oom_kill_process(struct task_struct *p) * OR try to be smart about which process to kill. Note that we * don't have to be perfect here, we just have to be good. */ -void out_of_memory(unsigned int __nocast gfp_mask, int order) +void out_of_memory(gfp_t gfp_mask, int order) { struct mm_struct *mm = NULL; task_t * p; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ae2903339e71..94c864eac9c4 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -671,7 +671,7 @@ void fastcall free_cold_page(struct page *page) free_hot_cold_page(page, 1); } -static inline void prep_zero_page(struct page *page, int order, unsigned int __nocast gfp_flags) +static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) { int i; @@ -686,7 +686,7 @@ static inline void prep_zero_page(struct page *page, int order, unsigned int __n * or two. */ static struct page * -buffered_rmqueue(struct zone *zone, int order, unsigned int __nocast gfp_flags) +buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags) { unsigned long flags; struct page *page = NULL; @@ -734,7 +734,7 @@ buffered_rmqueue(struct zone *zone, int order, unsigned int __nocast gfp_flags) * of the allocation. 
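zone_watermark_ok(), whose kerneldoc ends just above and whose signature is updated just below, decides whether a zone may satisfy a 2^order allocation without dropping under its watermark. A deliberately simplified userspace sketch of that style of check; the real function also lowers the threshold for high-priority and "can_try_harder" callers, adds the lowmem reserve, and re-checks each lower order.

/* Simplified illustration: would taking 2^order pages leave more than
 * 'min' pages free? */
#include <stdio.h>

static int watermark_ok(long free_pages, unsigned int order, long min)
{
        long free_after = free_pages - (1L << order) + 1;

        return free_after > min;
}

int main(void)
{
        printf("%d\n", watermark_ok(1000, 4, 900));     /* 1: 985 > 900 */
        printf("%d\n", watermark_ok(1000, 8, 900));     /* 0: 745 <= 900 */
        return 0;
}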
*/ int zone_watermark_ok(struct zone *z, int order, unsigned long mark, - int classzone_idx, int can_try_harder, int gfp_high) + int classzone_idx, int can_try_harder, gfp_t gfp_high) { /* free_pages my go negative - that's OK */ long min = mark, free_pages = z->free_pages - (1 << order) + 1; @@ -761,7 +761,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark, } static inline int -should_reclaim_zone(struct zone *z, unsigned int gfp_mask) +should_reclaim_zone(struct zone *z, gfp_t gfp_mask) { if (!z->reclaim_pages) return 0; @@ -774,10 +774,10 @@ should_reclaim_zone(struct zone *z, unsigned int gfp_mask) * This is the 'heart' of the zoned buddy allocator. */ struct page * fastcall -__alloc_pages(unsigned int __nocast gfp_mask, unsigned int order, +__alloc_pages(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist) { - const int wait = gfp_mask & __GFP_WAIT; + const gfp_t wait = gfp_mask & __GFP_WAIT; struct zone **zones, *z; struct page *page; struct reclaim_state reclaim_state; @@ -977,7 +977,7 @@ EXPORT_SYMBOL(__alloc_pages); /* * Common helper functions. */ -fastcall unsigned long __get_free_pages(unsigned int __nocast gfp_mask, unsigned int order) +fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) { struct page * page; page = alloc_pages(gfp_mask, order); @@ -988,7 +988,7 @@ fastcall unsigned long __get_free_pages(unsigned int __nocast gfp_mask, unsigned EXPORT_SYMBOL(__get_free_pages); -fastcall unsigned long get_zeroed_page(unsigned int __nocast gfp_mask) +fastcall unsigned long get_zeroed_page(gfp_t gfp_mask) { struct page * page; @@ -996,7 +996,7 @@ fastcall unsigned long get_zeroed_page(unsigned int __nocast gfp_mask) * get_zeroed_page() returns a 32-bit address, which cannot represent * a highmem page */ - BUG_ON(gfp_mask & __GFP_HIGHMEM); + BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); page = alloc_pages(gfp_mask | __GFP_ZERO, 0); if (page) @@ -1089,7 +1089,7 @@ static unsigned int nr_free_zone_pages(int offset) */ unsigned int nr_free_buffer_pages(void) { - return nr_free_zone_pages(GFP_USER & GFP_ZONEMASK); + return nr_free_zone_pages(gfp_zone(GFP_USER)); } /* @@ -1097,7 +1097,7 @@ unsigned int nr_free_buffer_pages(void) */ unsigned int nr_free_pagecache_pages(void) { - return nr_free_zone_pages(GFP_HIGHUSER & GFP_ZONEMASK); + return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER)); } #ifdef CONFIG_HIGHMEM @@ -1428,6 +1428,16 @@ static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zoneli return j; } +static inline int highest_zone(int zone_bits) +{ + int res = ZONE_NORMAL; + if (zone_bits & (__force int)__GFP_HIGHMEM) + res = ZONE_HIGHMEM; + if (zone_bits & (__force int)__GFP_DMA) + res = ZONE_DMA; + return res; +} + #ifdef CONFIG_NUMA #define MAX_NODE_LOAD (num_online_nodes()) static int __initdata node_load[MAX_NUMNODES]; @@ -1524,11 +1534,7 @@ static void __init build_zonelists(pg_data_t *pgdat) zonelist = pgdat->node_zonelists + i; for (j = 0; zonelist->zones[j] != NULL; j++); - k = ZONE_NORMAL; - if (i & __GFP_HIGHMEM) - k = ZONE_HIGHMEM; - if (i & __GFP_DMA) - k = ZONE_DMA; + k = highest_zone(i); j = build_zonelists_node(NODE_DATA(node), zonelist, j, k); zonelist->zones[j] = NULL; @@ -1549,12 +1555,7 @@ static void __init build_zonelists(pg_data_t *pgdat) zonelist = pgdat->node_zonelists + i; j = 0; - k = ZONE_NORMAL; - if (i & __GFP_HIGHMEM) - k = ZONE_HIGHMEM; - if (i & __GFP_DMA) - k = ZONE_DMA; - + k = highest_zone(i); j = build_zonelists_node(pgdat, zonelist, j, k); /* * Now we build the zonelist so 
that it contains the zones @@ -1750,6 +1751,8 @@ inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) { struct per_cpu_pages *pcp; + memset(p, 0, sizeof(*p)); + pcp = &p->pcp[0]; /* hot */ pcp->count = 0; pcp->low = 2 * batch; diff --git a/mm/page_io.c b/mm/page_io.c index 2e605a19ce57..330e00d6db00 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -19,7 +19,7 @@ #include #include -static struct bio *get_swap_bio(unsigned int __nocast gfp_flags, pgoff_t index, +static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index, struct page *page, bio_end_io_t end_io) { struct bio *bio; diff --git a/mm/shmem.c b/mm/shmem.c index 1f7aeb210c7b..55e04a0734c1 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -85,7 +85,7 @@ enum sgp_type { static int shmem_getpage(struct inode *inode, unsigned long idx, struct page **pagep, enum sgp_type sgp, int *type); -static inline struct page *shmem_dir_alloc(unsigned int gfp_mask) +static inline struct page *shmem_dir_alloc(gfp_t gfp_mask) { /* * The above definition of ENTRIES_PER_PAGE, and the use of @@ -898,7 +898,7 @@ struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry, } static struct page * -shmem_alloc_page(unsigned long gfp, struct shmem_inode_info *info, +shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, unsigned long idx) { struct vm_area_struct pvma; @@ -921,8 +921,7 @@ shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx) } static inline struct page * -shmem_alloc_page(unsigned int __nocast gfp,struct shmem_inode_info *info, - unsigned long idx) +shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx) { return alloc_page(gfp | __GFP_ZERO); } diff --git a/mm/slab.c b/mm/slab.c index 437d3388054b..d30423f167a2 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -308,12 +308,12 @@ struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS]; #define SIZE_L3 (1 + MAX_NUMNODES) /* - * This function may be completely optimized away if + * This function must be completely optimized away if * a constant is passed to it. Mostly the same as * what is in linux/slab.h except it returns an * index. */ -static inline int index_of(const size_t size) +static __always_inline int index_of(const size_t size) { if (__builtin_constant_p(size)) { int i = 0; @@ -329,7 +329,8 @@ static inline int index_of(const size_t size) extern void __bad_size(void); __bad_size(); } - } + } else + BUG(); return 0; } @@ -385,7 +386,7 @@ struct kmem_cache_s { unsigned int gfporder; /* force GFP flags, e.g. 
GFP_DMA */ - unsigned int gfpflags; + gfp_t gfpflags; size_t colour; /* cache colouring range */ unsigned int colour_off; /* colour offset */ @@ -639,7 +640,7 @@ static enum { static DEFINE_PER_CPU(struct work_struct, reap_work); -static void free_block(kmem_cache_t* cachep, void** objpp, int len); +static void free_block(kmem_cache_t* cachep, void** objpp, int len, int node); static void enable_cpucache (kmem_cache_t *cachep); static void cache_reap (void *unused); static int __node_shrink(kmem_cache_t *cachep, int node); @@ -649,8 +650,7 @@ static inline struct array_cache *ac_data(kmem_cache_t *cachep) return cachep->array[smp_processor_id()]; } -static inline kmem_cache_t *__find_general_cachep(size_t size, - unsigned int __nocast gfpflags) +static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags) { struct cache_sizes *csizep = malloc_sizes; @@ -674,8 +674,7 @@ static inline kmem_cache_t *__find_general_cachep(size_t size, return csizep->cs_cachep; } -kmem_cache_t *kmem_find_general_cachep(size_t size, - unsigned int __nocast gfpflags) +kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags) { return __find_general_cachep(size, gfpflags); } @@ -804,7 +803,7 @@ static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache if (ac->avail) { spin_lock(&rl3->list_lock); - free_block(cachep, ac->entry, ac->avail); + free_block(cachep, ac->entry, ac->avail, node); ac->avail = 0; spin_unlock(&rl3->list_lock); } @@ -925,7 +924,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb, /* Free limit for this kmem_list3 */ l3->free_limit -= cachep->batchcount; if (nc) - free_block(cachep, nc->entry, nc->avail); + free_block(cachep, nc->entry, nc->avail, node); if (!cpus_empty(mask)) { spin_unlock(&l3->list_lock); @@ -934,7 +933,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb, if (l3->shared) { free_block(cachep, l3->shared->entry, - l3->shared->avail); + l3->shared->avail, node); kfree(l3->shared); l3->shared = NULL; } @@ -1184,7 +1183,7 @@ __initcall(cpucache_init); * did not request dmaable memory, we might get it, but that * would be relatively rare and ignorable. */ -static void *kmem_getpages(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid) +static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid) { struct page *page; void *addr; @@ -1882,12 +1881,13 @@ static void do_drain(void *arg) { kmem_cache_t *cachep = (kmem_cache_t*)arg; struct array_cache *ac; + int node = numa_node_id(); check_irq_off(); ac = ac_data(cachep); - spin_lock(&cachep->nodelists[numa_node_id()]->list_lock); - free_block(cachep, ac->entry, ac->avail); - spin_unlock(&cachep->nodelists[numa_node_id()]->list_lock); + spin_lock(&cachep->nodelists[node]->list_lock); + free_block(cachep, ac->entry, ac->avail, node); + spin_unlock(&cachep->nodelists[node]->list_lock); ac->avail = 0; } @@ -2046,7 +2046,7 @@ EXPORT_SYMBOL(kmem_cache_destroy); /* Get the memory for a slab management obj. 
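A little earlier in this mm/slab.c diff, index_of() is changed from inline to __always_inline and given a BUG() for non-constant input: the function only makes sense when __builtin_constant_p() lets the size-to-cache mapping fold away at compile time, which requires the compiler to really inline it. A small userspace sketch of that gcc-specific pattern; the size classes are invented, not the kernel's table.

/* Illustration of __builtin_constant_p(): with optimisation enabled, a
 * literal argument lets the comparisons constant-fold, otherwise the
 * runtime fallback is used.  Either path returns the same class. */
#include <stdio.h>

static inline __attribute__((always_inline)) int size_class(unsigned long size)
{
        if (__builtin_constant_p(size)) {
                if (size <= 32)
                        return 0;
                if (size <= 128)
                        return 1;
                return 2;
        }
        /* runtime fallback for non-constant sizes */
        return size <= 32 ? 0 : (size <= 128 ? 1 : 2);
}

int main(void)
{
        unsigned long n = 200;

        printf("%d %d\n", size_class(64), size_class(n));       /* 1 2 */
        return 0;
}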
*/ static struct slab* alloc_slabmgmt(kmem_cache_t *cachep, void *objp, - int colour_off, unsigned int __nocast local_flags) + int colour_off, gfp_t local_flags) { struct slab *slabp; @@ -2117,7 +2117,7 @@ static void cache_init_objs(kmem_cache_t *cachep, slabp->free = 0; } -static void kmem_flagcheck(kmem_cache_t *cachep, unsigned int flags) +static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags) { if (flags & SLAB_DMA) { if (!(cachep->gfpflags & GFP_DMA)) @@ -2147,12 +2147,12 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp) * Grow (by 1) the number of slabs within a cache. This is called by * kmem_cache_alloc() when there are no active objs left in a cache. */ -static int cache_grow(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid) +static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid) { struct slab *slabp; void *objp; size_t offset; - unsigned int local_flags; + gfp_t local_flags; unsigned long ctor_flags; struct kmem_list3 *l3; @@ -2354,7 +2354,7 @@ bad: #define check_slabp(x,y) do { } while(0) #endif -static void *cache_alloc_refill(kmem_cache_t *cachep, unsigned int __nocast flags) +static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags) { int batchcount; struct kmem_list3 *l3; @@ -2454,7 +2454,7 @@ alloc_done: } static inline void -cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags) +cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags) { might_sleep_if(flags & __GFP_WAIT); #if DEBUG @@ -2465,7 +2465,7 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags) #if DEBUG static void * cache_alloc_debugcheck_after(kmem_cache_t *cachep, - unsigned int __nocast flags, void *objp, void *caller) + gfp_t flags, void *objp, void *caller) { if (!objp) return objp; @@ -2508,16 +2508,12 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep, #define cache_alloc_debugcheck_after(a,b,objp,d) (objp) #endif - -static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags) +static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags) { - unsigned long save_flags; void* objp; struct array_cache *ac; - cache_alloc_debugcheck_before(cachep, flags); - - local_irq_save(save_flags); + check_irq_off(); ac = ac_data(cachep); if (likely(ac->avail)) { STATS_INC_ALLOCHIT(cachep); @@ -2527,6 +2523,18 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast fl STATS_INC_ALLOCMISS(cachep); objp = cache_alloc_refill(cachep, flags); } + return objp; +} + +static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags) +{ + unsigned long save_flags; + void* objp; + + cache_alloc_debugcheck_before(cachep, flags); + + local_irq_save(save_flags); + objp = ____cache_alloc(cachep, flags); local_irq_restore(save_flags); objp = cache_alloc_debugcheck_after(cachep, flags, objp, __builtin_return_address(0)); @@ -2538,7 +2546,7 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast fl /* * A interface to enable slab creation on nodeid */ -static void *__cache_alloc_node(kmem_cache_t *cachep, int flags, int nodeid) +static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid) { struct list_head *entry; struct slab *slabp; @@ -2608,7 +2616,7 @@ done: /* * Caller needs to acquire correct kmem_list's list_lock */ -static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects) +static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int node) { int i; 
struct kmem_list3 *l3; @@ -2617,14 +2625,12 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects) void *objp = objpp[i]; struct slab *slabp; unsigned int objnr; - int nodeid = 0; slabp = GET_PAGE_SLAB(virt_to_page(objp)); - nodeid = slabp->nodeid; - l3 = cachep->nodelists[nodeid]; + l3 = cachep->nodelists[node]; list_del(&slabp->list); objnr = (objp - slabp->s_mem) / cachep->objsize; - check_spinlock_acquired_node(cachep, nodeid); + check_spinlock_acquired_node(cachep, node); check_slabp(cachep, slabp); @@ -2664,13 +2670,14 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac) { int batchcount; struct kmem_list3 *l3; + int node = numa_node_id(); batchcount = ac->batchcount; #if DEBUG BUG_ON(!batchcount || batchcount > ac->avail); #endif check_irq_off(); - l3 = cachep->nodelists[numa_node_id()]; + l3 = cachep->nodelists[node]; spin_lock(&l3->list_lock); if (l3->shared) { struct array_cache *shared_array = l3->shared; @@ -2686,7 +2693,7 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac) } } - free_block(cachep, ac->entry, batchcount); + free_block(cachep, ac->entry, batchcount, node); free_done: #if STATS { @@ -2751,7 +2758,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp) } else { spin_lock(&(cachep->nodelists[nodeid])-> list_lock); - free_block(cachep, &objp, 1); + free_block(cachep, &objp, 1, nodeid); spin_unlock(&(cachep->nodelists[nodeid])-> list_lock); } @@ -2778,7 +2785,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp) * Allocate an object from this cache. The flags are only relevant * if the cache has no available objects. */ -void *kmem_cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags) +void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags) { return __cache_alloc(cachep, flags); } @@ -2839,12 +2846,12 @@ out: * New and improved: it will now make sure that the object gets * put on the correct node list so that there is no false sharing. */ -void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid) +void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid) { unsigned long save_flags; void *ptr; - if (nodeid == numa_node_id() || nodeid == -1) + if (nodeid == -1) return __cache_alloc(cachep, flags); if (unlikely(!cachep->nodelists[nodeid])) { @@ -2855,7 +2862,10 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, i cache_alloc_debugcheck_before(cachep, flags); local_irq_save(save_flags); - ptr = __cache_alloc_node(cachep, flags, nodeid); + if (nodeid == numa_node_id()) + ptr = ____cache_alloc(cachep, flags); + else + ptr = __cache_alloc_node(cachep, flags, nodeid); local_irq_restore(save_flags); ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, __builtin_return_address(0)); @@ -2863,7 +2873,7 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, i } EXPORT_SYMBOL(kmem_cache_alloc_node); -void *kmalloc_node(size_t size, unsigned int __nocast flags, int node) +void *kmalloc_node(size_t size, gfp_t flags, int node) { kmem_cache_t *cachep; @@ -2896,7 +2906,7 @@ EXPORT_SYMBOL(kmalloc_node); * platforms. For example, on i386, it means that the memory must come * from the first 16MB. */ -void *__kmalloc(size_t size, unsigned int __nocast flags) +void *__kmalloc(size_t size, gfp_t flags) { kmem_cache_t *cachep; @@ -2985,7 +2995,7 @@ EXPORT_SYMBOL(kmem_cache_free); * @size: how many bytes of memory are required. * @flags: the type of memory to allocate. 
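The kmem_cache_alloc_node() hunk just above makes nodeid == -1 mean "no preference" and routes allocations for the local node through the irq-off fast path (____cache_alloc()) instead of the slower per-node path, which is now reserved for genuinely remote nodes. A userspace sketch of that three-way dispatch; the node lookup and the path names are stand-ins, not kernel calls.

/* Illustration only: -1 = no preference, local node = fast path,
 * anything else = remote path.  current_node() is a stub. */
#include <stdio.h>

#define ANY_NODE        (-1)

static int current_node(void)
{
        return 0;               /* stand-in for numa_node_id() */
}

static const char *alloc_path(int nodeid)
{
        if (nodeid == ANY_NODE)
                return "generic path";
        if (nodeid == current_node())
                return "local fast path";
        return "remote node path";
}

int main(void)
{
        printf("%s\n%s\n%s\n", alloc_path(ANY_NODE), alloc_path(0),
               alloc_path(1));
        return 0;
}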
*/ -void *kzalloc(size_t size, unsigned int __nocast flags) +void *kzalloc(size_t size, gfp_t flags) { void *ret = kmalloc(size, flags); if (ret) @@ -3079,7 +3089,7 @@ static int alloc_kmemlist(kmem_cache_t *cachep) if ((nc = cachep->nodelists[node]->shared)) free_block(cachep, nc->entry, - nc->avail); + nc->avail, node); l3->shared = new; if (!cachep->nodelists[node]->alien) { @@ -3160,7 +3170,7 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount, if (!ccold) continue; spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); - free_block(cachep, ccold->entry, ccold->avail); + free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i)); spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); kfree(ccold); } @@ -3240,7 +3250,7 @@ static void drain_array_locked(kmem_cache_t *cachep, if (tofree > ac->avail) { tofree = (ac->avail+1)/2; } - free_block(cachep, ac->entry, tofree); + free_block(cachep, ac->entry, tofree, node); ac->avail -= tofree; memmove(ac->entry, &(ac->entry[tofree]), sizeof(void*)*ac->avail); @@ -3591,7 +3601,7 @@ unsigned int ksize(const void *objp) * @s: the string to duplicate * @gfp: the GFP mask used in the kmalloc() call when allocating memory */ -char *kstrdup(const char *s, unsigned int __nocast gfp) +char *kstrdup(const char *s, gfp_t gfp) { size_t len; char *buf; diff --git a/mm/swap_state.c b/mm/swap_state.c index adbc2b426c2f..132164f7d0a7 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -68,7 +68,7 @@ void show_swap_cache_info(void) * but sets SwapCache flag and private instead of mapping and index. */ static int __add_to_swap_cache(struct page *page, swp_entry_t entry, - unsigned int __nocast gfp_mask) + gfp_t gfp_mask) { int error; diff --git a/mm/swapfile.c b/mm/swapfile.c index 0184f510aace..1dcaeda039f4 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1381,6 +1381,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags) error = bd_claim(bdev, sys_swapon); if (error < 0) { bdev = NULL; + error = -EINVAL; goto bad_swap; } p->old_block_size = block_size(bdev); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 13c3d82968ae..1150229b6366 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -395,7 +395,7 @@ void *vmap(struct page **pages, unsigned int count, EXPORT_SYMBOL(vmap); -void *__vmalloc_area(struct vm_struct *area, unsigned int __nocast gfp_mask, pgprot_t prot) +void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) { struct page **pages; unsigned int nr_pages, array_size, i; @@ -446,7 +446,7 @@ fail: * allocator with @gfp_mask flags. Map them into contiguous * kernel virtual space, using a pagetable protection of @prot. */ -void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, pgprot_t prot) +void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) { struct vm_struct *area; diff --git a/mm/vmscan.c b/mm/vmscan.c index 0ea71e887bb6..843c87d1e61f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -70,7 +70,7 @@ struct scan_control { unsigned int priority; /* This context's GFP mask */ - unsigned int gfp_mask; + gfp_t gfp_mask; int may_writepage; @@ -186,7 +186,7 @@ EXPORT_SYMBOL(remove_shrinker); * * Returns the number of slab objects which we shrunk. 
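The kstrdup() hunk earlier in this slab.c diff only retypes the gfp parameter, but the function itself is a compact example of the measure, allocate, copy idiom. A userspace analogue follows; the kernel version takes a gfp mask and allocates with kmalloc() rather than malloc().

/* Userspace sketch of the kstrdup() idiom: length first, then a single
 * allocation, then one memcpy including the terminating NUL. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *dup_string(const char *s)
{
        size_t len = strlen(s) + 1;
        char *buf = malloc(len);

        if (buf)
                memcpy(buf, s, len);
        return buf;
}

int main(void)
{
        char *copy = dup_string("bootmem");

        if (copy) {
                puts(copy);
                free(copy);
        }
        return 0;
}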
*/ -static int shrink_slab(unsigned long scanned, unsigned int gfp_mask, +static int shrink_slab(unsigned long scanned, gfp_t gfp_mask, unsigned long lru_pages) { struct shrinker *shrinker; @@ -511,10 +511,11 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc) * PageDirty _after_ making sure that the page is freeable and * not in use by anybody. (pagecache + us == 2) */ - if (page_count(page) != 2 || PageDirty(page)) { - write_unlock_irq(&mapping->tree_lock); - goto keep_locked; - } + if (unlikely(page_count(page) != 2)) + goto cannot_free; + smp_rmb(); + if (unlikely(PageDirty(page))) + goto cannot_free; #ifdef CONFIG_SWAP if (PageSwapCache(page)) { @@ -538,6 +539,10 @@ free_it: __pagevec_release_nonlru(&freed_pvec); continue; +cannot_free: + write_unlock_irq(&mapping->tree_lock); + goto keep_locked; + activate_locked: SetPageActive(page); pgactivate++; @@ -921,7 +926,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc) * holds filesystem locks which prevent writeout this might not work, and the * allocation attempt will fail. */ -int try_to_free_pages(struct zone **zones, unsigned int gfp_mask) +int try_to_free_pages(struct zone **zones, gfp_t gfp_mask) { int priority; int ret = 0; @@ -1333,7 +1338,7 @@ module_init(kswapd_init) /* * Try to free up some pages from this zone through reclaim. */ -int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order) +int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) { struct scan_control sc; int nr_pages = 1 << order; diff --git a/net/802/p8022.c b/net/802/p8022.c index b24817c63ca8..2530f35241cd 100644 --- a/net/802/p8022.c +++ b/net/802/p8022.c @@ -56,7 +56,7 @@ struct datalink_proto *register_8022_client(unsigned char type, void unregister_8022_client(struct datalink_proto *proto) { - llc_sap_close(proto->sap); + llc_sap_put(proto->sap); kfree(proto); } diff --git a/net/802/psnap.c b/net/802/psnap.c index ab80b1fab53c..4d638944d933 100644 --- a/net/802/psnap.c +++ b/net/802/psnap.c @@ -106,7 +106,7 @@ module_init(snap_init); static void __exit snap_exit(void) { - llc_sap_close(snap_sap); + llc_sap_put(snap_sap); } module_exit(snap_exit); diff --git a/net/802/tr.c b/net/802/tr.c index 1bb7dc1b85cd..afd8385c0c9c 100644 --- a/net/802/tr.c +++ b/net/802/tr.c @@ -238,7 +238,7 @@ unsigned short tr_type_trans(struct sk_buff *skb, struct net_device *dev) return trllc->ethertype; } - return ntohs(ETH_P_802_2); + return ntohs(ETH_P_TR_802_2); } /* @@ -340,9 +340,10 @@ static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev) unsigned int hash, rii_p = 0; unsigned long flags; struct rif_cache *entry; - + unsigned char saddr0; spin_lock_irqsave(&rif_lock, flags); + saddr0 = trh->saddr[0]; /* * Firstly see if the entry exists @@ -395,7 +396,6 @@ printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n", entry->rcf = trh->rcf & htons((unsigned short)~TR_RCF_BROADCAST_MASK); memcpy(&(entry->rseg[0]),&(trh->rseg[0]),8*sizeof(unsigned short)); entry->local_ring = 0; - trh->saddr[0]|=TR_RII; /* put the routing indicator back for tcpdump */ } else { @@ -422,6 +422,7 @@ printk("updating rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n", } entry->last_used=jiffies; } + trh->saddr[0]=saddr0; /* put the routing indicator back for tcpdump */ spin_unlock_irqrestore(&rif_lock, flags); } diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 1d31b3a3f1e5..7982656b9c83 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -100,8 +100,7 @@ static 
struct sock *atalk_search_socket(struct sockaddr_at *to, continue; if (to->sat_addr.s_net == ATADDR_ANYNET && - to->sat_addr.s_node == ATADDR_BCAST && - at->src_net == atif->address.s_net) + to->sat_addr.s_node == ATADDR_BCAST) goto found; if (to->sat_addr.s_net == at->src_net && @@ -1443,8 +1442,10 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev, else atif = atalk_find_interface(ddp->deh_dnet, ddp->deh_dnode); - /* Not ours, so we route the packet via the correct AppleTalk iface */ if (!atif) { + /* Not ours, so we route the packet via the correct + * AppleTalk iface + */ atalk_route_packet(skb, dev, ddp, &ddphv, origlen); goto out; } @@ -1592,9 +1593,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr if (usat->sat_addr.s_net || usat->sat_addr.s_node == ATADDR_ANYNODE) { rt = atrtr_find(&usat->sat_addr); - if (!rt) - return -ENETUNREACH; - dev = rt->dev; } else { struct atalk_addr at_hint; @@ -1603,11 +1601,12 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr at_hint.s_net = at->src_net; rt = atrtr_find(&at_hint); - if (!rt) - return -ENETUNREACH; - dev = rt->dev; } + if (!rt) + return -ENETUNREACH; + + dev = rt->dev; SOCK_DEBUG(sk, "SK %p: Size needed %d, device %s\n", sk, size, dev->name); @@ -1677,6 +1676,20 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr SOCK_DEBUG(sk, "SK %p: Loop back.\n", sk); /* loop back */ skb_orphan(skb); + if (ddp->deh_dnode == ATADDR_BCAST) { + struct atalk_addr at_lo; + + at_lo.s_node = 0; + at_lo.s_net = 0; + + rt = atrtr_find(&at_lo); + if (!rt) { + kfree_skb(skb); + return -ENETUNREACH; + } + dev = rt->dev; + skb->dev = dev; + } ddp_dl->request(ddp_dl, skb, dev->dev_addr); } else { SOCK_DEBUG(sk, "SK %p: send out.\n", sk); diff --git a/net/atm/addr.c b/net/atm/addr.c index 1c8867f7f54a..3060fd0ba4b9 100644 --- a/net/atm/addr.c +++ b/net/atm/addr.c @@ -44,29 +44,43 @@ static void notify_sigd(struct atm_dev *dev) sigd_enq(NULL, as_itf_notify, NULL, &pvc, NULL); } -void atm_reset_addr(struct atm_dev *dev) +void atm_reset_addr(struct atm_dev *dev, enum atm_addr_type_t atype) { unsigned long flags; struct atm_dev_addr *this, *p; + struct list_head *head; spin_lock_irqsave(&dev->lock, flags); - list_for_each_entry_safe(this, p, &dev->local, entry) - kfree(this); + if (atype == ATM_ADDR_LECS) + head = &dev->lecs; + else + head = &dev->local; + list_for_each_entry_safe(this, p, head, entry) { + list_del(&this->entry); + kfree(this); + } spin_unlock_irqrestore(&dev->lock, flags); - notify_sigd(dev); + if (head == &dev->local) + notify_sigd(dev); } -int atm_add_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr) +int atm_add_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr, + enum atm_addr_type_t atype) { unsigned long flags; struct atm_dev_addr *this; + struct list_head *head; int error; error = check_addr(addr); if (error) return error; spin_lock_irqsave(&dev->lock, flags); - list_for_each_entry(this, &dev->local, entry) { + if (atype == ATM_ADDR_LECS) + head = &dev->lecs; + else + head = &dev->local; + list_for_each_entry(this, head, entry) { if (identical(&this->addr, addr)) { spin_unlock_irqrestore(&dev->lock, flags); return -EEXIST; @@ -78,28 +92,36 @@ int atm_add_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr) return -ENOMEM; } this->addr = *addr; - list_add(&this->entry, &dev->local); + list_add(&this->entry, head); spin_unlock_irqrestore(&dev->lock, flags); - notify_sigd(dev); + if (head == &dev->local) + 
notify_sigd(dev); return 0; } -int atm_del_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr) +int atm_del_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr, + enum atm_addr_type_t atype) { unsigned long flags; struct atm_dev_addr *this; + struct list_head *head; int error; error = check_addr(addr); if (error) return error; spin_lock_irqsave(&dev->lock, flags); - list_for_each_entry(this, &dev->local, entry) { + if (atype == ATM_ADDR_LECS) + head = &dev->lecs; + else + head = &dev->local; + list_for_each_entry(this, head, entry) { if (identical(&this->addr, addr)) { list_del(&this->entry); spin_unlock_irqrestore(&dev->lock, flags); kfree(this); - notify_sigd(dev); + if (head == &dev->local) + notify_sigd(dev); return 0; } } @@ -108,22 +130,27 @@ int atm_del_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr) } int atm_get_addr(struct atm_dev *dev, struct sockaddr_atmsvc __user * buf, - size_t size) + size_t size, enum atm_addr_type_t atype) { unsigned long flags; struct atm_dev_addr *this; + struct list_head *head; int total = 0, error; struct sockaddr_atmsvc *tmp_buf, *tmp_bufp; spin_lock_irqsave(&dev->lock, flags); - list_for_each_entry(this, &dev->local, entry) + if (atype == ATM_ADDR_LECS) + head = &dev->lecs; + else + head = &dev->local; + list_for_each_entry(this, head, entry) total += sizeof(struct sockaddr_atmsvc); tmp_buf = tmp_bufp = kmalloc(total, GFP_ATOMIC); if (!tmp_buf) { spin_unlock_irqrestore(&dev->lock, flags); return -ENOMEM; } - list_for_each_entry(this, &dev->local, entry) + list_for_each_entry(this, head, entry) memcpy(tmp_bufp++, &this->addr, sizeof(struct sockaddr_atmsvc)); spin_unlock_irqrestore(&dev->lock, flags); error = total > size ? -E2BIG : total; diff --git a/net/atm/addr.h b/net/atm/addr.h index 3099d21feeaa..f39433ad45da 100644 --- a/net/atm/addr.h +++ b/net/atm/addr.h @@ -9,10 +9,12 @@ #include #include - -void atm_reset_addr(struct atm_dev *dev); -int atm_add_addr(struct atm_dev *dev,struct sockaddr_atmsvc *addr); -int atm_del_addr(struct atm_dev *dev,struct sockaddr_atmsvc *addr); -int atm_get_addr(struct atm_dev *dev,struct sockaddr_atmsvc __user *buf,size_t size); +void atm_reset_addr(struct atm_dev *dev, enum atm_addr_type_t type); +int atm_add_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr, + enum atm_addr_type_t type); +int atm_del_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr, + enum atm_addr_type_t type); +int atm_get_addr(struct atm_dev *dev, struct sockaddr_atmsvc __user *buf, + size_t size, enum atm_addr_type_t type); #endif diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c index b2113c3454ae..223c7ad5bd0f 100644 --- a/net/atm/atm_misc.c +++ b/net/atm/atm_misc.c @@ -25,7 +25,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize) struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, - int gfp_flags) + gfp_t gfp_flags) { struct sock *sk = sk_atm(vcc); int guess = atm_guess_pdu2truesize(pdu_size); diff --git a/net/atm/br2684.c b/net/atm/br2684.c index 289956c4dd3e..72f3f7b8de80 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -220,7 +220,7 @@ static int br2684_start_xmit(struct sk_buff *skb, struct net_device *dev) /* netif_stop_queue(dev); */ dev_kfree_skb(skb); read_unlock(&devs_lock); - return -EUNATCH; + return 0; } if (!br2684_xmit_vcc(skb, brdev, brvcc)) { /* diff --git a/net/atm/clip.c b/net/atm/clip.c index 28dab55a4387..4f54c9a5e84a 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -310,7 +310,7 @@ static int clip_constructor(struct neighbour *neigh) if (neigh->type != RTN_UNICAST) 
return -EINVAL; rcu_read_lock(); - in_dev = rcu_dereference(__in_dev_get(dev)); + in_dev = __in_dev_get_rcu(dev); if (!in_dev) { rcu_read_unlock(); return -EINVAL; diff --git a/net/atm/common.c b/net/atm/common.c index e93e838069e8..63feea49fb13 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -46,7 +46,7 @@ static void __vcc_insert_socket(struct sock *sk) struct atm_vcc *vcc = atm_sk(sk); struct hlist_head *head = &vcc_hash[vcc->vci & (VCC_HTABLE_SIZE - 1)]; - sk->sk_hashent = vcc->vci & (VCC_HTABLE_SIZE - 1); + sk->sk_hash = vcc->vci & (VCC_HTABLE_SIZE - 1); sk_add_node(sk, head); } @@ -178,8 +178,6 @@ static void vcc_destroy_socket(struct sock *sk) if (vcc->push) vcc->push(vcc, NULL); /* atmarpd has no push */ - vcc_remove_socket(sk); /* no more receive */ - while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { atm_return(vcc,skb->truesize); kfree_skb(skb); @@ -188,6 +186,8 @@ static void vcc_destroy_socket(struct sock *sk) module_put(vcc->dev->ops->owner); atm_dev_put(vcc->dev); } + + vcc_remove_socket(sk); } diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c index d89056ec44d4..a150198b05a3 100644 --- a/net/atm/ioctl.c +++ b/net/atm/ioctl.c @@ -105,17 +105,35 @@ int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) if (!error) sock->state = SS_CONNECTED; goto done; - default: + case ATM_SETBACKEND: + case ATM_NEWBACKENDIF: + { + atm_backend_t backend; + error = get_user(backend, (atm_backend_t __user *) argp); + if (error) + goto done; + switch (backend) { + case ATM_BACKEND_PPP: + request_module("pppoatm"); + break; + case ATM_BACKEND_BR2684: + request_module("br2684"); + break; + } + } + break; + case ATMMPC_CTRL: + case ATMMPC_DATA: + request_module("mpoa"); + break; + case ATMARPD_CTRL: + request_module("clip"); + break; + case ATMLEC_CTRL: + request_module("lec"); break; } - if (cmd == ATMMPC_CTRL || cmd == ATMMPC_DATA) - request_module("mpoa"); - if (cmd == ATMARPD_CTRL) - request_module("clip"); - if (cmd == ATMLEC_CTRL) - request_module("lec"); - error = -ENOIOCTLCMD; down(&ioctl_mutex); diff --git a/net/atm/lec.c b/net/atm/lec.c index a0752487026d..ad840b9afba8 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -686,9 +686,19 @@ static unsigned char lec_ctrl_magic[] = { 0x01, 0x01 }; +#define LEC_DATA_DIRECT_8023 2 +#define LEC_DATA_DIRECT_8025 3 + +static int lec_is_data_direct(struct atm_vcc *vcc) +{ + return ((vcc->sap.blli[0].l3.tr9577.snap[4] == LEC_DATA_DIRECT_8023) || + (vcc->sap.blli[0].l3.tr9577.snap[4] == LEC_DATA_DIRECT_8025)); +} + static void lec_push(struct atm_vcc *vcc, struct sk_buff *skb) { + unsigned long flags; struct net_device *dev = (struct net_device *)vcc->proto_data; struct lec_priv *priv = (struct lec_priv *)dev->priv; @@ -728,7 +738,8 @@ lec_push(struct atm_vcc *vcc, struct sk_buff *skb) skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk, skb->len); } else { /* Data frame, queue to protocol handlers */ - unsigned char *dst; + struct lec_arp_table *entry; + unsigned char *src, *dst; atm_return(vcc,skb->truesize); if (*(uint16_t *)skb->data == htons(priv->lecid) || @@ -741,10 +752,30 @@ lec_push(struct atm_vcc *vcc, struct sk_buff *skb) return; } #ifdef CONFIG_TR - if (priv->is_trdev) dst = ((struct lecdatahdr_8025 *)skb->data)->h_dest; + if (priv->is_trdev) + dst = ((struct lecdatahdr_8025 *) skb->data)->h_dest; else #endif - dst = ((struct lecdatahdr_8023 *)skb->data)->h_dest; + dst = ((struct lecdatahdr_8023 *) skb->data)->h_dest; + + /* If this is a Data Direct VCC, and the VCC does not match + * the LE_ARP 
cache entry, delete the LE_ARP cache entry. + */ + spin_lock_irqsave(&priv->lec_arp_lock, flags); + if (lec_is_data_direct(vcc)) { +#ifdef CONFIG_TR + if (priv->is_trdev) + src = ((struct lecdatahdr_8025 *) skb->data)->h_source; + else +#endif + src = ((struct lecdatahdr_8023 *) skb->data)->h_source; + entry = lec_arp_find(priv, src); + if (entry && entry->vcc != vcc) { + lec_arp_remove(priv, entry); + kfree(entry); + } + } + spin_unlock_irqrestore(&priv->lec_arp_lock, flags); if (!(dst[0]&0x01) && /* Never filter Multi/Broadcast */ !priv->is_proxy && /* Proxy wants all the packets */ @@ -1990,6 +2021,12 @@ lec_arp_resolve(struct lec_priv *priv, unsigned char *mac_to_find, found = entry->vcc; goto out; } + /* If the LE_ARP cache entry is still pending, reset count to 0 + * so another LE_ARP request can be made for this frame. + */ + if (entry->status == ESI_ARP_PENDING) { + entry->no_tries = 0; + } /* Data direct VC not yet set up, check to see if the unknown frame count is greater than the limit. If the limit has not been reached, allow the caller to send packet to diff --git a/net/atm/resources.c b/net/atm/resources.c index a57a9268bd24..415d2615d475 100644 --- a/net/atm/resources.c +++ b/net/atm/resources.c @@ -40,6 +40,7 @@ static struct atm_dev *__alloc_atm_dev(const char *type) dev->link_rate = ATM_OC3_PCR; spin_lock_init(&dev->lock); INIT_LIST_HEAD(&dev->local); + INIT_LIST_HEAD(&dev->lecs); return dev; } @@ -320,10 +321,12 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg) error = -EPERM; goto done; } - atm_reset_addr(dev); + atm_reset_addr(dev, ATM_ADDR_LOCAL); break; case ATM_ADDADDR: case ATM_DELADDR: + case ATM_ADDLECSADDR: + case ATM_DELLECSADDR: if (!capable(CAP_NET_ADMIN)) { error = -EPERM; goto done; @@ -335,14 +338,21 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg) error = -EFAULT; goto done; } - if (cmd == ATM_ADDADDR) - error = atm_add_addr(dev, &addr); + if (cmd == ATM_ADDADDR || cmd == ATM_ADDLECSADDR) + error = atm_add_addr(dev, &addr, + (cmd == ATM_ADDADDR ? + ATM_ADDR_LOCAL : ATM_ADDR_LECS)); else - error = atm_del_addr(dev, &addr); + error = atm_del_addr(dev, &addr, + (cmd == ATM_DELADDR ? + ATM_ADDR_LOCAL : ATM_ADDR_LECS)); goto done; } case ATM_GETADDR: - error = atm_get_addr(dev, buf, len); + case ATM_GETLECSADDR: + error = atm_get_addr(dev, buf, len, + (cmd == ATM_GETADDR ? 
+ ATM_ADDR_LOCAL : ATM_ADDR_LECS)); if (error < 0) goto done; size = error; diff --git a/net/atm/signaling.c b/net/atm/signaling.c index f7c449ac1800..e7211a7f382c 100644 --- a/net/atm/signaling.c +++ b/net/atm/signaling.c @@ -217,8 +217,9 @@ void sigd_enq(struct atm_vcc *vcc,enum atmsvc_msg_type type, static void purge_vcc(struct atm_vcc *vcc) { if (sk_atm(vcc)->sk_family == PF_ATMSVC && - !test_bit(ATM_VF_META,&vcc->flags)) { - set_bit(ATM_VF_RELEASED,&vcc->flags); + !test_bit(ATM_VF_META, &vcc->flags)) { + set_bit(ATM_VF_RELEASED, &vcc->flags); + clear_bit(ATM_VF_REGIS, &vcc->flags); vcc_release_async(vcc, -EUNATCH); } } @@ -243,8 +244,7 @@ static void sigd_close(struct atm_vcc *vcc) sk_for_each(s, node, head) { struct atm_vcc *vcc = atm_sk(s); - if (vcc->dev) - purge_vcc(vcc); + purge_vcc(vcc); } } read_unlock(&vcc_sklist_lock); diff --git a/net/atm/svc.c b/net/atm/svc.c index 08e46052a3e4..d7b266136bf6 100644 --- a/net/atm/svc.c +++ b/net/atm/svc.c @@ -302,6 +302,7 @@ static int svc_listen(struct socket *sock,int backlog) error = -EINVAL; goto out; } + vcc_insert_socket(sk); set_bit(ATM_VF_WAITING, &vcc->flags); prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); sigd_enq(vcc,as_listen,NULL,NULL,&vcc->local); diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c index 810c9c76c2e0..73cfc3411c46 100644 --- a/net/ax25/ax25_in.c +++ b/net/ax25/ax25_in.c @@ -123,7 +123,7 @@ int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb) } skb_pull(skb, 1); /* Remove PID */ - skb->h.raw = skb->data; + skb->mac.raw = skb->nh.raw; skb->nh.raw = skb->data; skb->dev = ax25->ax25_dev->dev; skb->pkt_type = PACKET_HOST; diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index d3d6bc547212..59b2dd36baa7 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c @@ -372,7 +372,7 @@ static struct proto l2cap_proto = { .obj_size = sizeof(struct l2cap_pinfo) }; -static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, unsigned int __nocast prio) +static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, gfp_t prio) { struct sock *sk; diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 173f46e8cdae..35adce6482b6 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -229,7 +229,7 @@ static void rfcomm_dlc_clear_state(struct rfcomm_dlc *d) d->rx_credits = RFCOMM_DEFAULT_CREDITS; } -struct rfcomm_dlc *rfcomm_dlc_alloc(unsigned int __nocast prio) +struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio) { struct rfcomm_dlc *d = kmalloc(sizeof(*d), prio); if (!d) diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index f49e7e938bfb..a2b30f0aedb7 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -284,7 +284,7 @@ static struct proto rfcomm_proto = { .obj_size = sizeof(struct rfcomm_pinfo) }; -static struct sock *rfcomm_sock_alloc(struct socket *sock, int proto, unsigned int __nocast prio) +static struct sock *rfcomm_sock_alloc(struct socket *sock, int proto, gfp_t prio) { struct rfcomm_dlc *d; struct sock *sk; diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 1bca860a6109..158a9c46d863 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -286,7 +286,7 @@ static inline void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *de skb->destructor = rfcomm_wfree; } -static struct sk_buff *rfcomm_wmalloc(struct rfcomm_dev *dev, unsigned long size, unsigned int __nocast priority) +static struct sk_buff *rfcomm_wmalloc(struct rfcomm_dev *dev, 
unsigned long size, gfp_t priority) { if (atomic_read(&dev->wmem_alloc) < rfcomm_room(dev->dlc)) { struct sk_buff *skb = alloc_skb(size, priority); diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index ce7ab7dfa0b2..997e42df115c 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -418,7 +418,7 @@ static struct proto sco_proto = { .obj_size = sizeof(struct sco_pinfo) }; -static struct sock *sco_sock_alloc(struct socket *sock, int proto, unsigned int __nocast prio) +static struct sock *sco_sock_alloc(struct socket *sock, int proto, gfp_t prio) { struct sock *sk; diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 069253f830c1..2d24fb400e0c 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -31,7 +31,8 @@ static inline int should_deliver(const struct net_bridge_port *p, int br_dev_queue_push_xmit(struct sk_buff *skb) { - if (skb->len > skb->dev->mtu) + /* drop mtu oversized packets except tso */ + if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->tso_size) kfree_skb(skb); else { #ifdef CONFIG_BRIDGE_NETFILTER diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 91bb895375f4..defcf6a8607c 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -79,7 +79,6 @@ static void destroy_nbp(struct net_bridge_port *p) { struct net_device *dev = p->dev; - dev->br_port = NULL; p->br = NULL; p->dev = NULL; dev_put(dev); @@ -100,6 +99,7 @@ static void del_nbp(struct net_bridge_port *p) struct net_bridge *br = p->br; struct net_device *dev = p->dev; + dev->br_port = NULL; dev_set_promiscuity(dev, -1); spin_lock_bh(&br->lock); diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index c4540144f0f4..f8ffbf6e2333 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -26,6 +26,7 @@ #include #include #include +#include #include /* needed for logical [in,out]-dev filtering */ #include "../br_private.h" @@ -823,10 +824,11 @@ static int translate_table(struct ebt_replace *repl, /* this will get free'd in do_replace()/ebt_register_table() if an error occurs */ newinfo->chainstack = (struct ebt_chainstack **) - vmalloc(num_possible_cpus() * sizeof(struct ebt_chainstack)); + vmalloc((highest_possible_processor_id()+1) + * sizeof(struct ebt_chainstack)); if (!newinfo->chainstack) return -ENOMEM; - for (i = 0; i < num_possible_cpus(); i++) { + for_each_cpu(i) { newinfo->chainstack[i] = vmalloc(udc_cnt * sizeof(struct ebt_chainstack)); if (!newinfo->chainstack[i]) { @@ -895,9 +897,12 @@ static void get_counters(struct ebt_counter *oldcounters, /* counters of cpu 0 */ memcpy(counters, oldcounters, - sizeof(struct ebt_counter) * nentries); + sizeof(struct ebt_counter) * nentries); + /* add other counters to those of cpu 0 */ - for (cpu = 1; cpu < num_possible_cpus(); cpu++) { + for_each_cpu(cpu) { + if (cpu == 0) + continue; counter_base = COUNTER_BASE(oldcounters, nentries, cpu); for (i = 0; i < nentries; i++) { counters[i].pcnt += counter_base[i].pcnt; @@ -929,7 +934,8 @@ static int do_replace(void __user *user, unsigned int len) BUGPRINT("Entries_size never zero\n"); return -EINVAL; } - countersize = COUNTER_OFFSET(tmp.nentries) * num_possible_cpus(); + countersize = COUNTER_OFFSET(tmp.nentries) * + (highest_possible_processor_id()+1); newinfo = (struct ebt_table_info *) vmalloc(sizeof(struct ebt_table_info) + countersize); if (!newinfo) @@ -1022,7 +1028,7 @@ static int do_replace(void __user *user, unsigned int len) vfree(table->entries); if (table->chainstack) { - for (i = 0; i < num_possible_cpus(); 
i++) + for_each_cpu(i) vfree(table->chainstack[i]); vfree(table->chainstack); } @@ -1040,7 +1046,7 @@ free_counterstmp: vfree(counterstmp); /* can be initialized in translate_table() */ if (newinfo->chainstack) { - for (i = 0; i < num_possible_cpus(); i++) + for_each_cpu(i) vfree(newinfo->chainstack[i]); vfree(newinfo->chainstack); } @@ -1132,7 +1138,8 @@ int ebt_register_table(struct ebt_table *table) return -EINVAL; } - countersize = COUNTER_OFFSET(table->table->nentries) * num_possible_cpus(); + countersize = COUNTER_OFFSET(table->table->nentries) * + (highest_possible_processor_id()+1); newinfo = (struct ebt_table_info *) vmalloc(sizeof(struct ebt_table_info) + countersize); ret = -ENOMEM; @@ -1186,7 +1193,7 @@ free_unlock: up(&ebt_mutex); free_chainstack: if (newinfo->chainstack) { - for (i = 0; i < num_possible_cpus(); i++) + for_each_cpu(i) vfree(newinfo->chainstack[i]); vfree(newinfo->chainstack); } @@ -1209,7 +1216,7 @@ void ebt_unregister_table(struct ebt_table *table) up(&ebt_mutex); vfree(table->private->entries); if (table->private->chainstack) { - for (i = 0; i < num_possible_cpus(); i++) + for_each_cpu(i) vfree(table->private->chainstack[i]); vfree(table->private->chainstack); } diff --git a/net/core/datagram.c b/net/core/datagram.c index da9bf71421a7..81987df536eb 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -211,74 +211,45 @@ void skb_free_datagram(struct sock *sk, struct sk_buff *skb) int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset, struct iovec *to, int len) { - int start = skb_headlen(skb); - int i, copy = start - offset; + int i, err, fraglen, end = 0; + struct sk_buff *next = skb_shinfo(skb)->frag_list; +next_skb: + fraglen = skb_headlen(skb); + i = -1; - /* Copy header. */ - if (copy > 0) { - if (copy > len) - copy = len; - if (memcpy_toiovec(to, skb->data + offset, copy)) - goto fault; - if ((len -= copy) == 0) - return 0; - offset += copy; - } + while (1) { + int start = end; - /* Copy paged appendix. Hmm... why does this look so complicated? 
*/ - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - int end; - - BUG_TRAP(start <= offset + len); - - end = start + skb_shinfo(skb)->frags[i].size; - if ((copy = end - offset) > 0) { - int err; - u8 *vaddr; - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - struct page *page = frag->page; + if ((end += fraglen) > offset) { + int copy = end - offset, o = offset - start; if (copy > len) copy = len; - vaddr = kmap(page); - err = memcpy_toiovec(to, vaddr + frag->page_offset + - offset - start, copy); - kunmap(page); + if (i == -1) + err = memcpy_toiovec(to, skb->data + o, copy); + else { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + struct page *page = frag->page; + void *p = kmap(page) + frag->page_offset + o; + err = memcpy_toiovec(to, p, copy); + kunmap(page); + } if (err) goto fault; if (!(len -= copy)) return 0; offset += copy; } - start = end; + if (++i >= skb_shinfo(skb)->nr_frags) + break; + fraglen = skb_shinfo(skb)->frags[i].size; } - - if (skb_shinfo(skb)->frag_list) { - struct sk_buff *list = skb_shinfo(skb)->frag_list; - - for (; list; list = list->next) { - int end; - - BUG_TRAP(start <= offset + len); - - end = start + list->len; - if ((copy = end - offset) > 0) { - if (copy > len) - copy = len; - if (skb_copy_datagram_iovec(list, - offset - start, - to, copy)) - goto fault; - if ((len -= copy) == 0) - return 0; - offset += copy; - } - start = end; - } + if (next) { + skb = next; + BUG_ON(skb_shinfo(skb)->frag_list); + next = skb->next; + goto next_skb; } - if (!len) - return 0; - fault: return -EFAULT; } diff --git a/net/core/dev.c b/net/core/dev.c index c01511e3d0c1..a44eeef24edf 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -574,6 +574,8 @@ struct net_device *dev_getbyhwaddr(unsigned short type, char *ha) return dev; } +EXPORT_SYMBOL(dev_getbyhwaddr); + struct net_device *dev_getfirstbyhwtype(unsigned short type) { struct net_device *dev; @@ -1130,7 +1132,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb) #endif /* Keep head the same: replace data */ -int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp_mask) +int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask) { unsigned int size; u8 *data; @@ -1257,6 +1259,8 @@ int dev_queue_xmit(struct sk_buff *skb) if (skb_checksum_help(skb, 0)) goto out_kfree_skb; + spin_lock_prefetch(&dev->queue_lock); + /* Disable soft irqs for various locks below. Also * stops preemption for RCU. */ diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 39fc55edf691..e68700f950a5 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -61,7 +61,9 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev); void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev); static struct neigh_table *neigh_tables; +#ifdef CONFIG_PROC_FS static struct file_operations neigh_stat_seq_fops; +#endif /* Neighbour hash table buckets are protected with rwlock tbl->lock. 
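The rewritten skb_copy_datagram_iovec() above replaces the separate head, paged-fragment and frag_list loops with a single offset/end cursor that walks the linear header and then each fragment in turn. The userspace sketch below mirrors only that cursor arithmetic; the frag_list hop and the kmap() of fragment pages are omitted, and struct frag_msg, copy_range() and the sample data are illustrative stand-ins, not kernel interfaces.

/*
 * Userspace analogue of the offset/end walk used by the rewritten
 * skb_copy_datagram_iovec() above.  "msg" stands in for an skb whose
 * payload is split across a linear head plus paged fragments.
 */
#include <stdio.h>
#include <string.h>

struct frag { const char *data; size_t len; };

struct frag_msg {
	const char *head;            /* linear header, like skb->data      */
	size_t headlen;              /* like skb_headlen(skb)              */
	struct frag frags[4];        /* like skb_shinfo(skb)->frags[]      */
	int nr_frags;
};

/* Copy 'len' bytes starting at 'offset' into 'to'; 0 on success. */
static int copy_range(const struct frag_msg *msg, size_t offset,
		      char *to, size_t len)
{
	size_t end = 0, fraglen = msg->headlen;
	int i = -1;                  /* -1 means "still in the head"       */

	while (1) {
		size_t start = end;

		end += fraglen;
		if (end > offset) {
			size_t copy = end - offset, o = offset - start;
			const char *src = (i == -1) ? msg->head + o
						    : msg->frags[i].data + o;

			if (copy > len)
				copy = len;
			memcpy(to, src, copy);
			to += copy;
			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		if (++i >= msg->nr_frags)
			break;
		fraglen = msg->frags[i].len;
	}
	return -1;                   /* ran out of data before 'len' bytes */
}

int main(void)
{
	struct frag_msg m = {
		.head = "header-", .headlen = 7,
		.frags = { { "frag0-", 6 }, { "frag1", 5 } },
		.nr_frags = 2,
	};
	char buf[32] = "";

	if (copy_range(&m, 4, buf, 10) == 0)
		printf("%.*s\n", 10, buf);   /* prints "er-frag0-f" */
	return 0;
}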
@@ -173,39 +175,10 @@ static void pneigh_queue_purge(struct sk_buff_head *list) } } -void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev) +static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev) { int i; - write_lock_bh(&tbl->lock); - - for (i=0; i <= tbl->hash_mask; i++) { - struct neighbour *n, **np; - - np = &tbl->hash_buckets[i]; - while ((n = *np) != NULL) { - if (dev && n->dev != dev) { - np = &n->next; - continue; - } - *np = n->next; - write_lock_bh(&n->lock); - n->dead = 1; - neigh_del_timer(n); - write_unlock_bh(&n->lock); - neigh_release(n); - } - } - - write_unlock_bh(&tbl->lock); -} - -int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev) -{ - int i; - - write_lock_bh(&tbl->lock); - for (i = 0; i <= tbl->hash_mask; i++) { struct neighbour *n, **np = &tbl->hash_buckets[i]; @@ -241,7 +214,19 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev) neigh_release(n); } } +} +void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev) +{ + write_lock_bh(&tbl->lock); + neigh_flush_dev(tbl, dev); + write_unlock_bh(&tbl->lock); +} + +int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev) +{ + write_lock_bh(&tbl->lock); + neigh_flush_dev(tbl, dev); pneigh_ifdown(tbl, dev); write_unlock_bh(&tbl->lock); @@ -725,6 +710,14 @@ static __inline__ int neigh_max_probes(struct neighbour *n) p->ucast_probes + p->app_probes + p->mcast_probes); } +static inline void neigh_add_timer(struct neighbour *n, unsigned long when) +{ + if (unlikely(mod_timer(&n->timer, when))) { + printk("NEIGH: BUG, double timer add, state is %x\n", + n->nud_state); + dump_stack(); + } +} /* Called when a timer expires for a neighbour entry. */ @@ -806,11 +799,10 @@ static void neigh_timer_handler(unsigned long arg) } if (neigh->nud_state & NUD_IN_TIMER) { - neigh_hold(neigh); if (time_before(next, jiffies + HZ/2)) next = jiffies + HZ/2; - neigh->timer.expires = next; - add_timer(&neigh->timer); + if (!mod_timer(&neigh->timer, next)) + neigh_hold(neigh); } if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) { struct sk_buff *skb = skb_peek(&neigh->arp_queue); @@ -852,8 +844,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) atomic_set(&neigh->probes, neigh->parms->ucast_probes); neigh->nud_state = NUD_INCOMPLETE; neigh_hold(neigh); - neigh->timer.expires = now + 1; - add_timer(&neigh->timer); + neigh_add_timer(neigh, now + 1); } else { neigh->nud_state = NUD_FAILED; write_unlock_bh(&neigh->lock); @@ -866,8 +857,8 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) NEIGH_PRINTK2("neigh %p is delayed.\n", neigh); neigh_hold(neigh); neigh->nud_state = NUD_DELAY; - neigh->timer.expires = jiffies + neigh->parms->delay_probe_time; - add_timer(&neigh->timer); + neigh_add_timer(neigh, + jiffies + neigh->parms->delay_probe_time); } if (neigh->nud_state == NUD_INCOMPLETE) { @@ -1013,10 +1004,10 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, neigh_del_timer(neigh); if (new & NUD_IN_TIMER) { neigh_hold(neigh); - neigh->timer.expires = jiffies + + neigh_add_timer(neigh, (jiffies + ((new & NUD_REACHABLE) ? 
- neigh->parms->reachable_time : 0); - add_timer(&neigh->timer); + neigh->parms->reachable_time : + 0))); } neigh->nud_state = new; } @@ -1634,12 +1625,9 @@ static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb, memset(&ndst, 0, sizeof(ndst)); - for (cpu = 0; cpu < NR_CPUS; cpu++) { + for_each_cpu(cpu) { struct neigh_statistics *st; - if (!cpu_possible(cpu)) - continue; - st = per_cpu_ptr(tbl->stats, cpu); ndst.ndts_allocs += st->allocs; ndst.ndts_destroys += st->destroys; diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 5265dfd69928..802fe11efad0 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -703,7 +703,7 @@ int netpoll_setup(struct netpoll *np) if (!np->local_ip) { rcu_read_lock(); - in_dev = __in_dev_get(ndev); + in_dev = __in_dev_get_rcu(ndev); if (!in_dev || !in_dev->ifa_list) { rcu_read_unlock(); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index ef430b1e8e42..7fc3e9e28c34 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -75,7 +75,7 @@ * By design there should only be *one* "controlling" process. In practice * multiple write accesses gives unpredictable result. Understood by "write" * to /proc gives result code thats should be read be the "writer". - * For pratical use this should be no problem. + * For practical use this should be no problem. * * Note when adding devices to a specific CPU there good idea to also assign * /proc/irq/XX/smp_affinity so TX-interrupts gets bound to the same CPU. @@ -96,7 +96,7 @@ * New xmit() return, do_div and misc clean up by Stephen Hemminger * 040923 * - * Rany Dunlap fixed u64 printk compiler waring + * Randy Dunlap fixed u64 printk compiler waring * * Remove FCS from BW calculation. Lennert Buytenhek * New time handling. Lennert Buytenhek 041213 @@ -137,6 +137,7 @@ #include #include #include +#include #include #include #include @@ -151,7 +152,7 @@ #include -#define VERSION "pktgen v2.62: Packet Generator for packet performance testing.\n" +#define VERSION "pktgen v2.63: Packet Generator for packet performance testing.\n" /* #define PG_DEBUG(a) a */ #define PG_DEBUG(a) @@ -177,8 +178,8 @@ #define T_REMDEV (1<<3) /* Remove all devs */ /* Locks */ -#define thread_lock() spin_lock(&_thread_lock) -#define thread_unlock() spin_unlock(&_thread_lock) +#define thread_lock() down(&pktgen_sem) +#define thread_unlock() up(&pktgen_sem) /* If lock -- can be removed after some work */ #define if_lock(t) spin_lock(&(t->if_lock)); @@ -187,6 +188,8 @@ /* Used to help with determining the pkts on receive */ #define PKTGEN_MAGIC 0xbe9be955 #define PG_PROC_DIR "pktgen" +#define PGCTRL "pgctrl" +static struct proc_dir_entry *pg_proc_dir = NULL; #define MAX_CFLOWS 65536 @@ -202,11 +205,8 @@ struct pktgen_dev { * Try to keep frequent/infrequent used vars. separated. */ - char ifname[32]; - struct proc_dir_entry *proc_ent; + char ifname[IFNAMSIZ]; char result[512]; - /* proc file names */ - char fname[80]; struct pktgen_thread* pg_thread; /* the owner */ struct pktgen_dev *next; /* Used for chaining in the thread's run-queue */ @@ -244,7 +244,7 @@ struct pktgen_dev { __u32 seq_num; int clone_skb; /* Use multiple SKBs during packet gen. If this number - * is greater than 1, then that many coppies of the same + * is greater than 1, then that many copies of the same * packet will be sent before a new packet is allocated. * For instance, if you want to send 1024 identical packets * before creating a new packet, set clone_skb to 1024. 
@@ -330,8 +330,6 @@ struct pktgen_thread { struct pktgen_dev *if_list; /* All device here */ struct pktgen_thread* next; char name[32]; - char fname[128]; /* name of proc file */ - struct proc_dir_entry *proc_ent; char result[512]; u32 max_before_softirq; /* We'll call do_softirq to prevent starvation. */ @@ -396,7 +394,7 @@ static inline s64 divremdi3(s64 x, s64 y, int type) /* End of hacks to deal with 64-bit math on x86 */ -/** Convert to miliseconds */ +/** Convert to milliseconds */ static inline __u64 tv_to_ms(const struct timeval* tv) { __u64 ms = tv->tv_usec / 1000; @@ -425,7 +423,7 @@ static inline __u64 pg_div64(__u64 n, __u64 base) { __u64 tmp = n; /* - * How do we know if the architectrure we are running on + * How do we know if the architecture we are running on * supports division with 64 bit base? * */ @@ -473,16 +471,6 @@ static inline __u64 tv_diff(const struct timeval* a, const struct timeval* b) static char version[] __initdata = VERSION; -static ssize_t proc_pgctrl_read(struct file* file, char __user * buf, size_t count, loff_t *ppos); -static ssize_t proc_pgctrl_write(struct file* file, const char __user * buf, size_t count, loff_t *ppos); -static int proc_if_read(char *buf , char **start, off_t offset, int len, int *eof, void *data); - -static int proc_thread_read(char *buf , char **start, off_t offset, int len, int *eof, void *data); -static int proc_if_write(struct file *file, const char __user *user_buffer, unsigned long count, void *data); -static int proc_thread_write(struct file *file, const char __user *user_buffer, unsigned long count, void *data); -static int create_proc_dir(void); -static int remove_proc_dir(void); - static int pktgen_remove_device(struct pktgen_thread* t, struct pktgen_dev *i); static int pktgen_add_device(struct pktgen_thread* t, const char* ifname); static struct pktgen_thread* pktgen_find_thread(const char* name); @@ -503,83 +491,41 @@ static int pg_delay_d = 0; static int pg_clone_skb_d = 0; static int debug = 0; -static DEFINE_SPINLOCK(_thread_lock); +static DECLARE_MUTEX(pktgen_sem); static struct pktgen_thread *pktgen_threads = NULL; -static char module_fname[128]; -static struct proc_dir_entry *module_proc_ent = NULL; - static struct notifier_block pktgen_notifier_block = { .notifier_call = pktgen_device_event, }; -static struct file_operations pktgen_fops = { - .read = proc_pgctrl_read, - .write = proc_pgctrl_write, - /* .ioctl = pktgen_ioctl, later maybe */ -}; - /* * /proc handling functions * */ -static struct proc_dir_entry *pg_proc_dir = NULL; -static int proc_pgctrl_read_eof=0; - -static ssize_t proc_pgctrl_read(struct file* file, char __user * buf, - size_t count, loff_t *ppos) +static int pgctrl_show(struct seq_file *seq, void *v) { - char data[200]; - int len = 0; - - if(proc_pgctrl_read_eof) { - proc_pgctrl_read_eof=0; - len = 0; - goto out; - } - - sprintf(data, "%s", VERSION); - - len = strlen(data); - - if(len > count) { - len =-EFAULT; - goto out; - } - - if (copy_to_user(buf, data, len)) { - len =-EFAULT; - goto out; - } - - *ppos += len; - proc_pgctrl_read_eof=1; /* EOF next call */ - - out: - return len; + seq_puts(seq, VERSION); + return 0; } -static ssize_t proc_pgctrl_write(struct file* file,const char __user * buf, - size_t count, loff_t *ppos) +static ssize_t pgctrl_write(struct file* file,const char __user * buf, + size_t count, loff_t *ppos) { - char *data = NULL; int err = 0; + char data[128]; if (!capable(CAP_NET_ADMIN)){ err = -EPERM; goto out; } - data = (void*)vmalloc ((unsigned int)count); + if (count 
> sizeof(data)) + count = sizeof(data); - if(!data) { - err = -ENOMEM; - goto out; - } if (copy_from_user(data, buf, count)) { - err =-EFAULT; - goto out_free; + err = -EFAULT; + goto out; } data[count-1] = 0; /* Make string */ @@ -594,31 +540,40 @@ static ssize_t proc_pgctrl_write(struct file* file,const char __user * buf, err = count; - out_free: - vfree (data); out: return err; } -static int proc_if_read(char *buf , char **start, off_t offset, - int len, int *eof, void *data) +static int pgctrl_open(struct inode *inode, struct file *file) +{ + return single_open(file, pgctrl_show, PDE(inode)->data); +} + +static struct file_operations pktgen_fops = { + .owner = THIS_MODULE, + .open = pgctrl_open, + .read = seq_read, + .llseek = seq_lseek, + .write = pgctrl_write, + .release = single_release, +}; + +static int pktgen_if_show(struct seq_file *seq, void *v) { - char *p; int i; - struct pktgen_dev *pkt_dev = (struct pktgen_dev*)(data); + struct pktgen_dev *pkt_dev = seq->private; __u64 sa; __u64 stopped; __u64 now = getCurUs(); - p = buf; - p += sprintf(p, "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n", - (unsigned long long) pkt_dev->count, - pkt_dev->min_pkt_size, pkt_dev->max_pkt_size); + seq_printf(seq, "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n", + (unsigned long long) pkt_dev->count, + pkt_dev->min_pkt_size, pkt_dev->max_pkt_size); - p += sprintf(p, " frags: %d delay: %u clone_skb: %d ifname: %s\n", - pkt_dev->nfrags, 1000*pkt_dev->delay_us+pkt_dev->delay_ns, pkt_dev->clone_skb, pkt_dev->ifname); + seq_printf(seq, " frags: %d delay: %u clone_skb: %d ifname: %s\n", + pkt_dev->nfrags, 1000*pkt_dev->delay_us+pkt_dev->delay_ns, pkt_dev->clone_skb, pkt_dev->ifname); - p += sprintf(p, " flows: %u flowlen: %u\n", pkt_dev->cflows, pkt_dev->lflow); + seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows, pkt_dev->lflow); if(pkt_dev->flags & F_IPV6) { @@ -626,19 +581,19 @@ static int proc_if_read(char *buf , char **start, off_t offset, fmt_ip6(b1, pkt_dev->in6_saddr.s6_addr); fmt_ip6(b2, pkt_dev->min_in6_saddr.s6_addr); fmt_ip6(b3, pkt_dev->max_in6_saddr.s6_addr); - p += sprintf(p, " saddr: %s min_saddr: %s max_saddr: %s\n", b1, b2, b3); + seq_printf(seq, " saddr: %s min_saddr: %s max_saddr: %s\n", b1, b2, b3); fmt_ip6(b1, pkt_dev->in6_daddr.s6_addr); fmt_ip6(b2, pkt_dev->min_in6_daddr.s6_addr); fmt_ip6(b3, pkt_dev->max_in6_daddr.s6_addr); - p += sprintf(p, " daddr: %s min_daddr: %s max_daddr: %s\n", b1, b2, b3); + seq_printf(seq, " daddr: %s min_daddr: %s max_daddr: %s\n", b1, b2, b3); } else - p += sprintf(p, " dst_min: %s dst_max: %s\n src_min: %s src_max: %s\n", - pkt_dev->dst_min, pkt_dev->dst_max, pkt_dev->src_min, pkt_dev->src_max); + seq_printf(seq," dst_min: %s dst_max: %s\n src_min: %s src_max: %s\n", + pkt_dev->dst_min, pkt_dev->dst_max, pkt_dev->src_min, pkt_dev->src_max); - p += sprintf(p, " src_mac: "); + seq_puts(seq, " src_mac: "); if ((pkt_dev->src_mac[0] == 0) && (pkt_dev->src_mac[1] == 0) && @@ -648,89 +603,89 @@ static int proc_if_read(char *buf , char **start, off_t offset, (pkt_dev->src_mac[5] == 0)) for (i = 0; i < 6; i++) - p += sprintf(p, "%02X%s", pkt_dev->odev->dev_addr[i], i == 5 ? " " : ":"); + seq_printf(seq, "%02X%s", pkt_dev->odev->dev_addr[i], i == 5 ? " " : ":"); else for (i = 0; i < 6; i++) - p += sprintf(p, "%02X%s", pkt_dev->src_mac[i], i == 5 ? " " : ":"); + seq_printf(seq, "%02X%s", pkt_dev->src_mac[i], i == 5 ? 
" " : ":"); - p += sprintf(p, "dst_mac: "); + seq_printf(seq, "dst_mac: "); for (i = 0; i < 6; i++) - p += sprintf(p, "%02X%s", pkt_dev->dst_mac[i], i == 5 ? "\n" : ":"); + seq_printf(seq, "%02X%s", pkt_dev->dst_mac[i], i == 5 ? "\n" : ":"); - p += sprintf(p, " udp_src_min: %d udp_src_max: %d udp_dst_min: %d udp_dst_max: %d\n", - pkt_dev->udp_src_min, pkt_dev->udp_src_max, pkt_dev->udp_dst_min, - pkt_dev->udp_dst_max); + seq_printf(seq, " udp_src_min: %d udp_src_max: %d udp_dst_min: %d udp_dst_max: %d\n", + pkt_dev->udp_src_min, pkt_dev->udp_src_max, pkt_dev->udp_dst_min, + pkt_dev->udp_dst_max); - p += sprintf(p, " src_mac_count: %d dst_mac_count: %d \n Flags: ", - pkt_dev->src_mac_count, pkt_dev->dst_mac_count); + seq_printf(seq, " src_mac_count: %d dst_mac_count: %d \n Flags: ", + pkt_dev->src_mac_count, pkt_dev->dst_mac_count); if (pkt_dev->flags & F_IPV6) - p += sprintf(p, "IPV6 "); + seq_printf(seq, "IPV6 "); if (pkt_dev->flags & F_IPSRC_RND) - p += sprintf(p, "IPSRC_RND "); + seq_printf(seq, "IPSRC_RND "); if (pkt_dev->flags & F_IPDST_RND) - p += sprintf(p, "IPDST_RND "); + seq_printf(seq, "IPDST_RND "); if (pkt_dev->flags & F_TXSIZE_RND) - p += sprintf(p, "TXSIZE_RND "); + seq_printf(seq, "TXSIZE_RND "); if (pkt_dev->flags & F_UDPSRC_RND) - p += sprintf(p, "UDPSRC_RND "); + seq_printf(seq, "UDPSRC_RND "); if (pkt_dev->flags & F_UDPDST_RND) - p += sprintf(p, "UDPDST_RND "); + seq_printf(seq, "UDPDST_RND "); if (pkt_dev->flags & F_MACSRC_RND) - p += sprintf(p, "MACSRC_RND "); + seq_printf(seq, "MACSRC_RND "); if (pkt_dev->flags & F_MACDST_RND) - p += sprintf(p, "MACDST_RND "); + seq_printf(seq, "MACDST_RND "); - p += sprintf(p, "\n"); + seq_puts(seq, "\n"); sa = pkt_dev->started_at; stopped = pkt_dev->stopped_at; if (pkt_dev->running) stopped = now; /* not really stopped, more like last-running-at */ - p += sprintf(p, "Current:\n pkts-sofar: %llu errors: %llu\n started: %lluus stopped: %lluus idle: %lluus\n", - (unsigned long long) pkt_dev->sofar, - (unsigned long long) pkt_dev->errors, - (unsigned long long) sa, - (unsigned long long) stopped, - (unsigned long long) pkt_dev->idle_acc); + seq_printf(seq, "Current:\n pkts-sofar: %llu errors: %llu\n started: %lluus stopped: %lluus idle: %lluus\n", + (unsigned long long) pkt_dev->sofar, + (unsigned long long) pkt_dev->errors, + (unsigned long long) sa, + (unsigned long long) stopped, + (unsigned long long) pkt_dev->idle_acc); - p += sprintf(p, " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n", - pkt_dev->seq_num, pkt_dev->cur_dst_mac_offset, pkt_dev->cur_src_mac_offset); + seq_printf(seq, " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n", + pkt_dev->seq_num, pkt_dev->cur_dst_mac_offset, + pkt_dev->cur_src_mac_offset); if(pkt_dev->flags & F_IPV6) { char b1[128], b2[128]; fmt_ip6(b1, pkt_dev->cur_in6_daddr.s6_addr); fmt_ip6(b2, pkt_dev->cur_in6_saddr.s6_addr); - p += sprintf(p, " cur_saddr: %s cur_daddr: %s\n", b2, b1); + seq_printf(seq, " cur_saddr: %s cur_daddr: %s\n", b2, b1); } else - p += sprintf(p, " cur_saddr: 0x%x cur_daddr: 0x%x\n", - pkt_dev->cur_saddr, pkt_dev->cur_daddr); + seq_printf(seq, " cur_saddr: 0x%x cur_daddr: 0x%x\n", + pkt_dev->cur_saddr, pkt_dev->cur_daddr); - p += sprintf(p, " cur_udp_dst: %d cur_udp_src: %d\n", - pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src); + seq_printf(seq, " cur_udp_dst: %d cur_udp_src: %d\n", + pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src); - p += sprintf(p, " flows: %u\n", pkt_dev->nflows); + seq_printf(seq, " flows: %u\n", pkt_dev->nflows); if (pkt_dev->result[0]) - p 
+= sprintf(p, "Result: %s\n", pkt_dev->result); + seq_printf(seq, "Result: %s\n", pkt_dev->result); else - p += sprintf(p, "Result: Idle\n"); - *eof = 1; + seq_printf(seq, "Result: Idle\n"); - return p - buf; + return 0; } @@ -802,13 +757,14 @@ done_str: return i; } -static int proc_if_write(struct file *file, const char __user *user_buffer, - unsigned long count, void *data) +static ssize_t pktgen_if_write(struct file *file, const char __user *user_buffer, + size_t count, loff_t *offset) { + struct seq_file *seq = (struct seq_file *) file->private_data; + struct pktgen_dev *pkt_dev = seq->private; int i = 0, max, len; char name[16], valstr[32]; unsigned long value = 0; - struct pktgen_dev *pkt_dev = (struct pktgen_dev*)(data); char* pg_result = NULL; int tmp = 0; char buf[128]; @@ -849,7 +805,8 @@ static int proc_if_write(struct file *file, const char __user *user_buffer, if (copy_from_user(tb, user_buffer, count)) return -EFAULT; tb[count] = 0; - printk("pktgen: %s,%lu buffer -:%s:-\n", name, count, tb); + printk("pktgen: %s,%lu buffer -:%s:-\n", name, + (unsigned long) count, tb); } if (!strcmp(name, "min_pkt_size")) { @@ -1335,92 +1292,98 @@ static int proc_if_write(struct file *file, const char __user *user_buffer, return -EINVAL; } -static int proc_thread_read(char *buf , char **start, off_t offset, - int len, int *eof, void *data) +static int pktgen_if_open(struct inode *inode, struct file *file) { - char *p; - struct pktgen_thread *t = (struct pktgen_thread*)(data); + return single_open(file, pktgen_if_show, PDE(inode)->data); +} + +static struct file_operations pktgen_if_fops = { + .owner = THIS_MODULE, + .open = pktgen_if_open, + .read = seq_read, + .llseek = seq_lseek, + .write = pktgen_if_write, + .release = single_release, +}; + +static int pktgen_thread_show(struct seq_file *seq, void *v) +{ + struct pktgen_thread *t = seq->private; struct pktgen_dev *pkt_dev = NULL; + BUG_ON(!t); - if (!t) { - printk("pktgen: ERROR: could not find thread in proc_thread_read\n"); - return -EINVAL; - } - - p = buf; - p += sprintf(p, "Name: %s max_before_softirq: %d\n", + seq_printf(seq, "Name: %s max_before_softirq: %d\n", t->name, t->max_before_softirq); - p += sprintf(p, "Running: "); + seq_printf(seq, "Running: "); if_lock(t); for(pkt_dev = t->if_list;pkt_dev; pkt_dev = pkt_dev->next) if(pkt_dev->running) - p += sprintf(p, "%s ", pkt_dev->ifname); + seq_printf(seq, "%s ", pkt_dev->ifname); - p += sprintf(p, "\nStopped: "); + seq_printf(seq, "\nStopped: "); for(pkt_dev = t->if_list;pkt_dev; pkt_dev = pkt_dev->next) if(!pkt_dev->running) - p += sprintf(p, "%s ", pkt_dev->ifname); + seq_printf(seq, "%s ", pkt_dev->ifname); if (t->result[0]) - p += sprintf(p, "\nResult: %s\n", t->result); + seq_printf(seq, "\nResult: %s\n", t->result); else - p += sprintf(p, "\nResult: NA\n"); - - *eof = 1; + seq_printf(seq, "\nResult: NA\n"); if_unlock(t); - return p - buf; + return 0; } -static int proc_thread_write(struct file *file, const char __user *user_buffer, - unsigned long count, void *data) +static ssize_t pktgen_thread_write(struct file *file, + const char __user *user_buffer, + size_t count, loff_t *offset) { + struct seq_file *seq = (struct seq_file *) file->private_data; + struct pktgen_thread *t = seq->private; int i = 0, max, len, ret; char name[40]; - struct pktgen_thread *t; char *pg_result; unsigned long value = 0; - + if (count < 1) { // sprintf(pg_result, "Wrong command format"); return -EINVAL; } - + max = count - i; len = count_trail_chars(&user_buffer[i], max); - if (len < 0) - return 
len; - + if (len < 0) + return len; + i += len; - + /* Read variable name */ len = strn_len(&user_buffer[i], sizeof(name) - 1); - if (len < 0) - return len; + if (len < 0) + return len; memset(name, 0, sizeof(name)); if (copy_from_user(name, &user_buffer[i], len)) return -EFAULT; i += len; - + max = count -i; len = count_trail_chars(&user_buffer[i], max); - if (len < 0) - return len; - + if (len < 0) + return len; + i += len; - if (debug) - printk("pktgen: t=%s, count=%lu\n", name, count); - + if (debug) + printk("pktgen: t=%s, count=%lu\n", name, + (unsigned long) count); - t = (struct pktgen_thread*)(data); if(!t) { printk("pktgen: ERROR: No thread\n"); ret = -EINVAL; @@ -1474,32 +1437,19 @@ static int proc_thread_write(struct file *file, const char __user *user_buffer, return ret; } -static int create_proc_dir(void) +static int pktgen_thread_open(struct inode *inode, struct file *file) { - int len; - /* does proc_dir already exists */ - len = strlen(PG_PROC_DIR); - - for (pg_proc_dir = proc_net->subdir; pg_proc_dir; pg_proc_dir=pg_proc_dir->next) { - if ((pg_proc_dir->namelen == len) && - (! memcmp(pg_proc_dir->name, PG_PROC_DIR, len))) - break; - } - - if (!pg_proc_dir) - pg_proc_dir = create_proc_entry(PG_PROC_DIR, S_IFDIR, proc_net); - - if (!pg_proc_dir) - return -ENODEV; - - return 0; + return single_open(file, pktgen_thread_show, PDE(inode)->data); } -static int remove_proc_dir(void) -{ - remove_proc_entry(PG_PROC_DIR, proc_net); - return 0; -} +static struct file_operations pktgen_thread_fops = { + .owner = THIS_MODULE, + .open = pktgen_thread_open, + .read = seq_read, + .llseek = seq_lseek, + .write = pktgen_thread_write, + .release = single_release, +}; /* Think find or remove for NN */ static struct pktgen_dev *__pktgen_NN_threads(const char* ifname, int remove) @@ -1678,13 +1628,12 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) struct in_device *in_dev; rcu_read_lock(); - in_dev = __in_dev_get(pkt_dev->odev); + in_dev = __in_dev_get_rcu(pkt_dev->odev); if (in_dev) { if (in_dev->ifa_list) { pkt_dev->saddr_min = in_dev->ifa_list->ifa_address; pkt_dev->saddr_max = pkt_dev->saddr_min; } - __in_dev_put(in_dev); } rcu_read_unlock(); } @@ -1714,7 +1663,7 @@ static void spin(struct pktgen_dev *pkt_dev, __u64 spin_until_us) start = now = getCurUs(); printk(KERN_INFO "sleeping for %d\n", (int)(spin_until_us - now)); while (now < spin_until_us) { - /* TODO: optimise sleeping behavior */ + /* TODO: optimize sleeping behavior */ if (spin_until_us - now > jiffies_to_usecs(1)+1) schedule_timeout_interruptible(1); else if (spin_until_us - now > 100) { @@ -2373,7 +2322,7 @@ static void pktgen_stop_all_threads_ifs(void) pktgen_stop(t); t = t->next; } - thread_unlock(); + thread_unlock(); } static int thread_is_running(struct pktgen_thread *t ) @@ -2564,10 +2513,9 @@ static void pktgen_rem_thread(struct pktgen_thread *t) struct pktgen_thread *tmp = pktgen_threads; - if (strlen(t->fname)) - remove_proc_entry(t->fname, NULL); + remove_proc_entry(t->name, pg_proc_dir); - thread_lock(); + thread_lock(); if (tmp == t) pktgen_threads = tmp->next; @@ -2837,7 +2785,7 @@ static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t, const char* i if_lock(t); for(pkt_dev=t->if_list; pkt_dev; pkt_dev = pkt_dev->next ) { - if (strcmp(pkt_dev->ifname, ifname) == 0) { + if (strncmp(pkt_dev->ifname, ifname, IFNAMSIZ) == 0) { break; } } @@ -2876,74 +2824,70 @@ static int add_dev_to_thread(struct pktgen_thread *t, struct pktgen_dev *pkt_dev static int pktgen_add_device(struct pktgen_thread 
*t, const char* ifname) { struct pktgen_dev *pkt_dev; + struct proc_dir_entry *pe; /* We don't allow a device to be on several threads */ - if( (pkt_dev = __pktgen_NN_threads(ifname, FIND)) == NULL) { - - pkt_dev = kmalloc(sizeof(struct pktgen_dev), GFP_KERNEL); - if (!pkt_dev) - return -ENOMEM; - - memset(pkt_dev, 0, sizeof(struct pktgen_dev)); - - pkt_dev->flows = vmalloc(MAX_CFLOWS*sizeof(struct flow_state)); - if (pkt_dev->flows == NULL) { - kfree(pkt_dev); - return -ENOMEM; - } - memset(pkt_dev->flows, 0, MAX_CFLOWS*sizeof(struct flow_state)); - - pkt_dev->min_pkt_size = ETH_ZLEN; - pkt_dev->max_pkt_size = ETH_ZLEN; - pkt_dev->nfrags = 0; - pkt_dev->clone_skb = pg_clone_skb_d; - pkt_dev->delay_us = pg_delay_d / 1000; - pkt_dev->delay_ns = pg_delay_d % 1000; - pkt_dev->count = pg_count_d; - pkt_dev->sofar = 0; - pkt_dev->udp_src_min = 9; /* sink port */ - pkt_dev->udp_src_max = 9; - pkt_dev->udp_dst_min = 9; - pkt_dev->udp_dst_max = 9; - - strncpy(pkt_dev->ifname, ifname, 31); - sprintf(pkt_dev->fname, "net/%s/%s", PG_PROC_DIR, ifname); - - if (! pktgen_setup_dev(pkt_dev)) { - printk("pktgen: ERROR: pktgen_setup_dev failed.\n"); - if (pkt_dev->flows) - vfree(pkt_dev->flows); - kfree(pkt_dev); - return -ENODEV; - } - - pkt_dev->proc_ent = create_proc_entry(pkt_dev->fname, 0600, NULL); - if (!pkt_dev->proc_ent) { - printk("pktgen: cannot create %s procfs entry.\n", pkt_dev->fname); - if (pkt_dev->flows) - vfree(pkt_dev->flows); - kfree(pkt_dev); - return -EINVAL; - } - pkt_dev->proc_ent->read_proc = proc_if_read; - pkt_dev->proc_ent->write_proc = proc_if_write; - pkt_dev->proc_ent->data = (void*)(pkt_dev); - pkt_dev->proc_ent->owner = THIS_MODULE; - - return add_dev_to_thread(t, pkt_dev); - } - else { + pkt_dev = __pktgen_NN_threads(ifname, FIND); + if (pkt_dev) { printk("pktgen: ERROR: interface already used.\n"); return -EBUSY; } + + pkt_dev = kzalloc(sizeof(struct pktgen_dev), GFP_KERNEL); + if (!pkt_dev) + return -ENOMEM; + + pkt_dev->flows = vmalloc(MAX_CFLOWS*sizeof(struct flow_state)); + if (pkt_dev->flows == NULL) { + kfree(pkt_dev); + return -ENOMEM; + } + memset(pkt_dev->flows, 0, MAX_CFLOWS*sizeof(struct flow_state)); + + pkt_dev->min_pkt_size = ETH_ZLEN; + pkt_dev->max_pkt_size = ETH_ZLEN; + pkt_dev->nfrags = 0; + pkt_dev->clone_skb = pg_clone_skb_d; + pkt_dev->delay_us = pg_delay_d / 1000; + pkt_dev->delay_ns = pg_delay_d % 1000; + pkt_dev->count = pg_count_d; + pkt_dev->sofar = 0; + pkt_dev->udp_src_min = 9; /* sink port */ + pkt_dev->udp_src_max = 9; + pkt_dev->udp_dst_min = 9; + pkt_dev->udp_dst_max = 9; + + strncpy(pkt_dev->ifname, ifname, IFNAMSIZ); + + if (! 
pktgen_setup_dev(pkt_dev)) { + printk("pktgen: ERROR: pktgen_setup_dev failed.\n"); + if (pkt_dev->flows) + vfree(pkt_dev->flows); + kfree(pkt_dev); + return -ENODEV; + } + + pe = create_proc_entry(ifname, 0600, pg_proc_dir); + if (!pe) { + printk("pktgen: cannot create %s/%s procfs entry.\n", + PG_PROC_DIR, ifname); + if (pkt_dev->flows) + vfree(pkt_dev->flows); + kfree(pkt_dev); + return -EINVAL; + } + pe->proc_fops = &pktgen_if_fops; + pe->data = pkt_dev; + + return add_dev_to_thread(t, pkt_dev); } static struct pktgen_thread *pktgen_find_thread(const char* name) { struct pktgen_thread *t = NULL; - thread_lock(); + thread_lock(); t = pktgen_threads; while (t) { @@ -2959,6 +2903,7 @@ static struct pktgen_thread *pktgen_find_thread(const char* name) static int pktgen_create_thread(const char* name, int cpu) { struct pktgen_thread *t = NULL; + struct proc_dir_entry *pe; if (strlen(name) > 31) { printk("pktgen: ERROR: Thread name cannot be more than 31 characters.\n"); @@ -2970,28 +2915,26 @@ static int pktgen_create_thread(const char* name, int cpu) return -EINVAL; } - t = (struct pktgen_thread*)(kmalloc(sizeof(struct pktgen_thread), GFP_KERNEL)); + t = kzalloc(sizeof(struct pktgen_thread), GFP_KERNEL); if (!t) { printk("pktgen: ERROR: out of memory, can't create new thread.\n"); return -ENOMEM; } - memset(t, 0, sizeof(struct pktgen_thread)); strcpy(t->name, name); spin_lock_init(&t->if_lock); t->cpu = cpu; - sprintf(t->fname, "net/%s/%s", PG_PROC_DIR, t->name); - t->proc_ent = create_proc_entry(t->fname, 0600, NULL); - if (!t->proc_ent) { - printk("pktgen: cannot create %s procfs entry.\n", t->fname); + pe = create_proc_entry(t->name, 0600, pg_proc_dir); + if (!pe) { + printk("pktgen: cannot create %s/%s procfs entry.\n", + PG_PROC_DIR, t->name); kfree(t); return -EINVAL; } - t->proc_ent->read_proc = proc_thread_read; - t->proc_ent->write_proc = proc_thread_write; - t->proc_ent->data = (void*)(t); - t->proc_ent->owner = THIS_MODULE; + + pe->proc_fops = &pktgen_thread_fops; + pe->data = t; t->next = pktgen_threads; pktgen_threads = t; @@ -3046,8 +2989,7 @@ static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *pkt_ /* Clean up proc file system */ - if (strlen(pkt_dev->fname)) - remove_proc_entry(pkt_dev->fname, NULL); + remove_proc_entry(pkt_dev->ifname, pg_proc_dir); if (pkt_dev->flows) vfree(pkt_dev->flows); @@ -3058,31 +3000,31 @@ static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *pkt_ static int __init pg_init(void) { int cpu; + struct proc_dir_entry *pe; + printk(version); - module_fname[0] = 0; + pg_proc_dir = proc_mkdir(PG_PROC_DIR, proc_net); + if (!pg_proc_dir) + return -ENODEV; + pg_proc_dir->owner = THIS_MODULE; - create_proc_dir(); - - sprintf(module_fname, "net/%s/pgctrl", PG_PROC_DIR); - module_proc_ent = create_proc_entry(module_fname, 0600, NULL); - if (!module_proc_ent) { - printk("pktgen: ERROR: cannot create %s procfs entry.\n", module_fname); + pe = create_proc_entry(PGCTRL, 0600, pg_proc_dir); + if (pe == NULL) { + printk("pktgen: ERROR: cannot create %s procfs entry.\n", PGCTRL); + proc_net_remove(PG_PROC_DIR); return -EINVAL; } - module_proc_ent->proc_fops = &pktgen_fops; - module_proc_ent->data = NULL; + pe->proc_fops = &pktgen_fops; + pe->data = NULL; /* Register us to receive netdevice events */ register_netdevice_notifier(&pktgen_notifier_block); - for (cpu = 0; cpu < NR_CPUS ; cpu++) { + for_each_online_cpu(cpu) { char buf[30]; - if (!cpu_online(cpu)) - continue; - sprintf(buf, "kpktgend_%i", cpu); 
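Most of the pktgen churn above is a mechanical conversion from hand-rolled read_proc/write_proc handlers to the seq_file interface. A stripped-down sketch of that pattern follows, using the same 2.6-era procfs/seq_file calls the patch itself relies on (create_proc_entry(), PDE(), single_open(), seq_read()); the example_* names and the sample entry are assumptions for illustration, not code from this patch, and the snippet is kernel-side code rather than a standalone program.

/*
 * Sketch of the create_proc_entry() + single_open() pattern that the
 * pktgen hunks above switch to.  One show() callback renders the whole
 * file; writes reach the per-entry object via seq->private.
 */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>

struct example_dev {
	char result[64];
};

static int example_show(struct seq_file *seq, void *v)
{
	struct example_dev *ed = seq->private;   /* set by single_open() */

	seq_printf(seq, "Result: %s\n", ed->result);
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	/* PDE(inode)->data carries the per-entry pointer, as in pktgen */
	return single_open(file, example_show, PDE(inode)->data);
}

static ssize_t example_write(struct file *file, const char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct example_dev *ed = seq->private;

	if (count >= sizeof(ed->result))
		count = sizeof(ed->result) - 1;
	if (copy_from_user(ed->result, ubuf, count))
		return -EFAULT;
	ed->result[count] = '\0';
	return count;
}

static struct file_operations example_fops = {
	.owner   = THIS_MODULE,
	.open    = example_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = example_write,
	.release = single_release,
};

static struct example_dev edev = { .result = "Idle" };

static int example_register(struct proc_dir_entry *parent)
{
	struct proc_dir_entry *pe = create_proc_entry("example", 0600, parent);

	if (!pe)
		return -ENOMEM;
	pe->proc_fops = &example_fops;
	pe->data = &edev;            /* becomes PDE(inode)->data above */
	return 0;
}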
pktgen_create_thread(buf, cpu); } @@ -3107,10 +3049,8 @@ static void __exit pg_cleanup(void) unregister_netdevice_notifier(&pktgen_notifier_block); /* Clean up proc file system */ - - remove_proc_entry(module_fname, NULL); - - remove_proc_dir(); + remove_proc_entry(PGCTRL, pg_proc_dir); + proc_net_remove(PG_PROC_DIR); } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index f80a28785610..ef9d46b91eb9 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -71,8 +71,6 @@ static kmem_cache_t *skbuff_head_cache __read_mostly; static kmem_cache_t *skbuff_fclone_cache __read_mostly; -struct timeval __read_mostly skb_tv_base; - /* * Keep out-of-line to prevent kernel bloat. * __builtin_return_address is not used because it is not always @@ -124,6 +122,8 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here) * __alloc_skb - allocate a network buffer * @size: size to allocate * @gfp_mask: allocation mask + * @fclone: allocate from fclone cache instead of head cache + * and allocate a cloned (child) skb * * Allocate a new &sk_buff. The returned buffer has no headroom and a * tail room of size bytes. The object has a reference count of one. @@ -132,7 +132,7 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here) * Buffers may only be allocated from interrupts using a @gfp_mask of * %GFP_ATOMIC. */ -struct sk_buff *__alloc_skb(unsigned int size, unsigned int __nocast gfp_mask, +struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, int fclone) { struct sk_buff *skb; @@ -200,7 +200,7 @@ nodata: */ struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, unsigned int size, - unsigned int __nocast gfp_mask) + gfp_t gfp_mask) { struct sk_buff *skb; u8 *data; @@ -363,7 +363,7 @@ void __kfree_skb(struct sk_buff *skb) * %GFP_ATOMIC. */ -struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask) +struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) { struct sk_buff *n; @@ -412,6 +412,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask) C(nfct); nf_conntrack_get(skb->nfct); C(nfctinfo); +#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) + C(ipvs_property); +#endif #ifdef CONFIG_BRIDGE_NETFILTER C(nf_bridge); nf_bridge_get(skb->nf_bridge); @@ -469,6 +472,9 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) new->nfct = old->nfct; nf_conntrack_get(old->nfct); new->nfctinfo = old->nfctinfo; +#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) + new->ipvs_property = old->ipvs_property; +#endif #ifdef CONFIG_BRIDGE_NETFILTER new->nf_bridge = old->nf_bridge; nf_bridge_get(old->nf_bridge); @@ -502,7 +508,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) * header is going to be modified. Use pskb_copy() instead. */ -struct sk_buff *skb_copy(const struct sk_buff *skb, unsigned int __nocast gfp_mask) +struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) { int headerlen = skb->data - skb->head; /* @@ -541,7 +547,7 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, unsigned int __nocast gfp_ma * The returned buffer has a reference count of 1. 
*/ -struct sk_buff *pskb_copy(struct sk_buff *skb, unsigned int __nocast gfp_mask) +struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) { /* * Allocate the copy buffer @@ -600,7 +606,7 @@ out: */ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, - unsigned int __nocast gfp_mask) + gfp_t gfp_mask) { int i; u8 *data; @@ -691,7 +697,7 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) */ struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, int newtailroom, - unsigned int __nocast gfp_mask) + gfp_t gfp_mask) { /* * Allocate the copy buffer @@ -1708,8 +1714,6 @@ void __init skb_init(void) NULL, NULL); if (!skbuff_fclone_cache) panic("cannot create skbuff cache"); - - do_gettimeofday(&skb_tv_base); } EXPORT_SYMBOL(___pskb_trim); @@ -1743,4 +1747,3 @@ EXPORT_SYMBOL(skb_prepare_seq_read); EXPORT_SYMBOL(skb_seq_read); EXPORT_SYMBOL(skb_abort_seq_read); EXPORT_SYMBOL(skb_find_text); -EXPORT_SYMBOL(skb_tv_base); diff --git a/net/core/sock.c b/net/core/sock.c index ac63b56e23b2..9602ceb3bac9 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -637,7 +637,7 @@ lenout: * @prot: struct proto associated with this new sock instance * @zero_it: if we should zero the newly allocated sock */ -struct sock *sk_alloc(int family, unsigned int __nocast priority, +struct sock *sk_alloc(int family, gfp_t priority, struct proto *prot, int zero_it) { struct sock *sk = NULL; @@ -660,16 +660,20 @@ struct sock *sk_alloc(int family, unsigned int __nocast priority, sock_lock_init(sk); } - if (security_sk_alloc(sk, family, priority)) { - if (slab != NULL) - kmem_cache_free(slab, sk); - else - kfree(sk); - sk = NULL; - } else - __module_get(prot->owner); + if (security_sk_alloc(sk, family, priority)) + goto out_free; + + if (!try_module_get(prot->owner)) + goto out_free; } return sk; + +out_free: + if (slab != NULL) + kmem_cache_free(slab, sk); + else + kfree(sk); + return NULL; } void sk_free(struct sock *sk) @@ -700,7 +704,7 @@ void sk_free(struct sock *sk) module_put(owner); } -struct sock *sk_clone(const struct sock *sk, const unsigned int __nocast priority) +struct sock *sk_clone(const struct sock *sk, const gfp_t priority) { struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0); @@ -841,7 +845,7 @@ unsigned long sock_i_ino(struct sock *sk) * Allocate a skb from the socket's send buffer. */ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, - unsigned int __nocast priority) + gfp_t priority) { if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { struct sk_buff * skb = alloc_skb(size, priority); @@ -857,7 +861,7 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, * Allocate a skb from the socket's receive buffer. */ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, - unsigned int __nocast priority) + gfp_t priority) { if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { struct sk_buff *skb = alloc_skb(size, priority); @@ -872,7 +876,7 @@ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, /* * Allocate a memory block from the socket's option memory buffer. 
*/ -void *sock_kmalloc(struct sock *sk, int size, unsigned int __nocast priority) +void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) { if ((unsigned)size <= sysctl_optmem_max && atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) { @@ -936,7 +940,7 @@ static struct sk_buff *sock_alloc_send_pskb(struct sock *sk, int noblock, int *errcode) { struct sk_buff *skb; - unsigned int gfp_mask; + gfp_t gfp_mask; long timeo; int err; diff --git a/net/core/wireless.c b/net/core/wireless.c index d17f1583ea3e..271ddb35b0b2 100644 --- a/net/core/wireless.c +++ b/net/core/wireless.c @@ -455,10 +455,15 @@ static inline struct iw_statistics *get_wireless_stats(struct net_device *dev) /* Old location, field to be removed in next WE */ if(dev->get_wireless_stats) { - printk(KERN_DEBUG "%s (WE) : Driver using old /proc/net/wireless support, please fix driver !\n", - dev->name); + static int printed_message; + + if (!printed_message++) + printk(KERN_DEBUG "%s (WE) : Driver using old /proc/net/wireless support, please fix driver !\n", + dev->name); + return dev->get_wireless_stats(dev); } + /* Not found */ return (struct iw_statistics *) NULL; } diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c index 6530283eafca..c9a62cca22fc 100644 --- a/net/dccp/ackvec.c +++ b/net/dccp/ackvec.c @@ -91,7 +91,7 @@ int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb) } struct dccp_ackvec *dccp_ackvec_alloc(const unsigned int len, - const unsigned int __nocast priority) + const gfp_t priority) { struct dccp_ackvec *av = kmalloc(sizeof(*av) + len, priority); diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h index 8ca51c9191f7..d0fd6c60c574 100644 --- a/net/dccp/ackvec.h +++ b/net/dccp/ackvec.h @@ -74,7 +74,7 @@ struct sk_buff; #ifdef CONFIG_IP_DCCP_ACKVEC extern struct dccp_ackvec *dccp_ackvec_alloc(unsigned int len, - const unsigned int __nocast priority); + const gfp_t priority); extern void dccp_ackvec_free(struct dccp_ackvec *av); extern int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk, @@ -93,7 +93,7 @@ static inline int dccp_ackvec_pending(const struct dccp_ackvec *av) } #else /* CONFIG_IP_DCCP_ACKVEC */ static inline struct dccp_ackvec *dccp_ackvec_alloc(unsigned int len, - const unsigned int __nocast priority) + const gfp_t priority) { return NULL; } diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h index 21e55142dcd3..c37eeeaf5c6e 100644 --- a/net/dccp/ccid.h +++ b/net/dccp/ccid.h @@ -110,14 +110,14 @@ static inline int ccid_hc_tx_init(struct ccid *ccid, struct sock *sk) static inline void ccid_hc_rx_exit(struct ccid *ccid, struct sock *sk) { - if (ccid->ccid_hc_rx_exit != NULL && + if (ccid != NULL && ccid->ccid_hc_rx_exit != NULL && dccp_sk(sk)->dccps_hc_rx_ccid_private != NULL) ccid->ccid_hc_rx_exit(sk); } static inline void ccid_hc_tx_exit(struct ccid *ccid, struct sock *sk) { - if (ccid->ccid_hc_tx_exit != NULL && + if (ccid != NULL && ccid->ccid_hc_tx_exit != NULL && dccp_sk(sk)->dccps_hc_tx_ccid_private != NULL) ccid->ccid_hc_tx_exit(sk); } diff --git a/net/dccp/ccids/lib/loss_interval.h b/net/dccp/ccids/lib/loss_interval.h index 13ad47ba1420..417d9d82df3e 100644 --- a/net/dccp/ccids/lib/loss_interval.h +++ b/net/dccp/ccids/lib/loss_interval.h @@ -36,7 +36,7 @@ struct dccp_li_hist_entry { static inline struct dccp_li_hist_entry * dccp_li_hist_entry_new(struct dccp_li_hist *hist, - const unsigned int __nocast prio) + const gfp_t prio) { return kmem_cache_alloc(hist->dccplih_slab, prio); } diff --git a/net/dccp/ccids/lib/packet_history.h 
b/net/dccp/ccids/lib/packet_history.h index b375ebdb7dcf..122e96737ff6 100644 --- a/net/dccp/ccids/lib/packet_history.h +++ b/net/dccp/ccids/lib/packet_history.h @@ -86,7 +86,7 @@ extern struct dccp_rx_hist_entry * static inline struct dccp_tx_hist_entry * dccp_tx_hist_entry_new(struct dccp_tx_hist *hist, - const unsigned int __nocast prio) + const gfp_t prio) { struct dccp_tx_hist_entry *entry = kmem_cache_alloc(hist->dccptxh_slab, prio); @@ -137,7 +137,7 @@ static inline struct dccp_rx_hist_entry * const struct sock *sk, const u32 ndp, const struct sk_buff *skb, - const unsigned int __nocast prio) + const gfp_t prio) { struct dccp_rx_hist_entry *entry = kmem_cache_alloc(hist->dccprxh_slab, prio); diff --git a/net/dccp/input.c b/net/dccp/input.c index 1b6b2cb12376..3454d5941900 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c @@ -375,6 +375,9 @@ static int dccp_rcv_respond_partopen_state_process(struct sock *sk, case DCCP_PKT_RESET: inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); break; + case DCCP_PKT_DATA: + if (sk->sk_state == DCCP_RESPOND) + break; case DCCP_PKT_DATAACK: case DCCP_PKT_ACK: /* @@ -393,7 +396,8 @@ static int dccp_rcv_respond_partopen_state_process(struct sock *sk, dccp_sk(sk)->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq; dccp_set_state(sk, DCCP_OPEN); - if (dh->dccph_type == DCCP_PKT_DATAACK) { + if (dh->dccph_type == DCCP_PKT_DATAACK || + dh->dccph_type == DCCP_PKT_DATA) { dccp_rcv_established(sk, skb, dh, len); queued = 1; /* packet was queued (by dccp_rcv_established) */ diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 40fe6afacde6..6298cf58ff9e 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -62,27 +62,27 @@ static int __dccp_v4_check_established(struct sock *sk, const __u16 lport, const int dif = sk->sk_bound_dev_if; INET_ADDR_COOKIE(acookie, saddr, daddr) const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport); - const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, - dccp_hashinfo.ehash_size); - struct inet_ehash_bucket *head = &dccp_hashinfo.ehash[hash]; + unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport); + struct inet_ehash_bucket *head = inet_ehash_bucket(&dccp_hashinfo, hash); const struct sock *sk2; const struct hlist_node *node; struct inet_timewait_sock *tw; + prefetch(head->chain.first); write_lock(&head->lock); /* Check TIME-WAIT sockets first. */ sk_for_each(sk2, node, &(head + dccp_hashinfo.ehash_size)->chain) { tw = inet_twsk(sk2); - if (INET_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) + if (INET_TW_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) goto not_unique; } tw = NULL; /* And established part... */ sk_for_each(sk2, node, &head->chain) { - if (INET_MATCH(sk2, acookie, saddr, daddr, ports, dif)) + if (INET_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) goto not_unique; } @@ -90,7 +90,7 @@ static int __dccp_v4_check_established(struct sock *sk, const __u16 lport, * in hash table socket with a funny identity. 
*/ inet->num = lport; inet->sport = htons(lport); - sk->sk_hashent = hash; + sk->sk_hash = hash; BUG_TRAP(sk_unhashed(sk)); __sk_add_node(sk, &head->chain); sock_prot_inc_use(sk->sk_prot); @@ -463,6 +463,7 @@ static int dccp_v4_send_response(struct sock *sk, struct request_sock *req, if (skb != NULL) { const struct inet_request_sock *ireq = inet_rsk(req); + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, ireq->rmt_addr, ireq->opt); @@ -647,6 +648,7 @@ int dccp_v4_send_reset(struct sock *sk, enum dccp_reset_codes code) if (skb != NULL) { const struct inet_sock *inet = inet_sk(sk); + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); err = ip_build_and_send_pkt(skb, sk, inet->saddr, inet->daddr, NULL); if (err == NET_XMIT_CN) diff --git a/net/dccp/output.c b/net/dccp/output.c index 4786bdcddcc9..d59f86f7ceab 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c @@ -62,10 +62,8 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) skb->h.raw = skb_push(skb, dccp_header_size); dh = dccp_hdr(skb); - /* - * Data packets are not cloned as they are never retransmitted - */ - if (skb_cloned(skb)) + + if (!skb->sk) skb_set_owner_w(skb, sk); /* Build DCCP header and checksum it. */ @@ -102,6 +100,7 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) DCCP_INC_STATS(DCCP_MIB_OUTSEGS); + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); err = ip_queue_xmit(skb, 0); if (err <= 0) return err; @@ -243,7 +242,8 @@ int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, long *timeo) err = dccp_transmit_skb(sk, skb); ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len); - } + } else + kfree_skb(skb); return err; } @@ -495,7 +495,7 @@ void dccp_send_close(struct sock *sk, const int active) { struct dccp_sock *dp = dccp_sk(sk); struct sk_buff *skb; - const unsigned int prio = active ? GFP_KERNEL : GFP_ATOMIC; + const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC; skb = alloc_skb(sk->sk_prot->max_header, prio); if (skb == NULL) diff --git a/net/dccp/proto.c b/net/dccp/proto.c index a1cfd0e9e3bc..a021c3422f67 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -402,8 +402,6 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, * This bug was _quickly_ found & fixed by just looking at an OSTRA * generated callgraph 8) -acme */ - if (rc != 0) - goto out_discard; out_release: release_sock(sk); return rc ? : len; diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 348f36b529f7..3f25cadccddd 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -452,7 +452,7 @@ static struct proto dn_proto = { .obj_size = sizeof(struct dn_sock), }; -static struct sock *dn_alloc_sock(struct socket *sock, int gfp) +static struct sock *dn_alloc_sock(struct socket *sock, gfp_t gfp) { struct dn_scp *scp; struct sock *sk = sk_alloc(PF_DECnet, gfp, &dn_proto, 1); @@ -719,22 +719,9 @@ static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) if (saddr->sdn_flags & ~SDF_WILD) return -EINVAL; -#if 1 if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum || (saddr->sdn_flags & SDF_WILD))) return -EACCES; -#else - /* - * Maybe put the default actions in the default security ops for - * dn_prot_sock ? Would be nice if the capable call would go there - * too. 
- */ - if (security_dn_prot_sock(saddr) && - !capable(CAP_NET_BIND_SERVICE) || - saddr->sdn_objnum || (saddr->sdn_flags & SDF_WILD)) - return -EACCES; -#endif - if (!(saddr->sdn_flags & SDF_WILD)) { if (dn_ntohs(saddr->sdn_nodeaddrl)) { @@ -804,7 +791,7 @@ static int dn_auto_bind(struct socket *sock) return rv; } -static int dn_confirm_accept(struct sock *sk, long *timeo, int allocation) +static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) { struct dn_scp *scp = DN_SK(sk); DEFINE_WAIT(wait); diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c index 53633d352868..c96c767b1f74 100644 --- a/net/decnet/dn_nsp_out.c +++ b/net/decnet/dn_nsp_out.c @@ -117,7 +117,7 @@ try_again: * The eventual aim is for each socket to have a cached header size * for its outgoing packets, and to set hdr from this when sk != NULL. */ -struct sk_buff *dn_alloc_skb(struct sock *sk, int size, int pri) +struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri) { struct sk_buff *skb; int hdr = 64; @@ -210,7 +210,8 @@ static void dn_nsp_rtt(struct sock *sk, long rtt) * * Returns: The number of times the packet has been sent previously */ -static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb, int gfp) +static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb, + gfp_t gfp) { struct dn_skb_cb *cb = DN_SKB_CB(skb); struct sk_buff *skb2; @@ -350,7 +351,8 @@ static unsigned short *dn_nsp_mk_data_header(struct sock *sk, struct sk_buff *sk return ptr; } -void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, int gfp, int oth) +void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, + gfp_t gfp, int oth) { struct dn_scp *scp = DN_SK(sk); struct dn_skb_cb *cb = DN_SKB_CB(skb); @@ -517,7 +519,7 @@ static int dn_nsp_retrans_conn_conf(struct sock *sk) return 0; } -void dn_send_conn_conf(struct sock *sk, int gfp) +void dn_send_conn_conf(struct sock *sk, gfp_t gfp) { struct dn_scp *scp = DN_SK(sk); struct sk_buff *skb = NULL; @@ -549,7 +551,8 @@ void dn_send_conn_conf(struct sock *sk, int gfp) static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg, - unsigned short reason, int gfp, struct dst_entry *dst, + unsigned short reason, gfp_t gfp, + struct dst_entry *dst, int ddl, unsigned char *dd, __u16 rem, __u16 loc) { struct sk_buff *skb = NULL; @@ -591,7 +594,7 @@ static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg, void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg, - unsigned short reason, int gfp) + unsigned short reason, gfp_t gfp) { struct dn_scp *scp = DN_SK(sk); int ddl = 0; @@ -612,7 +615,7 @@ void dn_nsp_return_disc(struct sk_buff *skb, unsigned char msgflg, { struct dn_skb_cb *cb = DN_SKB_CB(skb); int ddl = 0; - int gfp = GFP_ATOMIC; + gfp_t gfp = GFP_ATOMIC; dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb->dst, ddl, NULL, cb->src_port, cb->dst_port); @@ -624,7 +627,7 @@ void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval) struct dn_scp *scp = DN_SK(sk); struct sk_buff *skb; unsigned char *ptr; - int gfp = GFP_ATOMIC; + gfp_t gfp = GFP_ATOMIC; if ((skb = dn_alloc_skb(sk, DN_MAX_NSP_DATA_HEADER + 2, gfp)) == NULL) return; @@ -659,7 +662,7 @@ void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg) unsigned char menuver; struct dn_skb_cb *cb; unsigned char type = 1; - int allocation = (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC; + gfp_t allocation = (msgflg == NSP_CI) ? 
sk->sk_allocation : GFP_ATOMIC; struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation); if (!skb) diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index 4a62093eb343..34fdac51df96 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c @@ -406,7 +406,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, unsigned long network = 0; rcu_read_lock(); - idev = __in_dev_get(dev); + idev = __in_dev_get_rcu(dev); if (idev) { if (idev->ifa_list) network = ntohl(idev->ifa_list->ifa_address) & diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 87a052a9a84f..68a5ca866442 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -146,6 +146,19 @@ int eth_rebuild_header(struct sk_buff *skb) return 0; } +static inline unsigned int compare_eth_addr(const unsigned char *__a, const unsigned char *__b) +{ + const unsigned short *dest = (unsigned short *) __a; + const unsigned short *devaddr = (unsigned short *) __b; + unsigned int res; + + BUILD_BUG_ON(ETH_ALEN != 6); + res = ((dest[0] ^ devaddr[0]) | + (dest[1] ^ devaddr[1]) | + (dest[2] ^ devaddr[2])) != 0; + + return res; +} /* * Determine the packet's protocol ID. The rule here is that we @@ -158,16 +171,15 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev) struct ethhdr *eth; unsigned char *rawp; - skb->mac.raw=skb->data; + skb->mac.raw = skb->data; skb_pull(skb,ETH_HLEN); eth = eth_hdr(skb); - if(*eth->h_dest&1) - { - if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0) - skb->pkt_type=PACKET_BROADCAST; + if (*eth->h_dest&1) { + if (!compare_eth_addr(eth->h_dest, dev->broadcast)) + skb->pkt_type = PACKET_BROADCAST; else - skb->pkt_type=PACKET_MULTICAST; + skb->pkt_type = PACKET_MULTICAST; } /* @@ -178,10 +190,9 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev) * seems to set IFF_PROMISC. 
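
The compare_eth_addr() helper added to eth.c above replaces two memcmp() calls on the hot eth_type_trans() path: it treats the 6-byte address as three 16-bit words and ORs together the XOR of each pair, so the result is zero exactly when the addresses match. A standalone sketch of the same trick (memcpy() is used here to sidestep the 2-byte alignment assumption the in-kernel version can make about device and packet buffers):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Non-zero if the two 6-byte addresses differ, zero if they match,
 * mirroring the word-wise comparison in compare_eth_addr(). */
static unsigned int mac_differs(const unsigned char *a, const unsigned char *b)
{
        uint16_t wa[3], wb[3];

        memcpy(wa, a, 6);
        memcpy(wb, b, 6);

        return ((wa[0] ^ wb[0]) | (wa[1] ^ wb[1]) | (wa[2] ^ wb[2])) != 0;
}

int main(void)
{
        unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        unsigned char dest[6]  = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe };

        printf("differs: %u\n", mac_differs(dest, bcast));   /* 1 */
        printf("differs: %u\n", mac_differs(bcast, bcast));  /* 0 */
        return 0;
}
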
*/ - else if(1 /*dev->flags&IFF_PROMISC*/) - { - if(memcmp(eth->h_dest,dev->dev_addr, ETH_ALEN)) - skb->pkt_type=PACKET_OTHERHOST; + else if(1 /*dev->flags&IFF_PROMISC*/) { + if (unlikely(compare_eth_addr(eth->h_dest, dev->dev_addr))) + skb->pkt_type = PACKET_OTHERHOST; } if (ntohs(eth->h_proto) >= 1536) diff --git a/net/ieee80211/Makefile b/net/ieee80211/Makefile index a6ccac5baea8..f988417121da 100644 --- a/net/ieee80211/Makefile +++ b/net/ieee80211/Makefile @@ -7,5 +7,6 @@ ieee80211-objs := \ ieee80211_module.o \ ieee80211_tx.o \ ieee80211_rx.o \ - ieee80211_wx.o + ieee80211_wx.o \ + ieee80211_geo.o diff --git a/net/ieee80211/ieee80211_crypt.c b/net/ieee80211/ieee80211_crypt.c index 61a9d92e455b..f3b6aa3be638 100644 --- a/net/ieee80211/ieee80211_crypt.c +++ b/net/ieee80211/ieee80211_crypt.c @@ -41,6 +41,12 @@ void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force) { struct list_head *ptr, *n; struct ieee80211_crypt_data *entry; + unsigned long flags; + + spin_lock_irqsave(&ieee->lock, flags); + + if (list_empty(&ieee->crypt_deinit_list)) + goto unlock; for (ptr = ieee->crypt_deinit_list.next, n = ptr->next; ptr != &ieee->crypt_deinit_list; ptr = n, n = ptr->next) { @@ -57,6 +63,18 @@ void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, int force) } kfree(entry); } + unlock: + spin_unlock_irqrestore(&ieee->lock, flags); +} + +/* After this, crypt_deinit_list won't accept new members */ +void ieee80211_crypt_quiescing(struct ieee80211_device *ieee) +{ + unsigned long flags; + + spin_lock_irqsave(&ieee->lock, flags); + ieee->crypt_quiesced = 1; + spin_unlock_irqrestore(&ieee->lock, flags); } void ieee80211_crypt_deinit_handler(unsigned long data) @@ -64,16 +82,16 @@ void ieee80211_crypt_deinit_handler(unsigned long data) struct ieee80211_device *ieee = (struct ieee80211_device *)data; unsigned long flags; - spin_lock_irqsave(&ieee->lock, flags); ieee80211_crypt_deinit_entries(ieee, 0); - if (!list_empty(&ieee->crypt_deinit_list)) { + + spin_lock_irqsave(&ieee->lock, flags); + if (!list_empty(&ieee->crypt_deinit_list) && !ieee->crypt_quiesced) { printk(KERN_DEBUG "%s: entries remaining in delayed crypt " "deletion list\n", ieee->dev->name); ieee->crypt_deinit_timer.expires = jiffies + HZ; add_timer(&ieee->crypt_deinit_timer); } spin_unlock_irqrestore(&ieee->lock, flags); - } void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee, @@ -93,10 +111,12 @@ void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee, * locking. 
*/ spin_lock_irqsave(&ieee->lock, flags); - list_add(&tmp->list, &ieee->crypt_deinit_list); - if (!timer_pending(&ieee->crypt_deinit_timer)) { - ieee->crypt_deinit_timer.expires = jiffies + HZ; - add_timer(&ieee->crypt_deinit_timer); + if (!ieee->crypt_quiesced) { + list_add(&tmp->list, &ieee->crypt_deinit_list); + if (!timer_pending(&ieee->crypt_deinit_timer)) { + ieee->crypt_deinit_timer.expires = jiffies + HZ; + add_timer(&ieee->crypt_deinit_timer); + } } spin_unlock_irqrestore(&ieee->lock, flags); } @@ -191,18 +211,18 @@ static void ieee80211_crypt_null_deinit(void *priv) } static struct ieee80211_crypto_ops ieee80211_crypt_null = { - .name = "NULL", - .init = ieee80211_crypt_null_init, - .deinit = ieee80211_crypt_null_deinit, - .encrypt_mpdu = NULL, - .decrypt_mpdu = NULL, - .encrypt_msdu = NULL, - .decrypt_msdu = NULL, - .set_key = NULL, - .get_key = NULL, - .extra_prefix_len = 0, - .extra_postfix_len = 0, - .owner = THIS_MODULE, + .name = "NULL", + .init = ieee80211_crypt_null_init, + .deinit = ieee80211_crypt_null_deinit, + .encrypt_mpdu = NULL, + .decrypt_mpdu = NULL, + .encrypt_msdu = NULL, + .decrypt_msdu = NULL, + .set_key = NULL, + .get_key = NULL, + .extra_mpdu_prefix_len = 0, + .extra_mpdu_postfix_len = 0, + .owner = THIS_MODULE, }; static int __init ieee80211_crypto_init(void) @@ -249,6 +269,7 @@ static void __exit ieee80211_crypto_deinit(void) EXPORT_SYMBOL(ieee80211_crypt_deinit_entries); EXPORT_SYMBOL(ieee80211_crypt_deinit_handler); EXPORT_SYMBOL(ieee80211_crypt_delayed_deinit); +EXPORT_SYMBOL(ieee80211_crypt_quiescing); EXPORT_SYMBOL(ieee80211_register_crypto_ops); EXPORT_SYMBOL(ieee80211_unregister_crypto_ops); diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c index 8fc13f45971e..05a853c13012 100644 --- a/net/ieee80211/ieee80211_crypt_ccmp.c +++ b/net/ieee80211/ieee80211_crypt_ccmp.c @@ -119,7 +119,7 @@ static inline void xor_block(u8 * b, u8 * a, size_t len) } static void ccmp_init_blocks(struct crypto_tfm *tfm, - struct ieee80211_hdr *hdr, + struct ieee80211_hdr_4addr *hdr, u8 * pn, size_t dlen, u8 * b0, u8 * auth, u8 * s0) { u8 *pos, qc = 0; @@ -191,26 +191,18 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm, ieee80211_ccmp_aes_encrypt(tfm, b0, s0); } -static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) +static int ieee80211_ccmp_hdr(struct sk_buff *skb, int hdr_len, void *priv) { struct ieee80211_ccmp_data *key = priv; - int data_len, i, blocks, last, len; - u8 *pos, *mic; - struct ieee80211_hdr *hdr; - u8 *b0 = key->tx_b0; - u8 *b = key->tx_b; - u8 *e = key->tx_e; - u8 *s0 = key->tx_s0; + int i; + u8 *pos; - if (skb_headroom(skb) < CCMP_HDR_LEN || - skb_tailroom(skb) < CCMP_MIC_LEN || skb->len < hdr_len) + if (skb_headroom(skb) < CCMP_HDR_LEN || skb->len < hdr_len) return -1; - data_len = skb->len - hdr_len; pos = skb_push(skb, CCMP_HDR_LEN); memmove(pos, pos + CCMP_HDR_LEN, hdr_len); pos += hdr_len; - mic = skb_put(skb, CCMP_MIC_LEN); i = CCMP_PN_LEN - 1; while (i >= 0) { @@ -229,7 +221,31 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) *pos++ = key->tx_pn[1]; *pos++ = key->tx_pn[0]; - hdr = (struct ieee80211_hdr *)skb->data; + return CCMP_HDR_LEN; +} + +static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) +{ + struct ieee80211_ccmp_data *key = priv; + int data_len, i, blocks, last, len; + u8 *pos, *mic; + struct ieee80211_hdr_4addr *hdr; + u8 *b0 = key->tx_b0; + u8 *b = key->tx_b; + u8 *e = key->tx_e; + u8 *s0 = key->tx_s0; + + 
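
The crypt_deinit changes above (ieee80211_crypt_quiescing(), the locking added to ieee80211_crypt_deinit_entries(), and the guard in ieee80211_crypt_delayed_deinit()) close a teardown race: once quiescing starts, the delayed-deletion list refuses new entries and the cleanup timer is no longer re-armed, so the teardown path can delete the timer and drain the list once (see the free_ieee80211() hunk further down). A single-threaded sketch of that gating, with the kernel list, timer and locking machinery reduced to the bare idea; names are illustrative, not kernel API:

#include <stdio.h>
#include <stdlib.h>

struct deferred {
        struct deferred *next;
};

static struct deferred *deinit_list;
static int quiesced;

/* Queue an object for delayed deletion -- refused once shutdown began. */
static int delayed_deinit(struct deferred *obj)
{
        if (!obj || quiesced)
                return -1;      /* caller must clean up synchronously */
        obj->next = deinit_list;
        deinit_list = obj;
        return 0;
}

/* Timer / teardown path: drain whatever is queued. */
static void deinit_entries(void)
{
        while (deinit_list) {
                struct deferred *e = deinit_list;

                deinit_list = e->next;
                free(e);
        }
}

int main(void)
{
        struct deferred *late;

        delayed_deinit(malloc(sizeof(struct deferred)));

        quiesced = 1;                   /* ieee80211_crypt_quiescing()    */

        late = malloc(sizeof(struct deferred));
        if (delayed_deinit(late) < 0)
                free(late);             /* list no longer accepts entries */

        deinit_entries();               /* final drain at teardown        */
        return 0;
}
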
if (skb_tailroom(skb) < CCMP_MIC_LEN || skb->len < hdr_len) + return -1; + + data_len = skb->len - hdr_len; + len = ieee80211_ccmp_hdr(skb, hdr_len, priv); + if (len < 0) + return -1; + + pos = skb->data + hdr_len + CCMP_HDR_LEN; + mic = skb_put(skb, CCMP_MIC_LEN); + hdr = (struct ieee80211_hdr_4addr *)skb->data; ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0); blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; @@ -258,7 +274,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct ieee80211_ccmp_data *key = priv; u8 keyidx, *pos; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; u8 *b0 = key->rx_b0; u8 *b = key->rx_b; u8 *a = key->rx_a; @@ -272,7 +288,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) return -1; } - hdr = (struct ieee80211_hdr *)skb->data; + hdr = (struct ieee80211_hdr_4addr *)skb->data; pos = skb->data + hdr_len; keyidx = pos[3]; if (!(keyidx & (1 << 5))) { @@ -426,19 +442,20 @@ static char *ieee80211_ccmp_print_stats(char *p, void *priv) } static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = { - .name = "CCMP", - .init = ieee80211_ccmp_init, - .deinit = ieee80211_ccmp_deinit, - .encrypt_mpdu = ieee80211_ccmp_encrypt, - .decrypt_mpdu = ieee80211_ccmp_decrypt, - .encrypt_msdu = NULL, - .decrypt_msdu = NULL, - .set_key = ieee80211_ccmp_set_key, - .get_key = ieee80211_ccmp_get_key, - .print_stats = ieee80211_ccmp_print_stats, - .extra_prefix_len = CCMP_HDR_LEN, - .extra_postfix_len = CCMP_MIC_LEN, - .owner = THIS_MODULE, + .name = "CCMP", + .init = ieee80211_ccmp_init, + .deinit = ieee80211_ccmp_deinit, + .build_iv = ieee80211_ccmp_hdr, + .encrypt_mpdu = ieee80211_ccmp_encrypt, + .decrypt_mpdu = ieee80211_ccmp_decrypt, + .encrypt_msdu = NULL, + .decrypt_msdu = NULL, + .set_key = ieee80211_ccmp_set_key, + .get_key = ieee80211_ccmp_get_key, + .print_stats = ieee80211_ccmp_print_stats, + .extra_mpdu_prefix_len = CCMP_HDR_LEN, + .extra_mpdu_postfix_len = CCMP_MIC_LEN, + .owner = THIS_MODULE, }; static int __init ieee80211_crypto_ccmp_init(void) diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c index d4f9164be1a1..2e34f29b7956 100644 --- a/net/ieee80211/ieee80211_crypt_tkip.c +++ b/net/ieee80211/ieee80211_crypt_tkip.c @@ -59,8 +59,24 @@ struct ieee80211_tkip_data { /* scratch buffers for virt_to_page() (crypto API) */ u8 rx_hdr[16], tx_hdr[16]; + + unsigned long flags; }; +static unsigned long ieee80211_tkip_set_flags(unsigned long flags, void *priv) +{ + struct ieee80211_tkip_data *_priv = priv; + unsigned long old_flags = _priv->flags; + _priv->flags = flags; + return old_flags; +} + +static unsigned long ieee80211_tkip_get_flags(void *priv) +{ + struct ieee80211_tkip_data *_priv = priv; + return _priv->flags; +} + static void *ieee80211_tkip_init(int key_idx) { struct ieee80211_tkip_data *priv; @@ -69,6 +85,7 @@ static void *ieee80211_tkip_init(int key_idx) if (priv == NULL) goto fail; memset(priv, 0, sizeof(*priv)); + priv->key_idx = key_idx; priv->tfm_arc4 = crypto_alloc_tfm("arc4", 0); @@ -255,25 +272,27 @@ static void tkip_mixing_phase2(u8 * WEPSeed, const u8 * TK, const u16 * TTAK, #endif } -static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) +static u8 *ieee80211_tkip_hdr(struct sk_buff *skb, int hdr_len, void *priv) { struct ieee80211_tkip_data *tkey = priv; int len; - u8 rc4key[16], *pos, *icv; - struct ieee80211_hdr *hdr; + u8 *rc4key, *pos, *icv; + struct ieee80211_hdr_4addr *hdr; u32 crc; 
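
Splitting ieee80211_ccmp_encrypt() so that ieee80211_ccmp_hdr() (exported through the new build_iv hook) only builds the 8-byte CCMP header lets a driver that offloads the AES work still have the host build the IV (see the host_build_iv handling in the TX hunk below). The header holds the 48-bit packet number, a reserved byte and a key-id byte carrying the ExtIV bit; the carry loop visible above increments tx_pn[] starting at index 5, i.e. the array is kept most-significant byte first. A standalone sketch of the PN increment and the standard CCMP header packing under that byte-order assumption:

#include <stdint.h>
#include <stdio.h>

#define CCMP_PN_LEN 6

/* Increment a 48-bit packet number kept MSB-first in pn[0..5],
 * following the carry loop in ieee80211_ccmp_hdr(). */
static void pn_increment(uint8_t pn[CCMP_PN_LEN])
{
        int i = CCMP_PN_LEN - 1;

        while (i >= 0) {
                if (++pn[i] != 0)
                        break;          /* no carry, done               */
                i--;                    /* byte wrapped, carry upwards  */
        }
}

/* Pack the 8-byte CCMP header: PN0, PN1, reserved, key-id byte with
 * the ExtIV bit set, then PN2..PN5 (PN0 is the least significant byte). */
static void ccmp_pack_hdr(uint8_t hdr[8], const uint8_t pn[CCMP_PN_LEN],
                          unsigned int key_idx)
{
        hdr[0] = pn[5];
        hdr[1] = pn[4];
        hdr[2] = 0;
        hdr[3] = (uint8_t)((key_idx << 6) | (1 << 5));  /* ExtIV */
        hdr[4] = pn[3];
        hdr[5] = pn[2];
        hdr[6] = pn[1];
        hdr[7] = pn[0];
}

int main(void)
{
        uint8_t pn[CCMP_PN_LEN] = { 0, 0, 0, 0, 0, 0xff };
        uint8_t hdr[8];

        pn_increment(pn);               /* carries into pn[4] */
        ccmp_pack_hdr(hdr, pn, 1);

        for (int i = 0; i < 8; i++)
                printf("%02x ", hdr[i]);
        printf("\n");
        return 0;
}
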
- struct scatterlist sg; - if (skb_headroom(skb) < 8 || skb_tailroom(skb) < 4 || - skb->len < hdr_len) - return -1; + hdr = (struct ieee80211_hdr_4addr *)skb->data; + + if (skb_headroom(skb) < 8 || skb->len < hdr_len) + return NULL; - hdr = (struct ieee80211_hdr *)skb->data; if (!tkey->tx_phase1_done) { tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2, tkey->tx_iv32); tkey->tx_phase1_done = 1; } + rc4key = kmalloc(16, GFP_ATOMIC); + if (!rc4key) + return NULL; tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16); len = skb->len - hdr_len; @@ -282,9 +301,9 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) pos += hdr_len; icv = skb_put(skb, 4); - *pos++ = rc4key[0]; - *pos++ = rc4key[1]; - *pos++ = rc4key[2]; + *pos++ = *rc4key; + *pos++ = *(rc4key + 1); + *pos++ = *(rc4key + 2); *pos++ = (tkey->key_idx << 6) | (1 << 5) /* Ext IV included */ ; *pos++ = tkey->tx_iv32 & 0xff; *pos++ = (tkey->tx_iv32 >> 8) & 0xff; @@ -297,6 +316,38 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) icv[2] = crc >> 16; icv[3] = crc >> 24; + return rc4key; +} + +static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) +{ + struct ieee80211_tkip_data *tkey = priv; + int len; + const u8 *rc4key; + u8 *pos; + struct scatterlist sg; + + if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { + if (net_ratelimit()) { + struct ieee80211_hdr_4addr *hdr = + (struct ieee80211_hdr_4addr *)skb->data; + printk(KERN_DEBUG "TKIP countermeasures: dropped " + "TX packet to " MAC_FMT "\n", + MAC_ARG(hdr->addr1)); + } + return -1; + } + + if (skb_tailroom(skb) < 4 || skb->len < hdr_len) + return -1; + + len = skb->len - hdr_len; + pos = skb->data + hdr_len; + + rc4key = ieee80211_tkip_hdr(skb, hdr_len, priv); + if (!rc4key) + return -1; + crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16); sg.page = virt_to_page(pos); sg.offset = offset_in_page(pos); @@ -319,16 +370,26 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) u8 keyidx, *pos; u32 iv32; u16 iv16; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; u8 icv[4]; u32 crc; struct scatterlist sg; int plen; + hdr = (struct ieee80211_hdr_4addr *)skb->data; + + if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) { + if (net_ratelimit()) { + printk(KERN_DEBUG "TKIP countermeasures: dropped " + "received packet from " MAC_FMT "\n", + MAC_ARG(hdr->addr2)); + } + return -1; + } + if (skb->len < hdr_len + 8 + 4) return -1; - hdr = (struct ieee80211_hdr *)skb->data; pos = skb->data + hdr_len; keyidx = pos[3]; if (!(keyidx & (1 << 5))) { @@ -441,9 +502,9 @@ static int michael_mic(struct ieee80211_tkip_data *tkey, u8 * key, u8 * hdr, static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr) { - struct ieee80211_hdr *hdr11; + struct ieee80211_hdr_4addr *hdr11; - hdr11 = (struct ieee80211_hdr *)skb->data; + hdr11 = (struct ieee80211_hdr_4addr *)skb->data; switch (le16_to_cpu(hdr11->frame_ctl) & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) { case IEEE80211_FCTL_TODS: @@ -490,9 +551,9 @@ static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, return 0; } -#if WIRELESS_EXT >= 18 static void ieee80211_michael_mic_failure(struct net_device *dev, - struct ieee80211_hdr *hdr, int keyidx) + struct ieee80211_hdr_4addr *hdr, + int keyidx) { union iwreq_data wrqu; struct iw_michaelmicfailure ev; @@ -510,28 +571,6 @@ static void ieee80211_michael_mic_failure(struct net_device *dev, wrqu.data.length = sizeof(ev); 
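
TKIP, like WEP, protects each fragment with a 4-byte ICV: the inverted CRC-32 of the plaintext, stored least-significant byte first, which is what the icv[] byte assignments in the context above produce (the kernel uses crc32_le() for the CRC itself). A self-contained sketch of that ICV computation, with a bitwise CRC-32 standing in for crc32_le():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reflected CRC-32, polynomial 0xEDB88320, bit-at-a-time (no table).
 * Seeded with ~0 this matches the kernel's crc32_le(~0, buf, len). */
static uint32_t crc32_le_bitwise(uint32_t crc, const uint8_t *buf, size_t len)
{
        while (len--) {
                crc ^= *buf++;
                for (int bit = 0; bit < 8; bit++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
        }
        return crc;
}

/* Append a WEP/TKIP-style ICV: ~crc32_le(~0, data, len), LSB first. */
static void append_icv(const uint8_t *data, size_t len, uint8_t icv[4])
{
        uint32_t crc = ~crc32_le_bitwise(~0u, data, len);

        icv[0] = (uint8_t)crc;
        icv[1] = (uint8_t)(crc >> 8);
        icv[2] = (uint8_t)(crc >> 16);
        icv[3] = (uint8_t)(crc >> 24);
}

int main(void)
{
        const char *payload = "123456789";
        uint8_t icv[4];

        append_icv((const uint8_t *)payload, strlen(payload), icv);
        /* Expect 26 39 f4 cb: CRC-32("123456789") is 0xcbf43926. */
        printf("ICV: %02x %02x %02x %02x\n", icv[0], icv[1], icv[2], icv[3]);
        return 0;
}
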
wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *)&ev); } -#elif WIRELESS_EXT >= 15 -static void ieee80211_michael_mic_failure(struct net_device *dev, - struct ieee80211_hdr *hdr, int keyidx) -{ - union iwreq_data wrqu; - char buf[128]; - - /* TODO: needed parameters: count, keyid, key type, TSC */ - sprintf(buf, "MLME-MICHAELMICFAILURE.indication(keyid=%d %scast addr=" - MAC_FMT ")", keyidx, hdr->addr1[0] & 0x01 ? "broad" : "uni", - MAC_ARG(hdr->addr2)); - memset(&wrqu, 0, sizeof(wrqu)); - wrqu.data.length = strlen(buf); - wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf); -} -#else /* WIRELESS_EXT >= 15 */ -static inline void ieee80211_michael_mic_failure(struct net_device *dev, - struct ieee80211_hdr *hdr, - int keyidx) -{ -} -#endif /* WIRELESS_EXT >= 15 */ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx, int hdr_len, void *priv) @@ -547,8 +586,8 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx, skb->data + hdr_len, skb->len - 8 - hdr_len, mic)) return -1; if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) { - struct ieee80211_hdr *hdr; - hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_hdr_4addr *hdr; + hdr = (struct ieee80211_hdr_4addr *)skb->data; printk(KERN_DEBUG "%s: Michael MIC verification failed for " "MSDU from " MAC_FMT " keyidx=%d\n", skb->dev ? skb->dev->name : "N/A", MAC_ARG(hdr->addr2), @@ -654,19 +693,22 @@ static char *ieee80211_tkip_print_stats(char *p, void *priv) } static struct ieee80211_crypto_ops ieee80211_crypt_tkip = { - .name = "TKIP", - .init = ieee80211_tkip_init, - .deinit = ieee80211_tkip_deinit, - .encrypt_mpdu = ieee80211_tkip_encrypt, - .decrypt_mpdu = ieee80211_tkip_decrypt, - .encrypt_msdu = ieee80211_michael_mic_add, - .decrypt_msdu = ieee80211_michael_mic_verify, - .set_key = ieee80211_tkip_set_key, - .get_key = ieee80211_tkip_get_key, - .print_stats = ieee80211_tkip_print_stats, - .extra_prefix_len = 4 + 4, /* IV + ExtIV */ - .extra_postfix_len = 8 + 4, /* MIC + ICV */ - .owner = THIS_MODULE, + .name = "TKIP", + .init = ieee80211_tkip_init, + .deinit = ieee80211_tkip_deinit, + .encrypt_mpdu = ieee80211_tkip_encrypt, + .decrypt_mpdu = ieee80211_tkip_decrypt, + .encrypt_msdu = ieee80211_michael_mic_add, + .decrypt_msdu = ieee80211_michael_mic_verify, + .set_key = ieee80211_tkip_set_key, + .get_key = ieee80211_tkip_get_key, + .print_stats = ieee80211_tkip_print_stats, + .extra_mpdu_prefix_len = 4 + 4, /* IV + ExtIV */ + .extra_mpdu_postfix_len = 4, /* ICV */ + .extra_msdu_postfix_len = 8, /* MIC */ + .get_flags = ieee80211_tkip_get_flags, + .set_flags = ieee80211_tkip_set_flags, + .owner = THIS_MODULE, }; static int __init ieee80211_crypto_tkip_init(void) diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c index b4d2514a0902..7c08ed2f2628 100644 --- a/net/ieee80211/ieee80211_crypt_wep.c +++ b/net/ieee80211/ieee80211_crypt_wep.c @@ -229,19 +229,19 @@ static char *prism2_wep_print_stats(char *p, void *priv) } static struct ieee80211_crypto_ops ieee80211_crypt_wep = { - .name = "WEP", - .init = prism2_wep_init, - .deinit = prism2_wep_deinit, - .encrypt_mpdu = prism2_wep_encrypt, - .decrypt_mpdu = prism2_wep_decrypt, - .encrypt_msdu = NULL, - .decrypt_msdu = NULL, - .set_key = prism2_wep_set_key, - .get_key = prism2_wep_get_key, - .print_stats = prism2_wep_print_stats, - .extra_prefix_len = 4, /* IV */ - .extra_postfix_len = 4, /* ICV */ - .owner = THIS_MODULE, + .name = "WEP", + .init = prism2_wep_init, + .deinit = prism2_wep_deinit, + 
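
The extra_prefix_len/extra_postfix_len fields are split into MPDU and MSDU variants above because the two trailers behave differently under fragmentation: the 8-byte Michael MIC is computed once over the whole MSDU before fragmentation, while the 4+4-byte IV/ExtIV prefix and the 4-byte ICV are added to every MPDU fragment. A small arithmetic sketch of the resulting TKIP overhead, using the values from the ops table above:

#include <stdio.h>

/* TKIP sizes from the crypto ops table above. */
#define MPDU_PREFIX_LEN   (4 + 4)   /* IV + ExtIV, per fragment   */
#define MPDU_POSTFIX_LEN  4         /* ICV, per fragment          */
#define MSDU_POSTFIX_LEN  8         /* Michael MIC, once per MSDU */

/* Total crypto overhead for an MSDU that is sent as nr_frags MPDUs. */
static unsigned int tkip_overhead(unsigned int nr_frags)
{
        return MSDU_POSTFIX_LEN +
               nr_frags * (MPDU_PREFIX_LEN + MPDU_POSTFIX_LEN);
}

int main(void)
{
        /* An unfragmented frame pays 8 + 12 = 20 bytes; three fragments
         * pay 8 + 3 * 12 = 44 bytes. */
        printf("1 fragment : %u bytes\n", tkip_overhead(1));
        printf("3 fragments: %u bytes\n", tkip_overhead(3));
        return 0;
}
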
.encrypt_mpdu = prism2_wep_encrypt, + .decrypt_mpdu = prism2_wep_decrypt, + .encrypt_msdu = NULL, + .decrypt_msdu = NULL, + .set_key = prism2_wep_set_key, + .get_key = prism2_wep_get_key, + .print_stats = prism2_wep_print_stats, + .extra_mpdu_prefix_len = 4, /* IV */ + .extra_mpdu_postfix_len = 4, /* ICV */ + .owner = THIS_MODULE, }; static int __init ieee80211_crypto_wep_init(void) diff --git a/net/ieee80211/ieee80211_geo.c b/net/ieee80211/ieee80211_geo.c new file mode 100644 index 000000000000..c4b54ef8f6d5 --- /dev/null +++ b/net/ieee80211/ieee80211_geo.c @@ -0,0 +1,141 @@ +/****************************************************************************** + + Copyright(c) 2005 Intel Corporation. All rights reserved. + + This program is free software; you can redistribute it and/or modify it + under the terms of version 2 of the GNU General Public License as + published by the Free Software Foundation. + + This program is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., 59 + Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + The full GNU General Public License is included in this distribution in the + file called LICENSE. + + Contact Information: + James P. Ketrenos + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +******************************************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +int ieee80211_is_valid_channel(struct ieee80211_device *ieee, u8 channel) +{ + int i; + + /* Driver needs to initialize the geography map before using + * these helper functions */ + BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); + + if (ieee->freq_band & IEEE80211_24GHZ_BAND) + for (i = 0; i < ieee->geo.bg_channels; i++) + /* NOTE: If G mode is currently supported but + * this is a B only channel, we don't see it + * as valid. 
*/ + if ((ieee->geo.bg[i].channel == channel) && + (!(ieee->mode & IEEE_G) || + !(ieee->geo.bg[i].flags & IEEE80211_CH_B_ONLY))) + return IEEE80211_24GHZ_BAND; + + if (ieee->freq_band & IEEE80211_52GHZ_BAND) + for (i = 0; i < ieee->geo.a_channels; i++) + if (ieee->geo.a[i].channel == channel) + return IEEE80211_52GHZ_BAND; + + return 0; +} + +int ieee80211_channel_to_index(struct ieee80211_device *ieee, u8 channel) +{ + int i; + + /* Driver needs to initialize the geography map before using + * these helper functions */ + BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); + + if (ieee->freq_band & IEEE80211_24GHZ_BAND) + for (i = 0; i < ieee->geo.bg_channels; i++) + if (ieee->geo.bg[i].channel == channel) + return i; + + if (ieee->freq_band & IEEE80211_52GHZ_BAND) + for (i = 0; i < ieee->geo.a_channels; i++) + if (ieee->geo.a[i].channel == channel) + return i; + + return -1; +} + +u8 ieee80211_freq_to_channel(struct ieee80211_device * ieee, u32 freq) +{ + int i; + + /* Driver needs to initialize the geography map before using + * these helper functions */ + BUG_ON(ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0); + + freq /= 100000; + + if (ieee->freq_band & IEEE80211_24GHZ_BAND) + for (i = 0; i < ieee->geo.bg_channels; i++) + if (ieee->geo.bg[i].freq == freq) + return ieee->geo.bg[i].channel; + + if (ieee->freq_band & IEEE80211_52GHZ_BAND) + for (i = 0; i < ieee->geo.a_channels; i++) + if (ieee->geo.a[i].freq == freq) + return ieee->geo.a[i].channel; + + return 0; +} + +int ieee80211_set_geo(struct ieee80211_device *ieee, + const struct ieee80211_geo *geo) +{ + memcpy(ieee->geo.name, geo->name, 3); + ieee->geo.name[3] = '\0'; + ieee->geo.bg_channels = geo->bg_channels; + ieee->geo.a_channels = geo->a_channels; + memcpy(ieee->geo.bg, geo->bg, geo->bg_channels * + sizeof(struct ieee80211_channel)); + memcpy(ieee->geo.a, geo->a, ieee->geo.a_channels * + sizeof(struct ieee80211_channel)); + return 0; +} + +const struct ieee80211_geo *ieee80211_get_geo(struct ieee80211_device *ieee) +{ + return &ieee->geo; +} + +EXPORT_SYMBOL(ieee80211_is_valid_channel); +EXPORT_SYMBOL(ieee80211_freq_to_channel); +EXPORT_SYMBOL(ieee80211_channel_to_index); +EXPORT_SYMBOL(ieee80211_set_geo); +EXPORT_SYMBOL(ieee80211_get_geo); diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c index 03a47343ddc7..f66d792cd204 100644 --- a/net/ieee80211/ieee80211_module.c +++ b/net/ieee80211/ieee80211_module.c @@ -1,6 +1,6 @@ /******************************************************************************* - Copyright(c) 2004 Intel Corporation. All rights reserved. + Copyright(c) 2004-2005 Intel Corporation. All rights reserved. 
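
The new ieee80211_geo.c helpers above are simple linear scans over the driver-supplied geography tables: ieee80211_is_valid_channel() checks both the 2.4 GHz (bg) and 5 GHz (a) lists, ieee80211_channel_to_index() returns the table slot, and ieee80211_freq_to_channel() maps a frequency back to a channel number. A standalone sketch of the same lookup pattern over a toy 2.4 GHz table; the sample channel/frequency pairs below are the standard ones and merely stand in for whatever a driver loads via ieee80211_set_geo():

#include <stdio.h>

struct chan {
        unsigned char channel;
        unsigned int freq;      /* MHz */
};

/* Sample 2.4 GHz entries: channels 1..13 sit at 2412 + 5 * (n - 1) MHz. */
static const struct chan bg[] = {
        { 1, 2412 }, { 6, 2437 }, { 11, 2462 }, { 13, 2472 },
};

static int channel_to_index(unsigned char channel)
{
        for (unsigned int i = 0; i < sizeof(bg) / sizeof(bg[0]); i++)
                if (bg[i].channel == channel)
                        return (int)i;
        return -1;                      /* not in this regulatory map */
}

static unsigned int freq_to_channel(unsigned int freq)
{
        for (unsigned int i = 0; i < sizeof(bg) / sizeof(bg[0]); i++)
                if (bg[i].freq == freq)
                        return bg[i].channel;
        return 0;                       /* 0 == no such channel */
}

int main(void)
{
        printf("channel 11 -> index %d\n", channel_to_index(11));
        printf("2437 MHz   -> channel %u\n", freq_to_channel(2437));
        return 0;
}
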
Portions of this file are based on the WEP enablement code provided by the Host AP project hostap-drivers v0.1.3 @@ -53,12 +53,15 @@ #include -MODULE_DESCRIPTION("802.11 data/management/control stack"); -MODULE_AUTHOR - ("Copyright (C) 2004 Intel Corporation "); -MODULE_LICENSE("GPL"); +#define DRV_DESCRIPTION "802.11 data/management/control stack" +#define DRV_NAME "ieee80211" +#define DRV_VERSION IEEE80211_VERSION +#define DRV_COPYRIGHT "Copyright (C) 2004-2005 Intel Corporation " -#define DRV_NAME "ieee80211" +MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR(DRV_COPYRIGHT); +MODULE_LICENSE("GPL"); static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee) { @@ -126,26 +129,34 @@ struct net_device *alloc_ieee80211(int sizeof_priv) /* Default fragmentation threshold is maximum payload size */ ieee->fts = DEFAULT_FTS; + ieee->rts = DEFAULT_FTS; ieee->scan_age = DEFAULT_MAX_SCAN_AGE; ieee->open_wep = 1; /* Default to enabling full open WEP with host based encrypt/decrypt */ ieee->host_encrypt = 1; ieee->host_decrypt = 1; + ieee->host_mc_decrypt = 1; + + /* Host fragementation in Open mode. Default is enabled. + * Note: host fragmentation is always enabled if host encryption + * is enabled. For cards can do hardware encryption, they must do + * hardware fragmentation as well. So we don't need a variable + * like host_enc_frag. */ + ieee->host_open_frag = 1; ieee->ieee802_1x = 1; /* Default to supporting 802.1x */ INIT_LIST_HEAD(&ieee->crypt_deinit_list); init_timer(&ieee->crypt_deinit_timer); ieee->crypt_deinit_timer.data = (unsigned long)ieee; ieee->crypt_deinit_timer.function = ieee80211_crypt_deinit_handler; + ieee->crypt_quiesced = 0; spin_lock_init(&ieee->lock); ieee->wpa_enabled = 0; - ieee->tkip_countermeasures = 0; ieee->drop_unencrypted = 0; ieee->privacy_invoked = 0; - ieee->ieee802_1x = 1; return dev; @@ -161,6 +172,7 @@ void free_ieee80211(struct net_device *dev) int i; + ieee80211_crypt_quiescing(ieee); del_timer_sync(&ieee->crypt_deinit_timer); ieee80211_crypt_deinit_entries(ieee, 1); @@ -195,42 +207,30 @@ static int show_debug_level(char *page, char **start, off_t offset, static int store_debug_level(struct file *file, const char __user * buffer, unsigned long count, void *data) { - char buf[] = "0x00000000"; - char *p = (char *)buf; + char buf[] = "0x00000000\n"; + unsigned long len = min((unsigned long)sizeof(buf) - 1, count); unsigned long val; - if (count > sizeof(buf) - 1) - count = sizeof(buf) - 1; - - if (copy_from_user(buf, buffer, count)) + if (copy_from_user(buf, buffer, len)) return count; - buf[count] = 0; - /* - * what a FPOS... What, sscanf(buf, "%i", &val) would be too - * scary? 
- */ - if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') { - p++; - if (p[0] == 'x' || p[0] == 'X') - p++; - val = simple_strtoul(p, &p, 16); - } else - val = simple_strtoul(p, &p, 10); - if (p == buf) + buf[len] = 0; + if (sscanf(buf, "%li", &val) != 1) printk(KERN_INFO DRV_NAME ": %s is not in hex or decimal form.\n", buf); else ieee80211_debug_level = val; - return strlen(buf); + return strnlen(buf, len); } +#endif /* CONFIG_IEEE80211_DEBUG */ static int __init ieee80211_init(void) { +#ifdef CONFIG_IEEE80211_DEBUG struct proc_dir_entry *e; ieee80211_debug_level = debug; - ieee80211_proc = create_proc_entry(DRV_NAME, S_IFDIR, proc_net); + ieee80211_proc = proc_mkdir(DRV_NAME, proc_net); if (ieee80211_proc == NULL) { IEEE80211_ERROR("Unable to create " DRV_NAME " proc directory\n"); @@ -246,26 +246,33 @@ static int __init ieee80211_init(void) e->read_proc = show_debug_level; e->write_proc = store_debug_level; e->data = NULL; +#endif /* CONFIG_IEEE80211_DEBUG */ + + printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); + printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); return 0; } static void __exit ieee80211_exit(void) { +#ifdef CONFIG_IEEE80211_DEBUG if (ieee80211_proc) { remove_proc_entry("debug_level", ieee80211_proc); remove_proc_entry(DRV_NAME, proc_net); ieee80211_proc = NULL; } +#endif /* CONFIG_IEEE80211_DEBUG */ } +#ifdef CONFIG_IEEE80211_DEBUG #include module_param(debug, int, 0444); MODULE_PARM_DESC(debug, "debug output mask"); +#endif /* CONFIG_IEEE80211_DEBUG */ module_exit(ieee80211_exit); module_init(ieee80211_init); -#endif const char *escape_essid(const char *essid, u8 essid_len) { diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c index f7dcd854139e..ce694cf5c160 100644 --- a/net/ieee80211/ieee80211_rx.c +++ b/net/ieee80211/ieee80211_rx.c @@ -5,7 +5,7 @@ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen * * Copyright (c) 2002-2003, Jouni Malinen - * Copyright (c) 2004, Intel Corporation + * Copyright (c) 2004-2005, Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -87,7 +87,7 @@ static struct ieee80211_frag_entry *ieee80211_frag_cache_find(struct /* Called only as a tasklet (software IRQ) */ static struct sk_buff *ieee80211_frag_cache_get(struct ieee80211_device *ieee, - struct ieee80211_hdr *hdr) + struct ieee80211_hdr_4addr *hdr) { struct sk_buff *skb = NULL; u16 sc; @@ -101,7 +101,7 @@ static struct sk_buff *ieee80211_frag_cache_get(struct ieee80211_device *ieee, if (frag == 0) { /* Reserve enough space to fit maximum frame length */ skb = dev_alloc_skb(ieee->dev->mtu + - sizeof(struct ieee80211_hdr) + + sizeof(struct ieee80211_hdr_4addr) + 8 /* LLC */ + 2 /* alignment */ + 8 /* WEP */ + ETH_ALEN /* WDS */ ); @@ -138,7 +138,7 @@ static struct sk_buff *ieee80211_frag_cache_get(struct ieee80211_device *ieee, /* Called only as a tasklet (software IRQ) */ static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee, - struct ieee80211_hdr *hdr) + struct ieee80211_hdr_4addr *hdr) { u16 sc; unsigned int seq; @@ -176,7 +176,7 @@ ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb, ieee->dev->name); return 0; /* - hostap_update_sta_ps(ieee, (struct hostap_ieee80211_hdr *) + hostap_update_sta_ps(ieee, (struct hostap_ieee80211_hdr_4addr *) skb->data);*/ } @@ -232,13 +232,13 @@ static int ieee80211_is_eapol_frame(struct ieee80211_device *ieee, { struct 
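
The rewritten store_debug_level() above drops the hand-rolled hex parser: it copies at most sizeof(buf) - 1 bytes of user input, NUL-terminates, and lets sscanf(buf, "%li", ...) accept 0x-prefixed hex as well as plain decimal. A user-space sketch of just that parsing step (the procfs plumbing and copy_from_user() are left out):

#include <stdio.h>
#include <string.h>

/* Parse a debug mask the way the rewritten store_debug_level() does. */
static int parse_debug_level(const char *input, size_t count,
                             unsigned long *mask)
{
        char buf[] = "0x00000000\n";
        size_t len = count < sizeof(buf) - 1 ? count : sizeof(buf) - 1;
        long val;

        memcpy(buf, input, len);        /* stands in for copy_from_user() */
        buf[len] = '\0';

        if (sscanf(buf, "%li", &val) != 1)
                return -1;              /* neither hex nor decimal */

        *mask = (unsigned long)val;
        return 0;
}

int main(void)
{
        unsigned long mask;

        if (parse_debug_level("0x43fff\n", 8, &mask) == 0)
                printf("debug level = 0x%lx\n", mask);
        return 0;
}
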
net_device *dev = ieee->dev; u16 fc, ethertype; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_3addr *hdr; u8 *pos; if (skb->len < 24) return 0; - hdr = (struct ieee80211_hdr *)skb->data; + hdr = (struct ieee80211_hdr_3addr *)skb->data; fc = le16_to_cpu(hdr->frame_ctl); /* check that the frame is unicast frame to us */ @@ -271,26 +271,15 @@ static inline int ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb, struct ieee80211_crypt_data *crypt) { - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_3addr *hdr; int res, hdrlen; if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL) return 0; - hdr = (struct ieee80211_hdr *)skb->data; + hdr = (struct ieee80211_hdr_3addr *)skb->data; hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); -#ifdef CONFIG_IEEE80211_CRYPT_TKIP - if (ieee->tkip_countermeasures && strcmp(crypt->ops->name, "TKIP") == 0) { - if (net_ratelimit()) { - printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " - "received packet from " MAC_FMT "\n", - ieee->dev->name, MAC_ARG(hdr->addr2)); - } - return -1; - } -#endif - atomic_inc(&crypt->refcnt); res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv); atomic_dec(&crypt->refcnt); @@ -314,13 +303,13 @@ ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee, struct sk_buff *skb, int keyidx, struct ieee80211_crypt_data *crypt) { - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_3addr *hdr; int res, hdrlen; if (crypt == NULL || crypt->ops->decrypt_msdu == NULL) return 0; - hdr = (struct ieee80211_hdr *)skb->data; + hdr = (struct ieee80211_hdr_3addr *)skb->data; hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); atomic_inc(&crypt->refcnt); @@ -343,7 +332,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, struct ieee80211_rx_stats *rx_stats) { struct net_device *dev = ieee->dev; - struct ieee80211_hdr *hdr; + struct ieee80211_hdr_4addr *hdr; size_t hdrlen; u16 fc, type, stype, sc; struct net_device_stats *stats; @@ -363,7 +352,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, struct ieee80211_crypt_data *crypt = NULL; int keyidx = 0; - hdr = (struct ieee80211_hdr *)skb->data; + hdr = (struct ieee80211_hdr_4addr *)skb->data; stats = &ieee->stats; if (skb->len < 10) { @@ -378,35 +367,51 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, frag = WLAN_GET_SEQ_FRAG(sc); hdrlen = ieee80211_get_hdrlen(fc); -#ifdef NOT_YET -#if WIRELESS_EXT > 15 /* Put this code here so that we avoid duplicating it in all * Rx paths. 
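
The bulk ieee80211_hdr -> ieee80211_hdr_3addr/ieee80211_hdr_4addr renames in the RX path above make explicit which header layout a function expects: a regular management or data frame uses the 24-byte three-address MAC header, while a 4-address (WDS) frame grows to 30 bytes. A compile-time sketch of the two layouts (packed structs with host-order fields; the real definitions live in the ieee80211.h header):

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* Management and non-WDS data frames: 24-byte MAC header. */
struct hdr_3addr {
        uint16_t frame_ctl;
        uint16_t duration_id;
        uint8_t  addr1[ETH_ALEN];
        uint8_t  addr2[ETH_ALEN];
        uint8_t  addr3[ETH_ALEN];
        uint16_t seq_ctl;
} __attribute__((packed));

/* WDS (4-address) data frames add a fourth address: 30 bytes. */
struct hdr_4addr {
        uint16_t frame_ctl;
        uint16_t duration_id;
        uint8_t  addr1[ETH_ALEN];
        uint8_t  addr2[ETH_ALEN];
        uint8_t  addr3[ETH_ALEN];
        uint16_t seq_ctl;
        uint8_t  addr4[ETH_ALEN];
} __attribute__((packed));

int main(void)
{
        printf("3-address header: %zu bytes\n", sizeof(struct hdr_3addr));
        printf("4-address header: %zu bytes\n", sizeof(struct hdr_4addr));
        return 0;
}
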
- Jean II */ #ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */ /* If spy monitoring on */ - if (iface->spy_data.spy_number > 0) { + if (ieee->spy_data.spy_number > 0) { struct iw_quality wstats; - wstats.level = rx_stats->signal; - wstats.noise = rx_stats->noise; - wstats.updated = 6; /* No qual value */ + + wstats.updated = 0; + if (rx_stats->mask & IEEE80211_STATMASK_RSSI) { + wstats.level = rx_stats->rssi; + wstats.updated |= IW_QUAL_LEVEL_UPDATED; + } else + wstats.updated |= IW_QUAL_LEVEL_INVALID; + + if (rx_stats->mask & IEEE80211_STATMASK_NOISE) { + wstats.noise = rx_stats->noise; + wstats.updated |= IW_QUAL_NOISE_UPDATED; + } else + wstats.updated |= IW_QUAL_NOISE_INVALID; + + if (rx_stats->mask & IEEE80211_STATMASK_SIGNAL) { + wstats.qual = rx_stats->signal; + wstats.updated |= IW_QUAL_QUAL_UPDATED; + } else + wstats.updated |= IW_QUAL_QUAL_INVALID; + /* Update spy records */ - wireless_spy_update(dev, hdr->addr2, &wstats); + wireless_spy_update(ieee->dev, hdr->addr2, &wstats); } #endif /* IW_WIRELESS_SPY */ -#endif /* WIRELESS_EXT > 15 */ + +#ifdef NOT_YET hostap_update_rx_stats(local->ap, hdr, rx_stats); #endif -#if WIRELESS_EXT > 15 if (ieee->iw_mode == IW_MODE_MONITOR) { ieee80211_monitor_rx(ieee, skb, rx_stats); stats->rx_packets++; stats->rx_bytes += skb->len; return 1; } -#endif - if (ieee->host_decrypt) { + if ((is_multicast_ether_addr(hdr->addr1) || + is_broadcast_ether_addr(hdr->addr2)) ? ieee->host_mc_decrypt : + ieee->host_decrypt) { int idx = 0; if (skb->len >= hdrlen + 3) idx = skb->data[hdrlen + 3] >> 6; @@ -531,6 +536,9 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, /* Nullfunc frames may have PS-bit set, so they must be passed to * hostap_handle_sta_rx() before being dropped here. */ + + stype &= ~IEEE80211_STYPE_QOS_DATA; + if (stype != IEEE80211_STYPE_DATA && stype != IEEE80211_STYPE_DATA_CFACK && stype != IEEE80211_STYPE_DATA_CFPOLL && @@ -549,7 +557,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0) goto rx_dropped; - hdr = (struct ieee80211_hdr *)skb->data; + hdr = (struct ieee80211_hdr_4addr *)skb->data; /* skb: hdr + (possibly fragmented) plaintext payload */ // PR: FIXME: hostap has additional conditions in the "if" below: @@ -603,7 +611,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, /* this was the last fragment and the frame will be * delivered, so remove skb from fragment cache */ skb = frag_skb; - hdr = (struct ieee80211_hdr *)skb->data; + hdr = (struct ieee80211_hdr_4addr *)skb->data; ieee80211_frag_cache_invalidate(ieee, hdr); } @@ -613,7 +621,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) goto rx_dropped; - hdr = (struct ieee80211_hdr *)skb->data; + hdr = (struct ieee80211_hdr_4addr *)skb->data; if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep) { if ( /*ieee->ieee802_1x && */ ieee80211_is_eapol_frame(ieee, skb)) { @@ -755,69 +763,179 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, #define MGMT_FRAME_FIXED_PART_LENGTH 0x24 -static inline int ieee80211_is_ofdm_rate(u8 rate) +static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 }; + +/* +* Make ther structure we read from the beacon packet has +* the right values +*/ +static int ieee80211_verify_qos_info(struct ieee80211_qos_information_element + *info_element, int sub_type) { - switch (rate & ~IEEE80211_BASIC_RATE_MASK) { - case 
IEEE80211_OFDM_RATE_6MB: - case IEEE80211_OFDM_RATE_9MB: - case IEEE80211_OFDM_RATE_12MB: - case IEEE80211_OFDM_RATE_18MB: - case IEEE80211_OFDM_RATE_24MB: - case IEEE80211_OFDM_RATE_36MB: - case IEEE80211_OFDM_RATE_48MB: - case IEEE80211_OFDM_RATE_54MB: - return 1; - } + + if (info_element->qui_subtype != sub_type) + return -1; + if (memcmp(info_element->qui, qos_oui, QOS_OUI_LEN)) + return -1; + if (info_element->qui_type != QOS_OUI_TYPE) + return -1; + if (info_element->version != QOS_VERSION_1) + return -1; + return 0; } -static inline int ieee80211_network_init(struct ieee80211_device *ieee, - struct ieee80211_probe_response - *beacon, - struct ieee80211_network *network, - struct ieee80211_rx_stats *stats) +/* + * Parse a QoS parameter element + */ +static int ieee80211_read_qos_param_element(struct ieee80211_qos_parameter_info + *element_param, struct ieee80211_info_element + *info_element) { + int ret = 0; + u16 size = sizeof(struct ieee80211_qos_parameter_info) - 2; + + if ((info_element == NULL) || (element_param == NULL)) + return -1; + + if (info_element->id == QOS_ELEMENT_ID && info_element->len == size) { + memcpy(element_param->info_element.qui, info_element->data, + info_element->len); + element_param->info_element.elementID = info_element->id; + element_param->info_element.length = info_element->len; + } else + ret = -1; + if (ret == 0) + ret = ieee80211_verify_qos_info(&element_param->info_element, + QOS_OUI_PARAM_SUB_TYPE); + return ret; +} + +/* + * Parse a QoS information element + */ +static int ieee80211_read_qos_info_element(struct + ieee80211_qos_information_element + *element_info, struct ieee80211_info_element + *info_element) +{ + int ret = 0; + u16 size = sizeof(struct ieee80211_qos_information_element) - 2; + + if (element_info == NULL) + return -1; + if (info_element == NULL) + return -1; + + if ((info_element->id == QOS_ELEMENT_ID) && (info_element->len == size)) { + memcpy(element_info->qui, info_element->data, + info_element->len); + element_info->elementID = info_element->id; + element_info->length = info_element->len; + } else + ret = -1; + + if (ret == 0) + ret = ieee80211_verify_qos_info(element_info, + QOS_OUI_INFO_SUB_TYPE); + return ret; +} + +/* + * Write QoS parameters from the ac parameters. + */ +static int ieee80211_qos_convert_ac_to_parameters(struct + ieee80211_qos_parameter_info + *param_elm, struct + ieee80211_qos_parameters + *qos_param) +{ + int rc = 0; + int i; + struct ieee80211_qos_ac_parameter *ac_params; + u32 txop; + u8 cw_min; + u8 cw_max; + + for (i = 0; i < QOS_QUEUE_NUM; i++) { + ac_params = &(param_elm->ac_params_record[i]); + + qos_param->aifs[i] = (ac_params->aci_aifsn) & 0x0F; + qos_param->aifs[i] -= (qos_param->aifs[i] < 2) ? 0 : 2; + + cw_min = ac_params->ecw_min_max & 0x0F; + qos_param->cw_min[i] = (u16) ((1 << cw_min) - 1); + + cw_max = (ac_params->ecw_min_max & 0xF0) >> 4; + qos_param->cw_max[i] = (u16) ((1 << cw_max) - 1); + + qos_param->flag[i] = + (ac_params->aci_aifsn & 0x10) ? 0x01 : 0x00; + + txop = le16_to_cpu(ac_params->tx_op_limit) * 32; + qos_param->tx_op_limit[i] = (u16) txop; + } + return rc; +} + +/* + * we have a generic data element which it may contain QoS information or + * parameters element. 
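
ieee80211_qos_convert_ac_to_parameters() above turns the on-air WMM/QoS access-category records into usable numbers: the AIFS number and the admission-control bit come out of the ACI/AIFSN byte, the contention windows are carried as exponents (CW = 2^ECW - 1), and the TXOP limit is expressed in units of 32 microseconds. A standalone sketch of that unpacking for one access category, with the struct trimmed to the three fields the conversion touches (the extra AIFS rebasing done in the hunk above is skipped here):

#include <stdint.h>
#include <stdio.h>

/* One access-category record of a WMM/QoS parameter element. */
struct ac_record {
        uint8_t  aci_aifsn;
        uint8_t  ecw_min_max;
        uint16_t tx_op_limit;   /* little-endian on the wire */
};

struct ac_params {
        unsigned int aifsn;
        unsigned int cw_min, cw_max;    /* in slots                    */
        unsigned int acm;               /* admission control mandatory */
        unsigned int txop_us;           /* microseconds                */
};

/* Unpack one record: CW values travel as exponents (CW = 2^ECW - 1)
 * and the TXOP limit is in 32 us units. */
static struct ac_params unpack_ac(const struct ac_record *rec)
{
        struct ac_params p;

        p.aifsn   = rec->aci_aifsn & 0x0f;
        p.acm     = !!(rec->aci_aifsn & 0x10);
        p.cw_min  = (1u << (rec->ecw_min_max & 0x0f)) - 1;
        p.cw_max  = (1u << (rec->ecw_min_max >> 4)) - 1;
        p.txop_us = (unsigned int)rec->tx_op_limit * 32;

        return p;
}

int main(void)
{
        /* Typical best-effort values: AIFSN 3, ECWmin 4, ECWmax 10. */
        struct ac_record rec = { 0x03, 0xa4, 0 };
        struct ac_params p = unpack_ac(&rec);

        printf("AIFSN %u, CWmin %u, CWmax %u, TXOP %u us\n",
               p.aifsn, p.cw_min, p.cw_max, p.txop_us);
        return 0;
}
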
check the information element length to decide + * which type to read + */ +static int ieee80211_parse_qos_info_param_IE(struct ieee80211_info_element + *info_element, + struct ieee80211_network *network) +{ + int rc = 0; + struct ieee80211_qos_parameters *qos_param = NULL; + struct ieee80211_qos_information_element qos_info_element; + + rc = ieee80211_read_qos_info_element(&qos_info_element, info_element); + + if (rc == 0) { + network->qos_data.param_count = qos_info_element.ac_info & 0x0F; + network->flags |= NETWORK_HAS_QOS_INFORMATION; + } else { + struct ieee80211_qos_parameter_info param_element; + + rc = ieee80211_read_qos_param_element(¶m_element, + info_element); + if (rc == 0) { + qos_param = &(network->qos_data.parameters); + ieee80211_qos_convert_ac_to_parameters(¶m_element, + qos_param); + network->flags |= NETWORK_HAS_QOS_PARAMETERS; + network->qos_data.param_count = + param_element.info_element.ac_info & 0x0F; + } + } + + if (rc == 0) { + IEEE80211_DEBUG_QOS("QoS is supported\n"); + network->qos_data.supported = 1; + } + return rc; +} + +static int ieee80211_parse_info_param(struct ieee80211_info_element + *info_element, u16 length, + struct ieee80211_network *network) +{ + u8 i; #ifdef CONFIG_IEEE80211_DEBUG char rates_str[64]; char *p; #endif - struct ieee80211_info_element *info_element; - u16 left; - u8 i; - /* Pull out fixed field data */ - memcpy(network->bssid, beacon->header.addr3, ETH_ALEN); - network->capability = beacon->capability; - network->last_scanned = jiffies; - network->time_stamp[0] = beacon->time_stamp[0]; - network->time_stamp[1] = beacon->time_stamp[1]; - network->beacon_interval = beacon->beacon_interval; - /* Where to pull this? beacon->listen_interval; */ - network->listen_interval = 0x0A; - network->rates_len = network->rates_ex_len = 0; - network->last_associate = 0; - network->ssid_len = 0; - network->flags = 0; - network->atim_window = 0; - - if (stats->freq == IEEE80211_52GHZ_BAND) { - /* for A band (No DS info) */ - network->channel = stats->received_channel; - } else - network->flags |= NETWORK_HAS_CCK; - - network->wpa_ie_len = 0; - network->rsn_ie_len = 0; - - info_element = &beacon->info_element; - left = stats->len - ((void *)info_element - (void *)beacon); - while (left >= sizeof(struct ieee80211_info_element_hdr)) { - if (sizeof(struct ieee80211_info_element_hdr) + - info_element->len > left) { - IEEE80211_DEBUG_SCAN - ("SCAN: parse failed: info_element->len + 2 > left : info_element->len+2=%Zd left=%d.\n", - info_element->len + - sizeof(struct ieee80211_info_element), left); + while (length >= sizeof(*info_element)) { + if (sizeof(*info_element) + info_element->len > length) { + IEEE80211_DEBUG_MGMT("Info elem: parse failed: " + "info_element->len + 2 > left : " + "info_element->len+2=%zd left=%d, id=%d.\n", + info_element->len + + sizeof(*info_element), + length, info_element->id); return 1; } @@ -837,7 +955,7 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, memset(network->ssid + network->ssid_len, 0, IW_ESSID_MAX_SIZE - network->ssid_len); - IEEE80211_DEBUG_SCAN("MFIE_TYPE_SSID: '%s' len=%d.\n", + IEEE80211_DEBUG_MGMT("MFIE_TYPE_SSID: '%s' len=%d.\n", network->ssid, network->ssid_len); break; @@ -845,15 +963,14 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, #ifdef CONFIG_IEEE80211_DEBUG p = rates_str; #endif - network->rates_len = - min(info_element->len, MAX_RATES_LENGTH); + network->rates_len = min(info_element->len, + MAX_RATES_LENGTH); for (i = 0; i < network->rates_len; i++) 
{ network->rates[i] = info_element->data[i]; #ifdef CONFIG_IEEE80211_DEBUG - p += snprintf(p, - sizeof(rates_str) - (p - - rates_str), - "%02X ", network->rates[i]); + p += snprintf(p, sizeof(rates_str) - + (p - rates_str), "%02X ", + network->rates[i]); #endif if (ieee80211_is_ofdm_rate (info_element->data[i])) { @@ -865,7 +982,7 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, } } - IEEE80211_DEBUG_SCAN("MFIE_TYPE_RATES: '%s' (%d)\n", + IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES: '%s' (%d)\n", rates_str, network->rates_len); break; @@ -873,15 +990,14 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, #ifdef CONFIG_IEEE80211_DEBUG p = rates_str; #endif - network->rates_ex_len = - min(info_element->len, MAX_RATES_EX_LENGTH); + network->rates_ex_len = min(info_element->len, + MAX_RATES_EX_LENGTH); for (i = 0; i < network->rates_ex_len; i++) { network->rates_ex[i] = info_element->data[i]; #ifdef CONFIG_IEEE80211_DEBUG - p += snprintf(p, - sizeof(rates_str) - (p - - rates_str), - "%02X ", network->rates[i]); + p += snprintf(p, sizeof(rates_str) - + (p - rates_str), "%02X ", + network->rates[i]); #endif if (ieee80211_is_ofdm_rate (info_element->data[i])) { @@ -893,40 +1009,51 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, } } - IEEE80211_DEBUG_SCAN("MFIE_TYPE_RATES_EX: '%s' (%d)\n", + IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES_EX: '%s' (%d)\n", rates_str, network->rates_ex_len); break; case MFIE_TYPE_DS_SET: - IEEE80211_DEBUG_SCAN("MFIE_TYPE_DS_SET: %d\n", + IEEE80211_DEBUG_MGMT("MFIE_TYPE_DS_SET: %d\n", info_element->data[0]); - if (stats->freq == IEEE80211_24GHZ_BAND) - network->channel = info_element->data[0]; + network->channel = info_element->data[0]; break; case MFIE_TYPE_FH_SET: - IEEE80211_DEBUG_SCAN("MFIE_TYPE_FH_SET: ignored\n"); + IEEE80211_DEBUG_MGMT("MFIE_TYPE_FH_SET: ignored\n"); break; case MFIE_TYPE_CF_SET: - IEEE80211_DEBUG_SCAN("MFIE_TYPE_CF_SET: ignored\n"); + IEEE80211_DEBUG_MGMT("MFIE_TYPE_CF_SET: ignored\n"); break; case MFIE_TYPE_TIM: - IEEE80211_DEBUG_SCAN("MFIE_TYPE_TIM: ignored\n"); + IEEE80211_DEBUG_MGMT("MFIE_TYPE_TIM: ignored\n"); + break; + + case MFIE_TYPE_ERP_INFO: + network->erp_value = info_element->data[0]; + IEEE80211_DEBUG_MGMT("MFIE_TYPE_ERP_SET: %d\n", + network->erp_value); break; case MFIE_TYPE_IBSS_SET: - IEEE80211_DEBUG_SCAN("MFIE_TYPE_IBSS_SET: ignored\n"); + network->atim_window = info_element->data[0]; + IEEE80211_DEBUG_MGMT("MFIE_TYPE_IBSS_SET: %d\n", + network->atim_window); break; case MFIE_TYPE_CHALLENGE: - IEEE80211_DEBUG_SCAN("MFIE_TYPE_CHALLENGE: ignored\n"); + IEEE80211_DEBUG_MGMT("MFIE_TYPE_CHALLENGE: ignored\n"); break; case MFIE_TYPE_GENERIC: - IEEE80211_DEBUG_SCAN("MFIE_TYPE_GENERIC: %d bytes\n", + IEEE80211_DEBUG_MGMT("MFIE_TYPE_GENERIC: %d bytes\n", info_element->len); + if (!ieee80211_parse_qos_info_param_IE(info_element, + network)) + break; + if (info_element->len >= 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x50 && @@ -940,7 +1067,7 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, break; case MFIE_TYPE_RSN: - IEEE80211_DEBUG_SCAN("MFIE_TYPE_RSN: %d bytes\n", + IEEE80211_DEBUG_MGMT("MFIE_TYPE_RSN: %d bytes\n", info_element->len); network->rsn_ie_len = min(info_element->len + 2, MAX_WPA_IE_LEN); @@ -948,18 +1075,127 @@ static inline int ieee80211_network_init(struct ieee80211_device *ieee, network->rsn_ie_len); break; + case MFIE_TYPE_QOS_PARAMETER: + printk(KERN_ERR + "QoS Error need to parse QOS_PARAMETER IE\n"); + 
break; + default: - IEEE80211_DEBUG_SCAN("unsupported IE %d\n", + IEEE80211_DEBUG_MGMT("unsupported IE %d\n", info_element->id); break; } - left -= sizeof(struct ieee80211_info_element_hdr) + - info_element->len; - info_element = (struct ieee80211_info_element *) - &info_element->data[info_element->len]; + length -= sizeof(*info_element) + info_element->len; + info_element = + (struct ieee80211_info_element *)&info_element-> + data[info_element->len]; } + return 0; +} + +static int ieee80211_handle_assoc_resp(struct ieee80211_device *ieee, struct ieee80211_assoc_response + *frame, struct ieee80211_rx_stats *stats) +{ + struct ieee80211_network network_resp; + struct ieee80211_network *network = &network_resp; + struct net_device *dev = ieee->dev; + + network->flags = 0; + network->qos_data.active = 0; + network->qos_data.supported = 0; + network->qos_data.param_count = 0; + network->qos_data.old_param_count = 0; + + //network->atim_window = le16_to_cpu(frame->aid) & (0x3FFF); + network->atim_window = le16_to_cpu(frame->aid); + network->listen_interval = le16_to_cpu(frame->status); + memcpy(network->bssid, frame->header.addr3, ETH_ALEN); + network->capability = le16_to_cpu(frame->capability); + network->last_scanned = jiffies; + network->rates_len = network->rates_ex_len = 0; + network->last_associate = 0; + network->ssid_len = 0; + network->erp_value = + (network->capability & WLAN_CAPABILITY_IBSS) ? 0x3 : 0x0; + + if (stats->freq == IEEE80211_52GHZ_BAND) { + /* for A band (No DS info) */ + network->channel = stats->received_channel; + } else + network->flags |= NETWORK_HAS_CCK; + + network->wpa_ie_len = 0; + network->rsn_ie_len = 0; + + if (ieee80211_parse_info_param + (frame->info_element, stats->len - sizeof(*frame), network)) + return 1; + + network->mode = 0; + if (stats->freq == IEEE80211_52GHZ_BAND) + network->mode = IEEE_A; + else { + if (network->flags & NETWORK_HAS_OFDM) + network->mode |= IEEE_G; + if (network->flags & NETWORK_HAS_CCK) + network->mode |= IEEE_B; + } + + if (ieee80211_is_empty_essid(network->ssid, network->ssid_len)) + network->flags |= NETWORK_EMPTY_ESSID; + + memcpy(&network->stats, stats, sizeof(network->stats)); + + if (ieee->handle_assoc_response != NULL) + ieee->handle_assoc_response(dev, frame, network); + + return 0; +} + +/***************************************************/ + +static inline int ieee80211_network_init(struct ieee80211_device *ieee, struct ieee80211_probe_response + *beacon, + struct ieee80211_network *network, + struct ieee80211_rx_stats *stats) +{ + network->qos_data.active = 0; + network->qos_data.supported = 0; + network->qos_data.param_count = 0; + network->qos_data.old_param_count = 0; + + /* Pull out fixed field data */ + memcpy(network->bssid, beacon->header.addr3, ETH_ALEN); + network->capability = le16_to_cpu(beacon->capability); + network->last_scanned = jiffies; + network->time_stamp[0] = le32_to_cpu(beacon->time_stamp[0]); + network->time_stamp[1] = le32_to_cpu(beacon->time_stamp[1]); + network->beacon_interval = le16_to_cpu(beacon->beacon_interval); + /* Where to pull this? beacon->listen_interval; */ + network->listen_interval = 0x0A; + network->rates_len = network->rates_ex_len = 0; + network->last_associate = 0; + network->ssid_len = 0; + network->flags = 0; + network->atim_window = 0; + network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ? 
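
The refactor above pulls the element parsing out of ieee80211_network_init() into ieee80211_parse_info_param(), which walks the id/length/data elements of a beacon, probe response or (now also) association response and bails out whenever an element's declared length would run past the remaining buffer. A standalone sketch of that walk over a toy element buffer:

#include <stdint.h>
#include <stdio.h>

/* An 802.11 information element: 1-byte id, 1-byte length, payload. */
struct info_element {
        uint8_t id;
        uint8_t len;
        uint8_t data[];
};

/* Step through a buffer of elements, refusing any element whose
 * declared length overruns the buffer, like ieee80211_parse_info_param(). */
static void walk_elements(const uint8_t *buf, size_t length)
{
        while (length >= sizeof(struct info_element)) {
                const struct info_element *ie =
                        (const struct info_element *)buf;

                if (sizeof(*ie) + ie->len > length) {
                        printf("parse failed: element overruns buffer\n");
                        return;
                }
                printf("id=%u len=%u\n", (unsigned)ie->id, (unsigned)ie->len);

                length -= sizeof(*ie) + ie->len;
                buf += sizeof(*ie) + ie->len;
        }
}

int main(void)
{
        /* SSID "ap" (id 0) followed by a DS Parameter Set (id 3, chan 6). */
        const uint8_t ies[] = { 0, 2, 'a', 'p', 3, 1, 6 };

        walk_elements(ies, sizeof(ies));
        return 0;
}
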
+ 0x3 : 0x0; + + if (stats->freq == IEEE80211_52GHZ_BAND) { + /* for A band (No DS info) */ + network->channel = stats->received_channel; + } else + network->flags |= NETWORK_HAS_CCK; + + network->wpa_ie_len = 0; + network->rsn_ie_len = 0; + + if (ieee80211_parse_info_param + (beacon->info_element, stats->len - sizeof(*beacon), network)) + return 1; + network->mode = 0; if (stats->freq == IEEE80211_52GHZ_BAND) network->mode = IEEE_A; @@ -1002,6 +1238,9 @@ static inline int is_same_network(struct ieee80211_network *src, static inline void update_network(struct ieee80211_network *dst, struct ieee80211_network *src) { + int qos_active; + u8 old_param; + memcpy(&dst->stats, &src->stats, sizeof(struct ieee80211_rx_stats)); dst->capability = src->capability; memcpy(dst->rates, src->rates, src->rates_len); @@ -1017,6 +1256,7 @@ static inline void update_network(struct ieee80211_network *dst, dst->beacon_interval = src->beacon_interval; dst->listen_interval = src->listen_interval; dst->atim_window = src->atim_window; + dst->erp_value = src->erp_value; memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len); dst->wpa_ie_len = src->wpa_ie_len; @@ -1024,22 +1264,48 @@ static inline void update_network(struct ieee80211_network *dst, dst->rsn_ie_len = src->rsn_ie_len; dst->last_scanned = jiffies; + qos_active = src->qos_data.active; + old_param = dst->qos_data.old_param_count; + if (dst->flags & NETWORK_HAS_QOS_MASK) + memcpy(&dst->qos_data, &src->qos_data, + sizeof(struct ieee80211_qos_data)); + else { + dst->qos_data.supported = src->qos_data.supported; + dst->qos_data.param_count = src->qos_data.param_count; + } + + if (dst->qos_data.supported == 1) { + if (dst->ssid_len) + IEEE80211_DEBUG_QOS + ("QoS the network %s is QoS supported\n", + dst->ssid); + else + IEEE80211_DEBUG_QOS + ("QoS the network is QoS supported\n"); + } + dst->qos_data.active = qos_active; + dst->qos_data.old_param_count = old_param; + /* dst->last_associate is not overwritten */ } +static inline int is_beacon(int fc) +{ + return (WLAN_FC_GET_STYPE(le16_to_cpu(fc)) == IEEE80211_STYPE_BEACON); +} + static inline void ieee80211_process_probe_response(struct ieee80211_device - *ieee, - struct + *ieee, struct ieee80211_probe_response - *beacon, - struct ieee80211_rx_stats + *beacon, struct ieee80211_rx_stats *stats) { + struct net_device *dev = ieee->dev; struct ieee80211_network network; struct ieee80211_network *target; struct ieee80211_network *oldest = NULL; #ifdef CONFIG_IEEE80211_DEBUG - struct ieee80211_info_element *info_element = &beacon->info_element; + struct ieee80211_info_element *info_element = beacon->info_element; #endif unsigned long flags; @@ -1070,10 +1336,10 @@ static inline void ieee80211_process_probe_response(struct ieee80211_device escape_essid(info_element->data, info_element->len), MAC_ARG(beacon->header.addr3), - WLAN_FC_GET_STYPE(beacon->header. - frame_ctl) == - IEEE80211_STYPE_PROBE_RESP ? - "PROBE RESPONSE" : "BEACON"); + is_beacon(le16_to_cpu + (beacon->header. + frame_ctl)) ? + "BEACON" : "PROBE RESPONSE"); return; } @@ -1122,10 +1388,10 @@ static inline void ieee80211_process_probe_response(struct ieee80211_device escape_essid(network.ssid, network.ssid_len), MAC_ARG(network.bssid), - WLAN_FC_GET_STYPE(beacon->header. - frame_ctl) == - IEEE80211_STYPE_PROBE_RESP ? - "PROBE RESPONSE" : "BEACON"); + is_beacon(le16_to_cpu + (beacon->header. + frame_ctl)) ? 
+ "BEACON" : "PROBE RESPONSE"); #endif memcpy(target, &network, sizeof(*target)); list_add_tail(&target->list, &ieee->network_list); @@ -1134,34 +1400,60 @@ static inline void ieee80211_process_probe_response(struct ieee80211_device escape_essid(target->ssid, target->ssid_len), MAC_ARG(target->bssid), - WLAN_FC_GET_STYPE(beacon->header. - frame_ctl) == - IEEE80211_STYPE_PROBE_RESP ? - "PROBE RESPONSE" : "BEACON"); + is_beacon(le16_to_cpu + (beacon->header. + frame_ctl)) ? + "BEACON" : "PROBE RESPONSE"); update_network(target, &network); } spin_unlock_irqrestore(&ieee->lock, flags); + + if (is_beacon(le16_to_cpu(beacon->header.frame_ctl))) { + if (ieee->handle_beacon != NULL) + ieee->handle_beacon(dev, beacon, &network); + } else { + if (ieee->handle_probe_response != NULL) + ieee->handle_probe_response(dev, beacon, &network); + } } void ieee80211_rx_mgt(struct ieee80211_device *ieee, - struct ieee80211_hdr *header, + struct ieee80211_hdr_4addr *header, struct ieee80211_rx_stats *stats) { - switch (WLAN_FC_GET_STYPE(header->frame_ctl)) { + switch (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl))) { case IEEE80211_STYPE_ASSOC_RESP: IEEE80211_DEBUG_MGMT("received ASSOCIATION RESPONSE (%d)\n", - WLAN_FC_GET_STYPE(header->frame_ctl)); + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); + ieee80211_handle_assoc_resp(ieee, + (struct ieee80211_assoc_response *) + header, stats); break; case IEEE80211_STYPE_REASSOC_RESP: IEEE80211_DEBUG_MGMT("received REASSOCIATION RESPONSE (%d)\n", - WLAN_FC_GET_STYPE(header->frame_ctl)); + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); + break; + + case IEEE80211_STYPE_PROBE_REQ: + IEEE80211_DEBUG_MGMT("recieved auth (%d)\n", + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); + + if (ieee->handle_probe_request != NULL) + ieee->handle_probe_request(ieee->dev, + (struct + ieee80211_probe_request *) + header, stats); break; case IEEE80211_STYPE_PROBE_RESP: IEEE80211_DEBUG_MGMT("received PROBE RESPONSE (%d)\n", - WLAN_FC_GET_STYPE(header->frame_ctl)); + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); IEEE80211_DEBUG_SCAN("Probe response\n"); ieee80211_process_probe_response(ieee, (struct @@ -1171,20 +1463,46 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee, case IEEE80211_STYPE_BEACON: IEEE80211_DEBUG_MGMT("received BEACON (%d)\n", - WLAN_FC_GET_STYPE(header->frame_ctl)); + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); IEEE80211_DEBUG_SCAN("Beacon\n"); ieee80211_process_probe_response(ieee, (struct ieee80211_probe_response *) header, stats); break; + case IEEE80211_STYPE_AUTH: + IEEE80211_DEBUG_MGMT("recieved auth (%d)\n", + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); + + if (ieee->handle_auth != NULL) + ieee->handle_auth(ieee->dev, + (struct ieee80211_auth *)header); + break; + + case IEEE80211_STYPE_DISASSOC: + if (ieee->handle_disassoc != NULL) + ieee->handle_disassoc(ieee->dev, + (struct ieee80211_disassoc *) + header); + break; + + case IEEE80211_STYPE_DEAUTH: + printk("DEAUTH from AP\n"); + if (ieee->handle_deauth != NULL) + ieee->handle_deauth(ieee->dev, (struct ieee80211_auth *) + header); + break; default: IEEE80211_DEBUG_MGMT("received UNKNOWN (%d)\n", - WLAN_FC_GET_STYPE(header->frame_ctl)); + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); IEEE80211_WARNING("%s: Unknown management packet: %d\n", ieee->dev->name, - WLAN_FC_GET_STYPE(header->frame_ctl)); + WLAN_FC_GET_STYPE(le16_to_cpu + (header->frame_ctl))); break; } } diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index 
c9aaff3fea1e..95ccbadbf55b 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved. + Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as @@ -128,7 +128,7 @@ payload of each frame is reduced to 492 bytes. static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 }; static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 }; -static inline int ieee80211_put_snap(u8 * data, u16 h_proto) +static inline int ieee80211_copy_snap(u8 * data, u16 h_proto) { struct ieee80211_snap_hdr *snap; u8 *oui; @@ -157,31 +157,14 @@ static inline int ieee80211_encrypt_fragment(struct ieee80211_device *ieee, struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx]; int res; -#ifdef CONFIG_IEEE80211_CRYPT_TKIP - struct ieee80211_hdr *header; - - if (ieee->tkip_countermeasures && - crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) { - header = (struct ieee80211_hdr *)frag->data; - if (net_ratelimit()) { - printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " - "TX packet to " MAC_FMT "\n", - ieee->dev->name, MAC_ARG(header->addr1)); - } + if (crypt == NULL) return -1; - } -#endif + /* To encrypt, frame format is: * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */ - - // PR: FIXME: Copied from hostap. Check fragmentation/MSDU/MPDU encryption. - /* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so - * call both MSDU and MPDU encryption functions from here. */ atomic_inc(&crypt->refcnt); res = 0; - if (crypt->ops->encrypt_msdu) - res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv); - if (res == 0 && crypt->ops->encrypt_mpdu) + if (crypt->ops && crypt->ops->encrypt_mpdu) res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv); atomic_dec(&crypt->refcnt); @@ -207,7 +190,7 @@ void ieee80211_txb_free(struct ieee80211_txb *txb) } static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size, - int gfp_mask) + int headroom, gfp_t gfp_mask) { struct ieee80211_txb *txb; int i; @@ -221,11 +204,13 @@ static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size, txb->frag_size = txb_size; for (i = 0; i < nr_frags; i++) { - txb->fragments[i] = dev_alloc_skb(txb_size); + txb->fragments[i] = __dev_alloc_skb(txb_size + headroom, + gfp_mask); if (unlikely(!txb->fragments[i])) { i--; break; } + skb_reserve(txb->fragments[i], headroom); } if (unlikely(i != nr_frags)) { while (i >= 0) @@ -236,25 +221,31 @@ static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size, return txb; } -/* SKBs are added to the ieee->tx_queue. 
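
ieee80211_alloc_txb() above now takes a headroom argument and reserves it in every fragment with skb_reserve(), so per-MPDU crypto prefixes (the extra_mpdu_prefix_len values from the ops tables earlier) can later be pushed in front of the payload without a copy. A toy sketch of the reserve-then-push pattern on a simplified buffer; the skb-like field names are illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* A toy packet buffer with skb-like head/data/tail pointers. */
struct pbuf {
        unsigned char *head, *data, *tail, *end;
};

static struct pbuf *pbuf_alloc(size_t size)
{
        struct pbuf *b = calloc(1, sizeof(*b));
        unsigned char *mem = calloc(1, size);

        if (!b || !mem)
                exit(1);
        b->head = b->data = b->tail = mem;
        b->end = mem + size;
        return b;
}

/* Like skb_reserve(): set aside headroom so later headers can be
 * prepended without reallocating. */
static void pbuf_reserve(struct pbuf *b, size_t headroom)
{
        b->data += headroom;
        b->tail += headroom;
}

/* Like skb_push(): prepend len bytes (e.g. an IV built by build_iv). */
static unsigned char *pbuf_push(struct pbuf *b, size_t len)
{
        b->data -= len;
        return b->data;
}

int main(void)
{
        struct pbuf *b = pbuf_alloc(128);

        pbuf_reserve(b, 16);              /* room for a crypto prefix */
        memcpy(b->tail, "payload", 7);    /* fill fragment payload    */
        b->tail += 7;
        memset(pbuf_push(b, 8), 0, 8);    /* prepend an 8-byte IV     */

        printf("headroom left: %td\n", b->data - b->head);
        free(b->head);
        free(b);
        return 0;
}
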
*/ +/* Incoming skb is converted to a txb which consists of + * a block of 802.11 fragment packets (stored as skbs) */ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) { struct ieee80211_device *ieee = netdev_priv(dev); struct ieee80211_txb *txb = NULL; - struct ieee80211_hdr *frag_hdr; - int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size; + struct ieee80211_hdr_3addr *frag_hdr; + int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size, + rts_required; unsigned long flags; struct net_device_stats *stats = &ieee->stats; - int ether_type, encrypt; + int ether_type, encrypt, host_encrypt, host_encrypt_msdu, host_build_iv; int bytes, fc, hdr_len; struct sk_buff *skb_frag; - struct ieee80211_hdr header = { /* Ensure zero initialized */ + struct ieee80211_hdr_3addr header = { /* Ensure zero initialized */ .duration_id = 0, .seq_ctl = 0 }; u8 dest[ETH_ALEN], src[ETH_ALEN]; - struct ieee80211_crypt_data *crypt; + int priority = skb->priority; + int snapped = 0; + + if (ieee->is_queue_full && (*ieee->is_queue_full) (dev, priority)) + return NETDEV_TX_BUSY; spin_lock_irqsave(&ieee->lock, flags); @@ -276,7 +267,11 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) crypt = ieee->crypt[ieee->tx_keyidx]; encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) && - ieee->host_encrypt && crypt && crypt->ops; + ieee->sec.encrypt; + + host_encrypt = ieee->host_encrypt && encrypt && crypt; + host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt; + host_build_iv = ieee->host_build_iv && encrypt && crypt; if (!encrypt && ieee->ieee802_1x && ieee->drop_unencrypted && ether_type != ETH_P_PAE) { @@ -285,8 +280,8 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) } /* Save source and destination addresses */ - memcpy(&dest, skb->data, ETH_ALEN); - memcpy(&src, skb->data + ETH_ALEN, ETH_ALEN); + memcpy(dest, skb->data, ETH_ALEN); + memcpy(src, skb->data + ETH_ALEN, ETH_ALEN); /* Advance the SKB to the start of the payload */ skb_pull(skb, sizeof(struct ethhdr)); @@ -294,7 +289,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) /* Determine total amount of storage required for TXB packets */ bytes = skb->len + SNAP_SIZE + sizeof(u16); - if (encrypt) + if (host_encrypt) fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA | IEEE80211_FCTL_PROTECTED; else @@ -302,70 +297,144 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) if (ieee->iw_mode == IW_MODE_INFRA) { fc |= IEEE80211_FCTL_TODS; - /* To DS: Addr1 = BSSID, Addr2 = SA, - Addr3 = DA */ - memcpy(&header.addr1, ieee->bssid, ETH_ALEN); - memcpy(&header.addr2, &src, ETH_ALEN); - memcpy(&header.addr3, &dest, ETH_ALEN); + /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */ + memcpy(header.addr1, ieee->bssid, ETH_ALEN); + memcpy(header.addr2, src, ETH_ALEN); + memcpy(header.addr3, dest, ETH_ALEN); } else if (ieee->iw_mode == IW_MODE_ADHOC) { - /* not From/To DS: Addr1 = DA, Addr2 = SA, - Addr3 = BSSID */ - memcpy(&header.addr1, dest, ETH_ALEN); - memcpy(&header.addr2, src, ETH_ALEN); - memcpy(&header.addr3, ieee->bssid, ETH_ALEN); + /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */ + memcpy(header.addr1, dest, ETH_ALEN); + memcpy(header.addr2, src, ETH_ALEN); + memcpy(header.addr3, ieee->bssid, ETH_ALEN); } header.frame_ctl = cpu_to_le16(fc); hdr_len = IEEE80211_3ADDR_LEN; - /* Determine fragmentation size based on destination (multicast - * and broadcast are not fragmented) */ - if (is_multicast_ether_addr(dest) || is_broadcast_ether_addr(dest)) - frag_size = 
MAX_FRAG_THRESHOLD; - else - frag_size = ieee->fts; + /* Encrypt msdu first on the whole data packet. */ + if ((host_encrypt || host_encrypt_msdu) && + crypt && crypt->ops && crypt->ops->encrypt_msdu) { + int res = 0; + int len = bytes + hdr_len + crypt->ops->extra_msdu_prefix_len + + crypt->ops->extra_msdu_postfix_len; + struct sk_buff *skb_new = dev_alloc_skb(len); - /* Determine amount of payload per fragment. Regardless of if - * this stack is providing the full 802.11 header, one will - * eventually be affixed to this fragment -- so we must account for - * it when determining the amount of payload space. */ - bytes_per_frag = frag_size - IEEE80211_3ADDR_LEN; - if (ieee->config & - (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) - bytes_per_frag -= IEEE80211_FCS_LEN; + if (unlikely(!skb_new)) + goto failed; - /* Each fragment may need to have room for encryptiong pre/postfix */ - if (encrypt) - bytes_per_frag -= crypt->ops->extra_prefix_len + - crypt->ops->extra_postfix_len; + skb_reserve(skb_new, crypt->ops->extra_msdu_prefix_len); + memcpy(skb_put(skb_new, hdr_len), &header, hdr_len); + snapped = 1; + ieee80211_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)), + ether_type); + memcpy(skb_put(skb_new, skb->len), skb->data, skb->len); + res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv); + if (res < 0) { + IEEE80211_ERROR("msdu encryption failed\n"); + dev_kfree_skb_any(skb_new); + goto failed; + } + dev_kfree_skb_any(skb); + skb = skb_new; + bytes += crypt->ops->extra_msdu_prefix_len + + crypt->ops->extra_msdu_postfix_len; + skb_pull(skb, hdr_len); + } - /* Number of fragments is the total bytes_per_frag / - * payload_per_fragment */ - nr_frags = bytes / bytes_per_frag; - bytes_last_frag = bytes % bytes_per_frag; - if (bytes_last_frag) + if (host_encrypt || ieee->host_open_frag) { + /* Determine fragmentation size based on destination (multicast + * and broadcast are not fragmented) */ + if (is_multicast_ether_addr(dest) || + is_broadcast_ether_addr(dest)) + frag_size = MAX_FRAG_THRESHOLD; + else + frag_size = ieee->fts; + + /* Determine amount of payload per fragment. Regardless of if + * this stack is providing the full 802.11 header, one will + * eventually be affixed to this fragment -- so we must account + * for it when determining the amount of payload space. */ + bytes_per_frag = frag_size - IEEE80211_3ADDR_LEN; + if (ieee->config & + (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) + bytes_per_frag -= IEEE80211_FCS_LEN; + + /* Each fragment may need to have room for encryptiong + * pre/postfix */ + if (host_encrypt) + bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len + + crypt->ops->extra_mpdu_postfix_len; + + /* Number of fragments is the total + * bytes_per_frag / payload_per_fragment */ + nr_frags = bytes / bytes_per_frag; + bytes_last_frag = bytes % bytes_per_frag; + if (bytes_last_frag) + nr_frags++; + else + bytes_last_frag = bytes_per_frag; + } else { + nr_frags = 1; + bytes_per_frag = bytes_last_frag = bytes; + frag_size = bytes + IEEE80211_3ADDR_LEN; + } + + rts_required = (frag_size > ieee->rts + && ieee->config & CFG_IEEE80211_RTS); + if (rts_required) nr_frags++; - else - bytes_last_frag = bytes_per_frag; /* When we allocate the TXB we allocate enough space for the reserve * and full fragment bytes (bytes_per_frag doesn't include prefix, * postfix, header, FCS, etc.) 
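The fragment sizing just above is plain integer arithmetic, so a standalone rerun with assumed example numbers (a 2304-byte fragmentation threshold, the 24-byte 3-address header, a 4-byte FCS and a 4+4 byte WEP IV/ICV) makes the nr_frags / bytes_last_frag split easy to check by hand. This is an editor's sketch, not part of the patch, and the constants are illustrative only rather than taken from any particular driver configuration.

/* Editor's illustration: the ieee80211_xmit() fragment split redone in
 * plain C with assumed values; compiles and runs in userspace. */
#include <stdio.h>

int main(void)
{
        int frag_size       = 2304;     /* assumed fragmentation threshold (ieee->fts) */
        int hdr_len         = 24;       /* IEEE80211_3ADDR_LEN */
        int fcs_len         = 4;        /* IEEE80211_FCS_LEN, if the stack reserves it */
        int wep_prefix      = 4;        /* extra_mpdu_prefix_len for WEP (IV)  */
        int wep_postfix     = 4;        /* extra_mpdu_postfix_len for WEP (ICV) */
        int bytes           = 3000;     /* payload including SNAP, assumed */

        int bytes_per_frag  = frag_size - hdr_len - fcs_len
                              - wep_prefix - wep_postfix;
        int nr_frags        = bytes / bytes_per_frag;
        int bytes_last_frag = bytes % bytes_per_frag;

        if (bytes_last_frag)
                nr_frags++;             /* partial tail fragment */
        else
                bytes_last_frag = bytes_per_frag;

        printf("bytes_per_frag=%d nr_frags=%d bytes_last_frag=%d\n",
               bytes_per_frag, nr_frags, bytes_last_frag);
        /* prints: bytes_per_frag=2268 nr_frags=2 bytes_last_frag=732 */
        return 0;
}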
*/ - txb = ieee80211_alloc_txb(nr_frags, frag_size, GFP_ATOMIC); + txb = ieee80211_alloc_txb(nr_frags, frag_size, + ieee->tx_headroom, GFP_ATOMIC); if (unlikely(!txb)) { printk(KERN_WARNING "%s: Could not allocate TXB\n", ieee->dev->name); goto failed; } txb->encrypted = encrypt; - txb->payload_size = bytes; + if (host_encrypt) + txb->payload_size = frag_size * (nr_frags - 1) + + bytes_last_frag; + else + txb->payload_size = bytes; - for (i = 0; i < nr_frags; i++) { + if (rts_required) { + skb_frag = txb->fragments[0]; + frag_hdr = + (struct ieee80211_hdr_3addr *)skb_put(skb_frag, hdr_len); + + /* + * Set header frame_ctl to the RTS. + */ + header.frame_ctl = + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); + memcpy(frag_hdr, &header, hdr_len); + + /* + * Restore header frame_ctl to the original data setting. + */ + header.frame_ctl = cpu_to_le16(fc); + + if (ieee->config & + (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) + skb_put(skb_frag, 4); + + txb->rts_included = 1; + i = 1; + } else + i = 0; + + for (; i < nr_frags; i++) { skb_frag = txb->fragments[i]; - if (encrypt) - skb_reserve(skb_frag, crypt->ops->extra_prefix_len); + if (host_encrypt || host_build_iv) + skb_reserve(skb_frag, + crypt->ops->extra_mpdu_prefix_len); - frag_hdr = (struct ieee80211_hdr *)skb_put(skb_frag, hdr_len); + frag_hdr = + (struct ieee80211_hdr_3addr *)skb_put(skb_frag, hdr_len); memcpy(frag_hdr, &header, hdr_len); /* If this is not the last fragment, then add the MOREFRAGS @@ -379,11 +448,10 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) bytes = bytes_last_frag; } - /* Put a SNAP header on the first fragment */ - if (i == 0) { - ieee80211_put_snap(skb_put - (skb_frag, SNAP_SIZE + sizeof(u16)), - ether_type); + if (i == 0 && !snapped) { + ieee80211_copy_snap(skb_put + (skb_frag, SNAP_SIZE + sizeof(u16)), + ether_type); bytes -= SNAP_SIZE + sizeof(u16); } @@ -394,8 +462,19 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) /* Encryption routine will move the header forward in order * to insert the IV between the header and the payload */ - if (encrypt) + if (host_encrypt) ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len); + else if (host_build_iv) { + struct ieee80211_crypt_data *crypt; + + crypt = ieee->crypt[ieee->tx_keyidx]; + atomic_inc(&crypt->refcnt); + if (crypt->ops->build_iv) + crypt->ops->build_iv(skb_frag, hdr_len, + crypt->priv); + atomic_dec(&crypt->refcnt); + } + if (ieee->config & (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) skb_put(skb_frag, 4); @@ -407,11 +486,20 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) dev_kfree_skb_any(skb); if (txb) { - if ((*ieee->hard_start_xmit) (txb, dev) == 0) { + int ret = (*ieee->hard_start_xmit) (txb, dev, priority); + if (ret == 0) { stats->tx_packets++; stats->tx_bytes += txb->payload_size; return 0; } + + if (ret == NETDEV_TX_BUSY) { + printk(KERN_ERR "%s: NETDEV_TX_BUSY returned; " + "driver should report queue full via " + "ieee_device->is_queue_full.\n", + ieee->dev->name); + } + ieee80211_txb_free(txb); } @@ -422,7 +510,72 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev) netif_stop_queue(dev); stats->tx_errors++; return 1; - } +/* Incoming 802.11 strucure is converted to a TXB + * a block of 802.11 fragment packets (stored as skbs) */ +int ieee80211_tx_frame(struct ieee80211_device *ieee, + struct ieee80211_hdr *frame, int len) +{ + struct ieee80211_txb *txb = NULL; + unsigned long flags; + struct net_device_stats *stats = &ieee->stats; + struct 
sk_buff *skb_frag; + int priority = -1; + + spin_lock_irqsave(&ieee->lock, flags); + + /* If there is no driver handler to take the TXB, dont' bother + * creating it... */ + if (!ieee->hard_start_xmit) { + printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name); + goto success; + } + + if (unlikely(len < 24)) { + printk(KERN_WARNING "%s: skb too small (%d).\n", + ieee->dev->name, len); + goto success; + } + + /* When we allocate the TXB we allocate enough space for the reserve + * and full fragment bytes (bytes_per_frag doesn't include prefix, + * postfix, header, FCS, etc.) */ + txb = ieee80211_alloc_txb(1, len, ieee->tx_headroom, GFP_ATOMIC); + if (unlikely(!txb)) { + printk(KERN_WARNING "%s: Could not allocate TXB\n", + ieee->dev->name); + goto failed; + } + txb->encrypted = 0; + txb->payload_size = len; + + skb_frag = txb->fragments[0]; + + memcpy(skb_put(skb_frag, len), frame, len); + + if (ieee->config & + (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS)) + skb_put(skb_frag, 4); + + success: + spin_unlock_irqrestore(&ieee->lock, flags); + + if (txb) { + if ((*ieee->hard_start_xmit) (txb, ieee->dev, priority) == 0) { + stats->tx_packets++; + stats->tx_bytes += txb->payload_size; + return 0; + } + ieee80211_txb_free(txb); + } + return 0; + + failed: + spin_unlock_irqrestore(&ieee->lock, flags); + stats->tx_errors++; + return 1; +} + +EXPORT_SYMBOL(ieee80211_tx_frame); EXPORT_SYMBOL(ieee80211_txb_free); diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c index 94882f39b072..1ce7af9bec35 100644 --- a/net/ieee80211/ieee80211_wx.c +++ b/net/ieee80211/ieee80211_wx.c @@ -1,6 +1,6 @@ /****************************************************************************** - Copyright(c) 2004 Intel Corporation. All rights reserved. + Copyright(c) 2004-2005 Intel Corporation. All rights reserved. Portions of this file are based on the WEP enablement code provided by the Host AP project hostap-drivers v0.1.3 @@ -32,6 +32,7 @@ #include #include +#include #include #include @@ -140,18 +141,41 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee, start = iwe_stream_add_point(start, stop, &iwe, custom); /* Add quality statistics */ - /* TODO: Fix these values... 
*/ iwe.cmd = IWEVQUAL; - iwe.u.qual.qual = network->stats.signal; - iwe.u.qual.level = network->stats.rssi; - iwe.u.qual.noise = network->stats.noise; - iwe.u.qual.updated = network->stats.mask & IEEE80211_STATMASK_WEMASK; - if (!(network->stats.mask & IEEE80211_STATMASK_RSSI)) - iwe.u.qual.updated |= IW_QUAL_LEVEL_INVALID; - if (!(network->stats.mask & IEEE80211_STATMASK_NOISE)) + iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | + IW_QUAL_NOISE_UPDATED; + + if (!(network->stats.mask & IEEE80211_STATMASK_RSSI)) { + iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID | + IW_QUAL_LEVEL_INVALID; + iwe.u.qual.qual = 0; + iwe.u.qual.level = 0; + } else { + iwe.u.qual.level = network->stats.rssi; + if (ieee->perfect_rssi == ieee->worst_rssi) + iwe.u.qual.qual = 100; + else + iwe.u.qual.qual = + (100 * + (ieee->perfect_rssi - ieee->worst_rssi) * + (ieee->perfect_rssi - ieee->worst_rssi) - + (ieee->perfect_rssi - network->stats.rssi) * + (15 * (ieee->perfect_rssi - ieee->worst_rssi) + + 62 * (ieee->perfect_rssi - network->stats.rssi))) / + ((ieee->perfect_rssi - ieee->worst_rssi) * + (ieee->perfect_rssi - ieee->worst_rssi)); + if (iwe.u.qual.qual > 100) + iwe.u.qual.qual = 100; + else if (iwe.u.qual.qual < 1) + iwe.u.qual.qual = 0; + } + + if (!(network->stats.mask & IEEE80211_STATMASK_NOISE)) { iwe.u.qual.updated |= IW_QUAL_NOISE_INVALID; - if (!(network->stats.mask & IEEE80211_STATMASK_SIGNAL)) - iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID; + iwe.u.qual.noise = 0; + } else { + iwe.u.qual.noise = network->stats.noise; + } start = iwe_stream_add_event(start, stop, &iwe, IW_EV_QUAL_LEN); @@ -162,7 +186,7 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee, if (iwe.u.data.length) start = iwe_stream_add_point(start, stop, &iwe, custom); - if (ieee->wpa_enabled && network->wpa_ie_len) { + if (network->wpa_ie_len) { char buf[MAX_WPA_IE_LEN * 2 + 30]; u8 *p = buf; @@ -177,7 +201,7 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee, start = iwe_stream_add_point(start, stop, &iwe, buf); } - if (ieee->wpa_enabled && network->rsn_ie_len) { + if (network->rsn_ie_len) { char buf[MAX_WPA_IE_LEN * 2 + 30]; u8 *p = buf; @@ -197,8 +221,8 @@ static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee, iwe.cmd = IWEVCUSTOM; p = custom; p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), - " Last beacon: %lums ago", - (jiffies - network->last_scanned) / (HZ / 100)); + " Last beacon: %dms ago", + jiffies_to_msecs(jiffies - network->last_scanned)); iwe.u.data.length = p - custom; if (iwe.u.data.length) start = iwe_stream_add_point(start, stop, &iwe, custom); @@ -228,13 +252,13 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee, ev = ipw2100_translate_scan(ieee, ev, stop, network); else IEEE80211_DEBUG_SCAN("Not showing network '%s (" - MAC_FMT ")' due to age (%lums).\n", + MAC_FMT ")' due to age (%dms).\n", escape_essid(network->ssid, network->ssid_len), MAC_ARG(network->bssid), - (jiffies - - network->last_scanned) / (HZ / - 100)); + jiffies_to_msecs(jiffies - + network-> + last_scanned)); } spin_unlock_irqrestore(&ieee->lock, flags); @@ -258,6 +282,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, }; int i, key, key_provided, len; struct ieee80211_crypt_data **crypt; + int host_crypto = ieee->host_encrypt || ieee->host_decrypt; IEEE80211_DEBUG_WX("SET_ENCODE\n"); @@ -298,15 +323,17 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, if (i == WEP_KEYS) { sec.enabled = 0; + sec.encrypt = 0; sec.level = SEC_LEVEL_0; - 
sec.flags |= SEC_ENABLED | SEC_LEVEL; + sec.flags |= SEC_ENABLED | SEC_LEVEL | SEC_ENCRYPT; } goto done; } sec.enabled = 1; - sec.flags |= SEC_ENABLED; + sec.encrypt = 1; + sec.flags |= SEC_ENABLED | SEC_ENCRYPT; if (*crypt != NULL && (*crypt)->ops != NULL && strcmp((*crypt)->ops->name, "WEP") != 0) { @@ -315,7 +342,7 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, ieee80211_crypt_delayed_deinit(ieee, crypt); } - if (*crypt == NULL) { + if (*crypt == NULL && host_crypto) { struct ieee80211_crypt_data *new_crypt; /* take WEP into use */ @@ -355,49 +382,56 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, key, escape_essid(sec.keys[key], len), erq->length, len); sec.key_sizes[key] = len; - (*crypt)->ops->set_key(sec.keys[key], len, NULL, - (*crypt)->priv); + if (*crypt) + (*crypt)->ops->set_key(sec.keys[key], len, NULL, + (*crypt)->priv); sec.flags |= (1 << key); /* This ensures a key will be activated if no key is * explicitely set */ if (key == sec.active_key) sec.flags |= SEC_ACTIVE_KEY; - } else { - len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN, - NULL, (*crypt)->priv); - if (len == 0) { - /* Set a default key of all 0 */ - IEEE80211_DEBUG_WX("Setting key %d to all zero.\n", - key); - memset(sec.keys[key], 0, 13); - (*crypt)->ops->set_key(sec.keys[key], 13, NULL, - (*crypt)->priv); - sec.key_sizes[key] = 13; - sec.flags |= (1 << key); - } + } else { + if (host_crypto) { + len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN, + NULL, (*crypt)->priv); + if (len == 0) { + /* Set a default key of all 0 */ + IEEE80211_DEBUG_WX("Setting key %d to all " + "zero.\n", key); + memset(sec.keys[key], 0, 13); + (*crypt)->ops->set_key(sec.keys[key], 13, NULL, + (*crypt)->priv); + sec.key_sizes[key] = 13; + sec.flags |= (1 << key); + } + } /* No key data - just set the default TX key index */ if (key_provided) { - IEEE80211_DEBUG_WX - ("Setting key %d to default Tx key.\n", key); + IEEE80211_DEBUG_WX("Setting key %d to default Tx " + "key.\n", key); ieee->tx_keyidx = key; sec.active_key = key; sec.flags |= SEC_ACTIVE_KEY; } } - - done: - ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED); - sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY; - sec.flags |= SEC_AUTH_MODE; - IEEE80211_DEBUG_WX("Auth: %s\n", sec.auth_mode == WLAN_AUTH_OPEN ? - "OPEN" : "SHARED KEY"); + if (erq->flags & (IW_ENCODE_OPEN | IW_ENCODE_RESTRICTED)) { + ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED); + sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : + WLAN_AUTH_SHARED_KEY; + sec.flags |= SEC_AUTH_MODE; + IEEE80211_DEBUG_WX("Auth: %s\n", + sec.auth_mode == WLAN_AUTH_OPEN ? + "OPEN" : "SHARED KEY"); + } /* For now we just support WEP, so only set that security level... 
* TODO: When WPA is added this is one place that needs to change */ sec.flags |= SEC_LEVEL; sec.level = SEC_LEVEL_1; /* 40 and 104 bit WEP */ + sec.encode_alg[key] = SEC_ALG_WEP; + done: if (ieee->set_security) ieee->set_security(dev, &sec); @@ -422,6 +456,7 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee, struct iw_point *erq = &(wrqu->encoding); int len, key; struct ieee80211_crypt_data *crypt; + struct ieee80211_security *sec = &ieee->sec; IEEE80211_DEBUG_WX("GET_ENCODE\n"); @@ -436,23 +471,16 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee, crypt = ieee->crypt[key]; erq->flags = key + 1; - if (crypt == NULL || crypt->ops == NULL) { + if (!sec->enabled) { erq->length = 0; erq->flags |= IW_ENCODE_DISABLED; return 0; } - if (strcmp(crypt->ops->name, "WEP") != 0) { - /* only WEP is supported with wireless extensions, so just - * report that encryption is used */ - erq->length = 0; - erq->flags |= IW_ENCODE_ENABLED; - return 0; - } + len = sec->key_sizes[key]; + memcpy(keybuf, sec->keys[key], len); - len = crypt->ops->get_key(keybuf, WEP_KEY_LEN, NULL, crypt->priv); erq->length = (len >= 0 ? len : 0); - erq->flags |= IW_ENCODE_ENABLED; if (ieee->open_wep) @@ -463,6 +491,240 @@ int ieee80211_wx_get_encode(struct ieee80211_device *ieee, return 0; } +int ieee80211_wx_set_encodeext(struct ieee80211_device *ieee, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct net_device *dev = ieee->dev; + struct iw_point *encoding = &wrqu->encoding; + struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; + int i, idx, ret = 0; + int group_key = 0; + const char *alg, *module; + struct ieee80211_crypto_ops *ops; + struct ieee80211_crypt_data **crypt; + + struct ieee80211_security sec = { + .flags = 0, + }; + + idx = encoding->flags & IW_ENCODE_INDEX; + if (idx) { + if (idx < 1 || idx > WEP_KEYS) + return -EINVAL; + idx--; + } else + idx = ieee->tx_keyidx; + + if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) { + crypt = &ieee->crypt[idx]; + group_key = 1; + } else { + if (idx != 0) + return -EINVAL; + if (ieee->iw_mode == IW_MODE_INFRA) + crypt = &ieee->crypt[idx]; + else + return -EINVAL; + } + + sec.flags |= SEC_ENABLED | SEC_ENCRYPT; + if ((encoding->flags & IW_ENCODE_DISABLED) || + ext->alg == IW_ENCODE_ALG_NONE) { + if (*crypt) + ieee80211_crypt_delayed_deinit(ieee, crypt); + + for (i = 0; i < WEP_KEYS; i++) + if (ieee->crypt[i] != NULL) + break; + + if (i == WEP_KEYS) { + sec.enabled = 0; + sec.encrypt = 0; + sec.level = SEC_LEVEL_0; + sec.flags |= SEC_LEVEL; + } + goto done; + } + + sec.enabled = 1; + sec.encrypt = 1; + + if (group_key ? 
!ieee->host_mc_decrypt : + !(ieee->host_encrypt || ieee->host_decrypt || + ieee->host_encrypt_msdu)) + goto skip_host_crypt; + + switch (ext->alg) { + case IW_ENCODE_ALG_WEP: + alg = "WEP"; + module = "ieee80211_crypt_wep"; + break; + case IW_ENCODE_ALG_TKIP: + alg = "TKIP"; + module = "ieee80211_crypt_tkip"; + break; + case IW_ENCODE_ALG_CCMP: + alg = "CCMP"; + module = "ieee80211_crypt_ccmp"; + break; + default: + IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n", + dev->name, ext->alg); + ret = -EINVAL; + goto done; + } + + ops = ieee80211_get_crypto_ops(alg); + if (ops == NULL) { + request_module(module); + ops = ieee80211_get_crypto_ops(alg); + } + if (ops == NULL) { + IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n", + dev->name, ext->alg); + ret = -EINVAL; + goto done; + } + + if (*crypt == NULL || (*crypt)->ops != ops) { + struct ieee80211_crypt_data *new_crypt; + + ieee80211_crypt_delayed_deinit(ieee, crypt); + + new_crypt = (struct ieee80211_crypt_data *) + kmalloc(sizeof(*new_crypt), GFP_KERNEL); + if (new_crypt == NULL) { + ret = -ENOMEM; + goto done; + } + memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); + new_crypt->ops = ops; + if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) + new_crypt->priv = new_crypt->ops->init(idx); + if (new_crypt->priv == NULL) { + kfree(new_crypt); + ret = -EINVAL; + goto done; + } + *crypt = new_crypt; + } + + if (ext->key_len > 0 && (*crypt)->ops->set_key && + (*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq, + (*crypt)->priv) < 0) { + IEEE80211_DEBUG_WX("%s: key setting failed\n", dev->name); + ret = -EINVAL; + goto done; + } + + skip_host_crypt: + if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { + ieee->tx_keyidx = idx; + sec.active_key = idx; + sec.flags |= SEC_ACTIVE_KEY; + } + + if (ext->alg != IW_ENCODE_ALG_NONE) { + memcpy(sec.keys[idx], ext->key, ext->key_len); + sec.key_sizes[idx] = ext->key_len; + sec.flags |= (1 << idx); + if (ext->alg == IW_ENCODE_ALG_WEP) { + sec.encode_alg[idx] = SEC_ALG_WEP; + sec.flags |= SEC_LEVEL; + sec.level = SEC_LEVEL_1; + } else if (ext->alg == IW_ENCODE_ALG_TKIP) { + sec.encode_alg[idx] = SEC_ALG_TKIP; + sec.flags |= SEC_LEVEL; + sec.level = SEC_LEVEL_2; + } else if (ext->alg == IW_ENCODE_ALG_CCMP) { + sec.encode_alg[idx] = SEC_ALG_CCMP; + sec.flags |= SEC_LEVEL; + sec.level = SEC_LEVEL_3; + } + /* Don't set sec level for group keys. */ + if (group_key) + sec.flags &= ~SEC_LEVEL; + } + done: + if (ieee->set_security) + ieee->set_security(ieee->dev, &sec); + + /* + * Do not reset port if card is in Managed mode since resetting will + * generate new IEEE 802.11 authentication which may end up in looping + * with IEEE 802.1X. If your hardware requires a reset after WEP + * configuration (for example... Prism2), implement the reset_port in + * the callbacks structures used to initialize the 802.11 stack. 
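For hardware that does need the reset described in this comment, the hookup is two fields on the ieee80211_device that the driver fills in at attach time. The sketch below is a minimal editor's illustration, not part of the patch: mydrv_reset_port() and mydrv_attach() are invented names, while reset_on_keychange and reset_port are the fields the check that follows this comment actually tests.

/* Minimal sketch of a driver opting in to the port reset; mydrv_* names
 * are hypothetical. */
#include <net/ieee80211.h>

static int mydrv_reset_port(struct net_device *dev)
{
        /* Bounce the hardware here; returning nonzero makes the wx
         * handler report -EINVAL to the caller. */
        return 0;
}

static void mydrv_attach(struct ieee80211_device *ieee)
{
        ieee->reset_on_keychange = 1;           /* hardware wants a reset */
        ieee->reset_port = mydrv_reset_port;    /* skipped in INFRA mode  */
}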
+ */ + if (ieee->reset_on_keychange && + ieee->iw_mode != IW_MODE_INFRA && + ieee->reset_port && ieee->reset_port(dev)) { + IEEE80211_DEBUG_WX("%s: reset_port failed\n", dev->name); + return -EINVAL; + } + + return ret; +} + +int ieee80211_wx_get_encodeext(struct ieee80211_device *ieee, + struct iw_request_info *info, + union iwreq_data *wrqu, char *extra) +{ + struct iw_point *encoding = &wrqu->encoding; + struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; + struct ieee80211_security *sec = &ieee->sec; + int idx, max_key_len; + + max_key_len = encoding->length - sizeof(*ext); + if (max_key_len < 0) + return -EINVAL; + + idx = encoding->flags & IW_ENCODE_INDEX; + if (idx) { + if (idx < 1 || idx > WEP_KEYS) + return -EINVAL; + idx--; + } else + idx = ieee->tx_keyidx; + + if (!ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) + if (idx != 0 || ieee->iw_mode != IW_MODE_INFRA) + return -EINVAL; + + encoding->flags = idx + 1; + memset(ext, 0, sizeof(*ext)); + + if (!sec->enabled) { + ext->alg = IW_ENCODE_ALG_NONE; + ext->key_len = 0; + encoding->flags |= IW_ENCODE_DISABLED; + } else { + if (sec->encode_alg[idx] == SEC_ALG_WEP) + ext->alg = IW_ENCODE_ALG_WEP; + else if (sec->encode_alg[idx] == SEC_ALG_TKIP) + ext->alg = IW_ENCODE_ALG_TKIP; + else if (sec->encode_alg[idx] == SEC_ALG_CCMP) + ext->alg = IW_ENCODE_ALG_CCMP; + else + return -EINVAL; + + ext->key_len = sec->key_sizes[idx]; + memcpy(ext->key, sec->keys[idx], ext->key_len); + encoding->flags |= IW_ENCODE_ENABLED; + if (ext->key_len && + (ext->alg == IW_ENCODE_ALG_TKIP || + ext->alg == IW_ENCODE_ALG_CCMP)) + ext->ext_flags |= IW_ENCODE_EXT_TX_SEQ_VALID; + + } + + return 0; +} + +EXPORT_SYMBOL(ieee80211_wx_set_encodeext); +EXPORT_SYMBOL(ieee80211_wx_get_encodeext); + EXPORT_SYMBOL(ieee80211_wx_get_scan); EXPORT_SYMBOL(ieee80211_wx_set_encode); EXPORT_SYMBOL(ieee80211_wx_get_encode); diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 8bf312bdea13..b425748f02d7 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -241,7 +241,7 @@ static int arp_constructor(struct neighbour *neigh) neigh->type = inet_addr_type(addr); rcu_read_lock(); - in_dev = rcu_dereference(__in_dev_get(dev)); + in_dev = __in_dev_get_rcu(dev); if (in_dev == NULL) { rcu_read_unlock(); return -EINVAL; @@ -697,12 +697,6 @@ void arp_send(int type, int ptype, u32 dest_ip, arp_xmit(skb); } -static void parp_redo(struct sk_buff *skb) -{ - nf_reset(skb); - arp_rcv(skb, skb->dev, NULL, skb->dev); -} - /* * Process an arp request. */ @@ -922,6 +916,11 @@ out: return 0; } +static void parp_redo(struct sk_buff *skb) +{ + arp_process(skb); +} + /* * Receive an arp request from the device layer. 
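The arp.c hunk above, and the devinet.c, fib, igmp, ipmr and ip_gre hunks that follow, replace the old __in_dev_get() with one of two explicit accessors, and the only difference between them is which lock the caller is expected to hold. The sketch below is an editor's illustration of the two intended usage patterns; the example_* function names are invented, while the accessors, ASSERT_RTNL() and the cnf fields are the ones this patch uses.

/* Editor's sketch of the two in_device accessors and their locking
 * contracts; not part of the patch. */
#include <linux/inetdevice.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

/* Fast-path readers run inside an RCU read-side critical section. */
static int example_is_forwarding(struct net_device *dev)
{
        struct in_device *in_dev;
        int ret = 0;

        rcu_read_lock();
        in_dev = __in_dev_get_rcu(dev);
        if (in_dev)
                ret = in_dev->cnf.forwarding;
        rcu_read_unlock();
        return ret;
}

/* Configuration paths already hold the RTNL, so no RCU section is
 * needed; __in_dev_get_rtnl() documents that assumption. */
static void example_set_proxy_arp(struct net_device *dev, int on)
{
        struct in_device *in_dev;

        ASSERT_RTNL();
        in_dev = __in_dev_get_rtnl(dev);
        if (in_dev)
                in_dev->cnf.proxy_arp = on;
}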
@@ -990,8 +989,8 @@ static int arp_req_set(struct arpreq *r, struct net_device * dev) ipv4_devconf.proxy_arp = 1; return 0; } - if (__in_dev_get(dev)) { - __in_dev_get(dev)->cnf.proxy_arp = 1; + if (__in_dev_get_rtnl(dev)) { + __in_dev_get_rtnl(dev)->cnf.proxy_arp = 1; return 0; } return -ENXIO; @@ -1096,8 +1095,8 @@ static int arp_req_delete(struct arpreq *r, struct net_device * dev) ipv4_devconf.proxy_arp = 0; return 0; } - if (__in_dev_get(dev)) { - __in_dev_get(dev)->cnf.proxy_arp = 0; + if (__in_dev_get_rtnl(dev)) { + __in_dev_get_rtnl(dev)->cnf.proxy_arp = 0; return 0; } return -ENXIO; diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index ba2895ae8151..4ec4b2ca6ab1 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -351,7 +351,7 @@ static int inet_insert_ifa(struct in_ifaddr *ifa) static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa) { - struct in_device *in_dev = __in_dev_get(dev); + struct in_device *in_dev = __in_dev_get_rtnl(dev); ASSERT_RTNL(); @@ -449,7 +449,7 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg goto out; rc = -ENOBUFS; - if ((in_dev = __in_dev_get(dev)) == NULL) { + if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) { in_dev = inetdev_init(dev); if (!in_dev) goto out; @@ -584,7 +584,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg) if (colon) *colon = ':'; - if ((in_dev = __in_dev_get(dev)) != NULL) { + if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) { if (tryaddrmatch) { /* Matthias Andree */ /* compare label and address (4.4BSD style) */ @@ -715,6 +715,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg) break; ret = 0; if (ifa->ifa_mask != sin->sin_addr.s_addr) { + u32 old_mask = ifa->ifa_mask; inet_del_ifa(in_dev, ifap, 0); ifa->ifa_mask = sin->sin_addr.s_addr; ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask); @@ -728,7 +729,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg) if ((dev->flags & IFF_BROADCAST) && (ifa->ifa_prefixlen < 31) && (ifa->ifa_broadcast == - (ifa->ifa_local|~ifa->ifa_mask))) { + (ifa->ifa_local|~old_mask))) { ifa->ifa_broadcast = (ifa->ifa_local | ~sin->sin_addr.s_addr); } @@ -748,7 +749,7 @@ rarok: static int inet_gifconf(struct net_device *dev, char __user *buf, int len) { - struct in_device *in_dev = __in_dev_get(dev); + struct in_device *in_dev = __in_dev_get_rtnl(dev); struct in_ifaddr *ifa; struct ifreq ifr; int done = 0; @@ -791,7 +792,7 @@ u32 inet_select_addr(const struct net_device *dev, u32 dst, int scope) struct in_device *in_dev; rcu_read_lock(); - in_dev = __in_dev_get(dev); + in_dev = __in_dev_get_rcu(dev); if (!in_dev) goto no_in_dev; @@ -818,7 +819,7 @@ no_in_dev: read_lock(&dev_base_lock); rcu_read_lock(); for (dev = dev_base; dev; dev = dev->next) { - if ((in_dev = __in_dev_get(dev)) == NULL) + if ((in_dev = __in_dev_get_rcu(dev)) == NULL) continue; for_primary_ifa(in_dev) { @@ -887,7 +888,7 @@ u32 inet_confirm_addr(const struct net_device *dev, u32 dst, u32 local, int scop if (dev) { rcu_read_lock(); - if ((in_dev = __in_dev_get(dev))) + if ((in_dev = __in_dev_get_rcu(dev))) addr = confirm_addr_indev(in_dev, dst, local, scope); rcu_read_unlock(); @@ -897,7 +898,7 @@ u32 inet_confirm_addr(const struct net_device *dev, u32 dst, u32 local, int scop read_lock(&dev_base_lock); rcu_read_lock(); for (dev = dev_base; dev; dev = dev->next) { - if ((in_dev = __in_dev_get(dev))) { + if ((in_dev = __in_dev_get_rcu(dev))) { addr = confirm_addr_indev(in_dev, dst, local, scope); if (addr) break; @@ -957,7 +958,7 @@ static int inetdev_event(struct 
notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = ptr; - struct in_device *in_dev = __in_dev_get(dev); + struct in_device *in_dev = __in_dev_get_rtnl(dev); ASSERT_RTNL(); @@ -1078,7 +1079,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) if (idx > s_idx) s_ip_idx = 0; rcu_read_lock(); - if ((in_dev = __in_dev_get(dev)) == NULL) { + if ((in_dev = __in_dev_get_rcu(dev)) == NULL) { rcu_read_unlock(); continue; } @@ -1149,7 +1150,7 @@ void inet_forward_change(void) for (dev = dev_base; dev; dev = dev->next) { struct in_device *in_dev; rcu_read_lock(); - in_dev = __in_dev_get(dev); + in_dev = __in_dev_get_rcu(dev); if (in_dev) in_dev->cnf.forwarding = on; rcu_read_unlock(); diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 1b5a09d1b90b..1b18ce66e7b7 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -42,10 +43,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) esp = x->data; alen = esp->auth.icv_trunc_len; tfm = esp->conf.tfm; - blksize = (crypto_tfm_alg_blocksize(tfm) + 3) & ~3; - clen = (clen + 2 + blksize-1)&~(blksize-1); + blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4); + clen = ALIGN(clen + 2, blksize); if (esp->conf.padlen) - clen = (clen + esp->conf.padlen-1)&~(esp->conf.padlen-1); + clen = ALIGN(clen, esp->conf.padlen); if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) goto error; @@ -143,7 +144,7 @@ static int esp_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struc struct ip_esp_hdr *esph; struct esp_data *esp = x->data; struct sk_buff *trailer; - int blksize = crypto_tfm_alg_blocksize(esp->conf.tfm); + int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4); int alen = esp->auth.icv_trunc_len; int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen; int nfrags; @@ -304,16 +305,16 @@ static int esp_post_input(struct xfrm_state *x, struct xfrm_decap_state *decap, static u32 esp4_get_max_size(struct xfrm_state *x, int mtu) { struct esp_data *esp = x->data; - u32 blksize = crypto_tfm_alg_blocksize(esp->conf.tfm); + u32 blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4); if (x->props.mode) { - mtu = (mtu + 2 + blksize-1)&~(blksize-1); + mtu = ALIGN(mtu + 2, blksize); } else { /* The worst case. 
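The esp4.c hunk above replaces the open-coded mask arithmetic with ALIGN(). A quick standalone check with assumed numbers (a 16-byte cipher block, 41 bytes of plaintext) is below; it is an editor's illustration, not part of the patch, and the macro is restated locally so the example runs in userspace.

/* Editor's illustration of the ALIGN() rewrites in esp4.c; the macro
 * body matches the kernel's definition for power-of-two alignments. */
#include <stdio.h>

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned int cipher_blk = 16;                   /* crypto_tfm_alg_blocksize(tfm), assumed */
        unsigned int blksize    = ALIGN(cipher_blk, 4); /* still 16 */
        unsigned int payload    = 41;                   /* plaintext length, assumed */

        /* old: clen = (clen + 2 + blksize-1) & ~(blksize-1);
         * new: clen = ALIGN(clen + 2, blksize);  -- same result */
        unsigned int clen = ALIGN(payload + 2, blksize);

        printf("blksize=%u clen=%u\n", blksize, clen);  /* blksize=16 clen=48 */
        return 0;
}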
*/ - mtu += 2 + blksize; + mtu = ALIGN(mtu + 2, 4) + blksize - 4; } if (esp->conf.padlen) - mtu = (mtu + esp->conf.padlen-1)&~(esp->conf.padlen-1); + mtu = ALIGN(mtu, esp->conf.padlen); return mtu + x->props.header_len + esp->auth.icv_trunc_len; } diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 4e1379f71269..e61bc7177eb1 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -173,7 +173,7 @@ int fib_validate_source(u32 src, u32 dst, u8 tos, int oif, no_addr = rpf = 0; rcu_read_lock(); - in_dev = __in_dev_get(dev); + in_dev = __in_dev_get_rcu(dev); if (in_dev) { no_addr = in_dev->ifa_list == NULL; rpf = IN_DEV_RPFILTER(in_dev); @@ -607,7 +607,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = ptr; - struct in_device *in_dev = __in_dev_get(dev); + struct in_device *in_dev = __in_dev_get_rtnl(dev); if (event == NETDEV_UNREGISTER) { fib_disable_ip(dev, 2); diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index d41219e8037c..186f20c4a45e 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -1087,7 +1087,7 @@ fib_convert_rtentry(int cmd, struct nlmsghdr *nl, struct rtmsg *rtm, rta->rta_oif = &dev->ifindex; if (colon) { struct in_ifaddr *ifa; - struct in_device *in_dev = __in_dev_get(dev); + struct in_device *in_dev = __in_dev_get_rtnl(dev); if (!in_dev) return -ENODEV; *colon = ':'; @@ -1268,7 +1268,7 @@ int fib_sync_up(struct net_device *dev) } if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP)) continue; - if (nh->nh_dev != dev || __in_dev_get(dev) == NULL) + if (nh->nh_dev != dev || !__in_dev_get_rtnl(dev)) continue; alive++; spin_lock_bh(&fib_multipath_lock); diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 50c0519cd70d..66247f38b371 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -286,6 +286,8 @@ static inline void check_tnode(const struct tnode *tn) static int halve_threshold = 25; static int inflate_threshold = 50; +static int halve_threshold_root = 15; +static int inflate_threshold_root = 25; static void __alias_free_mem(struct rcu_head *head) @@ -449,6 +451,8 @@ static struct node *resize(struct trie *t, struct tnode *tn) int i; int err = 0; struct tnode *old_tn; + int inflate_threshold_use; + int halve_threshold_use; if (!tn) return NULL; @@ -541,10 +545,17 @@ static struct node *resize(struct trie *t, struct tnode *tn) check_tnode(tn); + /* Keep root node larger */ + + if(!tn->parent) + inflate_threshold_use = inflate_threshold_root; + else + inflate_threshold_use = inflate_threshold; + err = 0; while ((tn->full_children > 0 && 50 * (tn->full_children + tnode_child_length(tn) - tn->empty_children) >= - inflate_threshold * tnode_child_length(tn))) { + inflate_threshold_use * tnode_child_length(tn))) { old_tn = tn; tn = inflate(t, tn); @@ -564,10 +575,18 @@ static struct node *resize(struct trie *t, struct tnode *tn) * node is above threshold. 
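The new *_root thresholds in the fib_trie.c hunk above exist so that the root tnode inflates sooner and halves later than interior nodes, keeping the top of the trie wide. The standalone check below is an editor's sketch with an assumed node shape rather than patch code; it shows the inflate inequality flipping between the root and non-root thresholds, and the halve test in the lines that follow mirrors it with halve_threshold_root.

/* Editor's sketch of the resize() inflate test with the per-root
 * threshold; the numbers are assumed, the inequality is the one used
 * in the while() loop above. */
#include <stdio.h>

static int should_inflate(int children, int full, int empty, int is_root)
{
        int threshold = is_root ? 25    /* inflate_threshold_root */
                                : 50;   /* inflate_threshold      */

        return full > 0 &&
               50 * (full + children - empty) >= threshold * children;
}

int main(void)
{
        /* A sparsely filled tnode: 16 slots, 2 full children, 9 empty. */
        printf("root:     %d\n", should_inflate(16, 2, 9, 1));  /* 1: inflate  */
        printf("non-root: %d\n", should_inflate(16, 2, 9, 0));  /* 0: leave it */
        return 0;
}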
*/ + + /* Keep root node larger */ + + if(!tn->parent) + halve_threshold_use = halve_threshold_root; + else + halve_threshold_use = halve_threshold; + err = 0; while (tn->bits > 1 && 100 * (tnode_child_length(tn) - tn->empty_children) < - halve_threshold * tnode_child_length(tn)) { + halve_threshold_use * tnode_child_length(tn)) { old_tn = tn; tn = halve(t, tn); @@ -2385,7 +2404,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v) prefix = htonl(l->key); list_for_each_entry_rcu(fa, &li->falh, fa_list) { - const struct fib_info *fi = rcu_dereference(fa->fa_info); + const struct fib_info *fi = fa->fa_info; unsigned flags = fib_flag_trans(fa->fa_type, mask, fi); if (fa->fa_type == RTN_BROADCAST diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 24eb56ae1b5a..175e093ec564 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -188,7 +188,7 @@ struct icmp_err icmp_err_convert[] = { /* Control parameters for ECHO replies. */ int sysctl_icmp_echo_ignore_all; -int sysctl_icmp_echo_ignore_broadcasts; +int sysctl_icmp_echo_ignore_broadcasts = 1; /* Control parameter - ignore bogus broadcast responses? */ int sysctl_icmp_ignore_bogus_error_responses; @@ -1108,12 +1108,9 @@ void __init icmp_init(struct net_proto_family *ops) struct inet_sock *inet; int i; - for (i = 0; i < NR_CPUS; i++) { + for_each_cpu(i) { int err; - if (!cpu_possible(i)) - continue; - err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP, &per_cpu(__icmp_socket, i)); diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 70c44e4c3ceb..8b6d3939e1e6 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1323,7 +1323,7 @@ static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr) } if (dev) { imr->imr_ifindex = dev->ifindex; - idev = __in_dev_get(dev); + idev = __in_dev_get_rtnl(dev); } return idev; } diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index fe3c6d3d0c91..94468a76c5b4 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -494,7 +494,7 @@ void inet_csk_reqsk_queue_prune(struct sock *parent, EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune); struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req, - const unsigned int __nocast priority) + const gfp_t priority) { struct sock *newsk = sk_clone(sk, priority); diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index 4d1502a49852..a010e9a68811 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -20,7 +20,7 @@ void __inet_twsk_kill(struct inet_timewait_sock *tw, struct inet_hashinfo *hashi struct inet_bind_hashbucket *bhead; struct inet_bind_bucket *tb; /* Unlink from established hashes. */ - struct inet_ehash_bucket *ehead = &hashinfo->ehash[tw->tw_hashent]; + struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, tw->tw_hash); write_lock(&ehead->lock); if (hlist_unhashed(&tw->tw_node)) { @@ -60,7 +60,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, { const struct inet_sock *inet = inet_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); - struct inet_ehash_bucket *ehead = &hashinfo->ehash[sk->sk_hashent]; + struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash); struct inet_bind_hashbucket *bhead; /* Step 1: Put TW into bind hash. Original socket stays there too. 
Note, that any socket with inet->num != 0 MUST be bound in @@ -106,11 +106,12 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat tw->tw_dport = inet->dport; tw->tw_family = sk->sk_family; tw->tw_reuse = sk->sk_reuse; - tw->tw_hashent = sk->sk_hashent; + tw->tw_hash = sk->sk_hash; tw->tw_ipv6only = 0; tw->tw_prot = sk->sk_prot_creator; atomic_set(&tw->tw_refcnt, 1); inet_twsk_dead_node_init(tw); + __module_get(tw->tw_prot->owner); } return tw; diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index f0d5740d7e22..896ce3f8f53a 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -1104,10 +1104,10 @@ static int ipgre_open(struct net_device *dev) return -EADDRNOTAVAIL; dev = rt->u.dst.dev; ip_rt_put(rt); - if (__in_dev_get(dev) == NULL) + if (__in_dev_get_rtnl(dev) == NULL) return -EADDRNOTAVAIL; t->mlink = dev->ifindex; - ip_mc_inc_group(__in_dev_get(dev), t->parms.iph.daddr); + ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr); } return 0; } diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 3f1a263e1249..87e350069abb 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -391,6 +391,9 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) to->nfct = from->nfct; nf_conntrack_get(to->nfct); to->nfctinfo = from->nfctinfo; +#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) + to->ipvs_property = from->ipvs_property; +#endif #ifdef CONFIG_BRIDGE_NETFILTER nf_bridge_put(to->nf_bridge); to->nf_bridge = from->nf_bridge; @@ -1020,10 +1023,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page, int alloclen; skb_prev = skb; - if (skb_prev) - fraggap = skb_prev->len - maxfraglen; - else - fraggap = 0; + fraggap = skb_prev->len - maxfraglen; alloclen = fragheaderlen + hh_len + fraggap + 15; skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation); diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 9dbf5909f3a6..302b7eb507c9 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -149,7 +149,7 @@ struct net_device *ipmr_new_tunnel(struct vifctl *v) if (err == 0 && (dev = __dev_get_by_name(p.name)) != NULL) { dev->flags |= IFF_MULTICAST; - in_dev = __in_dev_get(dev); + in_dev = __in_dev_get_rtnl(dev); if (in_dev == NULL && (in_dev = inetdev_init(dev)) == NULL) goto failure; in_dev->cnf.rp_filter = 0; @@ -278,7 +278,7 @@ static int vif_delete(int vifi) dev_set_allmulti(dev, -1); - if ((in_dev = __in_dev_get(dev)) != NULL) { + if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) { in_dev->cnf.mc_forwarding--; ip_rt_multicast_event(in_dev); } @@ -421,7 +421,7 @@ static int vif_add(struct vifctl *vifc, int mrtsock) return -EINVAL; } - if ((in_dev = __in_dev_get(dev)) == NULL) + if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) return -EADDRNOTAVAIL; in_dev->cnf.mc_forwarding++; dev_set_allmulti(dev, +1); diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c index 6e092dadb388..fc6f95aaa969 100644 --- a/net/ipv4/ipvs/ip_vs_app.c +++ b/net/ipv4/ipvs/ip_vs_app.c @@ -604,7 +604,7 @@ static struct file_operations ip_vs_app_fops = { /* * Replace a segment of data with a new segment */ -int ip_vs_skb_replace(struct sk_buff *skb, int pri, +int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri, char *o_buf, int o_len, char *n_buf, int n_len) { struct iphdr *iph; diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index 3cf9b451675c..7d917e4ce1d9 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig @@ -139,9 +139,10 @@ config IP_NF_AMANDA config IP_NF_PPTP tristate 'PPTP protocol 
support' + depends on IP_NF_CONNTRACK help This module adds support for PPTP (Point to Point Tunnelling - Protocol, RFC2637) conncection tracking and NAT. + Protocol, RFC2637) connection tracking and NAT. If you are running PPTP sessions over a stateful firewall or NAT box, you may want to enable this feature. @@ -498,9 +499,14 @@ config IP_NF_TARGET_LOG To compile it as a module, choose M here. If unsure, say N. config IP_NF_TARGET_ULOG - tristate "ULOG target support" + tristate "ULOG target support (OBSOLETE)" depends on IP_NF_IPTABLES ---help--- + + This option enables the old IPv4-only "ipt_ULOG" implementation + which has been obsoleted by the new "nfnetlink_log" code (see + CONFIG_NETFILTER_NETLINK_LOG). + This option adds a `ULOG' target, which allows you to create rules in any iptables table. The packet is passed to a userspace logging daemon using netlink multicast sockets; unlike the LOG target @@ -537,6 +543,17 @@ config IP_NF_TARGET_TCPMSS To compile it as a module, choose M here. If unsure, say N. +config IP_NF_TARGET_NFQUEUE + tristate "NFQUEUE Target Support" + depends on IP_NF_IPTABLES + help + This Target replaced the old obsolete QUEUE target. + + As opposed to QUEUE, it supports 65535 different queues, + not just one. + + To compile it as a module, choose M here. If unsure, say N. + # NAT + specific targets config IP_NF_NAT tristate "Full NAT" diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile index 3d45d3c0283c..dab4b58dd31e 100644 --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile @@ -4,7 +4,8 @@ # objects for the standalone - connection tracking / NAT ip_conntrack-objs := ip_conntrack_standalone.o ip_conntrack_core.o ip_conntrack_proto_generic.o ip_conntrack_proto_tcp.o ip_conntrack_proto_udp.o ip_conntrack_proto_icmp.o -iptable_nat-objs := ip_nat_standalone.o ip_nat_rule.o ip_nat_core.o ip_nat_helper.o ip_nat_proto_unknown.o ip_nat_proto_tcp.o ip_nat_proto_udp.o ip_nat_proto_icmp.o +ip_nat-objs := ip_nat_core.o ip_nat_helper.o ip_nat_proto_unknown.o ip_nat_proto_tcp.o ip_nat_proto_udp.o ip_nat_proto_icmp.o +iptable_nat-objs := ip_nat_rule.o ip_nat_standalone.o ip_conntrack_pptp-objs := ip_conntrack_helper_pptp.o ip_conntrack_proto_gre.o ip_nat_pptp-objs := ip_nat_helper_pptp.o ip_nat_proto_gre.o @@ -40,7 +41,7 @@ obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o # the three instances of ip_tables obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o -obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o +obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o ip_nat.o obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o # matches @@ -92,6 +93,7 @@ obj-$(CONFIG_IP_NF_TARGET_TCPMSS) += ipt_TCPMSS.o obj-$(CONFIG_IP_NF_TARGET_NOTRACK) += ipt_NOTRACK.o obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o +obj-$(CONFIG_IP_NF_TARGET_NFQUEUE) += ipt_NFQUEUE.o # generic ARP tables obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o @@ -101,4 +103,3 @@ obj-$(CONFIG_IP_NF_ARP_MANGLE) += arpt_mangle.o obj-$(CONFIG_IP_NF_ARPFILTER) += arptable_filter.o obj-$(CONFIG_IP_NF_QUEUE) += ip_queue.o -obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += ipt_NFQUEUE.o diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index fa1634256680..a7969286e6e7 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -716,8 +716,10 @@ static int translate_table(const char *name, } /* And one copy for every other CPU */ - for (i = 1; i < num_possible_cpus(); i++) { - 
memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i, + for_each_cpu(i) { + if (i == 0) + continue; + memcpy(newinfo->entries + SMP_ALIGN(newinfo->size) * i, newinfo->entries, SMP_ALIGN(newinfo->size)); } @@ -767,7 +769,7 @@ static void get_counters(const struct arpt_table_info *t, unsigned int cpu; unsigned int i; - for (cpu = 0; cpu < num_possible_cpus(); cpu++) { + for_each_cpu(cpu) { i = 0; ARPT_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu), t->size, @@ -885,7 +887,8 @@ static int do_replace(void __user *user, unsigned int len) return -ENOMEM; newinfo = vmalloc(sizeof(struct arpt_table_info) - + SMP_ALIGN(tmp.size) * num_possible_cpus()); + + SMP_ALIGN(tmp.size) * + (highest_possible_processor_id()+1)); if (!newinfo) return -ENOMEM; @@ -1158,7 +1161,8 @@ int arpt_register_table(struct arpt_table *table, = { 0, 0, 0, { 0 }, { 0 }, { } }; newinfo = vmalloc(sizeof(struct arpt_table_info) - + SMP_ALIGN(repl->size) * num_possible_cpus()); + + SMP_ALIGN(repl->size) * + (highest_possible_processor_id()+1)); if (!newinfo) { ret = -ENOMEM; return ret; diff --git a/net/ipv4/netfilter/ip_conntrack_amanda.c b/net/ipv4/netfilter/ip_conntrack_amanda.c index dc20881004bc..fa3f914117ec 100644 --- a/net/ipv4/netfilter/ip_conntrack_amanda.c +++ b/net/ipv4/netfilter/ip_conntrack_amanda.c @@ -65,7 +65,7 @@ static int help(struct sk_buff **pskb, /* increase the UDP timeout of the master connection as replies from * Amanda clients to the server can be quite delayed */ - ip_ct_refresh_acct(ct, ctinfo, NULL, master_timeout * HZ); + ip_ct_refresh(ct, *pskb, master_timeout * HZ); /* No data? */ dataoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct udphdr); diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c index c1f82e0c81cf..422ab68ee7fb 100644 --- a/net/ipv4/netfilter/ip_conntrack_core.c +++ b/net/ipv4/netfilter/ip_conntrack_core.c @@ -50,7 +50,7 @@ #include #include -#define IP_CONNTRACK_VERSION "2.3" +#define IP_CONNTRACK_VERSION "2.4" #if 0 #define DEBUGP printk @@ -148,16 +148,20 @@ DEFINE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat); static int ip_conntrack_hash_rnd_initted; static unsigned int ip_conntrack_hash_rnd; -static u_int32_t -hash_conntrack(const struct ip_conntrack_tuple *tuple) +static u_int32_t __hash_conntrack(const struct ip_conntrack_tuple *tuple, + unsigned int size, unsigned int rnd) { -#if 0 - dump_tuple(tuple); -#endif return (jhash_3words(tuple->src.ip, (tuple->dst.ip ^ tuple->dst.protonum), (tuple->src.u.all | (tuple->dst.u.all << 16)), - ip_conntrack_hash_rnd) % ip_conntrack_htable_size); + rnd) % size); +} + +static u_int32_t +hash_conntrack(const struct ip_conntrack_tuple *tuple) +{ + return __hash_conntrack(tuple, ip_conntrack_htable_size, + ip_conntrack_hash_rnd); } int @@ -1112,45 +1116,49 @@ void ip_conntrack_helper_unregister(struct ip_conntrack_helper *me) synchronize_net(); } -static inline void ct_add_counters(struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - const struct sk_buff *skb) -{ -#ifdef CONFIG_IP_NF_CT_ACCT - if (skb) { - ct->counters[CTINFO2DIR(ctinfo)].packets++; - ct->counters[CTINFO2DIR(ctinfo)].bytes += - ntohs(skb->nh.iph->tot_len); - } -#endif -} - -/* Refresh conntrack for this many jiffies and do accounting (if skb != NULL) */ -void ip_ct_refresh_acct(struct ip_conntrack *ct, +/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */ +void __ip_ct_refresh_acct(struct ip_conntrack *ct, enum ip_conntrack_info ctinfo, const struct sk_buff *skb, - unsigned long extra_jiffies) + 
unsigned long extra_jiffies, + int do_acct) { + int event = 0; + IP_NF_ASSERT(ct->timeout.data == (unsigned long)ct); + IP_NF_ASSERT(skb); + + write_lock_bh(&ip_conntrack_lock); /* If not in hash table, timer will not be active yet */ if (!is_confirmed(ct)) { ct->timeout.expires = extra_jiffies; - ct_add_counters(ct, ctinfo, skb); + event = IPCT_REFRESH; } else { - write_lock_bh(&ip_conntrack_lock); /* Need del_timer for race avoidance (may already be dying). */ if (del_timer(&ct->timeout)) { ct->timeout.expires = jiffies + extra_jiffies; add_timer(&ct->timeout); - /* FIXME: We loose some REFRESH events if this function - * is called without an skb. I'll fix this later -HW */ - if (skb) - ip_conntrack_event_cache(IPCT_REFRESH, skb); + event = IPCT_REFRESH; } - ct_add_counters(ct, ctinfo, skb); - write_unlock_bh(&ip_conntrack_lock); } + +#ifdef CONFIG_IP_NF_CT_ACCT + if (do_acct) { + ct->counters[CTINFO2DIR(ctinfo)].packets++; + ct->counters[CTINFO2DIR(ctinfo)].bytes += + ntohs(skb->nh.iph->tot_len); + if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000) + || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000)) + event |= IPCT_COUNTER_FILLING; + } +#endif + + write_unlock_bh(&ip_conntrack_lock); + + /* must be unlocked when calling event cache */ + if (event) + ip_conntrack_event_cache(event, skb); } #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ @@ -1337,14 +1345,13 @@ static int kill_all(struct ip_conntrack *i, void *data) return 1; } -static void free_conntrack_hash(void) +static void free_conntrack_hash(struct list_head *hash, int vmalloced,int size) { - if (ip_conntrack_vmalloc) - vfree(ip_conntrack_hash); + if (vmalloced) + vfree(hash); else - free_pages((unsigned long)ip_conntrack_hash, - get_order(sizeof(struct list_head) - * ip_conntrack_htable_size)); + free_pages((unsigned long)hash, + get_order(sizeof(struct list_head) * size)); } void ip_conntrack_flush() @@ -1374,12 +1381,83 @@ void ip_conntrack_cleanup(void) ip_conntrack_flush(); kmem_cache_destroy(ip_conntrack_cachep); kmem_cache_destroy(ip_conntrack_expect_cachep); - free_conntrack_hash(); + free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc, + ip_conntrack_htable_size); nf_unregister_sockopt(&so_getorigdst); } -static int hashsize; -module_param(hashsize, int, 0400); +static struct list_head *alloc_hashtable(int size, int *vmalloced) +{ + struct list_head *hash; + unsigned int i; + + *vmalloced = 0; + hash = (void*)__get_free_pages(GFP_KERNEL, + get_order(sizeof(struct list_head) + * size)); + if (!hash) { + *vmalloced = 1; + printk(KERN_WARNING"ip_conntrack: falling back to vmalloc.\n"); + hash = vmalloc(sizeof(struct list_head) * size); + } + + if (hash) + for (i = 0; i < size; i++) + INIT_LIST_HEAD(&hash[i]); + + return hash; +} + +int set_hashsize(const char *val, struct kernel_param *kp) +{ + int i, bucket, hashsize, vmalloced; + int old_vmalloced, old_size; + int rnd; + struct list_head *hash, *old_hash; + struct ip_conntrack_tuple_hash *h; + + /* On boot, we can set this without any fancy locking. 
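The refactored __hash_conntrack() a little above now takes the table size and the random seed as arguments, which is what makes the runtime resize in set_hashsize() possible: the bucket index depends on both, so every entry has to be pulled off the old chains and re-added under the new size and seed. The sketch below restates the bucket computation as a standalone helper; it is an editor's illustration, not patch code, and example_bucket() is an invented name.

/* Editor's sketch of the conntrack bucket selection; the expression is
 * the one used by __hash_conntrack() in this patch. */
#include <linux/jhash.h>
#include <linux/netfilter_ipv4/ip_conntrack_tuple.h>

static u_int32_t example_bucket(const struct ip_conntrack_tuple *tuple,
                                unsigned int size, unsigned int rnd)
{
        return jhash_3words(tuple->src.ip,
                            tuple->dst.ip ^ tuple->dst.protonum,
                            tuple->src.u.all | (tuple->dst.u.all << 16),
                            rnd) % size;
}

With the 0600 mode passed to module_param_call() further down, the new size can presumably be written at runtime through /sys/module/ip_conntrack/parameters/hashsize, at which point set_hashsize() allocates the new table, rehashes every entry with a fresh seed and frees the old table.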
*/ + if (!ip_conntrack_htable_size) + return param_set_int(val, kp); + + hashsize = simple_strtol(val, NULL, 0); + if (!hashsize) + return -EINVAL; + + hash = alloc_hashtable(hashsize, &vmalloced); + if (!hash) + return -ENOMEM; + + /* We have to rehash for the new table anyway, so we also can + * use a new random seed */ + get_random_bytes(&rnd, 4); + + write_lock_bh(&ip_conntrack_lock); + for (i = 0; i < ip_conntrack_htable_size; i++) { + while (!list_empty(&ip_conntrack_hash[i])) { + h = list_entry(ip_conntrack_hash[i].next, + struct ip_conntrack_tuple_hash, list); + list_del(&h->list); + bucket = __hash_conntrack(&h->tuple, hashsize, rnd); + list_add_tail(&h->list, &hash[bucket]); + } + } + old_size = ip_conntrack_htable_size; + old_vmalloced = ip_conntrack_vmalloc; + old_hash = ip_conntrack_hash; + + ip_conntrack_htable_size = hashsize; + ip_conntrack_vmalloc = vmalloced; + ip_conntrack_hash = hash; + ip_conntrack_hash_rnd = rnd; + write_unlock_bh(&ip_conntrack_lock); + + free_conntrack_hash(old_hash, old_vmalloced, old_size); + return 0; +} + +module_param_call(hashsize, set_hashsize, param_get_uint, + &ip_conntrack_htable_size, 0600); int __init ip_conntrack_init(void) { @@ -1388,9 +1466,7 @@ int __init ip_conntrack_init(void) /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB * machine has 256 buckets. >= 1GB machines have 8192 buckets. */ - if (hashsize) { - ip_conntrack_htable_size = hashsize; - } else { + if (!ip_conntrack_htable_size) { ip_conntrack_htable_size = (((num_physpages << PAGE_SHIFT) / 16384) / sizeof(struct list_head)); @@ -1412,20 +1488,8 @@ int __init ip_conntrack_init(void) return ret; } - /* AK: the hash table is twice as big than needed because it - uses list_head. it would be much nicer to caches to use a - single pointer list head here. 
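The removed "AK:" remark above is about memory, not correctness: with a struct list_head per bucket each head carries two pointers, while a single-pointer head (the kernel's hlist_head) would halve the table. The comparison below is an editor's illustration with userspace stand-ins for the kernel types, included only to make the factor of two concrete.

/* Editor's illustration of the removed remark; these are stand-in
 * definitions, not the kernel's own. */
#include <stdio.h>

struct list_head  { struct list_head *next, *prev; };
struct hlist_node;
struct hlist_head { struct hlist_node *first; };

int main(void)
{
        unsigned int buckets = 8192;    /* assumed table size for a ~1GB machine */

        printf("list_head buckets:  %zu bytes\n",
               buckets * sizeof(struct list_head));
        printf("hlist_head buckets: %zu bytes\n",
               buckets * sizeof(struct hlist_head));
        return 0;
}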
*/ - ip_conntrack_vmalloc = 0; - ip_conntrack_hash - =(void*)__get_free_pages(GFP_KERNEL, - get_order(sizeof(struct list_head) - *ip_conntrack_htable_size)); - if (!ip_conntrack_hash) { - ip_conntrack_vmalloc = 1; - printk(KERN_WARNING "ip_conntrack: falling back to vmalloc.\n"); - ip_conntrack_hash = vmalloc(sizeof(struct list_head) - * ip_conntrack_htable_size); - } + ip_conntrack_hash = alloc_hashtable(ip_conntrack_htable_size, + &ip_conntrack_vmalloc); if (!ip_conntrack_hash) { printk(KERN_ERR "Unable to create ip_conntrack_hash\n"); goto err_unreg_sockopt; @@ -1457,9 +1521,6 @@ int __init ip_conntrack_init(void) ip_ct_protos[IPPROTO_ICMP] = &ip_conntrack_protocol_icmp; write_unlock_bh(&ip_conntrack_lock); - for (i = 0; i < ip_conntrack_htable_size; i++) - INIT_LIST_HEAD(&ip_conntrack_hash[i]); - /* For use by ipt_REJECT */ ip_ct_attach = ip_conntrack_attach; @@ -1474,7 +1535,8 @@ int __init ip_conntrack_init(void) err_free_conntrack_slab: kmem_cache_destroy(ip_conntrack_cachep); err_free_hash: - free_conntrack_hash(); + free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc, + ip_conntrack_htable_size); err_unreg_sockopt: nf_unregister_sockopt(&so_getorigdst); diff --git a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c index 79db5b70d5f6..926a6684643d 100644 --- a/net/ipv4/netfilter/ip_conntrack_helper_pptp.c +++ b/net/ipv4/netfilter/ip_conntrack_helper_pptp.c @@ -172,7 +172,6 @@ static int destroy_sibling_or_exp(const struct ip_conntrack_tuple *t) DEBUGP("setting timeout of conntrack %p to 0\n", sibling); sibling->proto.gre.timeout = 0; sibling->proto.gre.stream_timeout = 0; - /* refresh_acct will not modify counters if skb == NULL */ if (del_timer(&sibling->timeout)) sibling->timeout.function((unsigned long)sibling); ip_conntrack_put(sibling); @@ -223,8 +222,8 @@ static void pptp_destroy_siblings(struct ip_conntrack *ct) static inline int exp_gre(struct ip_conntrack *master, u_int32_t seq, - u_int16_t callid, - u_int16_t peer_callid) + __be16 callid, + __be16 peer_callid) { struct ip_conntrack_tuple inv_tuple; struct ip_conntrack_tuple exp_tuples[] = { @@ -263,7 +262,7 @@ exp_gre(struct ip_conntrack *master, exp_orig->mask.src.ip = 0xffffffff; exp_orig->mask.src.u.all = 0; exp_orig->mask.dst.u.all = 0; - exp_orig->mask.dst.u.gre.key = 0xffff; + exp_orig->mask.dst.u.gre.key = htons(0xffff); exp_orig->mask.dst.ip = 0xffffffff; exp_orig->mask.dst.protonum = 0xff; @@ -340,7 +339,8 @@ pptp_inbound_pkt(struct sk_buff **pskb, unsigned int reqlen; union pptp_ctrl_union _pptpReq, *pptpReq; struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info; - u_int16_t msg, *cid, *pcid; + u_int16_t msg; + __be16 *cid, *pcid; u_int32_t seq; ctlh = skb_header_pointer(*pskb, nexthdr_off, sizeof(_ctlh), &_ctlh); @@ -485,7 +485,7 @@ pptp_inbound_pkt(struct sk_buff **pskb, if (info->pns_call_id != ntohs(*pcid)) { DEBUGP("%s for unknown CallID %u\n", - pptp_msg_name[msg], ntohs(*cid)); + pptp_msg_name[msg], ntohs(*pcid)); break; } @@ -551,7 +551,8 @@ pptp_outbound_pkt(struct sk_buff **pskb, unsigned int reqlen; union pptp_ctrl_union _pptpReq, *pptpReq; struct ip_ct_pptp_master *info = &ct->help.ct_pptp_info; - u_int16_t msg, *cid, *pcid; + u_int16_t msg; + __be16 *cid, *pcid; ctlh = skb_header_pointer(*pskb, nexthdr_off, sizeof(_ctlh), &_ctlh); if (!ctlh) @@ -755,7 +756,7 @@ static struct ip_conntrack_helper pptp = { } }, .mask = { .src = { .ip = 0, - .u = { .tcp = { .port = 0xffff } } + .u = { .tcp = { .port = __constant_htons(0xffff) } } }, .dst = { .ip = 
0, .u = { .all = 0 }, diff --git a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c index 71ef19d126d0..186646eb249f 100644 --- a/net/ipv4/netfilter/ip_conntrack_netbios_ns.c +++ b/net/ipv4/netfilter/ip_conntrack_netbios_ns.c @@ -58,7 +58,7 @@ static int help(struct sk_buff **pskb, goto out; rcu_read_lock(); - in_dev = __in_dev_get(rt->u.dst.dev); + in_dev = __in_dev_get_rcu(rt->u.dst.dev); if (in_dev != NULL) { for_primary_ifa(in_dev) { if (ifa->ifa_broadcast == iph->daddr) { @@ -91,7 +91,7 @@ static int help(struct sk_buff **pskb, ip_conntrack_expect_related(exp); ip_conntrack_expect_put(exp); - ip_ct_refresh_acct(ct, ctinfo, NULL, timeout * HZ); + ip_ct_refresh(ct, *pskb, timeout * HZ); out: return NF_ACCEPT; } diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c index b08a432efcf8..166e6069f121 100644 --- a/net/ipv4/netfilter/ip_conntrack_netlink.c +++ b/net/ipv4/netfilter/ip_conntrack_netlink.c @@ -177,11 +177,11 @@ ctnetlink_dump_counters(struct sk_buff *skb, const struct ip_conntrack *ct, struct nfattr *nest_count = NFA_NEST(skb, type); u_int64_t tmp; - tmp = cpu_to_be64(ct->counters[dir].packets); - NFA_PUT(skb, CTA_COUNTERS_PACKETS, sizeof(u_int64_t), &tmp); + tmp = htonl(ct->counters[dir].packets); + NFA_PUT(skb, CTA_COUNTERS32_PACKETS, sizeof(u_int32_t), &tmp); - tmp = cpu_to_be64(ct->counters[dir].bytes); - NFA_PUT(skb, CTA_COUNTERS_BYTES, sizeof(u_int64_t), &tmp); + tmp = htonl(ct->counters[dir].bytes); + NFA_PUT(skb, CTA_COUNTERS32_BYTES, sizeof(u_int32_t), &tmp); NFA_NEST_END(skb, nest_count); @@ -833,7 +833,8 @@ out: static inline int ctnetlink_change_status(struct ip_conntrack *ct, struct nfattr *cda[]) { - unsigned long d, status = *(u_int32_t *)NFA_DATA(cda[CTA_STATUS-1]); + unsigned long d; + unsigned status = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_STATUS-1])); d = ct->status ^ status; if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING)) @@ -948,6 +949,31 @@ ctnetlink_change_timeout(struct ip_conntrack *ct, struct nfattr *cda[]) return 0; } +static inline int +ctnetlink_change_protoinfo(struct ip_conntrack *ct, struct nfattr *cda[]) +{ + struct nfattr *tb[CTA_PROTOINFO_MAX], *attr = cda[CTA_PROTOINFO-1]; + struct ip_conntrack_protocol *proto; + u_int16_t npt = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum; + int err = 0; + + if (nfattr_parse_nested(tb, CTA_PROTOINFO_MAX, attr) < 0) + goto nfattr_failure; + + proto = ip_conntrack_proto_find_get(npt); + if (!proto) + return -EINVAL; + + if (proto->from_nfattr) + err = proto->from_nfattr(tb, ct); + ip_conntrack_proto_put(proto); + + return err; + +nfattr_failure: + return -ENOMEM; +} + static int ctnetlink_change_conntrack(struct ip_conntrack *ct, struct nfattr *cda[]) { @@ -973,6 +999,12 @@ ctnetlink_change_conntrack(struct ip_conntrack *ct, struct nfattr *cda[]) return err; } + if (cda[CTA_PROTOINFO-1]) { + err = ctnetlink_change_protoinfo(ct, cda); + if (err < 0) + return err; + } + DEBUGP("all done\n"); return 0; } @@ -1002,6 +1034,12 @@ ctnetlink_create_conntrack(struct nfattr *cda[], if (err < 0) goto err; + if (cda[CTA_PROTOINFO-1]) { + err = ctnetlink_change_protoinfo(ct, cda); + if (err < 0) + return err; + } + ct->helper = ip_conntrack_helper_find_get(rtuple); add_timer(&ct->timeout); diff --git a/net/ipv4/netfilter/ip_conntrack_proto_gre.c b/net/ipv4/netfilter/ip_conntrack_proto_gre.c index de3cb9db6f85..744abb9d377a 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_gre.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_gre.c @@ 
-247,6 +247,7 @@ static int gre_packet(struct ip_conntrack *ct, ct->proto.gre.stream_timeout); /* Also, more likely to be important, and not a probe. */ set_bit(IPS_ASSURED_BIT, &ct->status); + ip_conntrack_event_cache(IPCT_STATUS, skb); } else ip_ct_refresh_acct(ct, conntrackinfo, skb, ct->proto.gre.timeout); diff --git a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c index 838d1d69b36e..98f0015dd255 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c @@ -296,8 +296,7 @@ static int icmp_nfattr_to_tuple(struct nfattr *tb[], struct ip_conntrack_tuple *tuple) { if (!tb[CTA_PROTO_ICMP_TYPE-1] - || !tb[CTA_PROTO_ICMP_CODE-1] - || !tb[CTA_PROTO_ICMP_ID-1]) + || !tb[CTA_PROTO_ICMP_CODE-1]) return -1; tuple->dst.u.icmp.type = diff --git a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c index a875f35e576d..59a4a0111dd3 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_sctp.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_sctp.c @@ -416,6 +416,7 @@ static int sctp_packet(struct ip_conntrack *conntrack, && newconntrack == SCTP_CONNTRACK_ESTABLISHED) { DEBUGP("Setting assured bit\n"); set_bit(IPS_ASSURED_BIT, &conntrack->status); + ip_conntrack_event_cache(IPCT_STATUS, skb); } return NF_ACCEPT; diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c index 1985abc59d24..d6701cafbcc2 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c @@ -341,17 +341,43 @@ static int tcp_print_conntrack(struct seq_file *s, static int tcp_to_nfattr(struct sk_buff *skb, struct nfattr *nfa, const struct ip_conntrack *ct) { + struct nfattr *nest_parms = NFA_NEST(skb, CTA_PROTOINFO_TCP); + read_lock_bh(&tcp_lock); NFA_PUT(skb, CTA_PROTOINFO_TCP_STATE, sizeof(u_int8_t), &ct->proto.tcp.state); read_unlock_bh(&tcp_lock); + NFA_NEST_END(skb, nest_parms); + return 0; nfattr_failure: read_unlock_bh(&tcp_lock); return -1; } + +static int nfattr_to_tcp(struct nfattr *cda[], struct ip_conntrack *ct) +{ + struct nfattr *attr = cda[CTA_PROTOINFO_TCP-1]; + struct nfattr *tb[CTA_PROTOINFO_TCP_MAX]; + + if (nfattr_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, attr) < 0) + goto nfattr_failure; + + if (!tb[CTA_PROTOINFO_TCP_STATE-1]) + return -EINVAL; + + write_lock_bh(&tcp_lock); + ct->proto.tcp.state = + *(u_int8_t *)NFA_DATA(tb[CTA_PROTOINFO_TCP_STATE-1]); + write_unlock_bh(&tcp_lock); + + return 0; + +nfattr_failure: + return -1; +} #endif static unsigned int get_conntrack_index(const struct tcphdr *tcph) @@ -1014,7 +1040,8 @@ static int tcp_packet(struct ip_conntrack *conntrack, /* Set ASSURED if we see see valid ack in ESTABLISHED after SYN_RECV or a valid answer for a picked up connection. 
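
The tcp_to_nfattr()/nfattr_to_tcp() pair above moves the TCP conntrack state into a nested CTA_PROTOINFO_TCP attribute, which is what NFA_NEST()/NFA_NEST_END() build: a parent attribute whose payload is itself a list of length-prefixed attributes, with the parent's length patched once the children are in place. A small self-contained illustration of that layout, using a local struct that mirrors the generic netlink attribute header (the numeric type values are placeholders, not the real CTA_* constants):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct attr_hdr {              /* mirrors the 4-byte netlink attribute header */
            uint16_t len;          /* header plus payload, before padding */
            uint16_t type;
    };

    #define ATTR_ALIGN(n)  (((n) + 3U) & ~3U)    /* attributes are 4-byte aligned */

    static void put_attr(unsigned char *buf, size_t off,
                         uint16_t type, const void *data, uint16_t dlen)
    {
            struct attr_hdr h = { .len = sizeof(struct attr_hdr) + dlen, .type = type };

            memcpy(buf + off, &h, sizeof(h));
            if (dlen)
                    memcpy(buf + off + sizeof(h), data, dlen);
    }

    int main(void)
    {
            unsigned char buf[64] = { 0 };
            uint8_t tcp_state = 3;               /* placeholder protocol state */
            struct attr_hdr parent;
            size_t nest_off = 0, off;

            /* NFA_NEST(): emit the parent header, payload length not yet known */
            put_attr(buf, nest_off, 1 /* "PROTOINFO_TCP" */, NULL, 0);
            off = nest_off + ATTR_ALIGN(sizeof(struct attr_hdr));

            /* one child attribute carrying the state byte ("..._TCP_STATE") */
            put_attr(buf, off, 1, &tcp_state, sizeof(tcp_state));
            off += ATTR_ALIGN(sizeof(struct attr_hdr) + sizeof(tcp_state));

            /* NFA_NEST_END(): patch the parent's length to cover its children */
            memcpy(&parent, buf + nest_off, sizeof(parent));
            parent.len = off - nest_off;
            memcpy(buf + nest_off, &parent, sizeof(parent));

            printf("nested attribute block is %zu bytes\n", off);
            return 0;
    }

On the receive side, nfattr_parse_nested() in the patch does the inverse walk over that parent payload before the TCP state byte is copied back under tcp_lock.
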
*/ - set_bit(IPS_ASSURED_BIT, &conntrack->status); + set_bit(IPS_ASSURED_BIT, &conntrack->status); + ip_conntrack_event_cache(IPCT_STATUS, skb); } ip_ct_refresh_acct(conntrack, ctinfo, skb, timeout); @@ -1122,6 +1149,7 @@ struct ip_conntrack_protocol ip_conntrack_protocol_tcp = #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) .to_nfattr = tcp_to_nfattr, + .from_nfattr = nfattr_to_tcp, .tuple_to_nfattr = ip_ct_port_tuple_to_nfattr, .nfattr_to_tuple = ip_ct_port_nfattr_to_tuple, #endif diff --git a/net/ipv4/netfilter/ip_conntrack_standalone.c b/net/ipv4/netfilter/ip_conntrack_standalone.c index d3c7808010ec..dd476b191f4b 100644 --- a/net/ipv4/netfilter/ip_conntrack_standalone.c +++ b/net/ipv4/netfilter/ip_conntrack_standalone.c @@ -989,7 +989,7 @@ EXPORT_SYMBOL(need_ip_conntrack); EXPORT_SYMBOL(ip_conntrack_helper_register); EXPORT_SYMBOL(ip_conntrack_helper_unregister); EXPORT_SYMBOL(ip_ct_iterate_cleanup); -EXPORT_SYMBOL(ip_ct_refresh_acct); +EXPORT_SYMBOL(__ip_ct_refresh_acct); EXPORT_SYMBOL(ip_conntrack_expect_alloc); EXPORT_SYMBOL(ip_conntrack_expect_put); diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c index c3ea891d38e7..c5e3abd24672 100644 --- a/net/ipv4/netfilter/ip_nat_core.c +++ b/net/ipv4/netfilter/ip_nat_core.c @@ -74,12 +74,14 @@ ip_nat_proto_find_get(u_int8_t protonum) return p; } +EXPORT_SYMBOL_GPL(ip_nat_proto_find_get); void ip_nat_proto_put(struct ip_nat_protocol *p) { module_put(p->me); } +EXPORT_SYMBOL_GPL(ip_nat_proto_put); /* We keep an extra hash for each conntrack, for fast searching. */ static inline unsigned int @@ -111,6 +113,7 @@ ip_nat_cheat_check(u_int32_t oldvalinv, u_int32_t newval, u_int16_t oldcheck) return csum_fold(csum_partial((char *)diffs, sizeof(diffs), oldcheck^0xFFFF)); } +EXPORT_SYMBOL(ip_nat_cheat_check); /* Is this tuple already taken? (not by us) */ int @@ -127,6 +130,7 @@ ip_nat_used_tuple(const struct ip_conntrack_tuple *tuple, invert_tuplepr(&reply, tuple); return ip_conntrack_tuple_taken(&reply, ignored_conntrack); } +EXPORT_SYMBOL(ip_nat_used_tuple); /* If we source map this tuple so reply looks like reply_tuple, will * that meet the constraints of range. */ @@ -347,6 +351,7 @@ ip_nat_setup_info(struct ip_conntrack *conntrack, return NF_ACCEPT; } +EXPORT_SYMBOL(ip_nat_setup_info); /* Returns true if succeeded. */ static int @@ -387,10 +392,10 @@ manip_pkt(u_int16_t proto, } /* Do packet manipulations according to ip_nat_setup_info. */ -unsigned int nat_packet(struct ip_conntrack *ct, - enum ip_conntrack_info ctinfo, - unsigned int hooknum, - struct sk_buff **pskb) +unsigned int ip_nat_packet(struct ip_conntrack *ct, + enum ip_conntrack_info ctinfo, + unsigned int hooknum, + struct sk_buff **pskb) { enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); unsigned long statusbit; @@ -417,12 +422,13 @@ unsigned int nat_packet(struct ip_conntrack *ct, } return NF_ACCEPT; } +EXPORT_SYMBOL_GPL(ip_nat_packet); /* Dir is direction ICMP is coming from (opposite to packet it contains) */ -int icmp_reply_translation(struct sk_buff **pskb, - struct ip_conntrack *ct, - enum ip_nat_manip_type manip, - enum ip_conntrack_dir dir) +int ip_nat_icmp_reply_translation(struct sk_buff **pskb, + struct ip_conntrack *ct, + enum ip_nat_manip_type manip, + enum ip_conntrack_dir dir) { struct { struct icmphdr icmp; @@ -509,6 +515,7 @@ int icmp_reply_translation(struct sk_buff **pskb, return 1; } +EXPORT_SYMBOL_GPL(ip_nat_icmp_reply_translation); /* Protocol registration. 
*/ int ip_nat_protocol_register(struct ip_nat_protocol *proto) @@ -525,6 +532,7 @@ int ip_nat_protocol_register(struct ip_nat_protocol *proto) write_unlock_bh(&ip_nat_lock); return ret; } +EXPORT_SYMBOL(ip_nat_protocol_register); /* Noone stores the protocol anywhere; simply delete it. */ void ip_nat_protocol_unregister(struct ip_nat_protocol *proto) @@ -536,6 +544,7 @@ void ip_nat_protocol_unregister(struct ip_nat_protocol *proto) /* Someone could be still looking at the proto in a bh. */ synchronize_net(); } +EXPORT_SYMBOL(ip_nat_protocol_unregister); #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) @@ -582,7 +591,7 @@ EXPORT_SYMBOL_GPL(ip_nat_port_nfattr_to_range); EXPORT_SYMBOL_GPL(ip_nat_port_range_to_nfattr); #endif -int __init ip_nat_init(void) +static int __init ip_nat_init(void) { size_t i; @@ -624,10 +633,14 @@ static int clean_nat(struct ip_conntrack *i, void *data) return 0; } -/* Not __exit: called from ip_nat_standalone.c:init_or_cleanup() --RR */ -void ip_nat_cleanup(void) +static void __exit ip_nat_cleanup(void) { ip_ct_iterate_cleanup(&clean_nat, NULL); ip_conntrack_destroyed = NULL; vfree(bysource); } + +MODULE_LICENSE("GPL"); + +module_init(ip_nat_init); +module_exit(ip_nat_cleanup); diff --git a/net/ipv4/netfilter/ip_nat_helper.c b/net/ipv4/netfilter/ip_nat_helper.c index d2dd5d313556..5d506e0564d5 100644 --- a/net/ipv4/netfilter/ip_nat_helper.c +++ b/net/ipv4/netfilter/ip_nat_helper.c @@ -199,6 +199,7 @@ ip_nat_mangle_tcp_packet(struct sk_buff **pskb, } return 1; } +EXPORT_SYMBOL(ip_nat_mangle_tcp_packet); /* Generic function for mangling variable-length address changes inside * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX @@ -256,6 +257,7 @@ ip_nat_mangle_udp_packet(struct sk_buff **pskb, return 1; } +EXPORT_SYMBOL(ip_nat_mangle_udp_packet); /* Adjust one found SACK option including checksum correction */ static void @@ -399,6 +401,7 @@ ip_nat_seq_adjust(struct sk_buff **pskb, return 1; } +EXPORT_SYMBOL(ip_nat_seq_adjust); /* Setup NAT on this expected conntrack so it follows master. 
*/ /* If we fail to get a free NAT slot, we'll get dropped on confirm */ @@ -425,3 +428,4 @@ void ip_nat_follow_master(struct ip_conntrack *ct, /* hook doesn't matter, but it has to do destination manip */ ip_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING); } +EXPORT_SYMBOL(ip_nat_follow_master); diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c index 0ff368b131f6..30cd4e18c129 100644 --- a/net/ipv4/netfilter/ip_nat_standalone.c +++ b/net/ipv4/netfilter/ip_nat_standalone.c @@ -108,8 +108,8 @@ ip_nat_fn(unsigned int hooknum, case IP_CT_RELATED: case IP_CT_RELATED+IP_CT_IS_REPLY: if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP) { - if (!icmp_reply_translation(pskb, ct, maniptype, - CTINFO2DIR(ctinfo))) + if (!ip_nat_icmp_reply_translation(pskb, ct, maniptype, + CTINFO2DIR(ctinfo))) return NF_DROP; else return NF_ACCEPT; @@ -152,7 +152,7 @@ ip_nat_fn(unsigned int hooknum, } IP_NF_ASSERT(info); - return nat_packet(ct, ctinfo, hooknum, pskb); + return ip_nat_packet(ct, ctinfo, hooknum, pskb); } static unsigned int @@ -325,15 +325,10 @@ static int init_or_cleanup(int init) printk("ip_nat_init: can't setup rules.\n"); goto cleanup_nothing; } - ret = ip_nat_init(); - if (ret < 0) { - printk("ip_nat_init: can't setup rules.\n"); - goto cleanup_rule_init; - } ret = nf_register_hook(&ip_nat_in_ops); if (ret < 0) { printk("ip_nat_init: can't register in hook.\n"); - goto cleanup_nat; + goto cleanup_rule_init; } ret = nf_register_hook(&ip_nat_out_ops); if (ret < 0) { @@ -374,8 +369,6 @@ static int init_or_cleanup(int init) nf_unregister_hook(&ip_nat_out_ops); cleanup_inops: nf_unregister_hook(&ip_nat_in_ops); - cleanup_nat: - ip_nat_cleanup(); cleanup_rule_init: ip_nat_rule_cleanup(); cleanup_nothing: @@ -395,14 +388,4 @@ static void __exit fini(void) module_init(init); module_exit(fini); -EXPORT_SYMBOL(ip_nat_setup_info); -EXPORT_SYMBOL(ip_nat_protocol_register); -EXPORT_SYMBOL(ip_nat_protocol_unregister); -EXPORT_SYMBOL_GPL(ip_nat_proto_find_get); -EXPORT_SYMBOL_GPL(ip_nat_proto_put); -EXPORT_SYMBOL(ip_nat_cheat_check); -EXPORT_SYMBOL(ip_nat_mangle_tcp_packet); -EXPORT_SYMBOL(ip_nat_mangle_udp_packet); -EXPORT_SYMBOL(ip_nat_used_tuple); -EXPORT_SYMBOL(ip_nat_follow_master); MODULE_LICENSE("GPL"); diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index d54f14d926f6..36339eb39e17 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c @@ -240,8 +240,8 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) pmsg->packet_id = (unsigned long )entry; pmsg->data_len = data_len; - pmsg->timestamp_sec = skb_tv_base.tv_sec + entry->skb->tstamp.off_sec; - pmsg->timestamp_usec = skb_tv_base.tv_usec + entry->skb->tstamp.off_usec; + pmsg->timestamp_sec = entry->skb->tstamp.off_sec; + pmsg->timestamp_usec = entry->skb->tstamp.off_usec; pmsg->mark = entry->skb->nfmark; pmsg->hook = entry->info->hook; pmsg->hw_protocol = entry->skb->protocol; diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index eef99a1b5de6..75c27e92f6ab 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -921,8 +922,10 @@ translate_table(const char *name, } /* And one copy for every other CPU */ - for (i = 1; i < num_possible_cpus(); i++) { - memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i, + for_each_cpu(i) { + if (i == 0) + continue; + memcpy(newinfo->entries + SMP_ALIGN(newinfo->size) * i, newinfo->entries, 
SMP_ALIGN(newinfo->size)); } @@ -943,7 +946,7 @@ replace_table(struct ipt_table *table, struct ipt_entry *table_base; unsigned int i; - for (i = 0; i < num_possible_cpus(); i++) { + for_each_cpu(i) { table_base = (void *)newinfo->entries + TABLE_OFFSET(newinfo, i); @@ -990,7 +993,7 @@ get_counters(const struct ipt_table_info *t, unsigned int cpu; unsigned int i; - for (cpu = 0; cpu < num_possible_cpus(); cpu++) { + for_each_cpu(cpu) { i = 0; IPT_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu), t->size, @@ -1128,7 +1131,8 @@ do_replace(void __user *user, unsigned int len) return -ENOMEM; newinfo = vmalloc(sizeof(struct ipt_table_info) - + SMP_ALIGN(tmp.size) * num_possible_cpus()); + + SMP_ALIGN(tmp.size) * + (highest_possible_processor_id()+1)); if (!newinfo) return -ENOMEM; @@ -1458,7 +1462,8 @@ int ipt_register_table(struct ipt_table *table, const struct ipt_replace *repl) = { 0, 0, 0, { 0 }, { 0 }, { } }; newinfo = vmalloc(sizeof(struct ipt_table_info) - + SMP_ALIGN(repl->size) * num_possible_cpus()); + + SMP_ALIGN(repl->size) * + (highest_possible_processor_id()+1)); if (!newinfo) return -ENOMEM; diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c index 715cb613405c..5245bfd33d52 100644 --- a/net/ipv4/netfilter/ipt_REDIRECT.c +++ b/net/ipv4/netfilter/ipt_REDIRECT.c @@ -93,7 +93,7 @@ redirect_target(struct sk_buff **pskb, newdst = 0; rcu_read_lock(); - indev = __in_dev_get((*pskb)->dev); + indev = __in_dev_get_rcu((*pskb)->dev); if (indev && (ifa = indev->ifa_list)) newdst = ifa->ifa_local; rcu_read_unlock(); diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c index e2c14f3cb2fc..2883ccd8a91d 100644 --- a/net/ipv4/netfilter/ipt_ULOG.c +++ b/net/ipv4/netfilter/ipt_ULOG.c @@ -225,8 +225,8 @@ static void ipt_ulog_packet(unsigned int hooknum, /* copy hook, prefix, timestamp, payload, etc. */ pm->data_len = copy_len; - pm->timestamp_sec = skb_tv_base.tv_sec + skb->tstamp.off_sec; - pm->timestamp_usec = skb_tv_base.tv_usec + skb->tstamp.off_usec; + pm->timestamp_sec = skb->tstamp.off_sec; + pm->timestamp_usec = skb->tstamp.off_usec; pm->mark = skb->nfmark; pm->hook = hooknum; if (prefix != NULL) diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index f7943ba1f43c..a65e508fbd40 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -90,9 +90,7 @@ fold_field(void *mib[], int offt) unsigned long res = 0; int i; - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_possible(i)) - continue; + for_each_cpu(i) { res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt); res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt); } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 8549f26e2495..381dd6a6aebb 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2128,7 +2128,7 @@ int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr, struct in_device *in_dev; rcu_read_lock(); - if ((in_dev = __in_dev_get(dev)) != NULL) { + if ((in_dev = __in_dev_get_rcu(dev)) != NULL) { int our = ip_check_mc(in_dev, daddr, saddr, skb->nh.iph->protocol); if (our @@ -2443,7 +2443,9 @@ static int ip_route_output_slow(struct rtable **rp, const struct flowi *oldflp) err = -ENODEV; if (dev_out == NULL) goto out; - if (__in_dev_get(dev_out) == NULL) { + + /* RACE: Check return value of inet_select_addr instead. 
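
The ip_tables hunks above (and the matching ip6_tables ones later in this patch) size the per-CPU table copies with highest_possible_processor_id()+1 instead of num_possible_cpus(): the copies are indexed by CPU id, and the set of possible ids can be sparse, so a count of CPUs does not necessarily cover the largest id. A stand-alone illustration of the difference, with a made-up id set:

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical sparse set of possible CPU ids */
            const int possible_ids[] = { 0, 1, 4, 5 };
            const int n = sizeof(possible_ids) / sizeof(possible_ids[0]);
            int i, highest = 0;

            for (i = 0; i < n; i++)
                    if (possible_ids[i] > highest)
                            highest = possible_ids[i];

            /* indexing per-CPU slots by id needs highest+1 entries, not n */
            printf("count of possible cpus = %d\n", n);
            printf("highest possible id    = %d\n", highest);
            printf("slots needed by-id     = %d\n", highest + 1);
            return 0;
    }

The for_each_cpu() conversions in the same hunks follow from the same observation: iterating possible ids directly is safer than assuming ids 0..count-1 exist.
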
*/ + if (__in_dev_get_rtnl(dev_out) == NULL) { dev_put(dev_out); goto out; /* Wrong error code */ } diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c index b940346de4e7..6d80e063c187 100644 --- a/net/ipv4/tcp_bic.c +++ b/net/ipv4/tcp_bic.c @@ -136,7 +136,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) else if (cwnd < ca->last_max_cwnd + max_increment*(BICTCP_B-1)) /* slow start */ ca->cnt = (cwnd * (BICTCP_B-1)) - / cwnd-ca->last_max_cwnd; + / (cwnd - ca->last_max_cwnd); else /* linear increase */ ca->cnt = cwnd / max_increment; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index a7537c7bbd06..3e98b57578dc 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -355,8 +355,6 @@ static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp) app_win -= icsk->icsk_ack.rcv_mss; app_win = max(app_win, 2U*tp->advmss); - if (!ofo_win) - tp->window_clamp = min(tp->window_clamp, app_win); tp->rcv_ssthresh = min(tp->window_clamp, 2U*tp->advmss); } } @@ -2241,6 +2239,7 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp, /* Note, it is the only place, where * fast path is recovered for sending TCP. */ + tp->pred_flags = 0; tcp_fast_path_check(sk, tp); if (nwin > tp->max_window) { diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 13dfb391cdf1..c85819d8474b 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -130,19 +130,20 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport, int dif = sk->sk_bound_dev_if; INET_ADDR_COOKIE(acookie, saddr, daddr) const __u32 ports = INET_COMBINED_PORTS(inet->dport, lport); - const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_hashinfo.ehash_size); - struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash]; + unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport); + struct inet_ehash_bucket *head = inet_ehash_bucket(&tcp_hashinfo, hash); struct sock *sk2; const struct hlist_node *node; struct inet_timewait_sock *tw; + prefetch(head->chain.first); write_lock(&head->lock); /* Check TIME-WAIT sockets first. */ sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) { tw = inet_twsk(sk2); - if (INET_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) { + if (INET_TW_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) { const struct tcp_timewait_sock *tcptw = tcp_twsk(sk2); struct tcp_sock *tp = tcp_sk(sk); @@ -179,7 +180,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport, /* And established part... */ sk_for_each(sk2, node, &head->chain) { - if (INET_MATCH(sk2, acookie, saddr, daddr, ports, dif)) + if (INET_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) goto not_unique; } @@ -188,7 +189,7 @@ unique: * in hash table socket with a funny identity. */ inet->num = lport; inet->sport = htons(lport); - sk->sk_hashent = hash; + sk->sk_hash = hash; BUG_TRAP(sk_unhashed(sk)); __sk_add_node(sk, &head->chain); sock_prot_inc_use(sk->sk_prot); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 5dd6dd7d091e..b907456a79f4 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -190,7 +190,7 @@ void tcp_select_initial_window(int __space, __u32 mss, } /* Set initial window to value enough for senders, - * following RFC1414. Senders, not following this RFC, + * following RFC2414. Senders, not following this RFC, * will be satisfied with 2. 
*/ if (mss > (1<<*rcv_wscale)) { @@ -435,8 +435,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss int nsize, old_factor; u16 flags; - BUG_ON(len >= skb->len); - + BUG_ON(len > skb->len); nsize = skb_headlen(skb) - len; if (nsize < 0) nsize = 0; @@ -509,7 +508,16 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss tp->lost_out -= diff; tp->left_out -= diff; } + if (diff > 0) { + /* Adjust Reno SACK estimate. */ + if (!tp->rx_opt.sack_ok) { + tp->sacked_out -= diff; + if ((int)tp->sacked_out < 0) + tp->sacked_out = 0; + tcp_sync_left_out(tp); + } + tp->fackets_out -= diff; if ((int)tp->fackets_out < 0) tp->fackets_out = 0; @@ -1601,7 +1609,7 @@ void tcp_send_fin(struct sock *sk) * was unread data in the receive queue. This behavior is recommended * by draft-ietf-tcpimpl-prob-03.txt section 3.10. -DaveM */ -void tcp_send_active_reset(struct sock *sk, unsigned int __nocast priority) +void tcp_send_active_reset(struct sock *sk, gfp_t priority) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 2fea3f4402a0..a970b4727ce8 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1806,7 +1806,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) } for (dev = dev_base; dev != NULL; dev = dev->next) { - struct in_device * in_dev = __in_dev_get(dev); + struct in_device * in_dev = __in_dev_get_rtnl(dev); if (in_dev && (dev->flags & IFF_UP)) { struct in_ifaddr * ifa; @@ -3520,6 +3520,8 @@ int __init addrconf_init(void) if (err) return err; + ip6_null_entry.rt6i_idev = in6_dev_get(&loopback_dev); + register_netdevice_notifier(&ipv6_dev_notf); #ifdef CONFIG_IPV6_PRIVACY diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 9b27460f0cc7..40d9a1935ab5 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -66,10 +67,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) alen = esp->auth.icv_trunc_len; tfm = esp->conf.tfm; - blksize = (crypto_tfm_alg_blocksize(tfm) + 3) & ~3; - clen = (clen + 2 + blksize-1)&~(blksize-1); + blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4); + clen = ALIGN(clen + 2, blksize); if (esp->conf.padlen) - clen = (clen + esp->conf.padlen-1)&~(esp->conf.padlen-1); + clen = ALIGN(clen, esp->conf.padlen); if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) { goto error; @@ -133,7 +134,7 @@ static int esp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, stru struct ipv6_esp_hdr *esph; struct esp_data *esp = x->data; struct sk_buff *trailer; - int blksize = crypto_tfm_alg_blocksize(esp->conf.tfm); + int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4); int alen = esp->auth.icv_trunc_len; int elen = skb->len - sizeof(struct ipv6_esp_hdr) - esp->conf.ivlen - alen; @@ -235,16 +236,17 @@ out_nofree: static u32 esp6_get_max_size(struct xfrm_state *x, int mtu) { struct esp_data *esp = x->data; - u32 blksize = crypto_tfm_alg_blocksize(esp->conf.tfm); + u32 blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4); if (x->props.mode) { - mtu = (mtu + 2 + blksize-1)&~(blksize-1); + mtu = ALIGN(mtu + 2, blksize); } else { /* The worst case. 
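
The esp6 changes here replace the open-coded mask arithmetic with ALIGN(); for a power-of-two boundary the two forms are the same round-up, which a stand-alone check makes easy to see (ALIGN is redefined locally below with the kernel's usual definition, so this compiles outside the tree):

    #include <stdio.h>

    #define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned int blksize = 8;       /* block size, already a multiple of 4 */
            unsigned int clen;

            for (clen = 40; clen <= 44; clen++) {
                    unsigned int old_form = (clen + 2 + blksize - 1) & ~(blksize - 1);
                    unsigned int new_form = ALIGN(clen + 2, blksize);

                    printf("clen=%u  old=%u  new=%u\n", clen, old_form, new_form);
            }
            return 0;
    }

The only behavioural change in the hunk is therefore the transport-mode worst-case estimate in esp6_get_max_size(), which now accounts for the real pad length instead of simply adding a full block.
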
*/ - mtu += 2 + blksize; + u32 padsize = ((blksize - 1) & 7) + 1; + mtu = ALIGN(mtu + 2, padsize) + blksize - padsize; } if (esp->conf.padlen) - mtu = (mtu + esp->conf.padlen-1)&~(esp->conf.padlen-1); + mtu = ALIGN(mtu, esp->conf.padlen); return mtu + x->props.header_len + esp->auth.icv_full_len; } diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index b7185fb3377c..23e540365a14 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -700,10 +700,7 @@ int __init icmpv6_init(struct net_proto_family *ops) struct sock *sk; int err, i, j; - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_possible(i)) - continue; - + for_each_cpu(i) { err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, &per_cpu(__icmpv6_socket, i)); if (err < 0) { @@ -749,9 +746,7 @@ void icmpv6_cleanup(void) { int i; - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_possible(i)) - continue; + for_each_cpu(i) { sock_release(per_cpu(__icmpv6_socket, i)); } inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6); diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index f841bde30c18..bbbe80cdaf72 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -483,7 +483,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen) goto done; } fl1 = sfl->fl; - atomic_inc(&fl->users); + atomic_inc(&fl1->users); break; } } diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 2f589f24c093..563b442ffab8 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -666,7 +666,7 @@ slow_path: */ fh->nexthdr = nexthdr; fh->reserved = 0; - if (frag_id) { + if (!frag_id) { ipv6_select_ident(skb, fh); frag_id = fh->identification; } else diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 519899fb11d5..39a96c768102 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1393,7 +1393,7 @@ static void mld_sendpack(struct sk_buff *skb) static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel) { - return sizeof(struct mld2_grec) + 4*mld_scount(pmc,type,gdel,sdel); + return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdel,sdel); } static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 555a31347eda..305d9ee6d7db 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1450,7 +1450,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, static void pndisc_redo(struct sk_buff *skb) { - ndisc_rcv(skb); + ndisc_recv_ns(skb); kfree_skb(skb); } diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index 216fbe1ac65c..bb7ccfe33f23 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig @@ -209,6 +209,17 @@ config IP6_NF_TARGET_REJECT To compile it as a module, choose M here. If unsure, say N. +config IP6_NF_TARGET_NFQUEUE + tristate "NFQUEUE Target Support" + depends on IP_NF_IPTABLES + help + This Target replaced the old obsolete QUEUE target. + + As opposed to QUEUE, it supports 65535 different queues, + not just one. + + To compile it as a module, choose M here. If unsure, say N. 
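
The help text above is the whole user-visible contract of the new ip6t_NFQUEUE target: packets are handed to user space on one of up to 65535 queues selected per rule. Assuming the separate libnetfilter_queue user-space library (its nfq_* calls are not part of this patch, and error handling is omitted here), a listener bound to one such queue might look roughly like this:

    /* sketch only: link with -lnetfilter_queue */
    #include <stdint.h>
    #include <arpa/inet.h>
    #include <sys/socket.h>
    #include <linux/netfilter.h>
    #include <libnetfilter_queue/libnetfilter_queue.h>

    static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
                  struct nfq_data *nfa, void *data)
    {
            struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);
            uint32_t id = ph ? ntohl(ph->packet_id) : 0;

            /* accept everything; a real filter would inspect the payload here */
            return nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
    }

    int main(void)
    {
            struct nfq_handle *h = nfq_open();
            struct nfq_q_handle *qh;
            char buf[4096];
            int fd, rv;

            nfq_bind_pf(h, AF_INET6);
            qh = nfq_create_queue(h, 42, &cb, NULL);   /* queue number 42 */
            nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);

            fd = nfq_fd(h);
            while ((rv = recv(fd, buf, sizeof(buf), 0)) >= 0)
                    nfq_handle_packet(h, buf, rv);

            nfq_destroy_queue(qh);
            nfq_close(h);
            return 0;
    }

The kernel-side rule would select the same queue number, e.g. via the NFQUEUE target's --queue-num option in ip6tables.
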
+ # if [ "$CONFIG_IP6_NF_FILTER" != "n" ]; then # dep_tristate ' REJECT target support' CONFIG_IP6_NF_TARGET_REJECT $CONFIG_IP6_NF_FILTER # if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile index bd9a16a5cbba..2b2c370e8b1c 100644 --- a/net/ipv6/netfilter/Makefile +++ b/net/ipv6/netfilter/Makefile @@ -21,9 +21,9 @@ obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o obj-$(CONFIG_IP6_NF_TARGET_MARK) += ip6t_MARK.o obj-$(CONFIG_IP6_NF_TARGET_HL) += ip6t_HL.o +obj-$(CONFIG_IP6_NF_TARGET_NFQUEUE) += ip6t_NFQUEUE.o obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o -obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += ip6t_NFQUEUE.o diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index aa11cf366efa..5027bbe6415e 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c @@ -238,8 +238,8 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) pmsg->packet_id = (unsigned long )entry; pmsg->data_len = data_len; - pmsg->timestamp_sec = skb_tv_base.tv_sec + entry->skb->tstamp.off_sec; - pmsg->timestamp_usec = skb_tv_base.tv_usec + entry->skb->tstamp.off_usec; + pmsg->timestamp_sec = entry->skb->tstamp.off_sec; + pmsg->timestamp_usec = entry->skb->tstamp.off_usec; pmsg->mark = entry->skb->nfmark; pmsg->hook = entry->info->hook; pmsg->hw_protocol = entry->skb->protocol; diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 2da514b16d95..21deec25a12b 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -28,6 +28,7 @@ #include #include #include +#include #include @@ -950,8 +951,10 @@ translate_table(const char *name, } /* And one copy for every other CPU */ - for (i = 1; i < num_possible_cpus(); i++) { - memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i, + for_each_cpu(i) { + if (i == 0) + continue; + memcpy(newinfo->entries + SMP_ALIGN(newinfo->size) * i, newinfo->entries, SMP_ALIGN(newinfo->size)); } @@ -972,7 +975,7 @@ replace_table(struct ip6t_table *table, struct ip6t_entry *table_base; unsigned int i; - for (i = 0; i < num_possible_cpus(); i++) { + for_each_cpu(i) { table_base = (void *)newinfo->entries + TABLE_OFFSET(newinfo, i); @@ -1019,7 +1022,7 @@ get_counters(const struct ip6t_table_info *t, unsigned int cpu; unsigned int i; - for (cpu = 0; cpu < num_possible_cpus(); cpu++) { + for_each_cpu(cpu) { i = 0; IP6T_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu), t->size, @@ -1153,7 +1156,8 @@ do_replace(void __user *user, unsigned int len) return -ENOMEM; newinfo = vmalloc(sizeof(struct ip6t_table_info) - + SMP_ALIGN(tmp.size) * num_possible_cpus()); + + SMP_ALIGN(tmp.size) * + (highest_possible_processor_id()+1)); if (!newinfo) return -ENOMEM; @@ -1467,7 +1471,8 @@ int ip6t_register_table(struct ip6t_table *table, = { 0, 0, 0, { 0 }, { 0 }, { } }; newinfo = vmalloc(sizeof(struct ip6t_table_info) - + SMP_ALIGN(repl->size) * num_possible_cpus()); + + SMP_ALIGN(repl->size) * + (highest_possible_processor_id()+1)); if (!newinfo) return -ENOMEM; diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 334a5967831e..50a13e75d70e 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -140,9 +140,7 @@ fold_field(void *mib[], int offt) unsigned long res = 0; int i; - for (i = 0; i < NR_CPUS; i++) { - if 
(!cpu_possible(i)) - continue; + for_each_cpu(i) { res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt); res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt); } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 80643e6b346b..d693cb988b78 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -209,9 +209,11 @@ static __inline__ void __tcp_v6_hash(struct sock *sk) lock = &tcp_hashinfo.lhash_lock; inet_listen_wlock(&tcp_hashinfo); } else { - sk->sk_hashent = inet6_sk_ehashfn(sk, tcp_hashinfo.ehash_size); - list = &tcp_hashinfo.ehash[sk->sk_hashent].chain; - lock = &tcp_hashinfo.ehash[sk->sk_hashent].lock; + unsigned int hash; + sk->sk_hash = hash = inet6_sk_ehashfn(sk); + hash &= (tcp_hashinfo.ehash_size - 1); + list = &tcp_hashinfo.ehash[hash].chain; + lock = &tcp_hashinfo.ehash[hash].lock; write_lock(lock); } @@ -322,13 +324,13 @@ static int __tcp_v6_check_established(struct sock *sk, const __u16 lport, const struct in6_addr *saddr = &np->daddr; const int dif = sk->sk_bound_dev_if; const u32 ports = INET_COMBINED_PORTS(inet->dport, lport); - const int hash = inet6_ehashfn(daddr, inet->num, saddr, inet->dport, - tcp_hashinfo.ehash_size); - struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash]; + unsigned int hash = inet6_ehashfn(daddr, inet->num, saddr, inet->dport); + struct inet_ehash_bucket *head = inet_ehash_bucket(&tcp_hashinfo, hash); struct sock *sk2; const struct hlist_node *node; struct inet_timewait_sock *tw; + prefetch(head->chain.first); write_lock(&head->lock); /* Check TIME-WAIT sockets first. */ @@ -365,14 +367,14 @@ static int __tcp_v6_check_established(struct sock *sk, const __u16 lport, /* And established part... */ sk_for_each(sk2, node, &head->chain) { - if (INET6_MATCH(sk2, saddr, daddr, ports, dif)) + if (INET6_MATCH(sk2, hash, saddr, daddr, ports, dif)) goto not_unique; } unique: BUG_TRAP(sk_unhashed(sk)); __sk_add_node(sk, &head->chain); - sk->sk_hashent = hash; + sk->sk_hash = hash; sock_prot_inc_use(sk->sk_prot); write_unlock(&head->lock); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 6001948600f3..bf9519341fd3 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -99,7 +99,7 @@ static int udp_v6_get_port(struct sock *sk, unsigned short snum) next:; } result = best; - for(;; result += UDP_HTABLE_SIZE) { + for(i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++, result += UDP_HTABLE_SIZE) { if (result > sysctl_local_port_range[1]) result = sysctl_local_port_range[0] + ((result - sysctl_local_port_range[0]) & @@ -107,6 +107,8 @@ static int udp_v6_get_port(struct sock *sk, unsigned short snum) if (!udp_lport_inuse(result)) break; } + if (i >= (1 << 16) / UDP_HTABLE_SIZE) + goto fail; gotit: udp_port_rover = snum = result; } else { @@ -852,10 +854,16 @@ do_append_data: else if (!corkreq) err = udp_v6_push_pending_frames(sk, up); - if (dst && connected) - ip6_dst_store(sk, dst, - ipv6_addr_equal(&fl->fl6_dst, &np->daddr) ? - &np->daddr : NULL); + if (dst) { + if (connected) { + ip6_dst_store(sk, dst, + ipv6_addr_equal(&fl->fl6_dst, &np->daddr) ? + &np->daddr : NULL); + } else { + dst_release(dst); + } + } + if (err > 0) err = np->recverr ? 
net_xmit_errno(err) : 0; release_sock(sk); diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c index 071cd2cefd8a..953e255d2bc8 100644 --- a/net/irda/irlan/irlan_eth.c +++ b/net/irda/irlan/irlan_eth.c @@ -310,7 +310,7 @@ void irlan_eth_send_gratuitous_arp(struct net_device *dev) #ifdef CONFIG_INET IRDA_DEBUG(4, "IrLAN: Sending gratuitous ARP\n"); rcu_read_lock(); - in_dev = __in_dev_get(dev); + in_dev = __in_dev_get_rcu(dev); if (in_dev == NULL) goto out; if (in_dev->ifa_list) diff --git a/net/irda/irttp.c b/net/irda/irttp.c index 6602d901f8b1..8aff254cb418 100644 --- a/net/irda/irttp.c +++ b/net/irda/irttp.c @@ -38,7 +38,7 @@ #include #include -static struct irttp_cb *irttp = NULL; +static struct irttp_cb *irttp; static void __irttp_close_tsap(struct tsap_cb *self); @@ -86,12 +86,9 @@ static pi_param_info_t param_info = { pi_major_call_table, 1, 0x0f, 4 }; */ int __init irttp_init(void) { - /* Initialize the irttp structure. */ - if (irttp == NULL) { - irttp = kmalloc(sizeof(struct irttp_cb), GFP_KERNEL); - if (irttp == NULL) - return -ENOMEM; - } + irttp = kmalloc(sizeof(struct irttp_cb), GFP_KERNEL); + if (irttp == NULL) + return -ENOMEM; memset(irttp, 0, sizeof(struct irttp_cb)); irttp->magic = TTP_MAGIC; @@ -100,6 +97,7 @@ int __init irttp_init(void) if (!irttp->tsaps) { IRDA_ERROR("%s: can't allocate IrTTP hashbin!\n", __FUNCTION__); + kfree(irttp); return -ENOMEM; } @@ -115,7 +113,6 @@ int __init irttp_init(void) void __exit irttp_cleanup(void) { /* Check for main structure */ - IRDA_ASSERT(irttp != NULL, return;); IRDA_ASSERT(irttp->magic == TTP_MAGIC, return;); /* @@ -382,7 +379,6 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify) struct lsap_cb *lsap; notify_t ttp_notify; - IRDA_ASSERT(irttp != NULL, return NULL;); IRDA_ASSERT(irttp->magic == TTP_MAGIC, return NULL;); /* The IrLMP spec (IrLMP 1.1 p10) says that we have the right to @@ -1880,8 +1876,6 @@ static int irttp_seq_open(struct inode *inode, struct file *file) struct seq_file *seq; int rc = -ENOMEM; struct irttp_iter_state *s; - - IRDA_ASSERT(irttp != NULL, return -EINVAL;); s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) diff --git a/net/key/af_key.c b/net/key/af_key.c index 4879743b945a..39031684b65c 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -185,7 +185,7 @@ static int pfkey_release(struct socket *sock) } static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, - int allocation, struct sock *sk) + gfp_t allocation, struct sock *sk) { int err = -ENOBUFS; @@ -217,7 +217,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, #define BROADCAST_ONE 1 #define BROADCAST_REGISTERED 2 #define BROADCAST_PROMISC_ONLY 4 -static int pfkey_broadcast(struct sk_buff *skb, int allocation, +static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, int broadcast_flags, struct sock *one_sk) { struct sock *sk; @@ -1416,7 +1416,8 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, return 0; } -static struct sk_buff *compose_sadb_supported(struct sadb_msg *orig, int allocation) +static struct sk_buff *compose_sadb_supported(struct sadb_msg *orig, + gfp_t allocation) { struct sk_buff *skb; struct sadb_msg *hdr; @@ -2153,6 +2154,7 @@ out: static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs) { + unsigned int dir; int err; struct sadb_x_policy *pol; struct xfrm_policy *xp; @@ -2161,7 +2163,11 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, 
struct sadb_msg *h if ((pol = ext_hdrs[SADB_X_EXT_POLICY-1]) == NULL) return -EINVAL; - xp = xfrm_policy_byid(0, pol->sadb_x_policy_id, + dir = xfrm_policy_id2dir(pol->sadb_x_policy_id); + if (dir >= XFRM_POLICY_MAX) + return -EINVAL; + + xp = xfrm_policy_byid(dir, pol->sadb_x_policy_id, hdr->sadb_msg_type == SADB_X_SPDDELETE2); if (xp == NULL) return -ENOENT; @@ -2173,9 +2179,9 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h if (hdr->sadb_msg_type == SADB_X_SPDDELETE2) { c.data.byid = 1; c.event = XFRM_MSG_DELPOLICY; - km_policy_notify(xp, pol->sadb_x_policy_dir-1, &c); + km_policy_notify(xp, dir, &c); } else { - err = key_pol_get_resp(sk, xp, hdr, pol->sadb_x_policy_dir-1); + err = key_pol_get_resp(sk, xp, hdr, dir); } xfrm_pol_put(xp); diff --git a/net/llc/Makefile b/net/llc/Makefile index 5ebd4ed2bd42..4e260cff3c5d 100644 --- a/net/llc/Makefile +++ b/net/llc/Makefile @@ -22,3 +22,4 @@ llc2-y := llc_if.o llc_c_ev.o llc_c_ac.o llc_conn.o llc_c_st.o llc_pdu.o \ llc_sap.o llc_s_ac.o llc_s_ev.o llc_s_st.o af_llc.o llc_station.o llc2-$(CONFIG_PROC_FS) += llc_proc.o +llc2-$(CONFIG_SYSCTL) += sysctl_net_llc.o diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 66f55e514b56..59d02cbbeb9e 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -21,6 +21,7 @@ * See the GNU General Public License for more details. */ #include +#include #include #include #include @@ -37,10 +38,9 @@ static u16 llc_ui_sap_link_no_max[256]; static struct sockaddr_llc llc_ui_addrnull; static struct proto_ops llc_ui_ops; -static int llc_ui_wait_for_conn(struct sock *sk, int timeout); -static int llc_ui_wait_for_disc(struct sock *sk, int timeout); -static int llc_ui_wait_for_data(struct sock *sk, int timeout); -static int llc_ui_wait_for_busy_core(struct sock *sk, int timeout); +static int llc_ui_wait_for_conn(struct sock *sk, long timeout); +static int llc_ui_wait_for_disc(struct sock *sk, long timeout); +static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout); #if 0 #define dprintk(args...) 
printk(KERN_DEBUG args) @@ -116,12 +116,12 @@ static int llc_ui_send_data(struct sock* sk, struct sk_buff *skb, int noblock) struct llc_sock* llc = llc_sk(sk); int rc = 0; - if (llc_data_accept_state(llc->state) || llc->p_flag) { - int timeout = sock_sndtimeo(sk, noblock); + if (unlikely(llc_data_accept_state(llc->state) || llc->p_flag)) { + long timeout = sock_sndtimeo(sk, noblock); rc = llc_ui_wait_for_busy_core(sk, timeout); } - if (!rc) + if (unlikely(!rc)) rc = llc_build_and_send_pkt(sk, skb); return rc; } @@ -155,7 +155,7 @@ static int llc_ui_create(struct socket *sock, int protocol) struct sock *sk; int rc = -ESOCKTNOSUPPORT; - if (sock->type == SOCK_DGRAM || sock->type == SOCK_STREAM) { + if (likely(sock->type == SOCK_DGRAM || sock->type == SOCK_STREAM)) { rc = -ENOMEM; sk = llc_sk_alloc(PF_LLC, GFP_KERNEL, &llc_proto); if (sk) { @@ -177,7 +177,7 @@ static int llc_ui_release(struct socket *sock) struct sock *sk = sock->sk; struct llc_sock *llc; - if (!sk) + if (unlikely(sk == NULL)) goto out; sock_hold(sk); lock_sock(sk); @@ -189,10 +189,6 @@ static int llc_ui_release(struct socket *sock) if (!sock_flag(sk, SOCK_ZAPPED)) llc_sap_remove_socket(llc->sap, sk); release_sock(sk); - if (llc->sap && hlist_empty(&llc->sap->sk_list.list)) { - llc_release_sockets(llc->sap); - llc_sap_close(llc->sap); - } if (llc->dev) dev_put(llc->dev); sock_put(sk); @@ -221,6 +217,7 @@ static int llc_ui_autoport(void) llc_ui_sap_last_autoport = i + 2; goto out; } + llc_sap_put(sap); } llc_ui_sap_last_autoport = LLC_SAP_DYN_START; tries++; @@ -231,20 +228,13 @@ out: } /** - * llc_ui_autobind - Bind a socket to a specific address. - * @sk: Socket to bind an address to. - * @addr: Address the user wants the socket bound to. + * llc_ui_autobind - automatically bind a socket to a sap + * @sock: socket to bind + * @addr: address to connect to + * + * Used by llc_ui_connect and llc_ui_sendmsg when the user hasn't + * specifically used llc_ui_bind to bind to an specific address/sap * - * Bind a socket to a specific address. For llc a user is able to bind to - * a specific sap only or mac + sap. If the user only specifies a sap and - * a null dmac (all zeros) the user is attempting to bind to an entire - * sap. This will stop anyone else on the local system from using that - * sap. If someone else has a mac + sap open the bind to null + sap will - * fail. - * If the user desires to bind to a specific mac + sap, it is possible to - * have multiple sap connections via multiple macs. - * Bind and autobind for that matter must enforce the correct sap usage - * otherwise all hell will break loose. * Returns: 0 upon success, negative otherwise. */ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr) @@ -285,11 +275,7 @@ out: * @addrlen: Length of the uaddr structure. * * Bind a socket to a specific address. For llc a user is able to bind to - * a specific sap only or mac + sap. If the user only specifies a sap and - * a null dmac (all zeros) the user is attempting to bind to an entire - * sap. This will stop anyone else on the local system from using that - * sap. If someone else has a mac + sap open the bind to null + sap will - * fail. + * a specific sap only or mac + sap. * If the user desires to bind to a specific mac + sap, it is possible to * have multiple sap connections via multiple macs. 
* Bind and autobind for that matter must enforce the correct sap usage @@ -305,10 +291,16 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) int rc = -EINVAL; dprintk("%s: binding %02X\n", __FUNCTION__, addr->sllc_sap); - if (!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)) + if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr))) goto out; rc = -EAFNOSUPPORT; - if (addr->sllc_family != AF_LLC) + if (unlikely(addr->sllc_family != AF_LLC)) + goto out; + rc = -ENODEV; + rtnl_lock(); + llc->dev = dev_getbyhwaddr(addr->sllc_arphrd, addr->sllc_mac); + rtnl_unlock(); + if (!llc->dev) goto out; if (!addr->sllc_sap) { rc = -EUSERS; @@ -322,6 +314,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) rc = -EBUSY; /* some other network layer is using the sap */ if (!sap) goto out; + llc_sap_hold(sap); } else { struct llc_addr laddr, daddr; struct sock *ask; @@ -338,7 +331,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) ask = llc_lookup_established(sap, &daddr, &laddr); if (ask) { sock_put(ask); - goto out; + goto out_put; } } llc->laddr.lsap = addr->sllc_sap; @@ -348,6 +341,8 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) llc_sap_add_socket(sap, sk); sock_reset_flag(sk, SOCK_ZAPPED); rc = 0; +out_put: + llc_sap_put(sap); out: return rc; } @@ -369,7 +364,7 @@ static int llc_ui_shutdown(struct socket *sock, int how) int rc = -ENOTCONN; lock_sock(sk); - if (sk->sk_state != TCP_ESTABLISHED) + if (unlikely(sk->sk_state != TCP_ESTABLISHED)) goto out; rc = -EINVAL; if (how != 2) @@ -404,14 +399,18 @@ static int llc_ui_connect(struct socket *sock, struct sockaddr *uaddr, struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); struct sockaddr_llc *addr = (struct sockaddr_llc *)uaddr; - struct net_device *dev; int rc = -EINVAL; lock_sock(sk); - if (addrlen != sizeof(*addr)) + if (unlikely(addrlen != sizeof(*addr))) goto out; rc = -EAFNOSUPPORT; - if (addr->sllc_family != AF_LLC) + if (unlikely(addr->sllc_family != AF_LLC)) + goto out; + if (unlikely(sk->sk_type != SOCK_STREAM)) + goto out; + rc = -EALREADY; + if (unlikely(sock->state == SS_CONNECTING)) goto out; /* bind connection to sap if user hasn't done it. 
*/ if (sock_flag(sk, SOCK_ZAPPED)) { @@ -419,19 +418,13 @@ static int llc_ui_connect(struct socket *sock, struct sockaddr *uaddr, rc = llc_ui_autobind(sock, addr); if (rc) goto out; - llc->daddr.lsap = addr->sllc_sap; - memcpy(llc->daddr.mac, addr->sllc_mac, IFHWADDRLEN); } - dev = llc->dev; - if (sk->sk_type != SOCK_STREAM) - goto out; - rc = -EALREADY; - if (sock->state == SS_CONNECTING) - goto out; + llc->daddr.lsap = addr->sllc_sap; + memcpy(llc->daddr.mac, addr->sllc_mac, IFHWADDRLEN); sock->state = SS_CONNECTING; sk->sk_state = TCP_SYN_SENT; llc->link = llc_ui_next_link_no(llc->sap->laddr.lsap); - rc = llc_establish_connection(sk, dev->dev_addr, + rc = llc_establish_connection(sk, llc->dev->dev_addr, addr->sllc_mac, addr->sllc_sap); if (rc) { dprintk("%s: llc_ui_send_conn failed :-(\n", __FUNCTION__); @@ -439,12 +432,30 @@ static int llc_ui_connect(struct socket *sock, struct sockaddr *uaddr, sk->sk_state = TCP_CLOSE; goto out; } - rc = llc_ui_wait_for_conn(sk, sk->sk_rcvtimeo); - if (rc) - dprintk("%s: llc_ui_wait_for_conn failed=%d\n", __FUNCTION__, rc); + + if (sk->sk_state == TCP_SYN_SENT) { + const long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); + + if (!timeo || !llc_ui_wait_for_conn(sk, timeo)) + goto out; + + rc = sock_intr_errno(timeo); + if (signal_pending(current)) + goto out; + } + + if (sk->sk_state == TCP_CLOSE) + goto sock_error; + + sock->state = SS_CONNECTED; + rc = 0; out: release_sock(sk); return rc; +sock_error: + rc = sock_error(sk) ? : -ECONNABORTED; + sock->state = SS_UNCONNECTED; + goto out; } /** @@ -461,10 +472,10 @@ static int llc_ui_listen(struct socket *sock, int backlog) int rc = -EINVAL; lock_sock(sk); - if (sock->state != SS_UNCONNECTED) + if (unlikely(sock->state != SS_UNCONNECTED)) goto out; rc = -EOPNOTSUPP; - if (sk->sk_type != SOCK_STREAM) + if (unlikely(sk->sk_type != SOCK_STREAM)) goto out; rc = -EAGAIN; if (sock_flag(sk, SOCK_ZAPPED)) @@ -483,86 +494,14 @@ out: return rc; } -static int llc_ui_wait_for_disc(struct sock *sk, int timeout) +static int llc_ui_wait_for_disc(struct sock *sk, long timeout) { - DECLARE_WAITQUEUE(wait, current); - int rc; - - add_wait_queue_exclusive(sk->sk_sleep, &wait); - for (;;) { - __set_current_state(TASK_INTERRUPTIBLE); - rc = 0; - if (sk->sk_state != TCP_CLOSE) { - release_sock(sk); - timeout = schedule_timeout(timeout); - lock_sock(sk); - } else - break; - rc = -ERESTARTSYS; - if (signal_pending(current)) - break; - rc = -EAGAIN; - if (!timeout) - break; - } - __set_current_state(TASK_RUNNING); - remove_wait_queue(sk->sk_sleep, &wait); - return rc; -} - -static int llc_ui_wait_for_conn(struct sock *sk, int timeout) -{ - DECLARE_WAITQUEUE(wait, current); - int rc; - - add_wait_queue_exclusive(sk->sk_sleep, &wait); - for (;;) { - __set_current_state(TASK_INTERRUPTIBLE); - rc = -EAGAIN; - if (sk->sk_state == TCP_CLOSE) - break; - rc = 0; - if (sk->sk_state != TCP_ESTABLISHED) { - release_sock(sk); - timeout = schedule_timeout(timeout); - lock_sock(sk); - } else - break; - rc = -ERESTARTSYS; - if (signal_pending(current)) - break; - rc = -EAGAIN; - if (!timeout) - break; - } - __set_current_state(TASK_RUNNING); - remove_wait_queue(sk->sk_sleep, &wait); - return rc; -} - -static int llc_ui_wait_for_data(struct sock *sk, int timeout) -{ - DECLARE_WAITQUEUE(wait, current); + DEFINE_WAIT(wait); int rc = 0; - add_wait_queue_exclusive(sk->sk_sleep, &wait); - for (;;) { - __set_current_state(TASK_INTERRUPTIBLE); - if (sk->sk_shutdown & RCV_SHUTDOWN) - break; - /* - * Well, if we have backlog, try to process it now. 
- */ - if (sk->sk_backlog.tail) { - release_sock(sk); - lock_sock(sk); - } - rc = 0; - if (skb_queue_empty(&sk->sk_receive_queue)) { - release_sock(sk); - timeout = schedule_timeout(timeout); - lock_sock(sk); - } else + while (1) { + prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); + if (sk_wait_event(sk, &timeout, sk->sk_state == TCP_CLOSE)) break; rc = -ERESTARTSYS; if (signal_pending(current)) @@ -570,31 +509,40 @@ static int llc_ui_wait_for_data(struct sock *sk, int timeout) rc = -EAGAIN; if (!timeout) break; + rc = 0; } - __set_current_state(TASK_RUNNING); - remove_wait_queue(sk->sk_sleep, &wait); + finish_wait(sk->sk_sleep, &wait); return rc; } -static int llc_ui_wait_for_busy_core(struct sock *sk, int timeout) +static int llc_ui_wait_for_conn(struct sock *sk, long timeout) { - DECLARE_WAITQUEUE(wait, current); + DEFINE_WAIT(wait); + + while (1) { + prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); + if (sk_wait_event(sk, &timeout, sk->sk_state != TCP_SYN_SENT)) + break; + if (signal_pending(current) || !timeout) + break; + } + finish_wait(sk->sk_sleep, &wait); + return timeout; +} + +static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout) +{ + DEFINE_WAIT(wait); struct llc_sock *llc = llc_sk(sk); int rc; - add_wait_queue_exclusive(sk->sk_sleep, &wait); - for (;;) { - dprintk("%s: looping...\n", __FUNCTION__); - __set_current_state(TASK_INTERRUPTIBLE); - rc = -ENOTCONN; - if (sk->sk_shutdown & RCV_SHUTDOWN) - break; + while (1) { + prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); rc = 0; - if (llc_data_accept_state(llc->state) || llc->p_flag) { - release_sock(sk); - timeout = schedule_timeout(timeout); - lock_sock(sk); - } else + if (sk_wait_event(sk, &timeout, + (sk->sk_shutdown & RCV_SHUTDOWN) || + (!llc_data_accept_state(llc->state) && + !llc->p_flag))) break; rc = -ERESTARTSYS; if (signal_pending(current)) @@ -603,8 +551,35 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, int timeout) if (!timeout) break; } - __set_current_state(TASK_RUNNING); - remove_wait_queue(sk->sk_sleep, &wait); + finish_wait(sk->sk_sleep, &wait); + return rc; +} + +static int llc_wait_data(struct sock *sk, long timeo) +{ + int rc; + + while (1) { + /* + * POSIX 1003.1g mandates this order. + */ + if (sk->sk_err) { + rc = sock_error(sk); + break; + } + rc = 0; + if (sk->sk_shutdown & RCV_SHUTDOWN) + break; + rc = -EAGAIN; + if (!timeo) + break; + rc = sock_intr_errno(timeo); + if (signal_pending(current)) + break; + rc = 0; + if (sk_wait_data(sk, &timeo)) + break; + } return rc; } @@ -627,15 +602,18 @@ static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags) dprintk("%s: accepting on %02X\n", __FUNCTION__, llc_sk(sk)->laddr.lsap); lock_sock(sk); - if (sk->sk_type != SOCK_STREAM) + if (unlikely(sk->sk_type != SOCK_STREAM)) goto out; rc = -EINVAL; - if (sock->state != SS_UNCONNECTED || sk->sk_state != TCP_LISTEN) + if (unlikely(sock->state != SS_UNCONNECTED || + sk->sk_state != TCP_LISTEN)) goto out; /* wait for a connection to arrive. */ - rc = llc_ui_wait_for_data(sk, sk->sk_rcvtimeo); - if (rc) - goto out; + if (skb_queue_empty(&sk->sk_receive_queue)) { + rc = llc_wait_data(sk, sk->sk_rcvtimeo); + if (rc) + goto out; + } dprintk("%s: got a new connection on %02X\n", __FUNCTION__, llc_sk(sk)->laddr.lsap); skb = skb_dequeue(&sk->sk_receive_queue); @@ -657,7 +635,6 @@ static int llc_ui_accept(struct socket *sock, struct socket *newsock, int flags) /* put original socket back into a clean listen state. 
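
The rewritten llc_ui_wait_for_*() helpers above all share one shape: sleep until a condition on the socket becomes true or the timeout runs out, rechecking the condition after every wakeup, which is exactly what the sk_wait_event()/prepare_to_wait() pairing wraps. Outside the kernel the same shape is usually expressed with a condition variable; the following plain pthread analogue is shown only to make that control flow explicit, it is not the kernel API:

    #include <pthread.h>
    #include <stdbool.h>
    #include <time.h>

    struct waiter {
            pthread_mutex_t lock;
            pthread_cond_t  cond;
            bool            ready;          /* the condition being waited for */
    };

    /* sleep until w->ready or timeout_sec elapses; 0 on success, -1 on timeout */
    static int wait_for_ready(struct waiter *w, unsigned int timeout_sec)
    {
            struct timespec deadline;
            bool ok;
            int rc = 0;

            clock_gettime(CLOCK_REALTIME, &deadline);
            deadline.tv_sec += timeout_sec;

            pthread_mutex_lock(&w->lock);
            while (!w->ready && rc == 0)            /* recheck after every wakeup */
                    rc = pthread_cond_timedwait(&w->cond, &w->lock, &deadline);
            ok = w->ready;
            pthread_mutex_unlock(&w->lock);

            return ok ? 0 : -1;                     /* rc is ETIMEDOUT on expiry */
    }

As in the patched helpers, the predicate is evaluated again after the wait returns, so spurious wakeups and races between signal and timeout resolve to the state of the condition itself.
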
*/ sk->sk_state = TCP_LISTEN; sk->sk_ack_backlog--; - skb->sk = NULL; dprintk("%s: ok success on %02X, client on %02X\n", __FUNCTION__, llc_sk(sk)->addr.sllc_sap, newllc->daddr.lsap); frees: @@ -671,56 +648,167 @@ out: * llc_ui_recvmsg - copy received data to the socket user. * @sock: Socket to copy data from. * @msg: Various user space related information. - * @size: Size of user buffer. + * @len: Size of user buffer. * @flags: User specified flags. * * Copy received data to the socket user. * Returns non-negative upon success, negative otherwise. */ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t size, int flags) + struct msghdr *msg, size_t len, int flags) { - struct sock *sk = sock->sk; struct sockaddr_llc *uaddr = (struct sockaddr_llc *)msg->msg_name; - struct sk_buff *skb; + const int nonblock = flags & MSG_DONTWAIT; + struct sk_buff *skb = NULL; + struct sock *sk = sock->sk; + struct llc_sock *llc = llc_sk(sk); size_t copied = 0; - int rc = -ENOMEM, timeout; - int noblock = flags & MSG_DONTWAIT; + u32 peek_seq = 0; + u32 *seq; + unsigned long used; + int target; /* Read at least this many bytes */ + long timeo; - dprintk("%s: receiving in %02X from %02X\n", __FUNCTION__, - llc_sk(sk)->laddr.lsap, llc_sk(sk)->daddr.lsap); lock_sock(sk); - timeout = sock_rcvtimeo(sk, noblock); - rc = llc_ui_wait_for_data(sk, timeout); - if (rc) { - dprintk("%s: llc_ui_wait_for_data failed recv " - "in %02X from %02X\n", __FUNCTION__, - llc_sk(sk)->laddr.lsap, llc_sk(sk)->daddr.lsap); + copied = -ENOTCONN; + if (sk->sk_state == TCP_LISTEN) goto out; - } - skb = skb_dequeue(&sk->sk_receive_queue); - if (!skb) /* shutdown */ - goto out; - copied = skb->len; - if (copied > size) - copied = size; - rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); - if (rc) - goto dgram_free; - if (skb->len > copied) { - skb_pull(skb, copied); - skb_queue_head(&sk->sk_receive_queue, skb); - } - if (uaddr) - memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr)); - msg->msg_namelen = sizeof(*uaddr); - if (!skb->next) { -dgram_free: - kfree_skb(skb); - } + + timeo = sock_rcvtimeo(sk, nonblock); + + seq = &llc->copied_seq; + if (flags & MSG_PEEK) { + peek_seq = llc->copied_seq; + seq = &peek_seq; + } + + target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); + copied = 0; + + do { + u32 offset; + + /* + * We need to check signals first, to get correct SIGURG + * handling. FIXME: Need to check this doesn't impact 1003.1g + * and move it down to the bottom of the loop + */ + if (signal_pending(current)) { + if (copied) + break; + copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; + break; + } + + /* Next get a buffer. */ + + skb = skb_peek(&sk->sk_receive_queue); + if (skb) { + offset = *seq; + goto found_ok_skb; + } + /* Well, if we have backlog, try to process it now yet. */ + + if (copied >= target && !sk->sk_backlog.tail) + break; + + if (copied) { + if (sk->sk_err || + sk->sk_state == TCP_CLOSE || + (sk->sk_shutdown & RCV_SHUTDOWN) || + !timeo || + (flags & MSG_PEEK)) + break; + } else { + if (sock_flag(sk, SOCK_DONE)) + break; + + if (sk->sk_err) { + copied = sock_error(sk); + break; + } + if (sk->sk_shutdown & RCV_SHUTDOWN) + break; + + if (sk->sk_state == TCP_CLOSE) { + if (!sock_flag(sk, SOCK_DONE)) { + /* + * This occurs when user tries to read + * from never connected socket. + */ + copied = -ENOTCONN; + break; + } + break; + } + if (!timeo) { + copied = -EAGAIN; + break; + } + } + + if (copied >= target) { /* Do not sleep, just process backlog. 
*/ + release_sock(sk); + lock_sock(sk); + } else + sk_wait_data(sk, &timeo); + + if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) { + if (net_ratelimit()) + printk(KERN_DEBUG "LLC(%s:%d): Application " + "bug, race in MSG_PEEK.\n", + current->comm, current->pid); + peek_seq = llc->copied_seq; + } + continue; + found_ok_skb: + /* Ok so how much can we use? */ + used = skb->len - offset; + if (len < used) + used = len; + + if (!(flags & MSG_TRUNC)) { + int rc = skb_copy_datagram_iovec(skb, offset, + msg->msg_iov, used); + if (rc) { + /* Exception. Bailout! */ + if (!copied) + copied = -EFAULT; + break; + } + } + + *seq += used; + copied += used; + len -= used; + + if (used + offset < skb->len) + continue; + + if (!(flags & MSG_PEEK)) { + sk_eat_skb(sk, skb); + *seq = 0; + } + } while (len > 0); + + /* + * According to UNIX98, msg_name/msg_namelen are ignored + * on connected socket. -ANK + * But... af_llc still doesn't have separate sets of methods for + * SOCK_DGRAM and SOCK_STREAM :-( So we have to do this test, will + * eventually fix this tho :-) -acme + */ + if (sk->sk_type == SOCK_DGRAM) + goto copy_uaddr; out: release_sock(sk); - return rc ? : copied; + return copied; +copy_uaddr: + if (uaddr != NULL && skb != NULL) { + memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr)); + msg->msg_namelen = sizeof(*uaddr); + } + goto out; } /** @@ -740,7 +828,6 @@ static int llc_ui_sendmsg(struct kiocb *iocb, struct socket *sock, struct sockaddr_llc *addr = (struct sockaddr_llc *)msg->msg_name; int flags = msg->msg_flags; int noblock = flags & MSG_DONTWAIT; - struct net_device *dev; struct sk_buff *skb; size_t size = 0; int rc = -EINVAL, copied = 0, hdrlen; @@ -763,19 +850,17 @@ static int llc_ui_sendmsg(struct kiocb *iocb, struct socket *sock, if (rc) goto release; } - dev = llc->dev; - hdrlen = dev->hard_header_len + llc_ui_header_len(sk, addr); + hdrlen = llc->dev->hard_header_len + llc_ui_header_len(sk, addr); size = hdrlen + len; - if (size > dev->mtu) - size = dev->mtu; + if (size > llc->dev->mtu) + size = llc->dev->mtu; copied = size - hdrlen; release_sock(sk); skb = sock_alloc_send_skb(sk, size, noblock, &rc); lock_sock(sk); if (!skb) goto release; - skb->sk = sk; - skb->dev = dev; + skb->dev = llc->dev; skb->protocol = llc_proto_type(addr->sllc_arphrd); skb_reserve(skb, hdrlen); rc = memcpy_fromiovec(skb_put(skb, copied), msg->msg_iov, copied); @@ -800,15 +885,13 @@ static int llc_ui_sendmsg(struct kiocb *iocb, struct socket *sock, if (!(sk->sk_type == SOCK_STREAM && !addr->sllc_ua)) goto out; rc = llc_ui_send_data(sk, skb, noblock); - if (rc) - dprintk("%s: llc_ui_send_data failed: %d\n", __FUNCTION__, rc); out: - if (rc) + if (rc) { kfree_skb(skb); release: - if (rc) dprintk("%s: failed sending from %02X to %02X: %d\n", __FUNCTION__, llc->laddr.lsap, llc->daddr.lsap, rc); + } release_sock(sk); return rc ? 
: copied; } @@ -895,7 +978,7 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname, int rc = -EINVAL, opt; lock_sock(sk); - if (level != SOL_LLC || optlen != sizeof(int)) + if (unlikely(level != SOL_LLC || optlen != sizeof(int))) goto out; rc = get_user(opt, (int __user *)optval); if (rc) @@ -915,22 +998,22 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname, case LLC_OPT_ACK_TMR_EXP: if (opt > LLC_OPT_MAX_ACK_TMR_EXP) goto out; - llc->ack_timer.expire = opt; + llc->ack_timer.expire = opt * HZ; break; case LLC_OPT_P_TMR_EXP: if (opt > LLC_OPT_MAX_P_TMR_EXP) goto out; - llc->pf_cycle_timer.expire = opt; + llc->pf_cycle_timer.expire = opt * HZ; break; case LLC_OPT_REJ_TMR_EXP: if (opt > LLC_OPT_MAX_REJ_TMR_EXP) goto out; - llc->rej_sent_timer.expire = opt; + llc->rej_sent_timer.expire = opt * HZ; break; case LLC_OPT_BUSY_TMR_EXP: if (opt > LLC_OPT_MAX_BUSY_TMR_EXP) goto out; - llc->busy_state_timer.expire = opt; + llc->busy_state_timer.expire = opt * HZ; break; case LLC_OPT_TX_WIN: if (opt > LLC_OPT_MAX_WIN) @@ -970,7 +1053,7 @@ static int llc_ui_getsockopt(struct socket *sock, int level, int optname, int val = 0, len = 0, rc = -EINVAL; lock_sock(sk); - if (level != SOL_LLC) + if (unlikely(level != SOL_LLC)) goto out; rc = get_user(len, optlen); if (rc) @@ -980,17 +1063,17 @@ static int llc_ui_getsockopt(struct socket *sock, int level, int optname, goto out; switch (optname) { case LLC_OPT_RETRY: - val = llc->n2; break; + val = llc->n2; break; case LLC_OPT_SIZE: - val = llc->n1; break; + val = llc->n1; break; case LLC_OPT_ACK_TMR_EXP: - val = llc->ack_timer.expire; break; + val = llc->ack_timer.expire / HZ; break; case LLC_OPT_P_TMR_EXP: - val = llc->pf_cycle_timer.expire; break; + val = llc->pf_cycle_timer.expire / HZ; break; case LLC_OPT_REJ_TMR_EXP: - val = llc->rej_sent_timer.expire; break; + val = llc->rej_sent_timer.expire / HZ; break; case LLC_OPT_BUSY_TMR_EXP: - val = llc->busy_state_timer.expire; break; + val = llc->busy_state_timer.expire / HZ; break; case LLC_OPT_TX_WIN: val = llc->k; break; case LLC_OPT_RX_WIN: @@ -1034,8 +1117,12 @@ static struct proto_ops llc_ui_ops = { .sendpage = sock_no_sendpage, }; -extern void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb); -extern void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb); +static char llc_proc_err_msg[] __initdata = + KERN_CRIT "LLC: Unable to register the proc_fs entries\n"; +static char llc_sysctl_err_msg[] __initdata = + KERN_CRIT "LLC: Unable to register the sysctl entries\n"; +static char llc_sock_err_msg[] __initdata = + KERN_CRIT "LLC: Unable to register the network family\n"; static int __init llc2_init(void) { @@ -1048,13 +1135,28 @@ static int __init llc2_init(void) llc_station_init(); llc_ui_sap_last_autoport = LLC_SAP_DYN_START; rc = llc_proc_init(); - if (rc != 0) + if (rc != 0) { + printk(llc_proc_err_msg); goto out_unregister_llc_proto; - sock_register(&llc_ui_family_ops); + } + rc = llc_sysctl_init(); + if (rc) { + printk(llc_sysctl_err_msg); + goto out_proc; + } + rc = sock_register(&llc_ui_family_ops); + if (rc) { + printk(llc_sock_err_msg); + goto out_sysctl; + } llc_add_pack(LLC_DEST_SAP, llc_sap_handler); llc_add_pack(LLC_DEST_CONN, llc_conn_handler); out: return rc; +out_sysctl: + llc_sysctl_exit(); +out_proc: + llc_proc_exit(); out_unregister_llc_proto: proto_unregister(&llc_proto); goto out; @@ -1067,6 +1169,7 @@ static void __exit llc2_exit(void) llc_remove_pack(LLC_DEST_CONN); sock_unregister(PF_LLC); llc_proc_exit(); + 
llc_sysctl_exit(); proto_unregister(&llc_proto); } diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c index b218be4c10ec..b0bcfb1f12dd 100644 --- a/net/llc/llc_c_ac.c +++ b/net/llc/llc_c_ac.c @@ -60,23 +60,10 @@ int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb) int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb) { - int rc = -ENOTCONN; - u8 dsap; - struct llc_sap *sap; + struct llc_conn_state_ev *ev = llc_conn_ev(skb); - llc_pdu_decode_dsap(skb, &dsap); - sap = llc_sap_find(dsap); - if (sap) { - struct llc_conn_state_ev *ev = llc_conn_ev(skb); - struct llc_sock *llc = llc_sk(sk); - - llc_pdu_decode_sa(skb, llc->daddr.mac); - llc_pdu_decode_da(skb, llc->laddr.mac); - llc->dev = skb->dev; - ev->ind_prim = LLC_CONN_PRIM; - rc = 0; - } - return rc; + ev->ind_prim = LLC_CONN_PRIM; + return 0; } int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb) @@ -120,10 +107,8 @@ int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb) reason = LLC_DISC_REASON_RX_DISC_CMD_PDU; } else if (ev->type == LLC_CONN_EV_TYPE_ACK_TMR) reason = LLC_DISC_REASON_ACK_TMR_EXP; - else { - reason = 0; + else rc = -EINVAL; - } if (!rc) { ev->reason = reason; ev->ind_prim = LLC_DISC_PRIM; @@ -160,9 +145,6 @@ int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb) LLC_U_PDU_CMD(pdu) == LLC_2_PDU_CMD_SABME) { reason = LLC_RESET_REASON_REMOTE; rc = 0; - } else { - reason = 0; - rc = 1; } break; case LLC_CONN_EV_TYPE_ACK_TMR: @@ -172,8 +154,7 @@ int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb) if (llc->retry_count > llc->n2) { reason = LLC_RESET_REASON_LOCAL; rc = 0; - } else - rc = 1; + } break; } if (!rc) { @@ -217,18 +198,17 @@ int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock *sk, int llc_conn_ac_send_disc_cmd_p_set_x(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; - struct sk_buff *nskb = llc_alloc_frame(); + struct llc_sock *llc = llc_sk(sk); + struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev); if (nskb) { - struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; - nskb->dev = llc->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_disc_cmd(nskb, 1); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); - if (rc) + if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); llc_conn_ac_set_p_flag_1(sk, skb); @@ -243,20 +223,19 @@ free: int llc_conn_ac_send_dm_rsp_f_set_p(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; - struct sk_buff *nskb = llc_alloc_frame(); + struct llc_sock *llc = llc_sk(sk); + struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev); if (nskb) { - struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; u8 f_bit; - nskb->dev = llc->dev; llc_pdu_decode_pf_bit(skb, &f_bit); llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_dm_rsp(nskb, f_bit); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); - if (rc) + if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } @@ -270,19 +249,17 @@ free: int llc_conn_ac_send_dm_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; - struct sk_buff *nskb = llc_alloc_frame(); + struct llc_sock *llc = llc_sk(sk); + struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev); if (nskb) { - struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; - u8 f_bit = 1; - nskb->dev = llc->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); - 
llc_pdu_init_as_dm_rsp(nskb, f_bit); + llc_pdu_init_as_dm_rsp(nskb, 1); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); - if (rc) + if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } @@ -306,17 +283,16 @@ int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock *sk, struct sk_buff *skb) llc_pdu_decode_pf_bit(skb, &f_bit); else f_bit = 0; - nskb = llc_alloc_frame(); + nskb = llc_alloc_frame(sk, llc->dev); if (nskb) { struct llc_sap *sap = llc->sap; - nskb->dev = llc->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_frmr_rsp(nskb, pdu, f_bit, llc->vS, llc->vR, INCORRECT); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); - if (rc) + if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } @@ -330,21 +306,19 @@ free: int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; - struct sk_buff *nskb = llc_alloc_frame(); + struct llc_sock *llc = llc_sk(sk); + struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev); if (nskb) { - u8 f_bit = 0; - struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; struct llc_pdu_sn *pdu = (struct llc_pdu_sn *)&llc->rx_pdu_hdr; - nskb->dev = llc->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); - llc_pdu_init_as_frmr_rsp(nskb, pdu, f_bit, llc->vS, + llc_pdu_init_as_frmr_rsp(nskb, pdu, 0, llc->vS, llc->vR, INCORRECT); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); - if (rc) + if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } @@ -360,21 +334,20 @@ int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock *sk, struct sk_buff *skb) u8 f_bit; int rc = -ENOBUFS; struct sk_buff *nskb; + struct llc_sock *llc = llc_sk(sk); llc_pdu_decode_pf_bit(skb, &f_bit); - nskb = llc_alloc_frame(); + nskb = llc_alloc_frame(sk, llc->dev); if (nskb) { - struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); - nskb->dev = llc->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_frmr_rsp(nskb, pdu, f_bit, llc->vS, llc->vR, INCORRECT); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); - if (rc) + if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } @@ -395,7 +368,7 @@ int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_i_cmd(skb, 1, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); - if (!rc) { + if (likely(!rc)) { llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } @@ -412,7 +385,7 @@ static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb) llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); - if (!rc) { + if (likely(!rc)) { llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } @@ -429,7 +402,7 @@ int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); - if (!rc) { + if (likely(!rc)) { llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } @@ -451,18 +424,17 @@ int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock *sk, u8 nr; struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); int rc = -ENOBUFS; - struct sk_buff *nskb = llc_alloc_frame(); 
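Throughout llc_c_ac.c the patch converts every llc_conn_ac_send_*() action from the parameterless llc_alloc_frame() to llc_alloc_frame(sk, llc->dev) and drops the per-caller "nskb->dev = llc->dev;" assignment. The two-argument allocator itself is not shown in these hunks; the sketch below is a guess at what it does, inferred from the call sites here and from the skb_set_owner_r()/skb_set_owner_w() conversions elsewhere in the patch. The 128-byte allocation size, the 50-byte headroom and the header-pointer setup are assumptions carried over from the old helper, not something this patch shows.

struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev)
{
	struct sk_buff *skb = alloc_skb(128, GFP_ATOMIC);	/* size: assumption */

	if (skb) {
		skb_reserve(skb, 50);			/* headroom: assumption */
		skb->nh.raw = skb->h.raw = skb->data;	/* 2.6-era header pointers */
		skb->protocol = htons(ETH_P_802_2);
		skb->dev = dev;		/* callers no longer set nskb->dev by hand */
		if (sk != NULL)
			skb_set_owner_w(skb, sk);	/* charge the skb to the socket */
	}
	return skb;
}

With the device and socket accounting handled at allocation time, each action only has to build the PDU (llc_pdu_header_init() plus the matching llc_pdu_init_as_*()), fill in the MAC header with llc_mac_hdr_init(), and hand the frame to llc_conn_send_pdu().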
+ struct llc_sock *llc = llc_sk(sk); + struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev); if (nskb) { - struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; - nskb->dev = llc->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rr_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); - if (!rc) + if (likely(!rc)) llc_conn_send_pdu(sk, nskb); else kfree_skb(skb); @@ -487,18 +459,17 @@ int llc_conn_ac_resend_i_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) int llc_conn_ac_send_rej_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; - struct sk_buff *nskb = llc_alloc_frame(); + struct llc_sock *llc = llc_sk(sk); + struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev); if (nskb) { - struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; - nskb->dev = llc->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_rej_cmd(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); - if (rc) + if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } @@ -512,19 +483,17 @@ free: int llc_conn_ac_send_rej_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; - struct sk_buff *nskb = llc_alloc_frame(); + struct llc_sock *llc = llc_sk(sk); + struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev); if (nskb) { - u8 f_bit = 1; - struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; - nskb->dev = llc->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); - llc_pdu_init_as_rej_rsp(nskb, f_bit, llc->vR); + llc_pdu_init_as_rej_rsp(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); - if (rc) + if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } @@ -538,19 +507,17 @@ free: int llc_conn_ac_send_rej_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; - struct sk_buff *nskb = llc_alloc_frame(); + struct llc_sock *llc = llc_sk(sk); + struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev); if (nskb) { - struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; - u8 f_bit = 0; - nskb->dev = llc->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); - llc_pdu_init_as_rej_rsp(nskb, f_bit, llc->vR); + llc_pdu_init_as_rej_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); - if (rc) + if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } @@ -564,18 +531,17 @@ free: int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; - struct sk_buff *nskb = llc_alloc_frame(); + struct llc_sock *llc = llc_sk(sk); + struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev); if (nskb) { - struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; - nskb->dev = llc->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_rnr_cmd(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); - if (rc) + if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } @@ -589,19 +555,17 @@ free: int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; - struct sk_buff *nskb = llc_alloc_frame(); + struct llc_sock *llc = llc_sk(sk); + struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev); if (nskb) { - struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = 
llc->sap; - u8 f_bit = 1; - nskb->dev = llc->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); - llc_pdu_init_as_rnr_rsp(nskb, f_bit, llc->vR); + llc_pdu_init_as_rnr_rsp(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); - if (rc) + if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } @@ -615,19 +579,17 @@ free: int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; - struct sk_buff *nskb = llc_alloc_frame(); + struct llc_sock *llc = llc_sk(sk); + struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev); if (nskb) { - u8 f_bit = 0; - struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; - nskb->dev = llc->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); - llc_pdu_init_as_rnr_rsp(nskb, f_bit, llc->vR); + llc_pdu_init_as_rnr_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); - if (rc) + if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } @@ -645,7 +607,7 @@ int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb) if (!llc->remote_busy_flag) { llc->remote_busy_flag = 1; mod_timer(&llc->busy_state_timer.timer, - jiffies + llc->busy_state_timer.expire * HZ); + jiffies + llc->busy_state_timer.expire); } return 0; } @@ -653,18 +615,17 @@ int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb) int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; - struct sk_buff *nskb = llc_alloc_frame(); + struct llc_sock *llc = llc_sk(sk); + struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev); if (nskb) { - struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; - nskb->dev = llc->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rnr_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); - if (rc) + if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } @@ -678,18 +639,17 @@ free: int llc_conn_ac_send_rr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; - struct sk_buff *nskb = llc_alloc_frame(); + struct llc_sock *llc = llc_sk(sk); + struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev); if (nskb) { - struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; - nskb->dev = llc->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_rr_cmd(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); - if (rc) + if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } @@ -703,19 +663,18 @@ free: int llc_conn_ac_send_rr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; - struct sk_buff *nskb = llc_alloc_frame(); + struct llc_sock *llc = llc_sk(sk); + struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev); if (nskb) { - struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; u8 f_bit = 1; - nskb->dev = llc->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rr_rsp(nskb, f_bit, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); - if (rc) + if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } @@ -729,19 +688,17 @@ free: int llc_conn_ac_send_ack_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; - struct sk_buff *nskb = llc_alloc_frame(); + struct llc_sock *llc = 
llc_sk(sk); + struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev); if (nskb) { - struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; - u8 f_bit = 1; - nskb->dev = llc->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); - llc_pdu_init_as_rr_rsp(nskb, f_bit, llc->vR); + llc_pdu_init_as_rr_rsp(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); - if (rc) + if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } @@ -755,18 +712,17 @@ free: int llc_conn_ac_send_rr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; - struct sk_buff *nskb = llc_alloc_frame(); + struct llc_sock *llc = llc_sk(sk); + struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev); if (nskb) { - struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; - nskb->dev = llc->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rr_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); - if (rc) + if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } @@ -780,18 +736,17 @@ free: int llc_conn_ac_send_ack_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; - struct sk_buff *nskb = llc_alloc_frame(); + struct llc_sock *llc = llc_sk(sk); + struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev); if (nskb) { - struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; - nskb->dev = llc->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rr_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); - if (rc) + if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } @@ -815,8 +770,8 @@ void llc_conn_set_p_flag(struct sock *sk, u8 value) int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; - struct sk_buff *nskb = llc_alloc_frame(); struct llc_sock *llc = llc_sk(sk); + struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev); if (nskb) { struct llc_sap *sap = llc->sap; @@ -824,12 +779,11 @@ int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb) if (llc->dev->flags & IFF_LOOPBACK) dmac = llc->dev->dev_addr; - nskb->dev = llc->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_sabme_cmd(nskb, 1); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, dmac); - if (rc) + if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); llc_conn_set_p_flag(sk, 1); @@ -845,11 +799,11 @@ int llc_conn_ac_send_ua_rsp_f_set_p(struct sock *sk, struct sk_buff *skb) { u8 f_bit; int rc = -ENOBUFS; - struct sk_buff *nskb = llc_alloc_frame(); + struct llc_sock *llc = llc_sk(sk); + struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev); llc_pdu_decode_pf_bit(skb, &f_bit); if (nskb) { - struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; nskb->dev = llc->dev; @@ -857,7 +811,7 @@ int llc_conn_ac_send_ua_rsp_f_set_p(struct sock *sk, struct sk_buff *skb) llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_ua_rsp(nskb, f_bit); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); - if (rc) + if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } @@ -886,7 +840,7 @@ int llc_conn_ac_start_p_timer(struct sock *sk, struct sk_buff *skb) llc_conn_set_p_flag(sk, 1); mod_timer(&llc->pf_cycle_timer.timer, - jiffies + llc->pf_cycle_timer.expire * HZ); + jiffies + llc->pf_cycle_timer.expire); return 0; } @@ 
-957,7 +911,7 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); - if (!rc) { + if (likely(!rc)) { llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } @@ -1001,18 +955,17 @@ static int llc_conn_ac_send_rr_rsp_f_set_ackpf(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; - struct sk_buff *nskb = llc_alloc_frame(); + struct llc_sock *llc = llc_sk(sk); + struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev); if (nskb) { - struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; - nskb->dev = llc->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rr_rsp(nskb, llc->ack_pf, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); - if (rc) + if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } @@ -1165,7 +1118,7 @@ int llc_conn_ac_start_ack_timer(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); - mod_timer(&llc->ack_timer.timer, jiffies + llc->ack_timer.expire * HZ); + mod_timer(&llc->ack_timer.timer, jiffies + llc->ack_timer.expire); return 0; } @@ -1174,7 +1127,7 @@ int llc_conn_ac_start_rej_timer(struct sock *sk, struct sk_buff *skb) struct llc_sock *llc = llc_sk(sk); mod_timer(&llc->rej_sent_timer.timer, - jiffies + llc->rej_sent_timer.expire * HZ); + jiffies + llc->rej_sent_timer.expire); return 0; } @@ -1185,7 +1138,7 @@ int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk, if (!timer_pending(&llc->ack_timer.timer)) mod_timer(&llc->ack_timer.timer, - jiffies + llc->ack_timer.expire * HZ); + jiffies + llc->ack_timer.expire); return 0; } @@ -1233,7 +1186,7 @@ int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb) } if (unacked) mod_timer(&llc->ack_timer.timer, - jiffies + llc->ack_timer.expire * HZ); + jiffies + llc->ack_timer.expire); } else if (llc->failed_data_req) { u8 f_bit; @@ -1354,13 +1307,13 @@ int llc_conn_ac_set_vs_nr(struct sock *sk, struct sk_buff *skb) return 0; } -int llc_conn_ac_inc_vs_by_1(struct sock *sk, struct sk_buff *skb) +static int llc_conn_ac_inc_vs_by_1(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->vS = (llc_sk(sk)->vS + 1) % 128; return 0; } -void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data) +static void llc_conn_tmr_common_cb(unsigned long timeout_data, u8 type) { struct sock *sk = (struct sock *)timeout_data; struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC); @@ -1369,59 +1322,31 @@ void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data) if (skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); - skb->sk = sk; - ev->type = LLC_CONN_EV_TYPE_P_TMR; + skb_set_owner_r(skb, sk); + ev->type = type; llc_process_tmr_ev(sk, skb); } bh_unlock_sock(sk); } +void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data) +{ + llc_conn_tmr_common_cb(timeout_data, LLC_CONN_EV_TYPE_P_TMR); +} + void llc_conn_busy_tmr_cb(unsigned long timeout_data) { - struct sock *sk = (struct sock *)timeout_data; - struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC); - - bh_lock_sock(sk); - if (skb) { - struct llc_conn_state_ev *ev = llc_conn_ev(skb); - - skb->sk = sk; - ev->type = LLC_CONN_EV_TYPE_BUSY_TMR; - llc_process_tmr_ev(sk, skb); - } - bh_unlock_sock(sk); + llc_conn_tmr_common_cb(timeout_data, LLC_CONN_EV_TYPE_BUSY_TMR); } void llc_conn_ack_tmr_cb(unsigned long timeout_data) { - struct sock* sk = (struct sock *)timeout_data; - struct sk_buff *skb = 
alloc_skb(0, GFP_ATOMIC); - - bh_lock_sock(sk); - if (skb) { - struct llc_conn_state_ev *ev = llc_conn_ev(skb); - - skb->sk = sk; - ev->type = LLC_CONN_EV_TYPE_ACK_TMR; - llc_process_tmr_ev(sk, skb); - } - bh_unlock_sock(sk); + llc_conn_tmr_common_cb(timeout_data, LLC_CONN_EV_TYPE_ACK_TMR); } void llc_conn_rej_tmr_cb(unsigned long timeout_data) { - struct sock *sk = (struct sock *)timeout_data; - struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC); - - bh_lock_sock(sk); - if (skb) { - struct llc_conn_state_ev *ev = llc_conn_ev(skb); - - skb->sk = sk; - ev->type = LLC_CONN_EV_TYPE_REJ_TMR; - llc_process_tmr_ev(sk, skb); - } - bh_unlock_sock(sk); + llc_conn_tmr_common_cb(timeout_data, LLC_CONN_EV_TYPE_REJ_TMR); } int llc_conn_ac_rst_vs(struct sock *sk, struct sk_buff *skb) diff --git a/net/llc/llc_c_ev.c b/net/llc/llc_c_ev.c index d5bdb53a348f..c5deda246614 100644 --- a/net/llc/llc_c_ev.c +++ b/net/llc/llc_c_ev.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -46,8 +47,6 @@ #define dprintk(args...) #endif -extern u16 llc_circular_between(u8 a, u8 b, u8 c); - /** * llc_util_ns_inside_rx_window - check if sequence number is in rx window * @ns: sequence number of received pdu. @@ -99,7 +98,7 @@ out: int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb) { - struct llc_conn_state_ev *ev = llc_conn_ev(skb); + const struct llc_conn_state_ev *ev = llc_conn_ev(skb); return ev->prim == LLC_CONN_PRIM && ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1; @@ -107,7 +106,7 @@ int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb) { - struct llc_conn_state_ev *ev = llc_conn_ev(skb); + const struct llc_conn_state_ev *ev = llc_conn_ev(skb); return ev->prim == LLC_DATA_PRIM && ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1; @@ -115,7 +114,7 @@ int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb) { - struct llc_conn_state_ev *ev = llc_conn_ev(skb); + const struct llc_conn_state_ev *ev = llc_conn_ev(skb); return ev->prim == LLC_DISC_PRIM && ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1; @@ -123,7 +122,7 @@ int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb) { - struct llc_conn_state_ev *ev = llc_conn_ev(skb); + const struct llc_conn_state_ev *ev = llc_conn_ev(skb); return ev->prim == LLC_RESET_PRIM && ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1; @@ -131,7 +130,7 @@ int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb) { - struct llc_conn_state_ev *ev = llc_conn_ev(skb); + const struct llc_conn_state_ev *ev = llc_conn_ev(skb); return ev->type == LLC_CONN_EV_TYPE_SIMPLE && ev->prim_type == LLC_CONN_EV_LOCAL_BUSY_DETECTED ? 0 : 1; @@ -139,7 +138,7 @@ int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb) { - struct llc_conn_state_ev *ev = llc_conn_ev(skb); + const struct llc_conn_state_ev *ev = llc_conn_ev(skb); return ev->type == LLC_CONN_EV_TYPE_SIMPLE && ev->prim_type == LLC_CONN_EV_LOCAL_BUSY_CLEARED ? 
0 : 1; @@ -152,7 +151,7 @@ int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); + const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_CMD(pdu) == LLC_2_PDU_CMD_DISC ? 0 : 1; @@ -160,7 +159,7 @@ int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); + const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_RSP(pdu) == LLC_2_PDU_RSP_DM ? 0 : 1; @@ -168,7 +167,7 @@ int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); + const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_RSP(pdu) == LLC_2_PDU_RSP_FRMR ? 0 : 1; @@ -176,7 +175,7 @@ int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); return llc_conn_space(sk, skb) && LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_I(pdu) && @@ -186,7 +185,7 @@ int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); return llc_conn_space(sk, skb) && LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_I(pdu) && @@ -197,9 +196,9 @@ int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); - u8 vr = llc_sk(sk)->vR; - u8 ns = LLC_I_GET_NS(pdu); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const u8 vr = llc_sk(sk)->vR; + const u8 ns = LLC_I_GET_NS(pdu); return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_I(pdu) && LLC_I_PF_IS_0(pdu) && ns != vr && @@ -209,9 +208,9 @@ int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk, int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); - u8 vr = llc_sk(sk)->vR; - u8 ns = LLC_I_GET_NS(pdu); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const u8 vr = llc_sk(sk)->vR; + const u8 ns = LLC_I_GET_NS(pdu); return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_I(pdu) && LLC_I_PF_IS_1(pdu) && ns != vr && @@ -221,10 +220,11 @@ int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk, int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn * pdu = llc_pdu_sn_hdr(skb); - u8 vr = llc_sk(sk)->vR; - u8 ns = LLC_I_GET_NS(pdu); - u16 rc = LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_I(pdu) && ns != vr && + const struct llc_pdu_sn * pdu = llc_pdu_sn_hdr(skb); + const u8 vr = llc_sk(sk)->vR; + const u8 ns = LLC_I_GET_NS(pdu); + const u16 rc = LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_I(pdu) && + ns != vr && llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 
0 : 1; if (!rc) dprintk("%s: matched, state=%d, ns=%d, vr=%d\n", @@ -234,7 +234,7 @@ int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk, int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); return llc_conn_space(sk, skb) && LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) && @@ -244,7 +244,7 @@ int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) && LLC_I_PF_IS_1(pdu) && @@ -253,7 +253,7 @@ int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); return llc_conn_space(sk, skb) && LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) && @@ -263,9 +263,9 @@ int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); - u8 vr = llc_sk(sk)->vR; - u8 ns = LLC_I_GET_NS(pdu); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const u8 vr = llc_sk(sk)->vR; + const u8 ns = LLC_I_GET_NS(pdu); return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) && LLC_I_PF_IS_0(pdu) && ns != vr && @@ -275,9 +275,9 @@ int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk, int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); - u8 vr = llc_sk(sk)->vR; - u8 ns = LLC_I_GET_NS(pdu); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const u8 vr = llc_sk(sk)->vR; + const u8 ns = LLC_I_GET_NS(pdu); return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) && LLC_I_PF_IS_1(pdu) && ns != vr && @@ -287,9 +287,9 @@ int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk, int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); - u8 vr = llc_sk(sk)->vR; - u8 ns = LLC_I_GET_NS(pdu); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const u8 vr = llc_sk(sk)->vR; + const u8 ns = LLC_I_GET_NS(pdu); return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) && ns != vr && !llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1; @@ -298,10 +298,11 @@ int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk, int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); - u8 vr = llc_sk(sk)->vR; - u8 ns = LLC_I_GET_NS(pdu); - u16 rc = LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) && ns != vr && + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const u8 vr = llc_sk(sk)->vR; + const u8 ns = LLC_I_GET_NS(pdu); + const u16 rc = LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) && + ns != vr && llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 
0 : 1; if (!rc) dprintk("%s: matched, state=%d, ns=%d, vr=%d\n", @@ -311,7 +312,7 @@ int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk, int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) && LLC_S_PF_IS_0(pdu) && @@ -320,7 +321,7 @@ int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) && LLC_S_PF_IS_1(pdu) && @@ -329,7 +330,7 @@ int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) && LLC_S_PF_IS_0(pdu) && @@ -338,7 +339,7 @@ int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) && LLC_S_PF_IS_1(pdu) && @@ -347,7 +348,7 @@ int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); + const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) && LLC_S_PDU_RSP(pdu) == LLC_2_PDU_RSP_REJ ? 
0 : 1; @@ -355,7 +356,7 @@ int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) && LLC_S_PF_IS_0(pdu) && @@ -364,7 +365,7 @@ int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) && LLC_S_PF_IS_1(pdu) && @@ -373,7 +374,7 @@ int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) && LLC_S_PF_IS_0(pdu) && @@ -382,7 +383,7 @@ int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); return LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) && LLC_S_PF_IS_1(pdu) && @@ -391,7 +392,7 @@ int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) && LLC_S_PF_IS_0(pdu) && @@ -400,7 +401,7 @@ int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_S(pdu) && LLC_S_PF_IS_1(pdu) && @@ -409,7 +410,7 @@ int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); return llc_conn_space(sk, skb) && LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) && @@ -419,7 +420,7 @@ int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); return llc_conn_space(sk, skb) && LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_S(pdu) && @@ -429,7 +430,7 @@ int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb) { - struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); + const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); return LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_CMD(pdu) == LLC_2_PDU_CMD_SABME ? 
0 : 1; @@ -446,7 +447,7 @@ int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) { u16 rc = 1; - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); if (LLC_PDU_IS_CMD(pdu)) { if (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu)) { @@ -461,7 +462,7 @@ int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb) { u16 rc = 1; - struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); + const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); if (LLC_PDU_IS_CMD(pdu)) { if (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu)) @@ -477,32 +478,10 @@ int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb) return rc; } -int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb) -{ - u16 rc = 1; - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); - - if (LLC_PDU_IS_RSP(pdu)) { - if (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu)) { - if (LLC_I_PF_IS_1(pdu)) - rc = 0; - } else if (LLC_PDU_TYPE_IS_U(pdu)) - switch (LLC_U_PDU_RSP(pdu)) { - case LLC_2_PDU_RSP_UA: - case LLC_2_PDU_RSP_DM: - case LLC_2_PDU_RSP_FRMR: - if (LLC_U_PF_IS_1(pdu)) - rc = 0; - break; - } - } - return rc; -} - int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb) { u16 rc = 1; - struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); + const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); if (LLC_PDU_IS_RSP(pdu)) { if (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu)) @@ -524,9 +503,9 @@ int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk, struct sk_buff *skb) { u16 rc = 1; - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); - u8 vs = llc_sk(sk)->vS; - u8 nr = LLC_I_GET_NR(pdu); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const u8 vs = llc_sk(sk)->vS; + const u8 nr = LLC_I_GET_NR(pdu); if (LLC_PDU_IS_CMD(pdu) && (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu)) && @@ -542,9 +521,9 @@ int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk, struct sk_buff *skb) { u16 rc = 1; - struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); - u8 vs = llc_sk(sk)->vS; - u8 nr = LLC_I_GET_NR(pdu); + const struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); + const u8 vs = llc_sk(sk)->vS; + const u8 nr = LLC_I_GET_NR(pdu); if (LLC_PDU_IS_RSP(pdu) && (LLC_PDU_TYPE_IS_I(pdu) || LLC_PDU_TYPE_IS_S(pdu)) && @@ -563,28 +542,28 @@ int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb) int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb) { - struct llc_conn_state_ev *ev = llc_conn_ev(skb); + const struct llc_conn_state_ev *ev = llc_conn_ev(skb); return ev->type != LLC_CONN_EV_TYPE_P_TMR; } int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb) { - struct llc_conn_state_ev *ev = llc_conn_ev(skb); + const struct llc_conn_state_ev *ev = llc_conn_ev(skb); return ev->type != LLC_CONN_EV_TYPE_ACK_TMR; } int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb) { - struct llc_conn_state_ev *ev = llc_conn_ev(skb); + const struct llc_conn_state_ev *ev = llc_conn_ev(skb); return ev->type != LLC_CONN_EV_TYPE_REJ_TMR; } int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb) { - struct llc_conn_state_ev *ev = llc_conn_ev(skb); + const struct llc_conn_state_ev *ev = llc_conn_ev(skb); return ev->type != LLC_CONN_EV_TYPE_BUSY_TMR; } @@ -596,7 +575,7 @@ int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb) int 
llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb) { - struct llc_conn_state_ev *ev = llc_conn_ev(skb); + const struct llc_conn_state_ev *ev = llc_conn_ev(skb); return ev->type == LLC_CONN_EV_TYPE_SIMPLE && ev->prim_type == LLC_CONN_EV_TX_BUFF_FULL ? 0 : 1; diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index 4c644bc70eae..c761c15da421 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c @@ -40,6 +40,11 @@ static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk, /* Offset table on connection states transition diagram */ static int llc_offset_table[NBR_CONN_STATES][NBR_CONN_EV]; +int sysctl_llc2_ack_timeout = LLC2_ACK_TIME * HZ; +int sysctl_llc2_p_timeout = LLC2_P_TIME * HZ; +int sysctl_llc2_rej_timeout = LLC2_REJ_TIME * HZ; +int sysctl_llc2_busy_timeout = LLC2_BUSY_TIME * HZ; + /** * llc_conn_state_process - sends event to connection state machine * @sk: connection @@ -53,7 +58,7 @@ static int llc_offset_table[NBR_CONN_STATES][NBR_CONN_EV]; int llc_conn_state_process(struct sock *sk, struct sk_buff *skb) { int rc; - struct llc_sock *llc = llc_sk(sk); + struct llc_sock *llc = llc_sk(skb->sk); struct llc_conn_state_ev *ev = llc_conn_ev(skb); /* @@ -63,13 +68,16 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb) */ skb_get(skb); ev->ind_prim = ev->cfm_prim = 0; - rc = llc_conn_service(sk, skb); /* sending event to state machine */ - if (rc) { + /* + * Send event to state machine + */ + rc = llc_conn_service(skb->sk, skb); + if (unlikely(rc != 0)) { printk(KERN_ERR "%s: llc_conn_service failed\n", __FUNCTION__); goto out_kfree_skb; } - if (!ev->ind_prim && !ev->cfm_prim) { + if (unlikely(!ev->ind_prim && !ev->cfm_prim)) { /* indicate or confirm not required */ /* XXX this is not very pretty, perhaps we should store * XXX indicate/confirm-needed state in the llc_conn_state_ev @@ -80,13 +88,13 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb) goto out_skb_put; } - if (ev->ind_prim && ev->cfm_prim) /* Paranoia */ + if (unlikely(ev->ind_prim && ev->cfm_prim)) /* Paranoia */ skb_get(skb); switch (ev->ind_prim) { case LLC_DATA_PRIM: - llc_save_primitive(skb, LLC_DATA_PRIM); - if (sock_queue_rcv_skb(sk, skb)) { + llc_save_primitive(sk, skb, LLC_DATA_PRIM); + if (unlikely(sock_queue_rcv_skb(sk, skb))) { /* * shouldn't happen */ @@ -95,13 +103,14 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb) kfree_skb(skb); } break; - case LLC_CONN_PRIM: { - struct sock *parent = skb->sk; - - skb->sk = sk; - skb_queue_tail(&parent->sk_receive_queue, skb); - sk->sk_state_change(parent); - } + case LLC_CONN_PRIM: + /* + * Can't be sock_queue_rcv_skb, because we have to leave the + * skb->sk pointing to the newly created struct sock in + * llc_conn_handler. 
-acme + */ + skb_queue_tail(&sk->sk_receive_queue, skb); + sk->sk_state_change(sk); break; case LLC_DISC_PRIM: sock_hold(sk); @@ -111,8 +120,8 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb) sk->sk_socket->state = SS_UNCONNECTED; sk->sk_state = TCP_CLOSE; if (!sock_flag(sk, SOCK_DEAD)) { - sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); + sk->sk_state_change(sk); } } kfree_skb(skb); @@ -465,7 +474,7 @@ static int llc_exec_conn_trans_actions(struct sock *sk, } /** - * llc_lookup_established - Finds connection for the remote/local sap/mac + * __llc_lookup_established - Finds connection for the remote/local sap/mac * @sap: SAP * @daddr: address of remote LLC (MAC + SAP) * @laddr: address of local LLC (MAC + SAP) @@ -473,14 +482,16 @@ static int llc_exec_conn_trans_actions(struct sock *sk, * Search connection list of the SAP and finds connection using the remote * mac, remote sap, local mac, and local sap. Returns pointer for * connection found, %NULL otherwise. + * Caller has to make sure local_bh is disabled. */ -struct sock *llc_lookup_established(struct llc_sap *sap, struct llc_addr *daddr, - struct llc_addr *laddr) +static struct sock *__llc_lookup_established(struct llc_sap *sap, + struct llc_addr *daddr, + struct llc_addr *laddr) { struct sock *rc; struct hlist_node *node; - read_lock_bh(&sap->sk_list.lock); + read_lock(&sap->sk_list.lock); sk_for_each(rc, node, &sap->sk_list.list) { struct llc_sock *llc = llc_sk(rc); @@ -494,10 +505,22 @@ struct sock *llc_lookup_established(struct llc_sap *sap, struct llc_addr *daddr, } rc = NULL; found: - read_unlock_bh(&sap->sk_list.lock); + read_unlock(&sap->sk_list.lock); return rc; } +struct sock *llc_lookup_established(struct llc_sap *sap, + struct llc_addr *daddr, + struct llc_addr *laddr) +{ + struct sock *sk; + + local_bh_disable(); + sk = __llc_lookup_established(sap, daddr, laddr); + local_bh_enable(); + return sk; +} + /** * llc_lookup_listener - Finds listener for local MAC + SAP * @sap: SAP @@ -506,6 +529,7 @@ found: * Search connection list of the SAP and finds connection listening on * local mac, and local sap. Returns pointer for parent socket found, * %NULL otherwise. + * Caller has to make sure local_bh is disabled. */ static struct sock *llc_lookup_listener(struct llc_sap *sap, struct llc_addr *laddr) @@ -513,7 +537,7 @@ static struct sock *llc_lookup_listener(struct llc_sap *sap, struct sock *rc; struct hlist_node *node; - read_lock_bh(&sap->sk_list.lock); + read_lock(&sap->sk_list.lock); sk_for_each(rc, node, &sap->sk_list.list) { struct llc_sock *llc = llc_sk(rc); @@ -527,10 +551,19 @@ static struct sock *llc_lookup_listener(struct llc_sap *sap, } rc = NULL; found: - read_unlock_bh(&sap->sk_list.lock); + read_unlock(&sap->sk_list.lock); return rc; } +static struct sock *__llc_lookup(struct llc_sap *sap, + struct llc_addr *daddr, + struct llc_addr *laddr) +{ + struct sock *sk = __llc_lookup_established(sap, daddr, laddr); + + return sk ? : llc_lookup_listener(sap, laddr); +} + /** * llc_data_accept_state - designates if in this state data can be sent. * @state: state of connection. @@ -544,14 +577,14 @@ u8 llc_data_accept_state(u8 state) } /** - * find_next_offset - finds offset for next category of transitions + * llc_find_next_offset - finds offset for next category of transitions * @state: state table. * @offset: start offset. * * Finds offset of next category of transitions in transition table. * Returns the start index of next category. 
*/ -static u16 find_next_offset(struct llc_conn_state *state, u16 offset) +static u16 __init llc_find_next_offset(struct llc_conn_state *state, u16 offset) { u16 cnt = 0; struct llc_conn_state_trans **next_trans; @@ -578,8 +611,8 @@ void __init llc_build_offset_table(void) next_offset = 0; for (ev_type = 0; ev_type < NBR_CONN_EV; ev_type++) { llc_offset_table[state][ev_type] = next_offset; - next_offset += find_next_offset(curr_state, - next_offset) + 1; + next_offset += llc_find_next_offset(curr_state, + next_offset) + 1; } } } @@ -623,6 +656,7 @@ static int llc_find_offset(int state, int ev_type) */ void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk) { + llc_sap_hold(sap); write_lock_bh(&sap->sk_list.lock); llc_sk(sk)->sap = sap; sk_add_node(sk, &sap->sk_list.list); @@ -642,6 +676,7 @@ void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk) write_lock_bh(&sap->sk_list.lock); sk_del_node_init(sk); write_unlock_bh(&sap->sk_list.lock); + llc_sap_put(sap); } /** @@ -654,15 +689,34 @@ void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk) static int llc_conn_rcv(struct sock* sk, struct sk_buff *skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); - struct llc_sock *llc = llc_sk(sk); - if (!llc->dev) - llc->dev = skb->dev; ev->type = LLC_CONN_EV_TYPE_PDU; ev->reason = 0; return llc_conn_state_process(sk, skb); } +static struct sock *llc_create_incoming_sock(struct sock *sk, + struct net_device *dev, + struct llc_addr *saddr, + struct llc_addr *daddr) +{ + struct sock *newsk = llc_sk_alloc(sk->sk_family, GFP_ATOMIC, + sk->sk_prot); + struct llc_sock *newllc, *llc = llc_sk(sk); + + if (!newsk) + goto out; + newllc = llc_sk(newsk); + memcpy(&newllc->laddr, daddr, sizeof(newllc->laddr)); + memcpy(&newllc->daddr, saddr, sizeof(newllc->daddr)); + newllc->dev = dev; + dev_hold(dev); + llc_sap_add_socket(llc->sap, newsk); + llc_sap_hold(llc->sap); +out: + return newsk; +} + void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb) { struct llc_addr saddr, daddr; @@ -673,35 +727,35 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb) llc_pdu_decode_da(skb, daddr.mac); llc_pdu_decode_dsap(skb, &daddr.lsap); - sk = llc_lookup_established(sap, &saddr, &daddr); - if (!sk) { - /* - * Didn't find an active connection; verify if there - * is a listening socket for this llc addr - */ - struct llc_sock *llc; - struct sock *parent = llc_lookup_listener(sap, &daddr); + sk = __llc_lookup(sap, &saddr, &daddr); + if (!sk) + goto drop; - if (!parent) { - dprintk("llc_lookup_listener failed!\n"); - goto drop; - } - - sk = llc_sk_alloc(parent->sk_family, GFP_ATOMIC, parent->sk_prot); - if (!sk) { - sock_put(parent); - goto drop; - } - llc = llc_sk(sk); - memcpy(&llc->laddr, &daddr, sizeof(llc->laddr)); - memcpy(&llc->daddr, &saddr, sizeof(llc->daddr)); - llc_sap_add_socket(sap, sk); - sock_hold(sk); - sock_put(parent); - skb->sk = parent; - } else - skb->sk = sk; bh_lock_sock(sk); + /* + * This has to be done here and not at the upper layer ->accept + * method because of the way the PROCOM state machine works: + * it needs to set several state variables (see, for instance, + * llc_adm_actions_2 in net/llc/llc_c_st.c) and send a packet to + * the originator of the new connection, and this state has to be + * in the newly created struct sock private area. 
-acme + */ + if (unlikely(sk->sk_state == TCP_LISTEN)) { + struct sock *newsk = llc_create_incoming_sock(sk, skb->dev, + &saddr, &daddr); + if (!newsk) + goto drop_unlock; + skb_set_owner_r(skb, newsk); + } else { + /* + * Can't be skb_set_owner_r, this will be done at the + * llc_conn_state_process function, later on, when we will use + * skb_queue_rcv_skb to send it to upper layers, this is + * another trick required to cope with how the PROCOM state + * machine works. -acme + */ + skb->sk = sk; + } if (!sock_owned_by_user(sk)) llc_conn_rcv(sk, skb); else { @@ -709,11 +763,16 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb) llc_set_backlog_type(skb, LLC_PACKET); sk_add_backlog(sk, skb); } +out: bh_unlock_sock(sk); sock_put(sk); return; drop: kfree_skb(skb); + return; +drop_unlock: + kfree_skb(skb); + goto out; } #undef LLC_REFCNT_DEBUG @@ -721,32 +780,6 @@ drop: static atomic_t llc_sock_nr; #endif -/** - * llc_release_sockets - releases all sockets in a sap - * @sap: sap to release its sockets - * - * Releases all connections of a sap. Returns 0 if all actions complete - * successfully, nonzero otherwise - */ -int llc_release_sockets(struct llc_sap *sap) -{ - int rc = 0; - struct sock *sk; - struct hlist_node *node; - - write_lock_bh(&sap->sk_list.lock); - - sk_for_each(sk, node, &sap->sk_list.list) { - llc_sk(sk)->state = LLC_CONN_STATE_TEMP; - - if (llc_send_disc(sk)) - rc = 1; - } - - write_unlock_bh(&sap->sk_list.lock); - return rc; -} - /** * llc_backlog_rcv - Processes rx frames and expired timers. * @sk: LLC sock (p8022 connection) @@ -762,14 +795,14 @@ static int llc_backlog_rcv(struct sock *sk, struct sk_buff *skb) int rc = 0; struct llc_sock *llc = llc_sk(sk); - if (llc_backlog_type(skb) == LLC_PACKET) { - if (llc->state > 1) /* not closed */ + if (likely(llc_backlog_type(skb) == LLC_PACKET)) { + if (likely(llc->state > 1)) /* not closed */ rc = llc_conn_rcv(sk, skb); else goto out_kfree_skb; } else if (llc_backlog_type(skb) == LLC_EVENT) { /* timer expiration event */ - if (llc->state > 1) /* not closed */ + if (likely(llc->state > 1)) /* not closed */ rc = llc_conn_state_process(sk, skb); else goto out_kfree_skb; @@ -799,22 +832,22 @@ static void llc_sk_init(struct sock* sk) llc->dec_step = llc->connect_step = 1; init_timer(&llc->ack_timer.timer); - llc->ack_timer.expire = LLC_ACK_TIME; + llc->ack_timer.expire = sysctl_llc2_ack_timeout; llc->ack_timer.timer.data = (unsigned long)sk; llc->ack_timer.timer.function = llc_conn_ack_tmr_cb; init_timer(&llc->pf_cycle_timer.timer); - llc->pf_cycle_timer.expire = LLC_P_TIME; + llc->pf_cycle_timer.expire = sysctl_llc2_p_timeout; llc->pf_cycle_timer.timer.data = (unsigned long)sk; llc->pf_cycle_timer.timer.function = llc_conn_pf_cycle_tmr_cb; init_timer(&llc->rej_sent_timer.timer); - llc->rej_sent_timer.expire = LLC_REJ_TIME; + llc->rej_sent_timer.expire = sysctl_llc2_rej_timeout; llc->rej_sent_timer.timer.data = (unsigned long)sk; llc->rej_sent_timer.timer.function = llc_conn_rej_tmr_cb; init_timer(&llc->busy_state_timer.timer); - llc->busy_state_timer.expire = LLC_BUSY_TIME; + llc->busy_state_timer.expire = sysctl_llc2_busy_timeout; llc->busy_state_timer.timer.data = (unsigned long)sk; llc->busy_state_timer.timer.function = llc_conn_busy_tmr_cb; @@ -834,7 +867,7 @@ static void llc_sk_init(struct sock* sk) * Allocates a LLC sock and initializes it. 
Returns the new LLC sock * or %NULL if there's no memory available for one */ -struct sock *llc_sk_alloc(int family, int priority, struct proto *prot) +struct sock *llc_sk_alloc(int family, gfp_t priority, struct proto *prot) { struct sock *sk = sk_alloc(family, priority, prot, 1); diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c index 9727455bf0e7..ab0fcd32fd84 100644 --- a/net/llc/llc_core.c +++ b/net/llc/llc_core.c @@ -40,6 +40,7 @@ static struct llc_sap *llc_sap_alloc(void) sap->state = LLC_SAP_STATE_ACTIVE; memcpy(sap->laddr.mac, llc_station_mac_sa, ETH_ALEN); rwlock_init(&sap->sk_list.lock); + atomic_set(&sap->refcnt, 1); } return sap; } @@ -52,9 +53,7 @@ static struct llc_sap *llc_sap_alloc(void) */ static void llc_add_sap(struct llc_sap *sap) { - write_lock_bh(&llc_sap_list_lock); list_add_tail(&sap->node, &llc_sap_list); - write_unlock_bh(&llc_sap_list_lock); } /** @@ -70,11 +69,25 @@ static void llc_del_sap(struct llc_sap *sap) write_unlock_bh(&llc_sap_list_lock); } +static struct llc_sap *__llc_sap_find(unsigned char sap_value) +{ + struct llc_sap* sap; + + list_for_each_entry(sap, &llc_sap_list, node) + if (sap->laddr.lsap == sap_value) + goto out; + sap = NULL; +out: + return sap; +} + /** * llc_sap_find - searchs a SAP in station * @sap_value: sap to be found * * Searchs for a sap in the sap list of the LLC's station upon the sap ID. + * If the sap is found it will be refcounted and the user will have to do + * a llc_sap_put after use. * Returns the sap or %NULL if not found. */ struct llc_sap *llc_sap_find(unsigned char sap_value) @@ -82,11 +95,9 @@ struct llc_sap *llc_sap_find(unsigned char sap_value) struct llc_sap* sap; read_lock_bh(&llc_sap_list_lock); - list_for_each_entry(sap, &llc_sap_list, node) - if (sap->laddr.lsap == sap_value) - goto out; - sap = NULL; -out: + sap = __llc_sap_find(sap_value); + if (sap) + llc_sap_hold(sap); read_unlock_bh(&llc_sap_list_lock); return sap; } @@ -106,19 +117,20 @@ struct llc_sap *llc_sap_open(unsigned char lsap, struct packet_type *pt, struct net_device *orig_dev)) { - struct llc_sap *sap = llc_sap_find(lsap); + struct llc_sap *sap = NULL; - if (sap) { /* SAP already exists */ - sap = NULL; + write_lock_bh(&llc_sap_list_lock); + if (__llc_sap_find(lsap)) /* SAP already exists */ goto out; - } sap = llc_sap_alloc(); if (!sap) goto out; sap->laddr.lsap = lsap; sap->rcv_func = func; + llc_sap_hold(sap); llc_add_sap(sap); out: + write_unlock_bh(&llc_sap_list_lock); return sap; } diff --git a/net/llc/llc_if.c b/net/llc/llc_if.c index 0f84f66018e4..ba90f7f0801a 100644 --- a/net/llc/llc_if.c +++ b/net/llc/llc_if.c @@ -47,14 +47,11 @@ int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb) int rc = -ECONNABORTED; struct llc_sock *llc = llc_sk(sk); - if (llc->state == LLC_CONN_STATE_ADM) + if (unlikely(llc->state == LLC_CONN_STATE_ADM)) goto out; rc = -EBUSY; - if (llc_data_accept_state(llc->state)) { /* data_conn_refuse */ - llc->failed_data_req = 1; - goto out; - } - if (llc->p_flag) { + if (unlikely(llc_data_accept_state(llc->state) || /* data_conn_refuse */ + llc->p_flag)) { llc->failed_data_req = 1; goto out; } @@ -110,6 +107,7 @@ int llc_establish_connection(struct sock *sk, u8 *lmac, u8 *dmac, u8 dsap) ev->type = LLC_CONN_EV_TYPE_PRIM; ev->prim = LLC_CONN_PRIM; ev->prim_type = LLC_PRIM_TYPE_REQ; + skb_set_owner_w(skb, sk); rc = llc_conn_state_process(sk, skb); } out_put: @@ -144,6 +142,7 @@ int llc_send_disc(struct sock *sk) skb = alloc_skb(0, GFP_ATOMIC); if (!skb) goto out; + skb_set_owner_w(skb, sk); sk->sk_state = 
TCP_CLOSING; ev = llc_conn_ev(skb); ev->type = LLC_CONN_EV_TYPE_PRIM; diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c index 13b46240b7a1..8f3addf0724c 100644 --- a/net/llc/llc_input.c +++ b/net/llc/llc_input.c @@ -99,15 +99,19 @@ out: static inline int llc_fixup_skb(struct sk_buff *skb) { u8 llc_len = 2; - struct llc_pdu_sn *pdu; + struct llc_pdu_un *pdu; - if (!pskb_may_pull(skb, sizeof(*pdu))) + if (unlikely(!pskb_may_pull(skb, sizeof(*pdu)))) return 0; - pdu = (struct llc_pdu_sn *)skb->data; + pdu = (struct llc_pdu_un *)skb->data; if ((pdu->ctrl_1 & LLC_PDU_TYPE_MASK) == LLC_PDU_TYPE_U) llc_len = 1; llc_len += 2; + + if (unlikely(!pskb_may_pull(skb, llc_len))) + return 0; + skb->h.raw += llc_len; skb_pull(skb, llc_len); if (skb->protocol == htons(ETH_P_802_2)) { @@ -166,17 +170,22 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev, */ if (sap->rcv_func) { sap->rcv_func(skb, dev, pt, orig_dev); - goto out; + goto out_put; } dest = llc_pdu_type(skb); if (unlikely(!dest || !llc_type_handlers[dest - 1])) - goto drop; + goto drop_put; llc_type_handlers[dest - 1](sap, skb); +out_put: + llc_sap_put(sap); out: return 0; drop: kfree_skb(skb); goto out; +drop_put: + kfree_skb(skb); + goto out_put; handle_station: if (!llc_station_handler) goto drop; diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c index ab5784cf163e..b4d55b6abb67 100644 --- a/net/llc/llc_output.c +++ b/net/llc/llc_output.c @@ -98,7 +98,7 @@ int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb, dsap, LLC_PDU_CMD); llc_pdu_init_as_ui_cmd(skb); rc = llc_mac_hdr_init(skb, skb->dev->dev_addr, dmac); - if (!rc) + if (likely(!rc)) rc = dev_queue_xmit(skb); return rc; } diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c index 36e8db3fa1a2..bd531cb235a7 100644 --- a/net/llc/llc_proc.c +++ b/net/llc/llc_proc.c @@ -134,7 +134,7 @@ static int llc_seq_socket_show(struct seq_file *seq, void *v) llc_ui_format_mac(seq, llc->daddr.mac); seq_printf(seq, "@%02X %8d %8d %2d %3d %4d\n", llc->daddr.lsap, atomic_read(&sk->sk_wmem_alloc), - atomic_read(&sk->sk_rmem_alloc), + atomic_read(&sk->sk_rmem_alloc) - llc->copied_seq, sk->sk_state, sk->sk_socket ? 
SOCK_INODE(sk->sk_socket)->i_uid : -1, llc->link); diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c index ed8ba7de6122..bb3580fb8cfe 100644 --- a/net/llc/llc_s_ac.c +++ b/net/llc/llc_s_ac.c @@ -58,7 +58,7 @@ int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb) ev->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_ui_cmd(skb); rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); - if (!rc) + if (likely(!rc)) rc = dev_queue_xmit(skb); return rc; } @@ -81,7 +81,7 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb) ev->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0); rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); - if (!rc) + if (likely(!rc)) rc = dev_queue_xmit(skb); return rc; } @@ -103,15 +103,14 @@ int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb) llc_pdu_decode_sa(skb, mac_da); llc_pdu_decode_da(skb, mac_sa); llc_pdu_decode_ssap(skb, &dsap); - nskb = llc_alloc_frame(); + nskb = llc_alloc_frame(NULL, skb->dev); if (!nskb) goto out; - nskb->dev = skb->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, dsap, LLC_PDU_RSP); llc_pdu_init_as_xid_rsp(nskb, LLC_XID_NULL_CLASS_2, 0); rc = llc_mac_hdr_init(nskb, mac_sa, mac_da); - if (!rc) + if (likely(!rc)) rc = dev_queue_xmit(nskb); out: return rc; @@ -135,7 +134,7 @@ int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb) ev->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_test_cmd(skb); rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); - if (!rc) + if (likely(!rc)) rc = dev_queue_xmit(skb); return rc; } @@ -149,15 +148,14 @@ int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb) llc_pdu_decode_sa(skb, mac_da); llc_pdu_decode_da(skb, mac_sa); llc_pdu_decode_ssap(skb, &dsap); - nskb = llc_alloc_frame(); + nskb = llc_alloc_frame(NULL, skb->dev); if (!nskb) goto out; - nskb->dev = skb->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, dsap, LLC_PDU_RSP); llc_pdu_init_as_test_rsp(nskb, skb); rc = llc_mac_hdr_init(nskb, mac_sa, mac_da); - if (!rc) + if (likely(!rc)) rc = dev_queue_xmit(nskb); out: return rc; diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c index 34228ef14985..4029ceee9b91 100644 --- a/net/llc/llc_sap.c +++ b/net/llc/llc_sap.c @@ -26,11 +26,12 @@ /** * llc_alloc_frame - allocates sk_buff for frame + * @dev: network device this skb will be sent over * * Allocates an sk_buff for frame and initializes sk_buff fields. * Returns allocated skb or %NULL when out of memory. */ -struct sk_buff *llc_alloc_frame(void) +struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev) { struct sk_buff *skb = alloc_skb(128, GFP_ATOMIC); @@ -38,18 +39,23 @@ struct sk_buff *llc_alloc_frame(void) skb_reserve(skb, 50); skb->nh.raw = skb->h.raw = skb->data; skb->protocol = htons(ETH_P_802_2); - skb->dev = dev_base->next; + skb->dev = dev; skb->mac.raw = skb->head; + if (sk != NULL) + skb_set_owner_w(skb, sk); } return skb; } -void llc_save_primitive(struct sk_buff* skb, u8 prim) +void llc_save_primitive(struct sock *sk, struct sk_buff* skb, u8 prim) { - struct sockaddr_llc *addr = llc_ui_skb_cb(skb); + struct sockaddr_llc *addr; + if (skb->sk->sk_type == SOCK_STREAM) /* See UNIX98 */ + return; /* save primitive for use by the user. 
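[Editor's note: as the converted call sites above show, llc_alloc_frame() now takes the owning sock (possibly NULL) and the outgoing device, so skb->dev assignment and skb_set_owner_w() accounting happen at allocation time. A kernel-context sketch of a caller with a hypothetical function name; illustration only.]

        /* Illustration only: allocate a reply frame bound to the device the
         * request arrived on.  Passing NULL as the sock skips write-memory
         * accounting, as in the station and SAP action handlers above. */
        static struct sk_buff *llc_example_reply_frame(struct sock *sk,
                                                       struct sk_buff *req)
        {
                struct sk_buff *nskb = llc_alloc_frame(sk, req->dev);

                /* nskb->dev is already set; no separate nskb->dev = req->dev. */
                return nskb;
        }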
*/ - addr->sllc_family = skb->sk->sk_family; + addr = llc_ui_skb_cb(skb); + addr->sllc_family = sk->sk_family; addr->sllc_arphrd = skb->dev->type; addr->sllc_test = prim == LLC_TEST_PRIM; addr->sllc_xid = prim == LLC_XID_PRIM; @@ -189,7 +195,7 @@ static void llc_sap_state_process(struct llc_sap *sap, struct sk_buff *skb) if (skb->sk->sk_state == TCP_LISTEN) kfree_skb(skb); else { - llc_save_primitive(skb, ev->prim); + llc_save_primitive(skb->sk, skb, ev->prim); /* queue skb to the user. */ if (sock_queue_rcv_skb(skb->sk, skb)) @@ -308,7 +314,7 @@ void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb) sk = llc_lookup_dgram(sap, &laddr); if (sk) { - skb->sk = sk; + skb_set_owner_r(skb, sk); llc_sap_rcv(sap, skb); sock_put(sk); } else diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c index 8fe48a24bad5..f37dbf8ef126 100644 --- a/net/llc/llc_station.c +++ b/net/llc/llc_station.c @@ -50,6 +50,10 @@ struct llc_station { struct sk_buff_head mac_pdu_q; }; +#define LLC_STATION_ACK_TIME (3 * HZ) + +int sysctl_llc_station_ack_timeout = LLC_STATION_ACK_TIME; + /* Types of events (possible values in 'ev->type') */ #define LLC_STATION_EV_TYPE_SIMPLE 1 #define LLC_STATION_EV_TYPE_CONDITION 2 @@ -218,7 +222,8 @@ static void llc_station_send_pdu(struct sk_buff *skb) static int llc_station_ac_start_ack_timer(struct sk_buff *skb) { - mod_timer(&llc_main_station.ack_timer, jiffies + LLC_ACK_TIME * HZ); + mod_timer(&llc_main_station.ack_timer, + jiffies + sysctl_llc_station_ack_timeout); return 0; } @@ -249,14 +254,14 @@ static int llc_station_ac_inc_xid_r_cnt_by_1(struct sk_buff *skb) static int llc_station_ac_send_null_dsap_xid_c(struct sk_buff *skb) { int rc = 1; - struct sk_buff *nskb = llc_alloc_frame(); + struct sk_buff *nskb = llc_alloc_frame(NULL, skb->dev); if (!nskb) goto out; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 0, 0, LLC_PDU_CMD); llc_pdu_init_as_xid_cmd(nskb, LLC_XID_NULL_CLASS_2, 127); rc = llc_mac_hdr_init(nskb, llc_station_mac_sa, llc_station_mac_sa); - if (rc) + if (unlikely(rc)) goto free; llc_station_send_pdu(nskb); out: @@ -270,18 +275,17 @@ static int llc_station_ac_send_xid_r(struct sk_buff *skb) { u8 mac_da[ETH_ALEN], dsap; int rc = 1; - struct sk_buff* nskb = llc_alloc_frame(); + struct sk_buff* nskb = llc_alloc_frame(NULL, skb->dev); if (!nskb) goto out; rc = 0; - nskb->dev = skb->dev; llc_pdu_decode_sa(skb, mac_da); llc_pdu_decode_ssap(skb, &dsap); llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 0, dsap, LLC_PDU_RSP); llc_pdu_init_as_xid_rsp(nskb, LLC_XID_NULL_CLASS_2, 127); rc = llc_mac_hdr_init(nskb, llc_station_mac_sa, mac_da); - if (rc) + if (unlikely(rc)) goto free; llc_station_send_pdu(nskb); out: @@ -295,18 +299,17 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb) { u8 mac_da[ETH_ALEN], dsap; int rc = 1; - struct sk_buff *nskb = llc_alloc_frame(); + struct sk_buff *nskb = llc_alloc_frame(NULL, skb->dev); if (!nskb) goto out; rc = 0; - nskb->dev = skb->dev; llc_pdu_decode_sa(skb, mac_da); llc_pdu_decode_ssap(skb, &dsap); llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 0, dsap, LLC_PDU_RSP); llc_pdu_init_as_test_rsp(nskb, skb); rc = llc_mac_hdr_init(nskb, llc_station_mac_sa, mac_da); - if (rc) + if (unlikely(rc)) goto free; llc_station_send_pdu(nskb); out: @@ -689,7 +692,8 @@ int __init llc_station_init(void) init_timer(&llc_main_station.ack_timer); llc_main_station.ack_timer.data = (unsigned long)&llc_main_station; llc_main_station.ack_timer.function = llc_station_ack_tmr_cb; - + llc_main_station.ack_timer.expires = jiffies + + 
sysctl_llc_station_ack_timeout; skb = alloc_skb(0, GFP_ATOMIC); if (!skb) goto out; @@ -697,7 +701,6 @@ int __init llc_station_init(void) llc_set_station_handler(llc_station_rcv); ev = llc_station_ev(skb); memset(ev, 0, sizeof(*ev)); - llc_main_station.ack_timer.expires = jiffies + 3 * HZ; llc_main_station.maximum_retry = 1; llc_main_station.state = LLC_STATION_STATE_DOWN; ev->type = LLC_STATION_EV_TYPE_SIMPLE; diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c new file mode 100644 index 000000000000..d1eaddb13633 --- /dev/null +++ b/net/llc/sysctl_net_llc.c @@ -0,0 +1,131 @@ +/* + * sysctl_net_llc.c: sysctl interface to LLC net subsystem. + * + * Arnaldo Carvalho de Melo + */ + +#include +#include +#include +#include +#include + +#ifndef CONFIG_SYSCTL +#error This file should not be compiled without CONFIG_SYSCTL defined +#endif + +static struct ctl_table llc2_timeout_table[] = { + { + .ctl_name = NET_LLC2_ACK_TIMEOUT, + .procname = "ack", + .data = &sysctl_llc2_ack_timeout, + .maxlen = sizeof(long), + .mode = 0644, + .proc_handler = &proc_dointvec_jiffies, + .strategy = &sysctl_jiffies, + }, + { + .ctl_name = NET_LLC2_BUSY_TIMEOUT, + .procname = "busy", + .data = &sysctl_llc2_busy_timeout, + .maxlen = sizeof(long), + .mode = 0644, + .proc_handler = &proc_dointvec_jiffies, + .strategy = &sysctl_jiffies, + }, + { + .ctl_name = NET_LLC2_P_TIMEOUT, + .procname = "p", + .data = &sysctl_llc2_p_timeout, + .maxlen = sizeof(long), + .mode = 0644, + .proc_handler = &proc_dointvec_jiffies, + .strategy = &sysctl_jiffies, + }, + { + .ctl_name = NET_LLC2_REJ_TIMEOUT, + .procname = "rej", + .data = &sysctl_llc2_rej_timeout, + .maxlen = sizeof(long), + .mode = 0644, + .proc_handler = &proc_dointvec_jiffies, + .strategy = &sysctl_jiffies, + }, + { 0 }, +}; + +static struct ctl_table llc_station_table[] = { + { + .ctl_name = NET_LLC_STATION_ACK_TIMEOUT, + .procname = "ack_timeout", + .data = &sysctl_llc_station_ack_timeout, + .maxlen = sizeof(long), + .mode = 0644, + .proc_handler = &proc_dointvec_jiffies, + .strategy = &sysctl_jiffies, + }, + { 0 }, +}; + +static struct ctl_table llc2_dir_timeout_table[] = { + { + .ctl_name = NET_LLC2, + .procname = "timeout", + .mode = 0555, + .child = llc2_timeout_table, + }, + { 0 }, +}; + +static struct ctl_table llc_table[] = { + { + .ctl_name = NET_LLC2, + .procname = "llc2", + .mode = 0555, + .child = llc2_dir_timeout_table, + }, + { + .ctl_name = NET_LLC_STATION, + .procname = "station", + .mode = 0555, + .child = llc_station_table, + }, + { 0 }, +}; + +static struct ctl_table llc_dir_table[] = { + { + .ctl_name = NET_LLC, + .procname = "llc", + .mode = 0555, + .child = llc_table, + }, + { 0 }, +}; + +static struct ctl_table llc_root_table[] = { + { + .ctl_name = CTL_NET, + .procname = "net", + .mode = 0555, + .child = llc_dir_table, + }, + { 0 }, +}; + +static struct ctl_table_header *llc_table_header; + +int __init llc_sysctl_init(void) +{ + llc_table_header = register_sysctl_table(llc_root_table, 1); + + return llc_table_header ? 
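[Editor's note: the sysctl tables registered in sysctl_net_llc.c above expose the new LLC timeout knobs. A small user-space sketch of reading them, assuming the /proc paths implied by the table nesting (net -> llc -> llc2 -> timeout -> {ack,busy,p,rej} and net -> llc -> station -> ack_timeout); proc_dointvec_jiffies presents the values in seconds.]

        #include <stdio.h>

        int main(void)
        {
                const char *paths[] = {
                        "/proc/sys/net/llc/llc2/timeout/ack",
                        "/proc/sys/net/llc/llc2/timeout/busy",
                        "/proc/sys/net/llc/llc2/timeout/p",
                        "/proc/sys/net/llc/llc2/timeout/rej",
                        "/proc/sys/net/llc/station/ack_timeout",
                };
                unsigned int i;

                for (i = 0; i < sizeof(paths) / sizeof(paths[0]); i++) {
                        FILE *f = fopen(paths[i], "r");
                        long val;

                        if (!f)
                                continue;
                        if (fscanf(f, "%ld", &val) == 1)
                                printf("%s = %ld\n", paths[i], val);
                        fclose(f);
                }
                return 0;
        }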
0 : -ENOMEM; +} + +void llc_sysctl_exit(void) +{ + if (llc_table_header) { + unregister_sysctl_table(llc_table_header); + llc_table_header = NULL; + } +} diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 49a3900e3d32..4bc27a6334c1 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -133,7 +133,7 @@ int nfattr_parse(struct nfattr *tb[], int maxattr, struct nfattr *nfa, int len) memset(tb, 0, sizeof(struct nfattr *) * maxattr); while (NFA_OK(nfa, len)) { - unsigned flavor = nfa->nfa_type; + unsigned flavor = NFA_TYPE(nfa); if (flavor && flavor <= maxattr) tb[flavor-1] = nfa; nfa = NFA_NEXT(nfa, len); @@ -177,7 +177,7 @@ nfnetlink_check_attributes(struct nfnetlink_subsystem *subsys, int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len); while (NFA_OK(attr, attrlen)) { - unsigned flavor = attr->nfa_type; + unsigned flavor = NFA_TYPE(attr); if (flavor) { if (flavor > attr_count) return -EINVAL; @@ -195,7 +195,7 @@ nfnetlink_check_attributes(struct nfnetlink_subsystem *subsys, int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo) { - int allocation = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; + gfp_t allocation = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; int err = 0; NETLINK_CB(skb).dst_group = group; diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index ff5601ceedcb..efcd10f996ba 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -494,8 +494,8 @@ __build_packet_message(struct nfulnl_instance *inst, if (skb->tstamp.off_sec) { struct nfulnl_msg_packet_timestamp ts; - ts.sec = cpu_to_be64(skb_tv_base.tv_sec + skb->tstamp.off_sec); - ts.usec = cpu_to_be64(skb_tv_base.tv_usec + skb->tstamp.off_usec); + ts.sec = cpu_to_be64(skb->tstamp.off_sec); + ts.usec = cpu_to_be64(skb->tstamp.off_usec); NFA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts); } diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index f81fe8c52e99..eaa44c49567b 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -492,8 +492,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, if (entry->skb->tstamp.off_sec) { struct nfqnl_msg_packet_timestamp ts; - ts.sec = cpu_to_be64(skb_tv_base.tv_sec + entry->skb->tstamp.off_sec); - ts.usec = cpu_to_be64(skb_tv_base.tv_usec + entry->skb->tstamp.off_usec); + ts.sec = cpu_to_be64(entry->skb->tstamp.off_sec); + ts.usec = cpu_to_be64(entry->skb->tstamp.off_usec); NFA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts); } diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index a64e1d5ce3ca..5ca283537bc6 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -740,11 +740,8 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long t int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol) { - struct netlink_sock *nlk; int len = skb->len; - nlk = nlk_sk(sk); - skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk, len); sock_put(sk); @@ -758,7 +755,7 @@ void netlink_detachskb(struct sock *sk, struct sk_buff *skb) } static inline struct sk_buff *netlink_trim(struct sk_buff *skb, - unsigned int __nocast allocation) + gfp_t allocation) { int delta; @@ -827,7 +824,7 @@ struct netlink_broadcast_data { int failure; int congested; int delivered; - unsigned int allocation; + gfp_t allocation; struct sk_buff *skb, *skb2; }; @@ -880,7 +877,7 @@ out: } int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid, - u32 group, unsigned int __nocast 
allocation) + u32 group, gfp_t allocation) { struct netlink_broadcast_data info; struct hlist_node *node; diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c index 4e66eef9a034..509afddae569 100644 --- a/net/netrom/nr_dev.c +++ b/net/netrom/nr_dev.c @@ -58,7 +58,7 @@ int nr_rx_ip(struct sk_buff *skb, struct net_device *dev) /* Spoof incoming device */ skb->dev = dev; - skb->h.raw = skb->data; + skb->mac.raw = skb->nh.raw; skb->nh.raw = skb->data; skb->pkt_type = PACKET_HOST; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index ee865d88183b..499ae3df4a44 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -654,8 +654,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe __net_timestamp(skb); sock_enable_timestamp(sk); } - h->tp_sec = skb_tv_base.tv_sec + skb->tstamp.off_sec; - h->tp_usec = skb_tv_base.tv_usec + skb->tstamp.off_usec; + h->tp_sec = skb->tstamp.off_sec; + h->tp_usec = skb->tstamp.off_usec; sll = (struct sockaddr_ll*)((u8*)h + TPACKET_ALIGN(sizeof(*h))); sll->sll_halen = 0; @@ -761,12 +761,6 @@ static int packet_sendmsg(struct kiocb *iocb, struct socket *sock, if (dev->hard_header) { int res; err = -EINVAL; - if (saddr) { - if (saddr->sll_halen != dev->addr_len) - goto out_free; - if (saddr->sll_hatype != dev->type) - goto out_free; - } res = dev->hard_header(skb, dev, ntohs(proto), addr, NULL, len); if (sock->type != SOCK_DGRAM) { skb->tail = skb->data; diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 5acb1680524a..829fdbc4400b 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -1472,22 +1472,25 @@ static const char banner[] = KERN_INFO "F6FBB/G4KLX ROSE for Linux. Version 0.62 static int __init rose_proto_init(void) { int i; - int rc = proto_register(&rose_proto, 0); + int rc; + if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) { + printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter to large\n"); + rc = -EINVAL; + goto out; + } + + rc = proto_register(&rose_proto, 0); if (rc != 0) goto out; rose_callsign = null_ax25_address; - if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) { - printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter to large\n"); - return -1; - } - dev_rose = kmalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL); if (dev_rose == NULL) { printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n"); - return -1; + rc = -ENOMEM; + goto out_proto_unregister; } memset(dev_rose, 0x00, rose_ndevs * sizeof(struct net_device*)); @@ -1500,10 +1503,12 @@ static int __init rose_proto_init(void) name, rose_setup); if (!dev) { printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n"); + rc = -ENOMEM; goto fail; } - if (register_netdev(dev)) { - printk(KERN_ERR "ROSE: netdevice regeistration failed\n"); + rc = register_netdev(dev); + if (rc) { + printk(KERN_ERR "ROSE: netdevice registration failed\n"); free_netdev(dev); goto fail; } @@ -1536,8 +1541,9 @@ fail: free_netdev(dev_rose[i]); } kfree(dev_rose); +out_proto_unregister: proto_unregister(&rose_proto); - return -ENOMEM; + goto out; } module_init(rose_proto_init); diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index e556d92c0bc4..b18fe5043019 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c @@ -727,7 +727,7 @@ int rose_rt_ioctl(unsigned int cmd, void __user *arg) } if (rose_route.mask > 10) /* Mask can't be more than 10 digits */ return -EINVAL; - if (rose_route.ndigis > 8) /* No more than 8 digipeats */ + if (rose_route.ndigis > 
AX25_MAX_DIGIS) return -EINVAL; err = rose_add_node(&rose_route, dev); dev_put(dev); diff --git a/net/rxrpc/call.c b/net/rxrpc/call.c index 5cfd4cadee42..c4aeb7d40266 100644 --- a/net/rxrpc/call.c +++ b/net/rxrpc/call.c @@ -1923,7 +1923,7 @@ int rxrpc_call_write_data(struct rxrpc_call *call, size_t sioc, struct kvec *siov, u8 rxhdr_flags, - int alloc_flags, + gfp_t alloc_flags, int dup_data, size_t *size_sent) { diff --git a/net/rxrpc/connection.c b/net/rxrpc/connection.c index 61463c74f8cc..2ba14a75dbbe 100644 --- a/net/rxrpc/connection.c +++ b/net/rxrpc/connection.c @@ -522,7 +522,7 @@ int rxrpc_conn_newmsg(struct rxrpc_connection *conn, uint8_t type, int dcount, struct kvec diov[], - int alloc_flags, + gfp_t alloc_flags, struct rxrpc_message **_msg) { struct rxrpc_message *msg; diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 45d3bc0812c8..81510da31792 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -72,9 +72,11 @@ config NET_SCH_CLK_GETTIMEOFDAY Choose this if you need a high resolution clock source but can't use the CPU's cycle counter. +# don't allow on SMP x86 because they can have unsynchronized TSCs. +# gettimeofday is a good alternative config NET_SCH_CLK_CPU bool "CPU cycle counter" - depends on X86_TSC || X86_64 || ALPHA || SPARC64 || PPC64 || IA64 + depends on ((X86_TSC || X86_64) && !SMP) || ALPHA || SPARC64 || PPC64 || IA64 help Say Y here if you want to use the CPU's cycle counter as clock source. This is a cheap and high resolution clock source, but on some diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index 00eae5f9a01a..cf68a59fdc5a 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -393,10 +393,10 @@ META_COLLECTOR(int_sk_route_caps) dst->value = skb->sk->sk_route_caps; } -META_COLLECTOR(int_sk_hashent) +META_COLLECTOR(int_sk_hash) { SKIP_NONLOCAL(skb); - dst->value = skb->sk->sk_hashent; + dst->value = skb->sk->sk_hash; } META_COLLECTOR(int_sk_lingertime) @@ -515,7 +515,7 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = { [META_ID(SK_FORWARD_ALLOCS)] = META_FUNC(int_sk_fwd_alloc), [META_ID(SK_ALLOCS)] = META_FUNC(int_sk_alloc), [META_ID(SK_ROUTE_CAPS)] = META_FUNC(int_sk_route_caps), - [META_ID(SK_HASHENT)] = META_FUNC(int_sk_hashent), + [META_ID(SK_HASH)] = META_FUNC(int_sk_hash), [META_ID(SK_LINGERTIME)] = META_FUNC(int_sk_lingertime), [META_ID(SK_ACK_BACKLOG)] = META_FUNC(int_sk_ack_bl), [META_ID(SK_MAX_ACK_BACKLOG)] = META_FUNC(int_sk_max_ack_bl), diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 5b24ae0650d3..12b0f582a66b 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -71,7 +71,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a const struct sctp_endpoint *ep, const struct sock *sk, sctp_scope_t scope, - unsigned int __nocast gfp) + gfp_t gfp) { struct sctp_sock *sp; int i; @@ -273,7 +273,7 @@ fail_init: struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep, const struct sock *sk, sctp_scope_t scope, - unsigned int __nocast gfp) + gfp_t gfp) { struct sctp_association *asoc; @@ -479,7 +479,7 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc, /* Add a transport address to an association. 
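[Editor's note: the change that repeats through these SCTP and RxRPC hunks is purely a type annotation. Allocation-flag parameters move from "unsigned int __nocast" to the dedicated gfp_t type so sparse can check that real GFP flags are passed. A minimal kernel-context sketch of the pattern, with a hypothetical helper name.]

        #include <linux/slab.h>

        /* Hypothetical helper (kernel context): 'gfp' is now typed gfp_t, so
         * callers must pass GFP_KERNEL, GFP_ATOMIC, etc., and sparse will
         * warn about bare integers passed by mistake. */
        static void *example_alloc_event(size_t size, gfp_t gfp)
        {
                return kmalloc(size, gfp);
        }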
*/ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, const union sctp_addr *addr, - const unsigned int __nocast gfp, + const gfp_t gfp, const int peer_state) { struct sctp_transport *peer; @@ -1231,7 +1231,7 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len) * local endpoint and the remote peer. */ int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, - unsigned int __nocast gfp) + gfp_t gfp) { sctp_scope_t scope; int flags; @@ -1254,7 +1254,7 @@ int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, /* Build the association's bind address list from the cookie. */ int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc, struct sctp_cookie *cookie, - unsigned int __nocast gfp) + gfp_t gfp) { int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length); int var_size3 = cookie->raw_addr_list_len; diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c index f71549710f2e..2b962627f631 100644 --- a/net/sctp/bind_addr.c +++ b/net/sctp/bind_addr.c @@ -53,7 +53,7 @@ /* Forward declarations for internal helpers. */ static int sctp_copy_one_addr(struct sctp_bind_addr *, union sctp_addr *, - sctp_scope_t scope, unsigned int __nocast gfp, + sctp_scope_t scope, gfp_t gfp, int flags); static void sctp_bind_addr_clean(struct sctp_bind_addr *); @@ -64,7 +64,7 @@ static void sctp_bind_addr_clean(struct sctp_bind_addr *); */ int sctp_bind_addr_copy(struct sctp_bind_addr *dest, const struct sctp_bind_addr *src, - sctp_scope_t scope, unsigned int __nocast gfp, + sctp_scope_t scope, gfp_t gfp, int flags) { struct sctp_sockaddr_entry *addr; @@ -146,7 +146,7 @@ void sctp_bind_addr_free(struct sctp_bind_addr *bp) /* Add an address to the bind address list in the SCTP_bind_addr structure. */ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new, - unsigned int __nocast gfp) + gfp_t gfp) { struct sctp_sockaddr_entry *addr; @@ -200,7 +200,7 @@ int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr) */ union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp, int *addrs_len, - unsigned int __nocast gfp) + gfp_t gfp) { union sctp_params addrparms; union sctp_params retval; @@ -252,7 +252,7 @@ end_raw: * address parameters). */ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list, - int addrs_len, __u16 port, unsigned int __nocast gfp) + int addrs_len, __u16 port, gfp_t gfp) { union sctp_addr_param *rawaddr; struct sctp_paramhdr *param; @@ -350,7 +350,7 @@ union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp, /* Copy out addresses from the global local address list. */ static int sctp_copy_one_addr(struct sctp_bind_addr *dest, union sctp_addr *addr, - sctp_scope_t scope, unsigned int __nocast gfp, + sctp_scope_t scope, gfp_t gfp, int flags) { int error = 0; diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index 61da2937e641..83ef411772f4 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -62,7 +62,7 @@ static void sctp_datamsg_init(struct sctp_datamsg *msg) } /* Allocate and initialize datamsg. 
*/ -SCTP_STATIC struct sctp_datamsg *sctp_datamsg_new(unsigned int __nocast gfp) +SCTP_STATIC struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp) { struct sctp_datamsg *msg; msg = kmalloc(sizeof(struct sctp_datamsg), gfp); diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index e22ccd655965..96984f7a2d69 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -68,7 +68,7 @@ static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep); */ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, struct sock *sk, - unsigned int __nocast gfp) + gfp_t gfp) { struct sctp_sock *sp = sctp_sk(sk); memset(ep, 0, sizeof(struct sctp_endpoint)); @@ -138,8 +138,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, /* Create a sctp_endpoint with all that boring stuff initialized. * Returns NULL if there isn't enough memory. */ -struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, - unsigned int __nocast gfp) +struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp) { struct sctp_endpoint *ep; diff --git a/net/sctp/proc.c b/net/sctp/proc.c index b74f7772b576..6e4dc28874d7 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -69,9 +69,7 @@ fold_field(void *mib[], int nr) unsigned long res = 0; int i; - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_possible(i)) - continue; + for_each_cpu(i) { res += *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) + sizeof (unsigned long) * nr)); diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index e7025be77691..26de4d3e1bd9 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -147,7 +147,7 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist, struct sctp_sockaddr_entry *addr; rcu_read_lock(); - if ((in_dev = __in_dev_get(dev)) == NULL) { + if ((in_dev = __in_dev_get_rcu(dev)) == NULL) { rcu_read_unlock(); return; } @@ -219,7 +219,7 @@ static void sctp_free_local_addr_list(void) /* Copy the local addresses which are valid for 'scope' into 'bp'. */ int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope, - unsigned int __nocast gfp, int copy_flags) + gfp_t gfp, int copy_flags) { struct sctp_sockaddr_entry *addr; int error = 0; diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 3868a8d70cc0..10e82ec2ebd3 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -78,7 +78,7 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, static int sctp_process_param(struct sctp_association *asoc, union sctp_params param, const union sctp_addr *peer_addr, - unsigned int __nocast gfp); + gfp_t gfp); /* What was the inbound interface for this chunk? */ int sctp_chunk_iif(const struct sctp_chunk *chunk) @@ -174,7 +174,7 @@ void sctp_init_cause(struct sctp_chunk *chunk, __u16 cause_code, */ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, const struct sctp_bind_addr *bp, - unsigned int __nocast gfp, int vparam_len) + gfp_t gfp, int vparam_len) { sctp_inithdr_t init; union sctp_params addrs; @@ -261,7 +261,7 @@ nodata: struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, const struct sctp_chunk *chunk, - unsigned int __nocast gfp, int unkparam_len) + gfp_t gfp, int unkparam_len) { sctp_inithdr_t initack; struct sctp_chunk *retval; @@ -1234,7 +1234,7 @@ void sctp_chunk_assign_tsn(struct sctp_chunk *chunk) /* Create a CLOSED association to use with an incoming packet. 
*/ struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *ep, struct sctp_chunk *chunk, - unsigned int __nocast gfp) + gfp_t gfp) { struct sctp_association *asoc; struct sk_buff *skb; @@ -1349,7 +1349,7 @@ nodata: struct sctp_association *sctp_unpack_cookie( const struct sctp_endpoint *ep, const struct sctp_association *asoc, - struct sctp_chunk *chunk, unsigned int __nocast gfp, + struct sctp_chunk *chunk, gfp_t gfp, int *error, struct sctp_chunk **errp) { struct sctp_association *retval = NULL; @@ -1814,7 +1814,7 @@ int sctp_verify_init(const struct sctp_association *asoc, */ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid, const union sctp_addr *peer_addr, - sctp_init_chunk_t *peer_init, unsigned int __nocast gfp) + sctp_init_chunk_t *peer_init, gfp_t gfp) { union sctp_params param; struct sctp_transport *transport; @@ -1985,7 +1985,7 @@ nomem: static int sctp_process_param(struct sctp_association *asoc, union sctp_params param, const union sctp_addr *peer_addr, - unsigned int __nocast gfp) + gfp_t gfp) { union sctp_addr addr; int i; diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 39c970b5b198..f84173ea8ec1 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -63,7 +63,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, void *event_arg, sctp_disposition_t status, sctp_cmd_seq_t *commands, - unsigned int __nocast gfp); + gfp_t gfp); static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, sctp_state_t state, struct sctp_endpoint *ep, @@ -71,7 +71,7 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, void *event_arg, sctp_disposition_t status, sctp_cmd_seq_t *commands, - unsigned int __nocast gfp); + gfp_t gfp); /******************************************************************** * Helper functions @@ -498,7 +498,7 @@ static int sctp_cmd_process_init(sctp_cmd_seq_t *commands, struct sctp_association *asoc, struct sctp_chunk *chunk, sctp_init_chunk_t *peer_init, - unsigned int __nocast gfp) + gfp_t gfp) { int error; @@ -853,7 +853,7 @@ int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype, struct sctp_endpoint *ep, struct sctp_association *asoc, void *event_arg, - unsigned int __nocast gfp) + gfp_t gfp) { sctp_cmd_seq_t commands; const sctp_sm_table_entry_t *state_fn; @@ -898,7 +898,7 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, void *event_arg, sctp_disposition_t status, sctp_cmd_seq_t *commands, - unsigned int __nocast gfp) + gfp_t gfp) { int error; @@ -986,7 +986,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, void *event_arg, sctp_disposition_t status, sctp_cmd_seq_t *commands, - unsigned int __nocast gfp) + gfp_t gfp) { int error = 0; int force; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 86073df418f5..505c7de10c50 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -2414,6 +2414,17 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep, skb_pull(chunk->skb, sizeof(sctp_shutdownhdr_t)); chunk->subh.shutdown_hdr = sdh; + /* API 5.3.1.5 SCTP_SHUTDOWN_EVENT + * When a peer sends a SHUTDOWN, SCTP delivers this notification to + * inform the application that it should cease sending data. 
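[Editor's note: the sctp_sf_do_9_2_shutdown() hunk here moves delivery of the SCTP_SHUTDOWN_EVENT notification ahead of the state-change commands. Applications only see that notification if they subscribe to it; a user-space sketch of the subscription follows, assuming lksctp-tools' <netinet/sctp.h>. Not part of the patch.]

        #include <string.h>
        #include <sys/socket.h>
        #include <netinet/in.h>
        #include <netinet/sctp.h>

        /* Enable SCTP_SHUTDOWN_EVENT delivery on an SCTP socket. */
        static int subscribe_shutdown_event(int fd)
        {
                struct sctp_event_subscribe ev;

                memset(&ev, 0, sizeof(ev));
                ev.sctp_shutdown_event = 1;
                return setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev));
        }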
+ */ + ev = sctp_ulpevent_make_shutdown_event(asoc, 0, GFP_ATOMIC); + if (!ev) { + disposition = SCTP_DISPOSITION_NOMEM; + goto out; + } + sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); + /* Upon the reception of the SHUTDOWN, the peer endpoint shall * - enter the SHUTDOWN-RECEIVED state, * - stop accepting new data from its SCTP user @@ -2439,17 +2450,6 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep, sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN, SCTP_U32(chunk->subh.shutdown_hdr->cum_tsn_ack)); - /* API 5.3.1.5 SCTP_SHUTDOWN_EVENT - * When a peer sends a SHUTDOWN, SCTP delivers this notification to - * inform the application that it should cease sending data. - */ - ev = sctp_ulpevent_make_shutdown_event(asoc, 0, GFP_ATOMIC); - if (!ev) { - disposition = SCTP_DISPOSITION_NOMEM; - goto out; - } - sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); - out: return disposition; } diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 91ec8c936913..02e068d3450d 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3159,8 +3159,9 @@ static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval return 0; } -static int sctp_getsockopt_peer_addrs_num(struct sock *sk, int len, - char __user *optval, int __user *optlen) +static int sctp_getsockopt_peer_addrs_num_old(struct sock *sk, int len, + char __user *optval, + int __user *optlen) { sctp_assoc_t id; struct sctp_association *asoc; @@ -3185,23 +3186,28 @@ static int sctp_getsockopt_peer_addrs_num(struct sock *sk, int len, return cnt; } -static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, - char __user *optval, int __user *optlen) +/* + * Old API for getting list of peer addresses. Does not work for 32-bit + * programs running on a 64-bit kernel + */ +static int sctp_getsockopt_peer_addrs_old(struct sock *sk, int len, + char __user *optval, + int __user *optlen) { struct sctp_association *asoc; struct list_head *pos; int cnt = 0; - struct sctp_getaddrs getaddrs; + struct sctp_getaddrs_old getaddrs; struct sctp_transport *from; void __user *to; union sctp_addr temp; struct sctp_sock *sp = sctp_sk(sk); int addrlen; - if (len != sizeof(struct sctp_getaddrs)) + if (len != sizeof(struct sctp_getaddrs_old)) return -EINVAL; - if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) + if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs_old))) return -EFAULT; if (getaddrs.addr_num <= 0) return -EINVAL; @@ -3225,15 +3231,69 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, if (cnt >= getaddrs.addr_num) break; } getaddrs.addr_num = cnt; - if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs))) + if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs_old))) return -EFAULT; return 0; } -static int sctp_getsockopt_local_addrs_num(struct sock *sk, int len, - char __user *optval, - int __user *optlen) +static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, + char __user *optval, int __user *optlen) +{ + struct sctp_association *asoc; + struct list_head *pos; + int cnt = 0; + struct sctp_getaddrs getaddrs; + struct sctp_transport *from; + void __user *to; + union sctp_addr temp; + struct sctp_sock *sp = sctp_sk(sk); + int addrlen; + size_t space_left; + int bytes_copied; + + if (len < sizeof(struct sctp_getaddrs)) + return -EINVAL; + + if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) + return -EFAULT; + + /* For UDP-style sockets, id specifies the association to query. 
*/ + asoc = sctp_id2assoc(sk, getaddrs.assoc_id); + if (!asoc) + return -EINVAL; + + to = optval + offsetof(struct sctp_getaddrs,addrs); + space_left = len - sizeof(struct sctp_getaddrs) - + offsetof(struct sctp_getaddrs,addrs); + + list_for_each(pos, &asoc->peer.transport_addr_list) { + from = list_entry(pos, struct sctp_transport, transports); + memcpy(&temp, &from->ipaddr, sizeof(temp)); + sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); + addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len; + if(space_left < addrlen) + return -ENOMEM; + temp.v4.sin_port = htons(temp.v4.sin_port); + if (copy_to_user(to, &temp, addrlen)) + return -EFAULT; + to += addrlen; + cnt++; + space_left -= addrlen; + } + + if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) + return -EFAULT; + bytes_copied = ((char __user *)to) - optval; + if (put_user(bytes_copied, optlen)) + return -EFAULT; + + return 0; +} + +static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len, + char __user *optval, + int __user *optlen) { sctp_assoc_t id; struct sctp_bind_addr *bp; @@ -3306,8 +3366,8 @@ done: /* Helper function that copies local addresses to user and returns the number * of addresses copied. */ -static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port, int max_addrs, - void __user *to) +static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_addrs, + void __user *to) { struct list_head *pos; struct sctp_sockaddr_entry *addr; @@ -3341,14 +3401,54 @@ static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port, int max_addrs, return cnt; } -static int sctp_getsockopt_local_addrs(struct sock *sk, int len, - char __user *optval, int __user *optlen) +static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port, + void * __user *to, size_t space_left) +{ + struct list_head *pos; + struct sctp_sockaddr_entry *addr; + unsigned long flags; + union sctp_addr temp; + int cnt = 0; + int addrlen; + + sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags); + list_for_each(pos, &sctp_local_addr_list) { + addr = list_entry(pos, struct sctp_sockaddr_entry, list); + if ((PF_INET == sk->sk_family) && + (AF_INET6 == addr->a.sa.sa_family)) + continue; + memcpy(&temp, &addr->a, sizeof(temp)); + sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), + &temp); + addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; + if(space_leftaddress_list.next, struct sctp_sockaddr_entry, list); if (sctp_is_any(&addr->a)) { - cnt = sctp_copy_laddrs_to_user(sk, bp->port, - getaddrs.addr_num, to); + cnt = sctp_copy_laddrs_to_user_old(sk, bp->port, + getaddrs.addr_num, + to); if (cnt < 0) { err = cnt; goto unlock; @@ -3419,7 +3520,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len, copy_getaddrs: getaddrs.addr_num = cnt; - if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs))) + if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs_old))) err = -EFAULT; unlock: @@ -3427,6 +3528,99 @@ unlock: return err; } +static int sctp_getsockopt_local_addrs(struct sock *sk, int len, + char __user *optval, int __user *optlen) +{ + struct sctp_bind_addr *bp; + struct sctp_association *asoc; + struct list_head *pos; + int cnt = 0; + struct sctp_getaddrs getaddrs; + struct sctp_sockaddr_entry *addr; + void __user *to; + union sctp_addr temp; + struct sctp_sock *sp = sctp_sk(sk); + int addrlen; + rwlock_t *addr_lock; + int err = 0; + size_t space_left; + int bytes_copied; + + if (len <= sizeof(struct sctp_getaddrs)) + return -EINVAL; + + 
if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) + return -EFAULT; + + /* + * For UDP-style sockets, id specifies the association to query. + * If the id field is set to the value '0' then the locally bound + * addresses are returned without regard to any particular + * association. + */ + if (0 == getaddrs.assoc_id) { + bp = &sctp_sk(sk)->ep->base.bind_addr; + addr_lock = &sctp_sk(sk)->ep->base.addr_lock; + } else { + asoc = sctp_id2assoc(sk, getaddrs.assoc_id); + if (!asoc) + return -EINVAL; + bp = &asoc->base.bind_addr; + addr_lock = &asoc->base.addr_lock; + } + + to = optval + offsetof(struct sctp_getaddrs,addrs); + space_left = len - sizeof(struct sctp_getaddrs) - + offsetof(struct sctp_getaddrs,addrs); + + sctp_read_lock(addr_lock); + + /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid + * addresses from the global local address list. + */ + if (sctp_list_single_entry(&bp->address_list)) { + addr = list_entry(bp->address_list.next, + struct sctp_sockaddr_entry, list); + if (sctp_is_any(&addr->a)) { + cnt = sctp_copy_laddrs_to_user(sk, bp->port, + &to, space_left); + if (cnt < 0) { + err = cnt; + goto unlock; + } + goto copy_getaddrs; + } + } + + list_for_each(pos, &bp->address_list) { + addr = list_entry(pos, struct sctp_sockaddr_entry, list); + memcpy(&temp, &addr->a, sizeof(temp)); + sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); + addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; + if(space_left < addrlen) + return -ENOMEM; /*fixme: right error?*/ + temp.v4.sin_port = htons(temp.v4.sin_port); + if (copy_to_user(to, &temp, addrlen)) { + err = -EFAULT; + goto unlock; + } + to += addrlen; + cnt ++; + space_left -= addrlen; + } + +copy_getaddrs: + if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) + return -EFAULT; + bytes_copied = ((char __user *)to) - optval; + if (put_user(bytes_copied, optlen)) + return -EFAULT; + +unlock: + sctp_read_unlock(addr_lock); + return err; +} + /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) * * Requests that the local SCTP stack use the enclosed peer address as @@ -3807,12 +4001,20 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, case SCTP_INITMSG: retval = sctp_getsockopt_initmsg(sk, len, optval, optlen); break; - case SCTP_GET_PEER_ADDRS_NUM: - retval = sctp_getsockopt_peer_addrs_num(sk, len, optval, + case SCTP_GET_PEER_ADDRS_NUM_OLD: + retval = sctp_getsockopt_peer_addrs_num_old(sk, len, optval, + optlen); + break; + case SCTP_GET_LOCAL_ADDRS_NUM_OLD: + retval = sctp_getsockopt_local_addrs_num_old(sk, len, optval, + optlen); + break; + case SCTP_GET_PEER_ADDRS_OLD: + retval = sctp_getsockopt_peer_addrs_old(sk, len, optval, optlen); break; - case SCTP_GET_LOCAL_ADDRS_NUM: - retval = sctp_getsockopt_local_addrs_num(sk, len, optval, + case SCTP_GET_LOCAL_ADDRS_OLD: + retval = sctp_getsockopt_local_addrs_old(sk, len, optval, optlen); break; case SCTP_GET_PEER_ADDRS: diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c index 25037daf3fa0..cbe2513d2822 100644 --- a/net/sctp/ssnmap.c +++ b/net/sctp/ssnmap.c @@ -58,7 +58,7 @@ static inline size_t sctp_ssnmap_size(__u16 in, __u16 out) * Allocate room to store at least 'len' contiguous TSNs. 
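[Editor's note: the new sctp_getsockopt_peer_addrs()/sctp_getsockopt_local_addrs() above replace the fixed addr_num interface: the caller supplies a buffer, the kernel packs as many sockaddrs as fit after the header, fills in addr_num and returns the copied size through optlen. A user-space sketch, assuming the sctp_getaddrs layout and the SCTP_GET_PEER_ADDRS option name are exported as in lksctp-tools; SCTP_GET_LOCAL_ADDRS works the same way, with assoc_id 0 meaning the locally bound addresses. Not part of the patch.]

        #include <stdio.h>
        #include <string.h>
        #include <sys/socket.h>
        #include <netinet/in.h>
        #include <netinet/sctp.h>

        static int print_peer_addr_count(int fd, sctp_assoc_t id)
        {
                union {
                        struct sctp_getaddrs ga;
                        char buf[4096];
                } u;
                socklen_t len = sizeof(u);

                memset(&u, 0, sizeof(u));
                u.ga.assoc_id = id;
                if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_PEER_ADDRS,
                               &u.ga, &len) < 0)
                        return -1;
                /* The kernel fills in addr_num and sets len to the bytes it
                 * actually copied (header plus packed sockaddrs). */
                printf("%u peer addresses in %u bytes\n",
                       (unsigned)u.ga.addr_num, (unsigned)len);
                return 0;
        }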
*/ struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, - unsigned int __nocast gfp) + gfp_t gfp) { struct sctp_ssnmap *retval; int size; diff --git a/net/sctp/transport.c b/net/sctp/transport.c index d2f04ebe5081..6bc27200e6ca 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -57,7 +57,7 @@ /* Initialize a new transport from provided memory. */ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer, const union sctp_addr *addr, - unsigned int __nocast gfp) + gfp_t gfp) { /* Copy in the address. */ peer->ipaddr = *addr; @@ -122,7 +122,7 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer, /* Allocate and initialize a new transport. */ struct sctp_transport *sctp_transport_new(const union sctp_addr *addr, - unsigned int __nocast gfp) + gfp_t gfp) { struct sctp_transport *transport; diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index 0abd5101107c..057e7fac3af0 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c @@ -74,7 +74,7 @@ SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event, int msg_flags) /* Create a new sctp_ulpevent. */ SCTP_STATIC struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags, - unsigned int __nocast gfp) + gfp_t gfp) { struct sctp_ulpevent *event; struct sk_buff *skb; @@ -136,7 +136,7 @@ static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event) struct sctp_ulpevent *sctp_ulpevent_make_assoc_change( const struct sctp_association *asoc, __u16 flags, __u16 state, __u16 error, __u16 outbound, - __u16 inbound, unsigned int __nocast gfp) + __u16 inbound, gfp_t gfp) { struct sctp_ulpevent *event; struct sctp_assoc_change *sac; @@ -237,7 +237,7 @@ fail: struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change( const struct sctp_association *asoc, const struct sockaddr_storage *aaddr, - int flags, int state, int error, unsigned int __nocast gfp) + int flags, int state, int error, gfp_t gfp) { struct sctp_ulpevent *event; struct sctp_paddr_change *spc; @@ -350,7 +350,7 @@ fail: */ struct sctp_ulpevent *sctp_ulpevent_make_remote_error( const struct sctp_association *asoc, struct sctp_chunk *chunk, - __u16 flags, unsigned int __nocast gfp) + __u16 flags, gfp_t gfp) { struct sctp_ulpevent *event; struct sctp_remote_error *sre; @@ -448,7 +448,7 @@ fail: */ struct sctp_ulpevent *sctp_ulpevent_make_send_failed( const struct sctp_association *asoc, struct sctp_chunk *chunk, - __u16 flags, __u32 error, unsigned int __nocast gfp) + __u16 flags, __u32 error, gfp_t gfp) { struct sctp_ulpevent *event; struct sctp_send_failed *ssf; @@ -557,7 +557,7 @@ fail: */ struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event( const struct sctp_association *asoc, - __u16 flags, unsigned int __nocast gfp) + __u16 flags, gfp_t gfp) { struct sctp_ulpevent *event; struct sctp_shutdown_event *sse; @@ -620,7 +620,7 @@ fail: * 5.3.1.6 SCTP_ADAPTION_INDICATION */ struct sctp_ulpevent *sctp_ulpevent_make_adaption_indication( - const struct sctp_association *asoc, unsigned int __nocast gfp) + const struct sctp_association *asoc, gfp_t gfp) { struct sctp_ulpevent *event; struct sctp_adaption_event *sai; @@ -657,7 +657,7 @@ fail: */ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc, struct sctp_chunk *chunk, - unsigned int __nocast gfp) + gfp_t gfp) { struct sctp_ulpevent *event = NULL; struct sk_buff *skb; @@ -719,7 +719,7 @@ fail: */ struct sctp_ulpevent *sctp_ulpevent_make_pdapi( const struct sctp_association *asoc, __u32 indication, - unsigned int __nocast gfp) 
+ gfp_t gfp) { struct sctp_ulpevent *event; struct sctp_pdapi_event *pd; diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index ec2c857eae7f..2080b2d28c98 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -100,7 +100,7 @@ void sctp_ulpq_free(struct sctp_ulpq *ulpq) /* Process an incoming DATA chunk. */ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, - unsigned int __nocast gfp) + gfp_t gfp) { struct sk_buff_head temp; sctp_data_chunk_t *hdr; @@ -792,7 +792,7 @@ static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed) /* Partial deliver the first message as there is pressure on rwnd. */ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, - unsigned int __nocast gfp) + gfp_t gfp) { struct sctp_ulpevent *event; struct sctp_association *asoc; @@ -816,7 +816,7 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq, /* Renege some packets to make room for an incoming chunk. */ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, - unsigned int __nocast gfp) + gfp_t gfp) { struct sctp_association *asoc; __u16 needed, freed; @@ -855,7 +855,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, /* Notify the application if an association is aborted and in * partial delivery mode. Send up any pending received messages. */ -void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, unsigned int __nocast gfp) +void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp) { struct sctp_ulpevent *ev = NULL; struct sock *sk; diff --git a/net/socket.c b/net/socket.c index f9264472377f..3145103cdf54 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1145,8 +1145,11 @@ static int __sock_create(int family, int type, int protocol, struct socket **res if (!try_module_get(net_families[family]->owner)) goto out_release; - if ((err = net_families[family]->create(sock, protocol)) < 0) + if ((err = net_families[family]->create(sock, protocol)) < 0) { + sock->ops = NULL; goto out_module_put; + } + /* * Now to bump the refcnt of the [loadable] module that owns this * socket at sock_release time we decrement its refcnt. @@ -1360,16 +1363,16 @@ asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr, int _ newsock->type = sock->type; newsock->ops = sock->ops; - err = security_socket_accept(sock, newsock); - if (err) - goto out_release; - /* * We don't need try_module_get here, as the listening socket (sock) * has the protocol module (sock->ops->owner) held. 
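[Editor's note: the net/socket.c hunk above forces the on-stack cmsg scratch buffer onto a __kernel_size_t boundary so that a cmsghdr laid over its start is legally aligned on strict-alignment architectures. The same idiom in isolation, as a runnable user-space snippet; illustration only.]

        #include <stdio.h>
        #include <stdint.h>

        /* Raw byte buffer forced onto a size_t boundary, so a header struct
         * placed at its start is correctly aligned. */
        static unsigned char ctl[64] __attribute__ ((aligned (sizeof(size_t))));

        int main(void)
        {
                printf("ctl %% %zu == %zu\n", sizeof(size_t),
                       (size_t)((uintptr_t)ctl % sizeof(size_t)));
                return 0;
        }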
*/ __module_get(newsock->ops->owner); + err = security_socket_accept(sock, newsock); + if (err) + goto out_release; + err = sock->ops->accept(sock, newsock, sock->file->f_flags); if (err < 0) goto out_release; @@ -1700,7 +1703,9 @@ asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags) struct socket *sock; char address[MAX_SOCK_ADDR]; struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; - unsigned char ctl[sizeof(struct cmsghdr) + 20]; /* 20 is size of ipv6_pktinfo */ + unsigned char ctl[sizeof(struct cmsghdr) + 20] + __attribute__ ((aligned (sizeof(__kernel_size_t)))); + /* 20 is size of ipv6_pktinfo */ unsigned char *ctl_buf = ctl; struct msghdr msg_sys; int err, ctl_len, iov_size, total_len; diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile index 46a2ce00a29b..cdcab9ca4c60 100644 --- a/net/sunrpc/Makefile +++ b/net/sunrpc/Makefile @@ -6,7 +6,7 @@ obj-$(CONFIG_SUNRPC) += sunrpc.o obj-$(CONFIG_SUNRPC_GSS) += auth_gss/ -sunrpc-y := clnt.o xprt.o sched.o \ +sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \ auth.o auth_null.o auth_unix.o \ svc.o svcsock.o svcauth.o svcauth_unix.o \ pmap_clnt.o timer.o xdr.o \ diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c index 505e2d4b3d62..a415d99c394d 100644 --- a/net/sunrpc/auth.c +++ b/net/sunrpc/auth.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile index fe1b874084bc..f3431a7e33da 100644 --- a/net/sunrpc/auth_gss/Makefile +++ b/net/sunrpc/auth_gss/Makefile @@ -10,7 +10,7 @@ auth_rpcgss-objs := auth_gss.o gss_generic_token.o \ obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \ - gss_krb5_seqnum.o + gss_krb5_seqnum.o gss_krb5_wrap.o obj-$(CONFIG_RPCSEC_GSS_SPKM3) += rpcsec_gss_spkm3.o diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 2f7b867161d2..f44f46f1d8e0 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -42,9 +42,8 @@ #include #include #include -#include -#include #include +#include #include #include #include @@ -846,10 +845,8 @@ gss_marshal(struct rpc_task *task, u32 *p) /* We compute the checksum for the verifier over the xdr-encoded bytes * starting with the xid and ending at the end of the credential: */ - iov.iov_base = req->rq_snd_buf.head[0].iov_base; - if (task->tk_client->cl_xprt->stream) - /* See clnt.c:call_header() */ - iov.iov_base += 4; + iov.iov_base = xprt_skip_transport_header(task->tk_xprt, + req->rq_snd_buf.head[0].iov_base); iov.iov_len = (u8 *)p - (u8 *)iov.iov_base; xdr_buf_from_iov(&iov, &verf_buf); @@ -857,9 +854,7 @@ gss_marshal(struct rpc_task *task, u32 *p) *p++ = htonl(RPC_AUTH_GSS); mic.data = (u8 *)(p + 1); - maj_stat = gss_get_mic(ctx->gc_gss_ctx, - GSS_C_QOP_DEFAULT, - &verf_buf, &mic); + maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic); if (maj_stat == GSS_S_CONTEXT_EXPIRED) { cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; } else if (maj_stat != 0) { @@ -890,10 +885,8 @@ static u32 * gss_validate(struct rpc_task *task, u32 *p) { struct rpc_cred *cred = task->tk_msg.rpc_cred; - struct gss_cred *gss_cred = container_of(cred, struct gss_cred, - gc_base); struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); - u32 seq, qop_state; + u32 seq; struct kvec iov; struct xdr_buf verf_buf; struct xdr_netobj mic; @@ -914,23 +907,14 @@ gss_validate(struct rpc_task *task, u32 *p) mic.data = (u8 *)p; mic.len = len; - maj_stat = 
gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic, &qop_state); + maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic); if (maj_stat == GSS_S_CONTEXT_EXPIRED) cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; if (maj_stat) goto out_bad; - switch (gss_cred->gc_service) { - case RPC_GSS_SVC_NONE: - /* verifier data, flavor, length: */ - task->tk_auth->au_rslack = XDR_QUADLEN(len) + 2; - break; - case RPC_GSS_SVC_INTEGRITY: - /* verifier data, flavor, length, length, sequence number: */ - task->tk_auth->au_rslack = XDR_QUADLEN(len) + 4; - break; - case RPC_GSS_SVC_PRIVACY: - goto out_bad; - } + /* We leave it to unwrap to calculate au_rslack. For now we just + * calculate the length of the verifier: */ + task->tk_auth->au_verfsize = XDR_QUADLEN(len) + 2; gss_put_ctx(ctx); dprintk("RPC: %4u GSS gss_validate: gss_verify_mic succeeded.\n", task->tk_pid); @@ -975,8 +959,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, p = iov->iov_base + iov->iov_len; mic.data = (u8 *)(p + 1); - maj_stat = gss_get_mic(ctx->gc_gss_ctx, - GSS_C_QOP_DEFAULT, &integ_buf, &mic); + maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic); status = -EIO; /* XXX? */ if (maj_stat == GSS_S_CONTEXT_EXPIRED) cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; @@ -990,6 +973,113 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, return 0; } +static void +priv_release_snd_buf(struct rpc_rqst *rqstp) +{ + int i; + + for (i=0; i < rqstp->rq_enc_pages_num; i++) + __free_page(rqstp->rq_enc_pages[i]); + kfree(rqstp->rq_enc_pages); +} + +static int +alloc_enc_pages(struct rpc_rqst *rqstp) +{ + struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; + int first, last, i; + + if (snd_buf->page_len == 0) { + rqstp->rq_enc_pages_num = 0; + return 0; + } + + first = snd_buf->page_base >> PAGE_CACHE_SHIFT; + last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT; + rqstp->rq_enc_pages_num = last - first + 1 + 1; + rqstp->rq_enc_pages + = kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *), + GFP_NOFS); + if (!rqstp->rq_enc_pages) + goto out; + for (i=0; i < rqstp->rq_enc_pages_num; i++) { + rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS); + if (rqstp->rq_enc_pages[i] == NULL) + goto out_free; + } + rqstp->rq_release_snd_buf = priv_release_snd_buf; + return 0; +out_free: + for (i--; i >= 0; i--) { + __free_page(rqstp->rq_enc_pages[i]); + } +out: + return -EAGAIN; +} + +static inline int +gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, + kxdrproc_t encode, struct rpc_rqst *rqstp, u32 *p, void *obj) +{ + struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; + u32 offset; + u32 maj_stat; + int status; + u32 *opaque_len; + struct page **inpages; + int first; + int pad; + struct kvec *iov; + char *tmp; + + opaque_len = p++; + offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; + *p++ = htonl(rqstp->rq_seqno); + + status = encode(rqstp, p, obj); + if (status) + return status; + + status = alloc_enc_pages(rqstp); + if (status) + return status; + first = snd_buf->page_base >> PAGE_CACHE_SHIFT; + inpages = snd_buf->pages + first; + snd_buf->pages = rqstp->rq_enc_pages; + snd_buf->page_base -= first << PAGE_CACHE_SHIFT; + /* Give the tail its own page, in case we need extra space in the + * head when wrapping: */ + if (snd_buf->page_len || snd_buf->tail[0].iov_len) { + tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]); + memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len); + snd_buf->tail[0].iov_base = tmp; + } + maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, 
inpages); + /* RPC_SLACK_SPACE should prevent this ever happening: */ + BUG_ON(snd_buf->len > snd_buf->buflen); + status = -EIO; + /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was + * done anyway, so it's safe to put the request on the wire: */ + if (maj_stat == GSS_S_CONTEXT_EXPIRED) + cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; + else if (maj_stat) + return status; + + *opaque_len = htonl(snd_buf->len - offset); + /* guess whether we're in the head or the tail: */ + if (snd_buf->page_len || snd_buf->tail[0].iov_len) + iov = snd_buf->tail; + else + iov = snd_buf->head; + p = iov->iov_base + iov->iov_len; + pad = 3 - ((snd_buf->len - offset - 1) & 3); + memset(p, 0, pad); + iov->iov_len += pad; + snd_buf->len += pad; + + return 0; +} + static int gss_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, u32 *p, void *obj) @@ -1017,6 +1107,8 @@ gss_wrap_req(struct rpc_task *task, rqstp, p, obj); break; case RPC_GSS_SVC_PRIVACY: + status = gss_wrap_req_priv(cred, ctx, encode, + rqstp, p, obj); break; } out: @@ -1054,8 +1146,7 @@ gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset)) return status; - maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, - &mic, NULL); + maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic); if (maj_stat == GSS_S_CONTEXT_EXPIRED) cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; if (maj_stat != GSS_S_COMPLETE) @@ -1063,6 +1154,35 @@ gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, return 0; } +static inline int +gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, + struct rpc_rqst *rqstp, u32 **p) +{ + struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf; + u32 offset; + u32 opaque_len; + u32 maj_stat; + int status = -EIO; + + opaque_len = ntohl(*(*p)++); + offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base; + if (offset + opaque_len > rcv_buf->len) + return status; + /* remove padding: */ + rcv_buf->len = offset + opaque_len; + + maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf); + if (maj_stat == GSS_S_CONTEXT_EXPIRED) + cred->cr_flags &= ~RPCAUTH_CRED_UPTODATE; + if (maj_stat != GSS_S_COMPLETE) + return status; + if (ntohl(*(*p)++) != rqstp->rq_seqno) + return status; + + return 0; +} + + static int gss_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, u32 *p, void *obj) @@ -1071,6 +1191,9 @@ gss_unwrap_resp(struct rpc_task *task, struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred); + u32 *savedp = p; + struct kvec *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head; + int savedlen = head->iov_len; int status = -EIO; if (ctx->gc_proc != RPC_GSS_PROC_DATA) @@ -1084,8 +1207,14 @@ gss_unwrap_resp(struct rpc_task *task, goto out; break; case RPC_GSS_SVC_PRIVACY: + status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p); + if (status) + goto out; break; } + /* take into account extra slack for integrity and privacy cases: */ + task->tk_auth->au_rslack = task->tk_auth->au_verfsize + (p - savedp) + + (savedlen - head->iov_len); out_decode: status = decode(rqstp, p, obj); out: diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index ee6ae74cd1b2..3f3d5437f02d 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -139,17 +139,91 @@ buf_to_sg(struct scatterlist *sg, char *ptr, int len) { sg->length = len; } +static int +process_xdr_buf(struct xdr_buf *buf, int offset, int 
len, + int (*actor)(struct scatterlist *, void *), void *data) +{ + int i, page_len, thislen, page_offset, ret = 0; + struct scatterlist sg[1]; + + if (offset >= buf->head[0].iov_len) { + offset -= buf->head[0].iov_len; + } else { + thislen = buf->head[0].iov_len - offset; + if (thislen > len) + thislen = len; + buf_to_sg(sg, buf->head[0].iov_base + offset, thislen); + ret = actor(sg, data); + if (ret) + goto out; + offset = 0; + len -= thislen; + } + if (len == 0) + goto out; + + if (offset >= buf->page_len) { + offset -= buf->page_len; + } else { + page_len = buf->page_len - offset; + if (page_len > len) + page_len = len; + len -= page_len; + page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1); + i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT; + thislen = PAGE_CACHE_SIZE - page_offset; + do { + if (thislen > page_len) + thislen = page_len; + sg->page = buf->pages[i]; + sg->offset = page_offset; + sg->length = thislen; + ret = actor(sg, data); + if (ret) + goto out; + page_len -= thislen; + i++; + page_offset = 0; + thislen = PAGE_CACHE_SIZE; + } while (page_len != 0); + offset = 0; + } + if (len == 0) + goto out; + + if (offset < buf->tail[0].iov_len) { + thislen = buf->tail[0].iov_len - offset; + if (thislen > len) + thislen = len; + buf_to_sg(sg, buf->tail[0].iov_base + offset, thislen); + ret = actor(sg, data); + len -= thislen; + } + if (len != 0) + ret = -EINVAL; +out: + return ret; +} + +static int +checksummer(struct scatterlist *sg, void *data) +{ + struct crypto_tfm *tfm = (struct crypto_tfm *)data; + + crypto_digest_update(tfm, sg, 1); + + return 0; +} + /* checksum the plaintext data and hdrlen bytes of the token header */ s32 make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body, - struct xdr_netobj *cksum) + int body_offset, struct xdr_netobj *cksum) { char *cksumname; struct crypto_tfm *tfm = NULL; /* XXX add to ctx? 
*/ struct scatterlist sg[1]; u32 code = GSS_S_FAILURE; - int len, thislen, offset; - int i; switch (cksumtype) { case CKSUMTYPE_RSA_MD5: @@ -169,33 +243,8 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body, crypto_digest_init(tfm); buf_to_sg(sg, header, hdrlen); crypto_digest_update(tfm, sg, 1); - if (body->head[0].iov_len) { - buf_to_sg(sg, body->head[0].iov_base, body->head[0].iov_len); - crypto_digest_update(tfm, sg, 1); - } - - len = body->page_len; - if (len != 0) { - offset = body->page_base & (PAGE_CACHE_SIZE - 1); - i = body->page_base >> PAGE_CACHE_SHIFT; - thislen = PAGE_CACHE_SIZE - offset; - do { - if (thislen > len) - thislen = len; - sg->page = body->pages[i]; - sg->offset = offset; - sg->length = thislen; - crypto_digest_update(tfm, sg, 1); - len -= thislen; - i++; - offset = 0; - thislen = PAGE_CACHE_SIZE; - } while(len != 0); - } - if (body->tail[0].iov_len) { - buf_to_sg(sg, body->tail[0].iov_base, body->tail[0].iov_len); - crypto_digest_update(tfm, sg, 1); - } + process_xdr_buf(body, body_offset, body->len - body_offset, + checksummer, tfm); crypto_digest_final(tfm, cksum->data); code = 0; out: @@ -204,3 +253,154 @@ out: } EXPORT_SYMBOL(make_checksum); + +struct encryptor_desc { + u8 iv[8]; /* XXX hard-coded blocksize */ + struct crypto_tfm *tfm; + int pos; + struct xdr_buf *outbuf; + struct page **pages; + struct scatterlist infrags[4]; + struct scatterlist outfrags[4]; + int fragno; + int fraglen; +}; + +static int +encryptor(struct scatterlist *sg, void *data) +{ + struct encryptor_desc *desc = data; + struct xdr_buf *outbuf = desc->outbuf; + struct page *in_page; + int thislen = desc->fraglen + sg->length; + int fraglen, ret; + int page_pos; + + /* Worst case is 4 fragments: head, end of page 1, start + * of page 2, tail. Anything more is a bug. 
*/ + BUG_ON(desc->fragno > 3); + desc->infrags[desc->fragno] = *sg; + desc->outfrags[desc->fragno] = *sg; + + page_pos = desc->pos - outbuf->head[0].iov_len; + if (page_pos >= 0 && page_pos < outbuf->page_len) { + /* pages are not in place: */ + int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT; + in_page = desc->pages[i]; + } else { + in_page = sg->page; + } + desc->infrags[desc->fragno].page = in_page; + desc->fragno++; + desc->fraglen += sg->length; + desc->pos += sg->length; + + fraglen = thislen & 7; /* XXX hardcoded blocksize */ + thislen -= fraglen; + + if (thislen == 0) + return 0; + + ret = crypto_cipher_encrypt_iv(desc->tfm, desc->outfrags, desc->infrags, + thislen, desc->iv); + if (ret) + return ret; + if (fraglen) { + desc->outfrags[0].page = sg->page; + desc->outfrags[0].offset = sg->offset + sg->length - fraglen; + desc->outfrags[0].length = fraglen; + desc->infrags[0] = desc->outfrags[0]; + desc->infrags[0].page = in_page; + desc->fragno = 1; + desc->fraglen = fraglen; + } else { + desc->fragno = 0; + desc->fraglen = 0; + } + return 0; +} + +int +gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset, + struct page **pages) +{ + int ret; + struct encryptor_desc desc; + + BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0); + + memset(desc.iv, 0, sizeof(desc.iv)); + desc.tfm = tfm; + desc.pos = offset; + desc.outbuf = buf; + desc.pages = pages; + desc.fragno = 0; + desc.fraglen = 0; + + ret = process_xdr_buf(buf, offset, buf->len - offset, encryptor, &desc); + return ret; +} + +EXPORT_SYMBOL(gss_encrypt_xdr_buf); + +struct decryptor_desc { + u8 iv[8]; /* XXX hard-coded blocksize */ + struct crypto_tfm *tfm; + struct scatterlist frags[4]; + int fragno; + int fraglen; +}; + +static int +decryptor(struct scatterlist *sg, void *data) +{ + struct decryptor_desc *desc = data; + int thislen = desc->fraglen + sg->length; + int fraglen, ret; + + /* Worst case is 4 fragments: head, end of page 1, start + * of page 2, tail. Anything more is a bug. 
*/ + BUG_ON(desc->fragno > 3); + desc->frags[desc->fragno] = *sg; + desc->fragno++; + desc->fraglen += sg->length; + + fraglen = thislen & 7; /* XXX hardcoded blocksize */ + thislen -= fraglen; + + if (thislen == 0) + return 0; + + ret = crypto_cipher_decrypt_iv(desc->tfm, desc->frags, desc->frags, + thislen, desc->iv); + if (ret) + return ret; + if (fraglen) { + desc->frags[0].page = sg->page; + desc->frags[0].offset = sg->offset + sg->length - fraglen; + desc->frags[0].length = fraglen; + desc->fragno = 1; + desc->fraglen = fraglen; + } else { + desc->fragno = 0; + desc->fraglen = 0; + } + return 0; +} + +int +gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset) +{ + struct decryptor_desc desc; + + /* XXXJBF: */ + BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0); + + memset(desc.iv, 0, sizeof(desc.iv)); + desc.tfm = tfm; + desc.fragno = 0; + desc.fraglen = 0; + return process_xdr_buf(buf, offset, buf->len - offset, decryptor, &desc); +} + +EXPORT_SYMBOL(gss_decrypt_xdr_buf); diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 606a8a82cafb..5f1f806a0b11 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -39,7 +39,6 @@ #include #include #include -#include #include #include #include @@ -191,43 +190,12 @@ gss_delete_sec_context_kerberos(void *internal_ctx) { kfree(kctx); } -static u32 -gss_verify_mic_kerberos(struct gss_ctx *ctx, - struct xdr_buf *message, - struct xdr_netobj *mic_token, - u32 *qstate) { - u32 maj_stat = 0; - int qop_state; - struct krb5_ctx *kctx = ctx->internal_ctx_id; - - maj_stat = krb5_read_token(kctx, mic_token, message, &qop_state, - KG_TOK_MIC_MSG); - if (!maj_stat && qop_state) - *qstate = qop_state; - - dprintk("RPC: gss_verify_mic_kerberos returning %d\n", maj_stat); - return maj_stat; -} - -static u32 -gss_get_mic_kerberos(struct gss_ctx *ctx, - u32 qop, - struct xdr_buf *message, - struct xdr_netobj *mic_token) { - u32 err = 0; - struct krb5_ctx *kctx = ctx->internal_ctx_id; - - err = krb5_make_token(kctx, qop, message, mic_token, KG_TOK_MIC_MSG); - - dprintk("RPC: gss_get_mic_kerberos returning %d\n",err); - - return err; -} - static struct gss_api_ops gss_kerberos_ops = { .gss_import_sec_context = gss_import_sec_context_kerberos, .gss_get_mic = gss_get_mic_kerberos, .gss_verify_mic = gss_verify_mic_kerberos, + .gss_wrap = gss_wrap_kerberos, + .gss_unwrap = gss_unwrap_kerberos, .gss_delete_sec_context = gss_delete_sec_context_kerberos, }; @@ -242,6 +210,11 @@ static struct pf_desc gss_kerberos_pfs[] = { .service = RPC_GSS_SVC_INTEGRITY, .name = "krb5i", }, + [2] = { + .pseudoflavor = RPC_AUTH_GSS_KRB5P, + .service = RPC_GSS_SVC_PRIVACY, + .name = "krb5p", + }, }; static struct gss_api_mech gss_kerberos_mech = { diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index afeeb8715a77..13f8ae979454 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c @@ -70,22 +70,13 @@ # define RPCDBG_FACILITY RPCDBG_AUTH #endif -static inline int -gss_krb5_padding(int blocksize, int length) { - /* Most of the code is block-size independent but in practice we - * use only 8: */ - BUG_ON(blocksize != 8); - return 8 - (length & 7); -} - u32 -krb5_make_token(struct krb5_ctx *ctx, int qop_req, - struct xdr_buf *text, struct xdr_netobj *token, - int toktype) +gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, + struct xdr_netobj *token) { + struct krb5_ctx *ctx = 
gss_ctx->internal_ctx_id; s32 checksum_type; struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; - int blocksize = 0, tmsglen; unsigned char *ptr, *krb5_hdr, *msg_start; s32 now; @@ -93,9 +84,6 @@ krb5_make_token(struct krb5_ctx *ctx, int qop_req, now = get_seconds(); - if (qop_req != 0) - goto out_err; - switch (ctx->signalg) { case SGN_ALG_DES_MAC_MD5: checksum_type = CKSUMTYPE_RSA_MD5; @@ -111,21 +99,13 @@ krb5_make_token(struct krb5_ctx *ctx, int qop_req, goto out_err; } - if (toktype == KG_TOK_WRAP_MSG) { - blocksize = crypto_tfm_alg_blocksize(ctx->enc); - tmsglen = blocksize + text->len - + gss_krb5_padding(blocksize, blocksize + text->len); - } else { - tmsglen = 0; - } - - token->len = g_token_size(&ctx->mech_used, 22 + tmsglen); + token->len = g_token_size(&ctx->mech_used, 22); ptr = token->data; - g_make_token_header(&ctx->mech_used, 22 + tmsglen, &ptr); + g_make_token_header(&ctx->mech_used, 22, &ptr); - *ptr++ = (unsigned char) ((toktype>>8)&0xff); - *ptr++ = (unsigned char) (toktype&0xff); + *ptr++ = (unsigned char) ((KG_TOK_MIC_MSG>>8)&0xff); + *ptr++ = (unsigned char) (KG_TOK_MIC_MSG&0xff); /* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */ krb5_hdr = ptr - 2; @@ -133,17 +113,9 @@ krb5_make_token(struct krb5_ctx *ctx, int qop_req, *(u16 *)(krb5_hdr + 2) = htons(ctx->signalg); memset(krb5_hdr + 4, 0xff, 4); - if (toktype == KG_TOK_WRAP_MSG) - *(u16 *)(krb5_hdr + 4) = htons(ctx->sealalg); - if (toktype == KG_TOK_WRAP_MSG) { - /* XXX removing support for now */ - goto out_err; - } else { /* Sign only. */ - if (make_checksum(checksum_type, krb5_hdr, 8, text, - &md5cksum)) + if (make_checksum(checksum_type, krb5_hdr, 8, text, 0, &md5cksum)) goto out_err; - } switch (ctx->signalg) { case SGN_ALG_DES_MAC_MD5: diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c index 8767fc53183d..2030475d98ed 100644 --- a/net/sunrpc/auth_gss/gss_krb5_unseal.c +++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c @@ -68,21 +68,14 @@ #endif -/* message_buffer is an input if toktype is MIC and an output if it is WRAP: - * If toktype is MIC: read_token is a mic token, and message_buffer is the - * data that the mic was supposedly taken over. - * If toktype is WRAP: read_token is a wrap token, and message_buffer is used - * to return the decrypted data. - */ +/* read_token is a mic token, and message_buffer is the data that the mic was + * supposedly taken over. */ -/* XXX will need to change prototype and/or just split into a separate function - * when we add privacy (because read_token will be in pages too). */ u32 -krb5_read_token(struct krb5_ctx *ctx, - struct xdr_netobj *read_token, - struct xdr_buf *message_buffer, - int *qop_state, int toktype) +gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, + struct xdr_buf *message_buffer, struct xdr_netobj *read_token) { + struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; int signalg; int sealalg; s32 checksum_type; @@ -100,16 +93,12 @@ krb5_read_token(struct krb5_ctx *ctx, read_token->len)) goto out; - if ((*ptr++ != ((toktype>>8)&0xff)) || (*ptr++ != (toktype&0xff))) + if ((*ptr++ != ((KG_TOK_MIC_MSG>>8)&0xff)) || + (*ptr++ != ( KG_TOK_MIC_MSG &0xff)) ) goto out; /* XXX sanity-check bodysize?? 
*/ - if (toktype == KG_TOK_WRAP_MSG) { - /* XXX gone */ - goto out; - } - /* get the sign and seal algorithms */ signalg = ptr[0] + (ptr[1] << 8); @@ -120,14 +109,7 @@ krb5_read_token(struct krb5_ctx *ctx, if ((ptr[4] != 0xff) || (ptr[5] != 0xff)) goto out; - if (((toktype != KG_TOK_WRAP_MSG) && (sealalg != 0xffff)) || - ((toktype == KG_TOK_WRAP_MSG) && (sealalg == 0xffff))) - goto out; - - /* in the current spec, there is only one valid seal algorithm per - key type, so a simple comparison is ok */ - - if ((toktype == KG_TOK_WRAP_MSG) && !(sealalg == ctx->sealalg)) + if (sealalg != 0xffff) goto out; /* there are several mappings of seal algorithms to sign algorithms, @@ -154,7 +136,7 @@ krb5_read_token(struct krb5_ctx *ctx, switch (signalg) { case SGN_ALG_DES_MAC_MD5: ret = make_checksum(checksum_type, ptr - 2, 8, - message_buffer, &md5cksum); + message_buffer, 0, &md5cksum); if (ret) goto out; @@ -175,9 +157,6 @@ krb5_read_token(struct krb5_ctx *ctx, /* it got through unscathed. Make sure the context is unexpired */ - if (qop_state) - *qop_state = GSS_C_QOP_DEFAULT; - now = get_seconds(); ret = GSS_S_CONTEXT_EXPIRED; diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c new file mode 100644 index 000000000000..af777cf9f251 --- /dev/null +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -0,0 +1,363 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef RPC_DEBUG +# define RPCDBG_FACILITY RPCDBG_AUTH +#endif + +static inline int +gss_krb5_padding(int blocksize, int length) +{ + /* Most of the code is block-size independent but currently we + * use only 8: */ + BUG_ON(blocksize != 8); + return 8 - (length & 7); +} + +static inline void +gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize) +{ + int padding = gss_krb5_padding(blocksize, buf->len - offset); + char *p; + struct kvec *iov; + + if (buf->page_len || buf->tail[0].iov_len) + iov = &buf->tail[0]; + else + iov = &buf->head[0]; + p = iov->iov_base + iov->iov_len; + iov->iov_len += padding; + buf->len += padding; + memset(p, padding, padding); +} + +static inline int +gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize) +{ + u8 *ptr; + u8 pad; + int len = buf->len; + + if (len <= buf->head[0].iov_len) { + pad = *(u8 *)(buf->head[0].iov_base + len - 1); + if (pad > buf->head[0].iov_len) + return -EINVAL; + buf->head[0].iov_len -= pad; + goto out; + } else + len -= buf->head[0].iov_len; + if (len <= buf->page_len) { + int last = (buf->page_base + len - 1) + >>PAGE_CACHE_SHIFT; + int offset = (buf->page_base + len - 1) + & (PAGE_CACHE_SIZE - 1); + ptr = kmap_atomic(buf->pages[last], KM_SKB_SUNRPC_DATA); + pad = *(ptr + offset); + kunmap_atomic(ptr, KM_SKB_SUNRPC_DATA); + goto out; + } else + len -= buf->page_len; + BUG_ON(len > buf->tail[0].iov_len); + pad = *(u8 *)(buf->tail[0].iov_base + len - 1); +out: + /* XXX: NOTE: we do not adjust the page lengths--they represent + * a range of data in the real filesystem page cache, and we need + * to know that range so the xdr code can properly place read data. + * However adjusting the head length, as we do above, is harmless. + * In the case of a request that fits into a single page, the server + * also uses length and head length together to determine the original + * start of the request to copy the request for deferal; so it's + * easier on the server if we adjust head and tail length in tandem. 
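The padding rule these helpers implement is easier to see in isolation. The sketch below is a self-contained userspace illustration (not part of the patch), assuming the 8-byte DES block size that the BUG_ON above enforces: a message always gains between 1 and 8 pad bytes, and each pad byte stores the pad count, which is exactly what gss_krb5_remove_padding() reads back out of the last byte.

#include <assert.h>
#include <string.h>

/* 8 - (length & 7): number of pad bytes a 'length'-byte message needs */
static int krb5_pad_len(int blocksize, int length)
{
	return blocksize - (length & (blocksize - 1));
}

/* append 'pad' bytes, each holding the value 'pad' */
static int krb5_add_pad(unsigned char *buf, int length, int blocksize)
{
	int pad = krb5_pad_len(blocksize, length);

	memset(buf + length, pad, pad);
	return length + pad;
}

/* the last byte tells the receiver how much to strip */
static int krb5_strip_pad(const unsigned char *buf, int length, int blocksize)
{
	int pad = buf[length - 1];

	if (pad < 1 || pad > blocksize || pad > length)
		return -1;	/* malformed padding */
	return length - pad;
}

int main(void)
{
	unsigned char buf[32] = "thirteen byte";	/* 13 payload bytes */
	int padded = krb5_add_pad(buf, 13, 8);

	assert(padded == 16 && buf[15] == 3);	/* three 0x03 pad bytes appended */
	assert(krb5_strip_pad(buf, padded, 8) == 13);
	return 0;
}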
+ * It's not really a problem that we don't fool with the page and + * tail lengths, though--at worst badly formed xdr might lead the + * server to attempt to parse the padding. + * XXX: Document all these weird requirements for gss mechanism + * wrap/unwrap functions. */ + if (pad > blocksize) + return -EINVAL; + if (buf->len > pad) + buf->len -= pad; + else + return -EINVAL; + return 0; +} + +static inline void +make_confounder(char *p, int blocksize) +{ + static u64 i = 0; + u64 *q = (u64 *)p; + + /* rfc1964 claims this should be "random". But all that's really + * necessary is that it be unique. And not even that is necessary in + * our case since our "gssapi" implementation exists only to support + * rpcsec_gss, so we know that the only buffers we will ever encrypt + * already begin with a unique sequence number. Just to hedge my bets + * I'll make a half-hearted attempt at something unique, but ensuring + * uniqueness would mean worrying about atomicity and rollover, and I + * don't care enough. */ + + BUG_ON(blocksize != 8); + *q = i++; +} + +/* Assumptions: the head and tail of inbuf are ours to play with. + * The pages, however, may be real pages in the page cache and we replace + * them with scratch pages from **pages before writing to them. */ +/* XXX: obviously the above should be documentation of wrap interface, + * and shouldn't be in this kerberos-specific file. */ + +/* XXX factor out common code with seal/unseal. */ + +u32 +gss_wrap_kerberos(struct gss_ctx *ctx, int offset, + struct xdr_buf *buf, struct page **pages) +{ + struct krb5_ctx *kctx = ctx->internal_ctx_id; + s32 checksum_type; + struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; + int blocksize = 0, plainlen; + unsigned char *ptr, *krb5_hdr, *msg_start; + s32 now; + int headlen; + struct page **tmp_pages; + + dprintk("RPC: gss_wrap_kerberos\n"); + + now = get_seconds(); + + switch (kctx->signalg) { + case SGN_ALG_DES_MAC_MD5: + checksum_type = CKSUMTYPE_RSA_MD5; + break; + default: + dprintk("RPC: gss_krb5_seal: kctx->signalg %d not" + " supported\n", kctx->signalg); + goto out_err; + } + if (kctx->sealalg != SEAL_ALG_NONE && kctx->sealalg != SEAL_ALG_DES) { + dprintk("RPC: gss_krb5_seal: kctx->sealalg %d not supported\n", + kctx->sealalg); + goto out_err; + } + + blocksize = crypto_tfm_alg_blocksize(kctx->enc); + gss_krb5_add_padding(buf, offset, blocksize); + BUG_ON((buf->len - offset) % blocksize); + plainlen = blocksize + buf->len - offset; + + headlen = g_token_size(&kctx->mech_used, 22 + plainlen) - + (buf->len - offset); + + ptr = buf->head[0].iov_base + offset; + /* shift data to make room for header. */ + /* XXX Would be cleverer to encrypt while copying. */ + /* XXX bounds checking, slack, etc. 
*/ + memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset); + buf->head[0].iov_len += headlen; + buf->len += headlen; + BUG_ON((buf->len - offset - headlen) % blocksize); + + g_make_token_header(&kctx->mech_used, 22 + plainlen, &ptr); + + + *ptr++ = (unsigned char) ((KG_TOK_WRAP_MSG>>8)&0xff); + *ptr++ = (unsigned char) (KG_TOK_WRAP_MSG&0xff); + + /* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */ + krb5_hdr = ptr - 2; + msg_start = krb5_hdr + 24; + /* XXXJBF: */ BUG_ON(buf->head[0].iov_base + offset + headlen != msg_start + blocksize); + + *(u16 *)(krb5_hdr + 2) = htons(kctx->signalg); + memset(krb5_hdr + 4, 0xff, 4); + *(u16 *)(krb5_hdr + 4) = htons(kctx->sealalg); + + make_confounder(msg_start, blocksize); + + /* XXXJBF: UGH!: */ + tmp_pages = buf->pages; + buf->pages = pages; + if (make_checksum(checksum_type, krb5_hdr, 8, buf, + offset + headlen - blocksize, &md5cksum)) + goto out_err; + buf->pages = tmp_pages; + + switch (kctx->signalg) { + case SGN_ALG_DES_MAC_MD5: + if (krb5_encrypt(kctx->seq, NULL, md5cksum.data, + md5cksum.data, md5cksum.len)) + goto out_err; + memcpy(krb5_hdr + 16, + md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH, + KRB5_CKSUM_LENGTH); + + dprintk("RPC: make_seal_token: cksum data: \n"); + print_hexl((u32 *) (krb5_hdr + 16), KRB5_CKSUM_LENGTH, 0); + break; + default: + BUG(); + } + + kfree(md5cksum.data); + + /* XXX would probably be more efficient to compute checksum + * and encrypt at the same time: */ + if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff, + kctx->seq_send, krb5_hdr + 16, krb5_hdr + 8))) + goto out_err; + + if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize, + pages)) + goto out_err; + + kctx->seq_send++; + + return ((kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE); +out_err: + if (md5cksum.data) kfree(md5cksum.data); + return GSS_S_FAILURE; +} + +u32 +gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf) +{ + struct krb5_ctx *kctx = ctx->internal_ctx_id; + int signalg; + int sealalg; + s32 checksum_type; + struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; + s32 now; + int direction; + s32 seqnum; + unsigned char *ptr; + int bodysize; + u32 ret = GSS_S_DEFECTIVE_TOKEN; + void *data_start, *orig_start; + int data_len; + int blocksize; + + dprintk("RPC: gss_unwrap_kerberos\n"); + + ptr = (u8 *)buf->head[0].iov_base + offset; + if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr, + buf->len - offset)) + goto out; + + if ((*ptr++ != ((KG_TOK_WRAP_MSG>>8)&0xff)) || + (*ptr++ != (KG_TOK_WRAP_MSG &0xff)) ) + goto out; + + /* XXX sanity-check bodysize?? */ + + /* get the sign and seal algorithms */ + + signalg = ptr[0] + (ptr[1] << 8); + sealalg = ptr[2] + (ptr[3] << 8); + + /* Sanity checks */ + + if ((ptr[4] != 0xff) || (ptr[5] != 0xff)) + goto out; + + if (sealalg == 0xffff) + goto out; + + /* in the current spec, there is only one valid seal algorithm per + key type, so a simple comparison is ok */ + + if (sealalg != kctx->sealalg) + goto out; + + /* there are several mappings of seal algorithms to sign algorithms, + but few enough that we can try them all. 
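Both sides above address the token header purely through pointer arithmetic: krb5_hdr + 2, + 8 and + 16 when building it, and ptr + 6, + 14 and + 22 when parsing it (ptr sits two bytes into the header by then). For reference, the byte offsets the two routines agree on are listed below; this is a reading aid reconstructed from the code, not a structure the patch itself defines.

/*
 * Byte offsets relative to krb5_hdr, as used by gss_wrap_kerberos()
 * and gss_unwrap_kerberos() above (reference sketch only):
 */
enum {
	KRB5_HDR_TOK_ID		= 0,	/* 2 bytes: KG_TOK_WRAP_MSG or KG_TOK_MIC_MSG */
	KRB5_HDR_SGN_ALG	= 2,	/* 2 bytes: e.g. SGN_ALG_DES_MAC_MD5 */
	KRB5_HDR_SEAL_ALG	= 4,	/* 2 bytes: kctx->sealalg; 0xffff in MIC tokens */
	KRB5_HDR_FILLER		= 6,	/* 2 bytes: always 0xff 0xff */
	KRB5_HDR_SND_SEQ	= 8,	/* 8 bytes: encrypted sequence number */
	KRB5_HDR_CKSUM		= 16,	/* 8 bytes: encrypted DES-MAC-MD5 checksum */
	KRB5_HDR_LEN		= 24,	/* confounder, payload and padding follow */
};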
*/ + + if ((kctx->sealalg == SEAL_ALG_NONE && signalg > 1) || + (kctx->sealalg == SEAL_ALG_1 && signalg != SGN_ALG_3) || + (kctx->sealalg == SEAL_ALG_DES3KD && + signalg != SGN_ALG_HMAC_SHA1_DES3_KD)) + goto out; + + if (gss_decrypt_xdr_buf(kctx->enc, buf, + ptr + 22 - (unsigned char *)buf->head[0].iov_base)) + goto out; + + /* compute the checksum of the message */ + + /* initialize the the cksum */ + switch (signalg) { + case SGN_ALG_DES_MAC_MD5: + checksum_type = CKSUMTYPE_RSA_MD5; + break; + default: + ret = GSS_S_DEFECTIVE_TOKEN; + goto out; + } + + switch (signalg) { + case SGN_ALG_DES_MAC_MD5: + ret = make_checksum(checksum_type, ptr - 2, 8, buf, + ptr + 22 - (unsigned char *)buf->head[0].iov_base, &md5cksum); + if (ret) + goto out; + + ret = krb5_encrypt(kctx->seq, NULL, md5cksum.data, + md5cksum.data, md5cksum.len); + if (ret) + goto out; + + if (memcmp(md5cksum.data + 8, ptr + 14, 8)) { + ret = GSS_S_BAD_SIG; + goto out; + } + break; + default: + ret = GSS_S_DEFECTIVE_TOKEN; + goto out; + } + + /* it got through unscathed. Make sure the context is unexpired */ + + now = get_seconds(); + + ret = GSS_S_CONTEXT_EXPIRED; + if (now > kctx->endtime) + goto out; + + /* do sequencing checks */ + + ret = GSS_S_BAD_SIG; + if ((ret = krb5_get_seq_num(kctx->seq, ptr + 14, ptr + 6, &direction, + &seqnum))) + goto out; + + if ((kctx->initiate && direction != 0xff) || + (!kctx->initiate && direction != 0)) + goto out; + + /* Copy the data back to the right position. XXX: Would probably be + * better to copy and encrypt at the same time. */ + + blocksize = crypto_tfm_alg_blocksize(kctx->enc); + data_start = ptr + 22 + blocksize; + orig_start = buf->head[0].iov_base + offset; + data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start; + memmove(orig_start, data_start, data_len); + buf->head[0].iov_len -= (data_start - orig_start); + buf->len -= (data_start - orig_start); + + ret = GSS_S_DEFECTIVE_TOKEN; + if (gss_krb5_remove_padding(buf, blocksize)) + goto out; + + ret = GSS_S_COMPLETE; +out: + if (md5cksum.data) kfree(md5cksum.data); + return ret; +} diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c index 9dfb68377d69..b048bf672da2 100644 --- a/net/sunrpc/auth_gss/gss_mech_switch.c +++ b/net/sunrpc/auth_gss/gss_mech_switch.c @@ -35,7 +35,6 @@ #include #include -#include #include #include #include @@ -251,13 +250,11 @@ gss_import_sec_context(const void *input_token, size_t bufsize, u32 gss_get_mic(struct gss_ctx *context_handle, - u32 qop, struct xdr_buf *message, struct xdr_netobj *mic_token) { return context_handle->mech_type->gm_ops ->gss_get_mic(context_handle, - qop, message, mic_token); } @@ -267,16 +264,34 @@ gss_get_mic(struct gss_ctx *context_handle, u32 gss_verify_mic(struct gss_ctx *context_handle, struct xdr_buf *message, - struct xdr_netobj *mic_token, - u32 *qstate) + struct xdr_netobj *mic_token) { return context_handle->mech_type->gm_ops ->gss_verify_mic(context_handle, message, - mic_token, - qstate); + mic_token); } +u32 +gss_wrap(struct gss_ctx *ctx_id, + int offset, + struct xdr_buf *buf, + struct page **inpages) +{ + return ctx_id->mech_type->gm_ops + ->gss_wrap(ctx_id, offset, buf, inpages); +} + +u32 +gss_unwrap(struct gss_ctx *ctx_id, + int offset, + struct xdr_buf *buf) +{ + return ctx_id->mech_type->gm_ops + ->gss_unwrap(ctx_id, offset, buf); +} + + /* gss_delete_sec_context: free all resources associated with context_handle. 
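Taken together, the wrappers above pin down the post-qop shape of each mechanism's operations vector. The sketch below is reconstructed from these call sites and from the kerberos ops table earlier in the patch; the authoritative definition lives in the sunrpc gss_api header, which is not part of this hunk, and gss_import_sec_context is omitted because its prototype is not visible here.

/* Sketch only -- per-mechanism ops implied by the dispatchers above: */
struct gss_api_ops_sketch {
	u32  (*gss_get_mic)(struct gss_ctx *ctx_id,
			    struct xdr_buf *message,
			    struct xdr_netobj *mic_token);
	u32  (*gss_verify_mic)(struct gss_ctx *ctx_id,
			       struct xdr_buf *message,
			       struct xdr_netobj *mic_token);
	u32  (*gss_wrap)(struct gss_ctx *ctx_id, int offset,
			 struct xdr_buf *buf, struct page **inpages);
	u32  (*gss_unwrap)(struct gss_ctx *ctx_id, int offset,
			   struct xdr_buf *buf);
	void (*gss_delete_sec_context)(void *internal_ctx_id);
};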
* Note this differs from the RFC 2744-specified prototype in that we don't * bother returning an output token, since it would never be used anyway. */ diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c index 6c97d61baa9b..39b3edc14694 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_mech.c +++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c @@ -224,18 +224,13 @@ gss_delete_sec_context_spkm3(void *internal_ctx) { static u32 gss_verify_mic_spkm3(struct gss_ctx *ctx, struct xdr_buf *signbuf, - struct xdr_netobj *checksum, - u32 *qstate) { + struct xdr_netobj *checksum) +{ u32 maj_stat = 0; - int qop_state = 0; struct spkm3_ctx *sctx = ctx->internal_ctx_id; dprintk("RPC: gss_verify_mic_spkm3 calling spkm3_read_token\n"); - maj_stat = spkm3_read_token(sctx, checksum, signbuf, &qop_state, - SPKM_MIC_TOK); - - if (!maj_stat && qop_state) - *qstate = qop_state; + maj_stat = spkm3_read_token(sctx, checksum, signbuf, SPKM_MIC_TOK); dprintk("RPC: gss_verify_mic_spkm3 returning %d\n", maj_stat); return maj_stat; @@ -243,15 +238,15 @@ gss_verify_mic_spkm3(struct gss_ctx *ctx, static u32 gss_get_mic_spkm3(struct gss_ctx *ctx, - u32 qop, struct xdr_buf *message_buffer, - struct xdr_netobj *message_token) { + struct xdr_netobj *message_token) +{ u32 err = 0; struct spkm3_ctx *sctx = ctx->internal_ctx_id; dprintk("RPC: gss_get_mic_spkm3\n"); - err = spkm3_make_token(sctx, qop, message_buffer, + err = spkm3_make_token(sctx, message_buffer, message_token, SPKM_MIC_TOK); return err; } @@ -264,8 +259,8 @@ static struct gss_api_ops gss_spkm3_ops = { }; static struct pf_desc gss_spkm3_pfs[] = { - {RPC_AUTH_GSS_SPKM, 0, RPC_GSS_SVC_NONE, "spkm3"}, - {RPC_AUTH_GSS_SPKMI, 0, RPC_GSS_SVC_INTEGRITY, "spkm3i"}, + {RPC_AUTH_GSS_SPKM, RPC_GSS_SVC_NONE, "spkm3"}, + {RPC_AUTH_GSS_SPKMI, RPC_GSS_SVC_INTEGRITY, "spkm3i"}, }; static struct gss_api_mech gss_spkm3_mech = { diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c index 25339868d462..148201e929d0 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_seal.c +++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c @@ -51,7 +51,7 @@ */ u32 -spkm3_make_token(struct spkm3_ctx *ctx, int qop_req, +spkm3_make_token(struct spkm3_ctx *ctx, struct xdr_buf * text, struct xdr_netobj * token, int toktype) { @@ -68,8 +68,6 @@ spkm3_make_token(struct spkm3_ctx *ctx, int qop_req, dprintk("RPC: spkm3_make_token\n"); now = jiffies; - if (qop_req != 0) - goto out_err; if (ctx->ctx_id.len != 16) { dprintk("RPC: spkm3_make_token BAD ctx_id.len %d\n", diff --git a/net/sunrpc/auth_gss/gss_spkm3_unseal.c b/net/sunrpc/auth_gss/gss_spkm3_unseal.c index 65ce81bf0bc4..c3c0d9586103 100644 --- a/net/sunrpc/auth_gss/gss_spkm3_unseal.c +++ b/net/sunrpc/auth_gss/gss_spkm3_unseal.c @@ -52,7 +52,7 @@ u32 spkm3_read_token(struct spkm3_ctx *ctx, struct xdr_netobj *read_token, /* checksum */ struct xdr_buf *message_buffer, /* signbuf */ - int *qop_state, int toktype) + int toktype) { s32 code; struct xdr_netobj wire_cksum = {.len =0, .data = NULL}; diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index e3308195374e..e4ada15ed856 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -566,8 +566,7 @@ gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci, if (rqstp->rq_deferred) /* skip verification of revisited request */ return SVC_OK; - if (gss_verify_mic(ctx_id, &rpchdr, &checksum, NULL) - != GSS_S_COMPLETE) { + if (gss_verify_mic(ctx_id, &rpchdr, &checksum) != GSS_S_COMPLETE) { *authp = 
rpcsec_gsserr_credproblem; return SVC_DENIED; } @@ -604,7 +603,7 @@ gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq) xdr_buf_from_iov(&iov, &verf_data); p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len; mic.data = (u8 *)(p + 1); - maj_stat = gss_get_mic(ctx_id, 0, &verf_data, &mic); + maj_stat = gss_get_mic(ctx_id, &verf_data, &mic); if (maj_stat != GSS_S_COMPLETE) return -1; *p++ = htonl(mic.len); @@ -710,7 +709,7 @@ unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx) goto out; if (read_bytes_from_xdr_buf(buf, integ_len + 4, mic.data, mic.len)) goto out; - maj_stat = gss_verify_mic(ctx, &integ_buf, &mic, NULL); + maj_stat = gss_verify_mic(ctx, &integ_buf, &mic); if (maj_stat != GSS_S_COMPLETE) goto out; if (ntohl(svc_getu32(&buf->head[0])) != seq) @@ -1012,7 +1011,7 @@ svcauth_gss_release(struct svc_rqst *rqstp) resv = &resbuf->tail[0]; } mic.data = (u8 *)resv->iov_base + resv->iov_len + 4; - if (gss_get_mic(gsd->rsci->mechctx, 0, &integ_buf, &mic)) + if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic)) goto out_err; svc_putu32(resv, htonl(mic.len)); memset(mic.data + mic.len, 0, diff --git a/net/sunrpc/auth_null.c b/net/sunrpc/auth_null.c index 9b72d3abf823..f56767aaa927 100644 --- a/net/sunrpc/auth_null.c +++ b/net/sunrpc/auth_null.c @@ -7,9 +7,7 @@ */ #include -#include #include -#include #include #include #include diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c index 4ff297a9b15b..890fb5ea0dcb 100644 --- a/net/sunrpc/auth_unix.c +++ b/net/sunrpc/auth_unix.c @@ -9,8 +9,6 @@ #include #include #include -#include -#include #include #include diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index f17e6153b688..702ede309b06 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1,5 +1,5 @@ /* - * linux/net/sunrpc/rpcclnt.c + * linux/net/sunrpc/clnt.c * * This file contains the high-level RPC interface. 
* It is modeled as a finite state machine to support both synchronous @@ -27,7 +27,6 @@ #include #include #include -#include #include #include @@ -53,6 +52,7 @@ static void call_allocate(struct rpc_task *task); static void call_encode(struct rpc_task *task); static void call_decode(struct rpc_task *task); static void call_bind(struct rpc_task *task); +static void call_bind_status(struct rpc_task *task); static void call_transmit(struct rpc_task *task); static void call_status(struct rpc_task *task); static void call_refresh(struct rpc_task *task); @@ -517,15 +517,8 @@ void rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize) { struct rpc_xprt *xprt = clnt->cl_xprt; - - xprt->sndsize = 0; - if (sndsize) - xprt->sndsize = sndsize + RPC_SLACK_SPACE; - xprt->rcvsize = 0; - if (rcvsize) - xprt->rcvsize = rcvsize + RPC_SLACK_SPACE; - if (xprt_connected(xprt)) - xprt_sock_setbufsize(xprt); + if (xprt->ops->set_buffer_size) + xprt->ops->set_buffer_size(xprt, sndsize, rcvsize); } /* @@ -685,13 +678,11 @@ call_allocate(struct rpc_task *task) static void call_encode(struct rpc_task *task) { - struct rpc_clnt *clnt = task->tk_client; struct rpc_rqst *req = task->tk_rqstp; struct xdr_buf *sndbuf = &req->rq_snd_buf; struct xdr_buf *rcvbuf = &req->rq_rcv_buf; unsigned int bufsiz; kxdrproc_t encode; - int status; u32 *p; dprintk("RPC: %4d call_encode (status %d)\n", @@ -719,11 +710,15 @@ call_encode(struct rpc_task *task) rpc_exit(task, -EIO); return; } - if (encode && (status = rpcauth_wrap_req(task, encode, req, p, - task->tk_msg.rpc_argp)) < 0) { - printk(KERN_WARNING "%s: can't encode arguments: %d\n", - clnt->cl_protname, -status); - rpc_exit(task, status); + if (encode == NULL) + return; + + task->tk_status = rpcauth_wrap_req(task, encode, req, p, + task->tk_msg.rpc_argp); + if (task->tk_status == -ENOMEM) { + /* XXX: Is this sane? */ + rpc_delay(task, 3*HZ); + task->tk_status = -EAGAIN; } } @@ -734,43 +729,95 @@ static void call_bind(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; - struct rpc_xprt *xprt = clnt->cl_xprt; - dprintk("RPC: %4d call_bind xprt %p %s connected\n", task->tk_pid, - xprt, (xprt_connected(xprt) ? "is" : "is not")); - - task->tk_action = (xprt_connected(xprt)) ? call_transmit : call_connect; + dprintk("RPC: %4d call_bind (status %d)\n", + task->tk_pid, task->tk_status); + task->tk_action = call_connect; if (!clnt->cl_port) { - task->tk_action = call_connect; - task->tk_timeout = RPC_CONNECT_TIMEOUT; + task->tk_action = call_bind_status; + task->tk_timeout = task->tk_xprt->bind_timeout; rpc_getport(task, clnt); } } /* - * 4a. Connect to the RPC server (TCP case) + * 4a. 
Sort out bind result + */ +static void +call_bind_status(struct rpc_task *task) +{ + int status = -EACCES; + + if (task->tk_status >= 0) { + dprintk("RPC: %4d call_bind_status (status %d)\n", + task->tk_pid, task->tk_status); + task->tk_status = 0; + task->tk_action = call_connect; + return; + } + + switch (task->tk_status) { + case -EACCES: + dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n", + task->tk_pid); + rpc_delay(task, 3*HZ); + goto retry_bind; + case -ETIMEDOUT: + dprintk("RPC: %4d rpcbind request timed out\n", + task->tk_pid); + if (RPC_IS_SOFT(task)) { + status = -EIO; + break; + } + goto retry_bind; + case -EPFNOSUPPORT: + dprintk("RPC: %4d remote rpcbind service unavailable\n", + task->tk_pid); + break; + case -EPROTONOSUPPORT: + dprintk("RPC: %4d remote rpcbind version 2 unavailable\n", + task->tk_pid); + break; + default: + dprintk("RPC: %4d unrecognized rpcbind error (%d)\n", + task->tk_pid, -task->tk_status); + status = -EIO; + break; + } + + rpc_exit(task, status); + return; + +retry_bind: + task->tk_status = 0; + task->tk_action = call_bind; + return; +} + +/* + * 4b. Connect to the RPC server */ static void call_connect(struct rpc_task *task) { - struct rpc_clnt *clnt = task->tk_client; + struct rpc_xprt *xprt = task->tk_xprt; - dprintk("RPC: %4d call_connect status %d\n", - task->tk_pid, task->tk_status); + dprintk("RPC: %4d call_connect xprt %p %s connected\n", + task->tk_pid, xprt, + (xprt_connected(xprt) ? "is" : "is not")); - if (xprt_connected(clnt->cl_xprt)) { - task->tk_action = call_transmit; - return; + task->tk_action = call_transmit; + if (!xprt_connected(xprt)) { + task->tk_action = call_connect_status; + if (task->tk_status < 0) + return; + xprt_connect(task); } - task->tk_action = call_connect_status; - if (task->tk_status < 0) - return; - xprt_connect(task); } /* - * 4b. Sort out connect result + * 4c. Sort out connect result */ static void call_connect_status(struct rpc_task *task) @@ -778,6 +825,9 @@ call_connect_status(struct rpc_task *task) struct rpc_clnt *clnt = task->tk_client; int status = task->tk_status; + dprintk("RPC: %5u call_connect_status (status %d)\n", + task->tk_pid, task->tk_status); + task->tk_status = 0; if (status >= 0) { clnt->cl_stats->netreconn++; @@ -785,17 +835,19 @@ call_connect_status(struct rpc_task *task) return; } - /* Something failed: we may have to rebind */ + /* Something failed: remote service port may have changed */ if (clnt->cl_autobind) clnt->cl_port = 0; + switch (status) { case -ENOTCONN: case -ETIMEDOUT: case -EAGAIN: - task->tk_action = (clnt->cl_port == 0) ? call_bind : call_connect; + task->tk_action = call_bind; break; default: rpc_exit(task, -EIO); + break; } } @@ -815,10 +867,12 @@ call_transmit(struct rpc_task *task) if (task->tk_status != 0) return; /* Encode here so that rpcsec_gss can use correct sequence number. */ - if (!task->tk_rqstp->rq_bytes_sent) + if (task->tk_rqstp->rq_bytes_sent == 0) { call_encode(task); - if (task->tk_status < 0) - return; + /* Did the encode result in an error condition? 
*/ + if (task->tk_status != 0) + goto out_nosend; + } xprt_transmit(task); if (task->tk_status < 0) return; @@ -826,6 +880,10 @@ call_transmit(struct rpc_task *task) task->tk_action = NULL; rpc_wake_up_task(task); } + return; +out_nosend: + /* release socket write lock before attempting to handle error */ + xprt_abort_transmit(task); } /* @@ -1020,13 +1078,12 @@ static u32 * call_header(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; - struct rpc_xprt *xprt = clnt->cl_xprt; struct rpc_rqst *req = task->tk_rqstp; u32 *p = req->rq_svec[0].iov_base; /* FIXME: check buffer size? */ - if (xprt->stream) - *p++ = 0; /* fill in later */ + + p = xprt_skip_transport_header(task->tk_xprt, p); *p++ = req->rq_xid; /* XID */ *p++ = htonl(RPC_CALL); /* CALL */ *p++ = htonl(RPC_VERSION); /* RPC version */ diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c index 4e81f2766923..a398575f94b8 100644 --- a/net/sunrpc/pmap_clnt.c +++ b/net/sunrpc/pmap_clnt.c @@ -26,7 +26,7 @@ #define PMAP_GETPORT 3 static struct rpc_procinfo pmap_procedures[]; -static struct rpc_clnt * pmap_create(char *, struct sockaddr_in *, int); +static struct rpc_clnt * pmap_create(char *, struct sockaddr_in *, int, int); static void pmap_getport_done(struct rpc_task *); static struct rpc_program pmap_program; static DEFINE_SPINLOCK(pmap_lock); @@ -65,7 +65,7 @@ rpc_getport(struct rpc_task *task, struct rpc_clnt *clnt) map->pm_binding = 1; spin_unlock(&pmap_lock); - pmap_clnt = pmap_create(clnt->cl_server, sap, map->pm_prot); + pmap_clnt = pmap_create(clnt->cl_server, sap, map->pm_prot, 0); if (IS_ERR(pmap_clnt)) { task->tk_status = PTR_ERR(pmap_clnt); goto bailout; @@ -112,7 +112,7 @@ rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int prot) NIPQUAD(sin->sin_addr.s_addr), prog, vers, prot); sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(sin->sin_addr.s_addr)); - pmap_clnt = pmap_create(hostname, sin, prot); + pmap_clnt = pmap_create(hostname, sin, prot, 0); if (IS_ERR(pmap_clnt)) return PTR_ERR(pmap_clnt); @@ -171,7 +171,7 @@ rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay) sin.sin_family = AF_INET; sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); - pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP); + pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP, 1); if (IS_ERR(pmap_clnt)) { error = PTR_ERR(pmap_clnt); dprintk("RPC: couldn't create pmap client. 
Error = %d\n", error); @@ -198,7 +198,7 @@ rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay) } static struct rpc_clnt * -pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto) +pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto, int privileged) { struct rpc_xprt *xprt; struct rpc_clnt *clnt; @@ -208,6 +208,8 @@ pmap_create(char *hostname, struct sockaddr_in *srvaddr, int proto) if (IS_ERR(xprt)) return (struct rpc_clnt *)xprt; xprt->addr.sin_port = htons(RPC_PMAP_PORT); + if (!privileged) + xprt->resvport = 0; /* printk("pmap: create clnt\n"); */ clnt = rpc_new_client(xprt, hostname, diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index ded6c63f11ec..4f188d0a5d11 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -76,25 +76,35 @@ int rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg) { struct rpc_inode *rpci = RPC_I(inode); - int res = 0; + int res = -EPIPE; down(&inode->i_sem); + if (rpci->ops == NULL) + goto out; if (rpci->nreaders) { list_add_tail(&msg->list, &rpci->pipe); rpci->pipelen += msg->len; + res = 0; } else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) { if (list_empty(&rpci->pipe)) schedule_delayed_work(&rpci->queue_timeout, RPC_UPCALL_TIMEOUT); list_add_tail(&msg->list, &rpci->pipe); rpci->pipelen += msg->len; - } else - res = -EPIPE; + res = 0; + } +out: up(&inode->i_sem); wake_up(&rpci->waitq); return res; } +static inline void +rpc_inode_setowner(struct inode *inode, void *private) +{ + RPC_I(inode)->private = private; +} + static void rpc_close_pipes(struct inode *inode) { @@ -111,15 +121,10 @@ rpc_close_pipes(struct inode *inode) rpci->ops->release_pipe(inode); rpci->ops = NULL; } + rpc_inode_setowner(inode, NULL); up(&inode->i_sem); } -static inline void -rpc_inode_setowner(struct inode *inode, void *private) -{ - RPC_I(inode)->private = private; -} - static struct inode * rpc_alloc_inode(struct super_block *sb) { @@ -501,7 +506,6 @@ repeat: dentry = dvec[--n]; if (dentry->d_inode) { rpc_close_pipes(dentry->d_inode); - rpc_inode_setowner(dentry->d_inode, NULL); simple_unlink(dir, dentry); } dput(dentry); @@ -576,10 +580,8 @@ __rpc_rmdir(struct inode *dir, struct dentry *dentry) int error; shrink_dcache_parent(dentry); - if (dentry->d_inode) { + if (dentry->d_inode) rpc_close_pipes(dentry->d_inode); - rpc_inode_setowner(dentry->d_inode, NULL); - } if ((error = simple_rmdir(dir, dentry)) != 0) return error; if (!error) { @@ -732,7 +734,6 @@ rpc_unlink(char *path) d_drop(dentry); if (dentry->d_inode) { rpc_close_pipes(dentry->d_inode); - rpc_inode_setowner(dentry->d_inode, NULL); error = simple_unlink(dir, dentry); } dput(dentry); diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index f3104035e35d..54e60a657500 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -719,7 +719,7 @@ static void rpc_async_schedule(void *arg) void * rpc_malloc(struct rpc_task *task, size_t size) { - int gfp; + gfp_t gfp; if (task->tk_flags & RPC_TASK_SWAPPER) gfp = GFP_ATOMIC; diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c new file mode 100644 index 000000000000..8f97e90f36c8 --- /dev/null +++ b/net/sunrpc/socklib.c @@ -0,0 +1,175 @@ +/* + * linux/net/sunrpc/socklib.c + * + * Common socket helper routines for RPC client and server + * + * Copyright (C) 1995, 1996 Olaf Kirch + */ + +#include +#include +#include +#include + + +/** + * skb_read_bits - copy some data bits from skb to internal buffer + * @desc: sk_buff copy helper + * @to: copy destination + * @len: number of bytes to copy 
+ * + * Possibly called several times to iterate over an sk_buff and copy + * data out of it. + */ +static size_t skb_read_bits(skb_reader_t *desc, void *to, size_t len) +{ + if (len > desc->count) + len = desc->count; + if (skb_copy_bits(desc->skb, desc->offset, to, len)) + return 0; + desc->count -= len; + desc->offset += len; + return len; +} + +/** + * skb_read_and_csum_bits - copy and checksum from skb to buffer + * @desc: sk_buff copy helper + * @to: copy destination + * @len: number of bytes to copy + * + * Same as skb_read_bits, but calculate a checksum at the same time. + */ +static size_t skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len) +{ + unsigned int csum2, pos; + + if (len > desc->count) + len = desc->count; + pos = desc->offset; + csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0); + desc->csum = csum_block_add(desc->csum, csum2, pos); + desc->count -= len; + desc->offset += len; + return len; +} + +/** + * xdr_partial_copy_from_skb - copy data out of an skb + * @xdr: target XDR buffer + * @base: starting offset + * @desc: sk_buff copy helper + * @copy_actor: virtual method for copying data + * + */ +ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, skb_reader_t *desc, skb_read_actor_t copy_actor) +{ + struct page **ppage = xdr->pages; + unsigned int len, pglen = xdr->page_len; + ssize_t copied = 0; + int ret; + + len = xdr->head[0].iov_len; + if (base < len) { + len -= base; + ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len); + copied += ret; + if (ret != len || !desc->count) + goto out; + base = 0; + } else + base -= len; + + if (unlikely(pglen == 0)) + goto copy_tail; + if (unlikely(base >= pglen)) { + base -= pglen; + goto copy_tail; + } + if (base || xdr->page_base) { + pglen -= base; + base += xdr->page_base; + ppage += base >> PAGE_CACHE_SHIFT; + base &= ~PAGE_CACHE_MASK; + } + do { + char *kaddr; + + /* ACL likes to be lazy in allocating pages - ACLs + * are small by default but can get huge. */ + if (unlikely(*ppage == NULL)) { + *ppage = alloc_page(GFP_ATOMIC); + if (unlikely(*ppage == NULL)) { + if (copied == 0) + copied = -ENOMEM; + goto out; + } + } + + len = PAGE_CACHE_SIZE; + kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA); + if (base) { + len -= base; + if (pglen < len) + len = pglen; + ret = copy_actor(desc, kaddr + base, len); + base = 0; + } else { + if (pglen < len) + len = pglen; + ret = copy_actor(desc, kaddr, len); + } + flush_dcache_page(*ppage); + kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA); + copied += ret; + if (ret != len || !desc->count) + goto out; + ppage++; + } while ((pglen -= len) != 0); +copy_tail: + len = xdr->tail[0].iov_len; + if (base < len) + copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base); +out: + return copied; +} + +/** + * csum_partial_copy_to_xdr - checksum and copy data + * @xdr: target XDR buffer + * @skb: source skb + * + * We have set things up such that we perform the checksum of the UDP + * packet in parallel with the copies into the RPC client iovec. 
-DaveM + */ +int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) +{ + skb_reader_t desc; + + desc.skb = skb; + desc.offset = sizeof(struct udphdr); + desc.count = skb->len - desc.offset; + + if (skb->ip_summed == CHECKSUM_UNNECESSARY) + goto no_checksum; + + desc.csum = csum_partial(skb->data, desc.offset, skb->csum); + if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0) + return -1; + if (desc.offset != skb->len) { + unsigned int csum2; + csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0); + desc.csum = csum_block_add(desc.csum, csum2, desc.offset); + } + if (desc.count) + return -1; + if ((unsigned short)csum_fold(desc.csum)) + return -1; + return 0; +no_checksum: + if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0) + return -1; + if (desc.count) + return -1; + return 0; +} diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index ed48ff022d35..2387e7b823ff 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c @@ -10,7 +10,6 @@ #include #include -#include #include #include #include diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 30ec3efc48a6..f16e7cdd6150 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -548,9 +548,6 @@ svc_write_space(struct sock *sk) /* * Receive a datagram from a UDP socket. */ -extern int -csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb); - static int svc_udp_recvfrom(struct svc_rqst *rqstp) { @@ -587,7 +584,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp) struct timeval tv; tv.tv_sec = xtime.tv_sec; - tv.tv_usec = xtime.tv_nsec * 1000; + tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC; skb_set_timestamp(skb, &tv); /* Don't enable netstamp, sunrpc doesn't need that much accuracy */ diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c index 1b9616a12e24..d0c9f460e411 100644 --- a/net/sunrpc/sysctl.c +++ b/net/sunrpc/sysctl.c @@ -119,8 +119,18 @@ done: return 0; } +unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE; +unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE; +unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT; +EXPORT_SYMBOL(xprt_min_resvport); +unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT; +EXPORT_SYMBOL(xprt_max_resvport); + + static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE; static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE; +static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT; +static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT; static ctl_table debug_table[] = { { @@ -177,6 +187,28 @@ static ctl_table debug_table[] = { .extra1 = &min_slot_table_size, .extra2 = &max_slot_table_size }, + { + .ctl_name = CTL_MIN_RESVPORT, + .procname = "min_resvport", + .data = &xprt_min_resvport, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &xprt_min_resvport_limit, + .extra2 = &xprt_max_resvport_limit + }, + { + .ctl_name = CTL_MAX_RESVPORT, + .procname = "max_resvport", + .data = &xprt_max_resvport, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .strategy = &sysctl_intvec, + .extra1 = &xprt_min_resvport_limit, + .extra2 = &xprt_max_resvport_limit + }, { .ctl_name = 0 } }; diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index fde16f40a581..32df43372ee9 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -6,15 +6,12 @@ * Copyright (C) 1995, 1996 Olaf Kirch */ +#include #include -#include #include #include #include 
#include -#include -#include -#include #include #include @@ -176,178 +173,6 @@ xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset, xdr->buflen += len; } -ssize_t -xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, - skb_reader_t *desc, - skb_read_actor_t copy_actor) -{ - struct page **ppage = xdr->pages; - unsigned int len, pglen = xdr->page_len; - ssize_t copied = 0; - int ret; - - len = xdr->head[0].iov_len; - if (base < len) { - len -= base; - ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len); - copied += ret; - if (ret != len || !desc->count) - goto out; - base = 0; - } else - base -= len; - - if (pglen == 0) - goto copy_tail; - if (base >= pglen) { - base -= pglen; - goto copy_tail; - } - if (base || xdr->page_base) { - pglen -= base; - base += xdr->page_base; - ppage += base >> PAGE_CACHE_SHIFT; - base &= ~PAGE_CACHE_MASK; - } - do { - char *kaddr; - - /* ACL likes to be lazy in allocating pages - ACLs - * are small by default but can get huge. */ - if (unlikely(*ppage == NULL)) { - *ppage = alloc_page(GFP_ATOMIC); - if (unlikely(*ppage == NULL)) { - if (copied == 0) - copied = -ENOMEM; - goto out; - } - } - - len = PAGE_CACHE_SIZE; - kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA); - if (base) { - len -= base; - if (pglen < len) - len = pglen; - ret = copy_actor(desc, kaddr + base, len); - base = 0; - } else { - if (pglen < len) - len = pglen; - ret = copy_actor(desc, kaddr, len); - } - flush_dcache_page(*ppage); - kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA); - copied += ret; - if (ret != len || !desc->count) - goto out; - ppage++; - } while ((pglen -= len) != 0); -copy_tail: - len = xdr->tail[0].iov_len; - if (base < len) - copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base); -out: - return copied; -} - - -int -xdr_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, - struct xdr_buf *xdr, unsigned int base, int msgflags) -{ - struct page **ppage = xdr->pages; - unsigned int len, pglen = xdr->page_len; - int err, ret = 0; - ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int); - - len = xdr->head[0].iov_len; - if (base < len || (addr != NULL && base == 0)) { - struct kvec iov = { - .iov_base = xdr->head[0].iov_base + base, - .iov_len = len - base, - }; - struct msghdr msg = { - .msg_name = addr, - .msg_namelen = addrlen, - .msg_flags = msgflags, - }; - if (xdr->len > len) - msg.msg_flags |= MSG_MORE; - - if (iov.iov_len != 0) - err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); - else - err = kernel_sendmsg(sock, &msg, NULL, 0, 0); - if (ret == 0) - ret = err; - else if (err > 0) - ret += err; - if (err != iov.iov_len) - goto out; - base = 0; - } else - base -= len; - - if (pglen == 0) - goto copy_tail; - if (base >= pglen) { - base -= pglen; - goto copy_tail; - } - if (base || xdr->page_base) { - pglen -= base; - base += xdr->page_base; - ppage += base >> PAGE_CACHE_SHIFT; - base &= ~PAGE_CACHE_MASK; - } - - sendpage = sock->ops->sendpage ? : sock_no_sendpage; - do { - int flags = msgflags; - - len = PAGE_CACHE_SIZE; - if (base) - len -= base; - if (pglen < len) - len = pglen; - - if (pglen != len || xdr->tail[0].iov_len != 0) - flags |= MSG_MORE; - - /* Hmm... 
We might be dealing with highmem pages */ - if (PageHighMem(*ppage)) - sendpage = sock_no_sendpage; - err = sendpage(sock, *ppage, base, len, flags); - if (ret == 0) - ret = err; - else if (err > 0) - ret += err; - if (err != len) - goto out; - base = 0; - ppage++; - } while ((pglen -= len) != 0); -copy_tail: - len = xdr->tail[0].iov_len; - if (base < len) { - struct kvec iov = { - .iov_base = xdr->tail[0].iov_base + base, - .iov_len = len - base, - }; - struct msghdr msg = { - .msg_flags = msgflags, - }; - err = kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); - if (ret == 0) - ret = err; - else if (err > 0) - ret += err; - } -out: - return ret; -} - /* * Helper routines for doing 'memmove' like operations on a struct xdr_buf diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 3c654e06b084..6dda3860351f 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -10,12 +10,12 @@ * one is available. Otherwise, it sleeps on the backlog queue * (xprt_reserve). * - Next, the caller puts together the RPC message, stuffs it into - * the request struct, and calls xprt_call(). - * - xprt_call transmits the message and installs the caller on the - * socket's wait list. At the same time, it installs a timer that + * the request struct, and calls xprt_transmit(). + * - xprt_transmit sends the message and installs the caller on the + * transport's wait list. At the same time, it installs a timer that * is run after the packet's timeout has expired. * - When a packet arrives, the data_ready handler walks the list of - * pending requests for that socket. If a matching XID is found, the + * pending requests for that transport. If a matching XID is found, the * caller is woken up, and the timer removed. * - When no reply arrives within the timeout interval, the timer is * fired by the kernel and runs xprt_timer(). It either adjusts the @@ -33,36 +33,17 @@ * * Copyright (C) 1995-1997, Olaf Kirch * - * TCP callback races fixes (C) 1998 Red Hat Software - * TCP send fixes (C) 1998 Red Hat Software - * TCP NFS related read + write fixes - * (C) 1999 Dave Airlie, University of Limerick, Ireland - * - * Rewrite of larges part of the code in order to stabilize TCP stuff. - * Fix behaviour when socket buffer is full. - * (C) 1999 Trond Myklebust + * Transport switch API copyright (C) 2005, Chuck Lever */ +#include + #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include #include #include -#include -#include -#include -#include +#include /* * Local variables @@ -73,93 +54,62 @@ # define RPCDBG_FACILITY RPCDBG_XPRT #endif -#define XPRT_MAX_BACKOFF (8) -#define XPRT_IDLE_TIMEOUT (5*60*HZ) -#define XPRT_MAX_RESVPORT (800) - /* * Local functions */ static void xprt_request_init(struct rpc_task *, struct rpc_xprt *); static inline void do_xprt_reserve(struct rpc_task *); -static void xprt_disconnect(struct rpc_xprt *); static void xprt_connect_status(struct rpc_task *task); -static struct rpc_xprt * xprt_setup(int proto, struct sockaddr_in *ap, - struct rpc_timeout *to); -static struct socket *xprt_create_socket(struct rpc_xprt *, int, int); -static void xprt_bind_socket(struct rpc_xprt *, struct socket *); static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); -static int xprt_clear_backlog(struct rpc_xprt *xprt); - -#ifdef RPC_DEBUG_DATA /* - * Print the buffer contents (first 128 bytes only--just enough for - * diropres return). 
+ * The transport code maintains an estimate on the maximum number of out- + * standing RPC requests, using a smoothed version of the congestion + * avoidance implemented in 44BSD. This is basically the Van Jacobson + * congestion algorithm: If a retransmit occurs, the congestion window is + * halved; otherwise, it is incremented by 1/cwnd when + * + * - a reply is received and + * - a full number of requests are outstanding and + * - the congestion window hasn't been updated recently. */ -static void -xprt_pktdump(char *msg, u32 *packet, unsigned int count) -{ - u8 *buf = (u8 *) packet; - int j; +#define RPC_CWNDSHIFT (8U) +#define RPC_CWNDSCALE (1U << RPC_CWNDSHIFT) +#define RPC_INITCWND RPC_CWNDSCALE +#define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT) - dprintk("RPC: %s\n", msg); - for (j = 0; j < count && j < 128; j += 4) { - if (!(j & 31)) { - if (j) - dprintk("\n"); - dprintk("0x%04x ", j); - } - dprintk("%02x%02x%02x%02x ", - buf[j], buf[j+1], buf[j+2], buf[j+3]); - } - dprintk("\n"); -} -#else -static inline void -xprt_pktdump(char *msg, u32 *packet, unsigned int count) -{ - /* NOP */ -} -#endif +#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd) -/* - * Look up RPC transport given an INET socket +/** + * xprt_reserve_xprt - serialize write access to transports + * @task: task that is requesting access to the transport + * + * This prevents mixing the payload of separate requests, and prevents + * transport connects from colliding with writes. No congestion control + * is provided. */ -static inline struct rpc_xprt * -xprt_from_sock(struct sock *sk) -{ - return (struct rpc_xprt *) sk->sk_user_data; -} - -/* - * Serialize write access to sockets, in order to prevent different - * requests from interfering with each other. - * Also prevents TCP socket connects from colliding with writes. - */ -static int -__xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) +int xprt_reserve_xprt(struct rpc_task *task) { + struct rpc_xprt *xprt = task->tk_xprt; struct rpc_rqst *req = task->tk_rqstp; - if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate)) { + if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { if (task == xprt->snd_task) return 1; + if (task == NULL) + return 0; goto out_sleep; } - if (xprt->nocong || __xprt_get_cong(xprt, task)) { - xprt->snd_task = task; - if (req) { - req->rq_bytes_sent = 0; - req->rq_ntrans++; - } - return 1; + xprt->snd_task = task; + if (req) { + req->rq_bytes_sent = 0; + req->rq_ntrans++; } - smp_mb__before_clear_bit(); - clear_bit(XPRT_LOCKED, &xprt->sockstate); - smp_mb__after_clear_bit(); + return 1; + out_sleep: - dprintk("RPC: %4d failed to lock socket %p\n", task->tk_pid, xprt); + dprintk("RPC: %4d failed to lock transport %p\n", + task->tk_pid, xprt); task->tk_timeout = 0; task->tk_status = -EAGAIN; if (req && req->rq_ntrans) @@ -169,26 +119,92 @@ out_sleep: return 0; } -static inline int -xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) +/* + * xprt_reserve_xprt_cong - serialize write access to transports + * @task: task that is requesting access to the transport + * + * Same as xprt_reserve_xprt, but Van Jacobson congestion control is + * integrated into the decision of whether a request is allowed to be + * woken up and given access to the transport. 
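+ *
+ * Illustrative sketch, not code from this patch: a datagram transport that
+ * wants the congestion-controlled behaviour plugs this routine and its
+ * release counterpart into its ops vector, while a stream transport uses
+ * the plain variants. A hypothetical table (the struct name rpc_xprt_ops
+ * is assumed here from the xprt->ops usage elsewhere in this patch):
+ *
+ *	static struct rpc_xprt_ops example_udp_ops = {
+ *		.reserve_xprt	= xprt_reserve_xprt_cong,
+ *		.release_xprt	= xprt_release_xprt_cong,
+ *	};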
+ */ +int xprt_reserve_xprt_cong(struct rpc_task *task) +{ + struct rpc_xprt *xprt = task->tk_xprt; + struct rpc_rqst *req = task->tk_rqstp; + + if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) { + if (task == xprt->snd_task) + return 1; + goto out_sleep; + } + if (__xprt_get_cong(xprt, task)) { + xprt->snd_task = task; + if (req) { + req->rq_bytes_sent = 0; + req->rq_ntrans++; + } + return 1; + } + smp_mb__before_clear_bit(); + clear_bit(XPRT_LOCKED, &xprt->state); + smp_mb__after_clear_bit(); +out_sleep: + dprintk("RPC: %4d failed to lock transport %p\n", task->tk_pid, xprt); + task->tk_timeout = 0; + task->tk_status = -EAGAIN; + if (req && req->rq_ntrans) + rpc_sleep_on(&xprt->resend, task, NULL, NULL); + else + rpc_sleep_on(&xprt->sending, task, NULL, NULL); + return 0; +} + +static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task) { int retval; - spin_lock_bh(&xprt->sock_lock); - retval = __xprt_lock_write(xprt, task); - spin_unlock_bh(&xprt->sock_lock); + spin_lock_bh(&xprt->transport_lock); + retval = xprt->ops->reserve_xprt(task); + spin_unlock_bh(&xprt->transport_lock); return retval; } +static void __xprt_lock_write_next(struct rpc_xprt *xprt) +{ + struct rpc_task *task; + struct rpc_rqst *req; -static void -__xprt_lock_write_next(struct rpc_xprt *xprt) + if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) + return; + + task = rpc_wake_up_next(&xprt->resend); + if (!task) { + task = rpc_wake_up_next(&xprt->sending); + if (!task) + goto out_unlock; + } + + req = task->tk_rqstp; + xprt->snd_task = task; + if (req) { + req->rq_bytes_sent = 0; + req->rq_ntrans++; + } + return; + +out_unlock: + smp_mb__before_clear_bit(); + clear_bit(XPRT_LOCKED, &xprt->state); + smp_mb__after_clear_bit(); +} + +static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt) { struct rpc_task *task; - if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate)) + if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) return; - if (!xprt->nocong && RPCXPRT_CONGESTED(xprt)) + if (RPCXPRT_CONGESTED(xprt)) goto out_unlock; task = rpc_wake_up_next(&xprt->resend); if (!task) { @@ -196,7 +212,7 @@ __xprt_lock_write_next(struct rpc_xprt *xprt) if (!task) goto out_unlock; } - if (xprt->nocong || __xprt_get_cong(xprt, task)) { + if (__xprt_get_cong(xprt, task)) { struct rpc_rqst *req = task->tk_rqstp; xprt->snd_task = task; if (req) { @@ -207,87 +223,52 @@ __xprt_lock_write_next(struct rpc_xprt *xprt) } out_unlock: smp_mb__before_clear_bit(); - clear_bit(XPRT_LOCKED, &xprt->sockstate); + clear_bit(XPRT_LOCKED, &xprt->state); smp_mb__after_clear_bit(); } -/* - * Releases the socket for use by other requests. +/** + * xprt_release_xprt - allow other requests to use a transport + * @xprt: transport with other tasks potentially waiting + * @task: task that is releasing access to the transport + * + * Note that "task" can be NULL. No congestion control is provided. 
*/ -static void -__xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) +void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task) { if (xprt->snd_task == task) { xprt->snd_task = NULL; smp_mb__before_clear_bit(); - clear_bit(XPRT_LOCKED, &xprt->sockstate); + clear_bit(XPRT_LOCKED, &xprt->state); smp_mb__after_clear_bit(); __xprt_lock_write_next(xprt); } } -static inline void -xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) +/** + * xprt_release_xprt_cong - allow other requests to use a transport + * @xprt: transport with other tasks potentially waiting + * @task: task that is releasing access to the transport + * + * Note that "task" can be NULL. Another task is awoken to use the + * transport if the transport's congestion window allows it. + */ +void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task) { - spin_lock_bh(&xprt->sock_lock); - __xprt_release_write(xprt, task); - spin_unlock_bh(&xprt->sock_lock); + if (xprt->snd_task == task) { + xprt->snd_task = NULL; + smp_mb__before_clear_bit(); + clear_bit(XPRT_LOCKED, &xprt->state); + smp_mb__after_clear_bit(); + __xprt_lock_write_next_cong(xprt); + } } -/* - * Write data to socket. - */ -static inline int -xprt_sendmsg(struct rpc_xprt *xprt, struct rpc_rqst *req) +static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task) { - struct socket *sock = xprt->sock; - struct xdr_buf *xdr = &req->rq_snd_buf; - struct sockaddr *addr = NULL; - int addrlen = 0; - unsigned int skip; - int result; - - if (!sock) - return -ENOTCONN; - - xprt_pktdump("packet data:", - req->rq_svec->iov_base, - req->rq_svec->iov_len); - - /* For UDP, we need to provide an address */ - if (!xprt->stream) { - addr = (struct sockaddr *) &xprt->addr; - addrlen = sizeof(xprt->addr); - } - /* Dont repeat bytes */ - skip = req->rq_bytes_sent; - - clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); - result = xdr_sendpages(sock, addr, addrlen, xdr, skip, MSG_DONTWAIT); - - dprintk("RPC: xprt_sendmsg(%d) = %d\n", xdr->len - skip, result); - - if (result >= 0) - return result; - - switch (result) { - case -ECONNREFUSED: - /* When the server has died, an ICMP port unreachable message - * prompts ECONNREFUSED. - */ - case -EAGAIN: - break; - case -ECONNRESET: - case -ENOTCONN: - case -EPIPE: - /* connection broken */ - if (xprt->stream) - result = -ENOTCONN; - break; - default: - printk(KERN_NOTICE "RPC: sendmsg returned error %d\n", -result); - } - return result; + spin_lock_bh(&xprt->transport_lock); + xprt->ops->release_xprt(xprt, task); + spin_unlock_bh(&xprt->transport_lock); } /* @@ -321,26 +302,40 @@ __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req) return; req->rq_cong = 0; xprt->cong -= RPC_CWNDSCALE; - __xprt_lock_write_next(xprt); + __xprt_lock_write_next_cong(xprt); } -/* - * Adjust RPC congestion window +/** + * xprt_release_rqst_cong - housekeeping when request is complete + * @task: RPC request that recently completed + * + * Useful for transports that require congestion control. + */ +void xprt_release_rqst_cong(struct rpc_task *task) +{ + __xprt_put_cong(task->tk_xprt, task->tk_rqstp); +} + +/** + * xprt_adjust_cwnd - adjust transport congestion window + * @task: recently completed RPC request used to adjust window + * @result: result code of completed RPC request + * * We use a time-smoothed congestion estimator to avoid heavy oscillation. 
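+ *
+ * Illustrative sketch, not code from this patch: cwnd is kept scaled by
+ * RPC_CWNDSCALE (1 << 8) so that the "grow by 1/cwnd" step below stays in
+ * integer arithmetic:
+ *
+ *	cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
+ *
+ * With cwnd == 2 * RPC_CWNDSCALE (two requests in flight), a good reply
+ * adds RPC_CWNDSCALE / 2, i.e. half a request's worth; a timeout halves
+ * cwnd, clamped to no less than RPC_CWNDSCALE.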
*/ -static void -xprt_adjust_cwnd(struct rpc_xprt *xprt, int result) +void xprt_adjust_cwnd(struct rpc_task *task, int result) { - unsigned long cwnd; + struct rpc_rqst *req = task->tk_rqstp; + struct rpc_xprt *xprt = task->tk_xprt; + unsigned long cwnd = xprt->cwnd; - cwnd = xprt->cwnd; if (result >= 0 && cwnd <= xprt->cong) { /* The (cwnd >> 1) term makes sure * the result gets rounded properly. */ cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd; if (cwnd > RPC_MAXCWND(xprt)) cwnd = RPC_MAXCWND(xprt); - __xprt_lock_write_next(xprt); + __xprt_lock_write_next_cong(xprt); } else if (result == -ETIMEDOUT) { cwnd >>= 1; if (cwnd < RPC_CWNDSCALE) @@ -349,11 +344,89 @@ xprt_adjust_cwnd(struct rpc_xprt *xprt, int result) dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n", xprt->cong, xprt->cwnd, cwnd); xprt->cwnd = cwnd; + __xprt_put_cong(xprt, req); +} + +/** + * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue + * @xprt: transport with waiting tasks + * @status: result code to plant in each task before waking it + * + */ +void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status) +{ + if (status < 0) + rpc_wake_up_status(&xprt->pending, status); + else + rpc_wake_up(&xprt->pending); +} + +/** + * xprt_wait_for_buffer_space - wait for transport output buffer to clear + * @task: task to be put to sleep + * + */ +void xprt_wait_for_buffer_space(struct rpc_task *task) +{ + struct rpc_rqst *req = task->tk_rqstp; + struct rpc_xprt *xprt = req->rq_xprt; + + task->tk_timeout = req->rq_timeout; + rpc_sleep_on(&xprt->pending, task, NULL, NULL); +} + +/** + * xprt_write_space - wake the task waiting for transport output buffer space + * @xprt: transport with waiting tasks + * + * Can be called in a soft IRQ context, so xprt_write_space never sleeps. + */ +void xprt_write_space(struct rpc_xprt *xprt) +{ + if (unlikely(xprt->shutdown)) + return; + + spin_lock_bh(&xprt->transport_lock); + if (xprt->snd_task) { + dprintk("RPC: write space: waking waiting task on xprt %p\n", + xprt); + rpc_wake_up_task(xprt->snd_task); + } + spin_unlock_bh(&xprt->transport_lock); +} + +/** + * xprt_set_retrans_timeout_def - set a request's retransmit timeout + * @task: task whose timeout is to be set + * + * Set a request's retransmit timeout based on the transport's + * default timeout parameters. Used by transports that don't adjust + * the retransmit timeout based on round-trip time estimation. + */ +void xprt_set_retrans_timeout_def(struct rpc_task *task) +{ + task->tk_timeout = task->tk_rqstp->rq_timeout; } /* - * Reset the major timeout value + * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout + * @task: task whose timeout is to be set + * + * Set a request's retransmit timeout using the RTT estimator. 
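+ *
+ * Illustrative sketch, not code from this patch: the value computed below
+ * amounts to
+ *
+ *	timeout = rpc_calc_rto(rtt, timer)
+ *			<< (rpc_ntimeo(rtt, timer) + req->rq_retries);
+ *
+ * so an estimated RTO of HZ/4 with three accumulated backoff steps gives
+ * 2*HZ; a result of zero, or anything above to_maxval, is clamped to
+ * to_maxval.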
*/ +void xprt_set_retrans_timeout_rtt(struct rpc_task *task) +{ + int timer = task->tk_msg.rpc_proc->p_timer; + struct rpc_rtt *rtt = task->tk_client->cl_rtt; + struct rpc_rqst *req = task->tk_rqstp; + unsigned long max_timeout = req->rq_xprt->timeout.to_maxval; + + task->tk_timeout = rpc_calc_rto(rtt, timer); + task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries; + if (task->tk_timeout > max_timeout || task->tk_timeout == 0) + task->tk_timeout = max_timeout; +} + static void xprt_reset_majortimeo(struct rpc_rqst *req) { struct rpc_timeout *to = &req->rq_xprt->timeout; @@ -368,8 +441,10 @@ static void xprt_reset_majortimeo(struct rpc_rqst *req) req->rq_majortimeo += jiffies; } -/* - * Adjust timeout values etc for next retransmit +/** + * xprt_adjust_timeout - adjust timeout values for next retransmit + * @req: RPC request containing parameters to use for the adjustment + * */ int xprt_adjust_timeout(struct rpc_rqst *req) { @@ -391,9 +466,9 @@ int xprt_adjust_timeout(struct rpc_rqst *req) req->rq_retries = 0; xprt_reset_majortimeo(req); /* Reset the RTT counters == "slow start" */ - spin_lock_bh(&xprt->sock_lock); + spin_lock_bh(&xprt->transport_lock); rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval); - spin_unlock_bh(&xprt->sock_lock); + spin_unlock_bh(&xprt->transport_lock); pprintk("RPC: %lu timeout\n", jiffies); status = -ETIMEDOUT; } @@ -405,133 +480,52 @@ int xprt_adjust_timeout(struct rpc_rqst *req) return status; } -/* - * Close down a transport socket - */ -static void -xprt_close(struct rpc_xprt *xprt) -{ - struct socket *sock = xprt->sock; - struct sock *sk = xprt->inet; - - if (!sk) - return; - - write_lock_bh(&sk->sk_callback_lock); - xprt->inet = NULL; - xprt->sock = NULL; - - sk->sk_user_data = NULL; - sk->sk_data_ready = xprt->old_data_ready; - sk->sk_state_change = xprt->old_state_change; - sk->sk_write_space = xprt->old_write_space; - write_unlock_bh(&sk->sk_callback_lock); - - sk->sk_no_check = 0; - - sock_release(sock); -} - -static void -xprt_socket_autoclose(void *args) +static void xprt_autoclose(void *args) { struct rpc_xprt *xprt = (struct rpc_xprt *)args; xprt_disconnect(xprt); - xprt_close(xprt); + xprt->ops->close(xprt); xprt_release_write(xprt, NULL); } -/* - * Mark a transport as disconnected +/** + * xprt_disconnect - mark a transport as disconnected + * @xprt: transport to flag for disconnect + * */ -static void -xprt_disconnect(struct rpc_xprt *xprt) +void xprt_disconnect(struct rpc_xprt *xprt) { dprintk("RPC: disconnected transport %p\n", xprt); - spin_lock_bh(&xprt->sock_lock); + spin_lock_bh(&xprt->transport_lock); xprt_clear_connected(xprt); - rpc_wake_up_status(&xprt->pending, -ENOTCONN); - spin_unlock_bh(&xprt->sock_lock); + xprt_wake_pending_tasks(xprt, -ENOTCONN); + spin_unlock_bh(&xprt->transport_lock); } -/* - * Used to allow disconnection when we've been idle - */ static void xprt_init_autodisconnect(unsigned long data) { struct rpc_xprt *xprt = (struct rpc_xprt *)data; - spin_lock(&xprt->sock_lock); + spin_lock(&xprt->transport_lock); if (!list_empty(&xprt->recv) || xprt->shutdown) goto out_abort; - if (test_and_set_bit(XPRT_LOCKED, &xprt->sockstate)) + if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) goto out_abort; - spin_unlock(&xprt->sock_lock); - /* Let keventd close the socket */ - if (test_bit(XPRT_CONNECTING, &xprt->sockstate) != 0) + spin_unlock(&xprt->transport_lock); + if (xprt_connecting(xprt)) xprt_release_write(xprt, NULL); else schedule_work(&xprt->task_cleanup); return; out_abort: - 
spin_unlock(&xprt->sock_lock); + spin_unlock(&xprt->transport_lock); } -static void xprt_socket_connect(void *args) -{ - struct rpc_xprt *xprt = (struct rpc_xprt *)args; - struct socket *sock = xprt->sock; - int status = -EIO; - - if (xprt->shutdown || xprt->addr.sin_port == 0) - goto out; - - /* - * Start by resetting any existing state - */ - xprt_close(xprt); - sock = xprt_create_socket(xprt, xprt->prot, xprt->resvport); - if (sock == NULL) { - /* couldn't create socket or bind to reserved port; - * this is likely a permanent error, so cause an abort */ - goto out; - } - xprt_bind_socket(xprt, sock); - xprt_sock_setbufsize(xprt); - - status = 0; - if (!xprt->stream) - goto out; - - /* - * Tell the socket layer to start connecting... - */ - status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr, - sizeof(xprt->addr), O_NONBLOCK); - dprintk("RPC: %p connect status %d connected %d sock state %d\n", - xprt, -status, xprt_connected(xprt), sock->sk->sk_state); - if (status < 0) { - switch (status) { - case -EINPROGRESS: - case -EALREADY: - goto out_clear; - } - } -out: - if (status < 0) - rpc_wake_up_status(&xprt->pending, status); - else - rpc_wake_up(&xprt->pending); -out_clear: - smp_mb__before_clear_bit(); - clear_bit(XPRT_CONNECTING, &xprt->sockstate); - smp_mb__after_clear_bit(); -} - -/* - * Attempt to connect a TCP socket. +/** + * xprt_connect - schedule a transport connect operation + * @task: RPC task that is requesting the connect * */ void xprt_connect(struct rpc_task *task) @@ -552,37 +546,19 @@ void xprt_connect(struct rpc_task *task) if (!xprt_lock_write(xprt, task)) return; if (xprt_connected(xprt)) - goto out_write; + xprt_release_write(xprt, task); + else { + if (task->tk_rqstp) + task->tk_rqstp->rq_bytes_sent = 0; - if (task->tk_rqstp) - task->tk_rqstp->rq_bytes_sent = 0; - - task->tk_timeout = RPC_CONNECT_TIMEOUT; - rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL); - if (!test_and_set_bit(XPRT_CONNECTING, &xprt->sockstate)) { - /* Note: if we are here due to a dropped connection - * we delay reconnecting by RPC_REESTABLISH_TIMEOUT/HZ - * seconds - */ - if (xprt->sock != NULL) - schedule_delayed_work(&xprt->sock_connect, - RPC_REESTABLISH_TIMEOUT); - else { - schedule_work(&xprt->sock_connect); - if (!RPC_IS_ASYNC(task)) - flush_scheduled_work(); - } + task->tk_timeout = xprt->connect_timeout; + rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL); + xprt->ops->connect(task); } return; - out_write: - xprt_release_write(xprt, task); } -/* - * We arrive here when awoken from waiting on connection establishment. 
- */ -static void -xprt_connect_status(struct rpc_task *task) +static void xprt_connect_status(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_xprt; @@ -592,31 +568,42 @@ xprt_connect_status(struct rpc_task *task) return; } - /* if soft mounted, just cause this RPC to fail */ - if (RPC_IS_SOFT(task)) - task->tk_status = -EIO; - switch (task->tk_status) { case -ECONNREFUSED: case -ECONNRESET: + dprintk("RPC: %4d xprt_connect_status: server %s refused connection\n", + task->tk_pid, task->tk_client->cl_server); + break; case -ENOTCONN: - return; + dprintk("RPC: %4d xprt_connect_status: connection broken\n", + task->tk_pid); + break; case -ETIMEDOUT: - dprintk("RPC: %4d xprt_connect_status: timed out\n", + dprintk("RPC: %4d xprt_connect_status: connect attempt timed out\n", task->tk_pid); break; default: - printk(KERN_ERR "RPC: error %d connecting to server %s\n", - -task->tk_status, task->tk_client->cl_server); + dprintk("RPC: %4d xprt_connect_status: error %d connecting to server %s\n", + task->tk_pid, -task->tk_status, task->tk_client->cl_server); + xprt_release_write(xprt, task); + task->tk_status = -EIO; + return; + } + + /* if soft mounted, just cause this RPC to fail */ + if (RPC_IS_SOFT(task)) { + xprt_release_write(xprt, task); + task->tk_status = -EIO; } - xprt_release_write(xprt, task); } -/* - * Look up the RPC request corresponding to a reply, and then lock it. +/** + * xprt_lookup_rqst - find an RPC request corresponding to an XID + * @xprt: transport on which the original request was transmitted + * @xid: RPC XID of incoming reply + * */ -static inline struct rpc_rqst * -xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid) +struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid) { struct list_head *pos; struct rpc_rqst *req = NULL; @@ -631,556 +618,68 @@ xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid) return req; } -/* - * Complete reply received. - * The TCP code relies on us to remove the request from xprt->pending. +/** + * xprt_update_rtt - update an RPC client's RTT state after receiving a reply + * @task: RPC request that recently completed + * */ -static void -xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied) +void xprt_update_rtt(struct rpc_task *task) { - struct rpc_task *task = req->rq_task; - struct rpc_clnt *clnt = task->tk_client; + struct rpc_rqst *req = task->tk_rqstp; + struct rpc_rtt *rtt = task->tk_client->cl_rtt; + unsigned timer = task->tk_msg.rpc_proc->p_timer; - /* Adjust congestion window */ - if (!xprt->nocong) { - unsigned timer = task->tk_msg.rpc_proc->p_timer; - xprt_adjust_cwnd(xprt, copied); - __xprt_put_cong(xprt, req); - if (timer) { - if (req->rq_ntrans == 1) - rpc_update_rtt(clnt->cl_rtt, timer, - (long)jiffies - req->rq_xtime); - rpc_set_timeo(clnt->cl_rtt, timer, req->rq_ntrans - 1); - } + if (timer) { + if (req->rq_ntrans == 1) + rpc_update_rtt(rtt, timer, + (long)jiffies - req->rq_xtime); + rpc_set_timeo(rtt, timer, req->rq_ntrans - 1); } +} -#ifdef RPC_PROFILE - /* Profile only reads for now */ - if (copied > 1024) { - static unsigned long nextstat; - static unsigned long pkt_rtt, pkt_len, pkt_cnt; +/** + * xprt_complete_rqst - called when reply processing is complete + * @task: RPC request that recently completed + * @copied: actual number of bytes received from the transport + * + * Caller holds transport lock. 
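+ *
+ * Illustrative sketch, not code from this patch, of how a transport's
+ * receive path is expected to use the helpers above (the helper names are
+ * from this file; the surrounding handler is hypothetical):
+ *
+ *	spin_lock(&xprt->transport_lock);
+ *	req = xprt_lookup_rqst(xprt, xid);
+ *	if (req != NULL) {
+ *		xprt_update_rtt(req->rq_task);
+ *		xprt_complete_rqst(req->rq_task, copied);
+ *	}
+ *	spin_unlock(&xprt->transport_lock);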
+ */ +void xprt_complete_rqst(struct rpc_task *task, int copied) +{ + struct rpc_rqst *req = task->tk_rqstp; - pkt_cnt++; - pkt_len += req->rq_slen + copied; - pkt_rtt += jiffies - req->rq_xtime; - if (time_before(nextstat, jiffies)) { - printk("RPC: %lu %ld cwnd\n", jiffies, xprt->cwnd); - printk("RPC: %ld %ld %ld %ld stat\n", - jiffies, pkt_cnt, pkt_len, pkt_rtt); - pkt_rtt = pkt_len = pkt_cnt = 0; - nextstat = jiffies + 5 * HZ; - } - } -#endif + dprintk("RPC: %5u xid %08x complete (%d bytes received)\n", + task->tk_pid, ntohl(req->rq_xid), copied); - dprintk("RPC: %4d has input (%d bytes)\n", task->tk_pid, copied); list_del_init(&req->rq_list); req->rq_received = req->rq_private_buf.len = copied; - - /* ... and wake up the process. */ rpc_wake_up_task(task); - return; } -static size_t -skb_read_bits(skb_reader_t *desc, void *to, size_t len) +static void xprt_timer(struct rpc_task *task) { - if (len > desc->count) - len = desc->count; - if (skb_copy_bits(desc->skb, desc->offset, to, len)) - return 0; - desc->count -= len; - desc->offset += len; - return len; -} - -static size_t -skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len) -{ - unsigned int csum2, pos; - - if (len > desc->count) - len = desc->count; - pos = desc->offset; - csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0); - desc->csum = csum_block_add(desc->csum, csum2, pos); - desc->count -= len; - desc->offset += len; - return len; -} - -/* - * We have set things up such that we perform the checksum of the UDP - * packet in parallel with the copies into the RPC client iovec. -DaveM - */ -int -csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) -{ - skb_reader_t desc; - - desc.skb = skb; - desc.offset = sizeof(struct udphdr); - desc.count = skb->len - desc.offset; - - if (skb->ip_summed == CHECKSUM_UNNECESSARY) - goto no_checksum; - - desc.csum = csum_partial(skb->data, desc.offset, skb->csum); - if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0) - return -1; - if (desc.offset != skb->len) { - unsigned int csum2; - csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0); - desc.csum = csum_block_add(desc.csum, csum2, desc.offset); - } - if (desc.count) - return -1; - if ((unsigned short)csum_fold(desc.csum)) - return -1; - return 0; -no_checksum: - if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0) - return -1; - if (desc.count) - return -1; - return 0; -} - -/* - * Input handler for RPC replies. Called from a bottom half and hence - * atomic. - */ -static void -udp_data_ready(struct sock *sk, int len) -{ - struct rpc_task *task; - struct rpc_xprt *xprt; - struct rpc_rqst *rovr; - struct sk_buff *skb; - int err, repsize, copied; - u32 _xid, *xp; - - read_lock(&sk->sk_callback_lock); - dprintk("RPC: udp_data_ready...\n"); - if (!(xprt = xprt_from_sock(sk))) { - printk("RPC: udp_data_ready request not found!\n"); - goto out; - } - - dprintk("RPC: udp_data_ready client %p\n", xprt); - - if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) - goto out; - - if (xprt->shutdown) - goto dropit; - - repsize = skb->len - sizeof(struct udphdr); - if (repsize < 4) { - printk("RPC: impossible RPC reply size %d!\n", repsize); - goto dropit; - } - - /* Copy the XID from the skb... 
*/ - xp = skb_header_pointer(skb, sizeof(struct udphdr), - sizeof(_xid), &_xid); - if (xp == NULL) - goto dropit; - - /* Look up and lock the request corresponding to the given XID */ - spin_lock(&xprt->sock_lock); - rovr = xprt_lookup_rqst(xprt, *xp); - if (!rovr) - goto out_unlock; - task = rovr->rq_task; - - dprintk("RPC: %4d received reply\n", task->tk_pid); - - if ((copied = rovr->rq_private_buf.buflen) > repsize) - copied = repsize; - - /* Suck it into the iovec, verify checksum if not done by hw. */ - if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) - goto out_unlock; - - /* Something worked... */ - dst_confirm(skb->dst); - - xprt_complete_rqst(xprt, rovr, copied); - - out_unlock: - spin_unlock(&xprt->sock_lock); - dropit: - skb_free_datagram(sk, skb); - out: - read_unlock(&sk->sk_callback_lock); -} - -/* - * Copy from an skb into memory and shrink the skb. - */ -static inline size_t -tcp_copy_data(skb_reader_t *desc, void *p, size_t len) -{ - if (len > desc->count) - len = desc->count; - if (skb_copy_bits(desc->skb, desc->offset, p, len)) { - dprintk("RPC: failed to copy %zu bytes from skb. %zu bytes remain\n", - len, desc->count); - return 0; - } - desc->offset += len; - desc->count -= len; - dprintk("RPC: copied %zu bytes from skb. %zu bytes remain\n", - len, desc->count); - return len; -} - -/* - * TCP read fragment marker - */ -static inline void -tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc) -{ - size_t len, used; - char *p; - - p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset; - len = sizeof(xprt->tcp_recm) - xprt->tcp_offset; - used = tcp_copy_data(desc, p, len); - xprt->tcp_offset += used; - if (used != len) - return; - xprt->tcp_reclen = ntohl(xprt->tcp_recm); - if (xprt->tcp_reclen & 0x80000000) - xprt->tcp_flags |= XPRT_LAST_FRAG; - else - xprt->tcp_flags &= ~XPRT_LAST_FRAG; - xprt->tcp_reclen &= 0x7fffffff; - xprt->tcp_flags &= ~XPRT_COPY_RECM; - xprt->tcp_offset = 0; - /* Sanity check of the record length */ - if (xprt->tcp_reclen < 4) { - printk(KERN_ERR "RPC: Invalid TCP record fragment length\n"); - xprt_disconnect(xprt); - } - dprintk("RPC: reading TCP record fragment of length %d\n", - xprt->tcp_reclen); -} - -static void -tcp_check_recm(struct rpc_xprt *xprt) -{ - dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n", - xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags); - if (xprt->tcp_offset == xprt->tcp_reclen) { - xprt->tcp_flags |= XPRT_COPY_RECM; - xprt->tcp_offset = 0; - if (xprt->tcp_flags & XPRT_LAST_FRAG) { - xprt->tcp_flags &= ~XPRT_COPY_DATA; - xprt->tcp_flags |= XPRT_COPY_XID; - xprt->tcp_copied = 0; - } - } -} - -/* - * TCP read xid - */ -static inline void -tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc) -{ - size_t len, used; - char *p; - - len = sizeof(xprt->tcp_xid) - xprt->tcp_offset; - dprintk("RPC: reading XID (%Zu bytes)\n", len); - p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset; - used = tcp_copy_data(desc, p, len); - xprt->tcp_offset += used; - if (used != len) - return; - xprt->tcp_flags &= ~XPRT_COPY_XID; - xprt->tcp_flags |= XPRT_COPY_DATA; - xprt->tcp_copied = 4; - dprintk("RPC: reading reply for XID %08x\n", - ntohl(xprt->tcp_xid)); - tcp_check_recm(xprt); -} - -/* - * TCP read and complete request - */ -static inline void -tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc) -{ - struct rpc_rqst *req; - struct xdr_buf *rcvbuf; - size_t len; - ssize_t r; - - /* Find and lock the request corresponding to this xid */ - 
spin_lock(&xprt->sock_lock); - req = xprt_lookup_rqst(xprt, xprt->tcp_xid); - if (!req) { - xprt->tcp_flags &= ~XPRT_COPY_DATA; - dprintk("RPC: XID %08x request not found!\n", - ntohl(xprt->tcp_xid)); - spin_unlock(&xprt->sock_lock); - return; - } - - rcvbuf = &req->rq_private_buf; - len = desc->count; - if (len > xprt->tcp_reclen - xprt->tcp_offset) { - skb_reader_t my_desc; - - len = xprt->tcp_reclen - xprt->tcp_offset; - memcpy(&my_desc, desc, sizeof(my_desc)); - my_desc.count = len; - r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, - &my_desc, tcp_copy_data); - desc->count -= r; - desc->offset += r; - } else - r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, - desc, tcp_copy_data); - - if (r > 0) { - xprt->tcp_copied += r; - xprt->tcp_offset += r; - } - if (r != len) { - /* Error when copying to the receive buffer, - * usually because we weren't able to allocate - * additional buffer pages. All we can do now - * is turn off XPRT_COPY_DATA, so the request - * will not receive any additional updates, - * and time out. - * Any remaining data from this record will - * be discarded. - */ - xprt->tcp_flags &= ~XPRT_COPY_DATA; - dprintk("RPC: XID %08x truncated request\n", - ntohl(xprt->tcp_xid)); - dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", - xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); - goto out; - } - - dprintk("RPC: XID %08x read %Zd bytes\n", - ntohl(xprt->tcp_xid), r); - dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", - xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); - - if (xprt->tcp_copied == req->rq_private_buf.buflen) - xprt->tcp_flags &= ~XPRT_COPY_DATA; - else if (xprt->tcp_offset == xprt->tcp_reclen) { - if (xprt->tcp_flags & XPRT_LAST_FRAG) - xprt->tcp_flags &= ~XPRT_COPY_DATA; - } - -out: - if (!(xprt->tcp_flags & XPRT_COPY_DATA)) { - dprintk("RPC: %4d received reply complete\n", - req->rq_task->tk_pid); - xprt_complete_rqst(xprt, req, xprt->tcp_copied); - } - spin_unlock(&xprt->sock_lock); - tcp_check_recm(xprt); -} - -/* - * TCP discard extra bytes from a short read - */ -static inline void -tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc) -{ - size_t len; - - len = xprt->tcp_reclen - xprt->tcp_offset; - if (len > desc->count) - len = desc->count; - desc->count -= len; - desc->offset += len; - xprt->tcp_offset += len; - dprintk("RPC: discarded %Zu bytes\n", len); - tcp_check_recm(xprt); -} - -/* - * TCP record receive routine - * We first have to grab the record marker, then the XID, then the data. - */ -static int -tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, - unsigned int offset, size_t len) -{ - struct rpc_xprt *xprt = rd_desc->arg.data; - skb_reader_t desc = { - .skb = skb, - .offset = offset, - .count = len, - .csum = 0 - }; - - dprintk("RPC: tcp_data_recv\n"); - do { - /* Read in a new fragment marker if necessary */ - /* Can we ever really expect to get completely empty fragments? 
*/ - if (xprt->tcp_flags & XPRT_COPY_RECM) { - tcp_read_fraghdr(xprt, &desc); - continue; - } - /* Read in the xid if necessary */ - if (xprt->tcp_flags & XPRT_COPY_XID) { - tcp_read_xid(xprt, &desc); - continue; - } - /* Read in the request data */ - if (xprt->tcp_flags & XPRT_COPY_DATA) { - tcp_read_request(xprt, &desc); - continue; - } - /* Skip over any trailing bytes on short reads */ - tcp_read_discard(xprt, &desc); - } while (desc.count); - dprintk("RPC: tcp_data_recv done\n"); - return len - desc.count; -} - -static void tcp_data_ready(struct sock *sk, int bytes) -{ - struct rpc_xprt *xprt; - read_descriptor_t rd_desc; - - read_lock(&sk->sk_callback_lock); - dprintk("RPC: tcp_data_ready...\n"); - if (!(xprt = xprt_from_sock(sk))) { - printk("RPC: tcp_data_ready socket info not found!\n"); - goto out; - } - if (xprt->shutdown) - goto out; - - /* We use rd_desc to pass struct xprt to tcp_data_recv */ - rd_desc.arg.data = xprt; - rd_desc.count = 65536; - tcp_read_sock(sk, &rd_desc, tcp_data_recv); -out: - read_unlock(&sk->sk_callback_lock); -} - -static void -tcp_state_change(struct sock *sk) -{ - struct rpc_xprt *xprt; - - read_lock(&sk->sk_callback_lock); - if (!(xprt = xprt_from_sock(sk))) - goto out; - dprintk("RPC: tcp_state_change client %p...\n", xprt); - dprintk("RPC: state %x conn %d dead %d zapped %d\n", - sk->sk_state, xprt_connected(xprt), - sock_flag(sk, SOCK_DEAD), - sock_flag(sk, SOCK_ZAPPED)); - - switch (sk->sk_state) { - case TCP_ESTABLISHED: - spin_lock_bh(&xprt->sock_lock); - if (!xprt_test_and_set_connected(xprt)) { - /* Reset TCP record info */ - xprt->tcp_offset = 0; - xprt->tcp_reclen = 0; - xprt->tcp_copied = 0; - xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID; - rpc_wake_up(&xprt->pending); - } - spin_unlock_bh(&xprt->sock_lock); - break; - case TCP_SYN_SENT: - case TCP_SYN_RECV: - break; - default: - xprt_disconnect(xprt); - break; - } - out: - read_unlock(&sk->sk_callback_lock); -} - -/* - * Called when more output buffer space is available for this socket. - * We try not to wake our writers until they can make "significant" - * progress, otherwise we'll waste resources thrashing sock_sendmsg - * with a bunch of small requests. - */ -static void -xprt_write_space(struct sock *sk) -{ - struct rpc_xprt *xprt; - struct socket *sock; - - read_lock(&sk->sk_callback_lock); - if (!(xprt = xprt_from_sock(sk)) || !(sock = sk->sk_socket)) - goto out; - if (xprt->shutdown) - goto out; - - /* Wait until we have enough socket memory */ - if (xprt->stream) { - /* from net/core/stream.c:sk_stream_write_space */ - if (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)) - goto out; - } else { - /* from net/core/sock.c:sock_def_write_space */ - if (!sock_writeable(sk)) - goto out; - } - - if (!test_and_clear_bit(SOCK_NOSPACE, &sock->flags)) - goto out; - - spin_lock_bh(&xprt->sock_lock); - if (xprt->snd_task) - rpc_wake_up_task(xprt->snd_task); - spin_unlock_bh(&xprt->sock_lock); -out: - read_unlock(&sk->sk_callback_lock); -} - -/* - * RPC receive timeout handler. - */ -static void -xprt_timer(struct rpc_task *task) -{ - struct rpc_rqst *req = task->tk_rqstp; + struct rpc_rqst *req = task->tk_rqstp; struct rpc_xprt *xprt = req->rq_xprt; - spin_lock(&xprt->sock_lock); - if (req->rq_received) - goto out; + dprintk("RPC: %4d xprt_timer\n", task->tk_pid); - xprt_adjust_cwnd(req->rq_xprt, -ETIMEDOUT); - __xprt_put_cong(xprt, req); - - dprintk("RPC: %4d xprt_timer (%s request)\n", - task->tk_pid, req ? 
"pending" : "backlogged"); - - task->tk_status = -ETIMEDOUT; -out: + spin_lock(&xprt->transport_lock); + if (!req->rq_received) { + if (xprt->ops->timer) + xprt->ops->timer(task); + task->tk_status = -ETIMEDOUT; + } task->tk_timeout = 0; rpc_wake_up_task(task); - spin_unlock(&xprt->sock_lock); + spin_unlock(&xprt->transport_lock); } -/* - * Place the actual RPC call. - * We have to copy the iovec because sendmsg fiddles with its contents. +/** + * xprt_prepare_transmit - reserve the transport before sending a request + * @task: RPC task about to send a request + * */ -int -xprt_prepare_transmit(struct rpc_task *task) +int xprt_prepare_transmit(struct rpc_task *task) { struct rpc_rqst *req = task->tk_rqstp; struct rpc_xprt *xprt = req->rq_xprt; @@ -1191,12 +690,12 @@ xprt_prepare_transmit(struct rpc_task *task) if (xprt->shutdown) return -EIO; - spin_lock_bh(&xprt->sock_lock); + spin_lock_bh(&xprt->transport_lock); if (req->rq_received && !req->rq_bytes_sent) { err = req->rq_received; goto out_unlock; } - if (!__xprt_lock_write(xprt, task)) { + if (!xprt->ops->reserve_xprt(task)) { err = -EAGAIN; goto out_unlock; } @@ -1206,39 +705,42 @@ xprt_prepare_transmit(struct rpc_task *task) goto out_unlock; } out_unlock: - spin_unlock_bh(&xprt->sock_lock); + spin_unlock_bh(&xprt->transport_lock); return err; } void -xprt_transmit(struct rpc_task *task) +xprt_abort_transmit(struct rpc_task *task) +{ + struct rpc_xprt *xprt = task->tk_xprt; + + xprt_release_write(xprt, task); +} + +/** + * xprt_transmit - send an RPC request on a transport + * @task: controlling RPC task + * + * We have to copy the iovec because sendmsg fiddles with its contents. + */ +void xprt_transmit(struct rpc_task *task) { - struct rpc_clnt *clnt = task->tk_client; struct rpc_rqst *req = task->tk_rqstp; struct rpc_xprt *xprt = req->rq_xprt; - int status, retry = 0; - + int status; dprintk("RPC: %4d xprt_transmit(%u)\n", task->tk_pid, req->rq_slen); - /* set up everything as needed. */ - /* Write the record marker */ - if (xprt->stream) { - u32 *marker = req->rq_svec[0].iov_base; - - *marker = htonl(0x80000000|(req->rq_slen-sizeof(*marker))); - } - smp_rmb(); if (!req->rq_received) { if (list_empty(&req->rq_list)) { - spin_lock_bh(&xprt->sock_lock); + spin_lock_bh(&xprt->transport_lock); /* Update the softirq receive buffer */ memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(req->rq_private_buf)); /* Add request to the receive list */ list_add_tail(&req->rq_list, &xprt->recv); - spin_unlock_bh(&xprt->sock_lock); + spin_unlock_bh(&xprt->transport_lock); xprt_reset_majortimeo(req); /* Turn off autodisconnect */ del_singleshot_timer_sync(&xprt->timer); @@ -1246,40 +748,19 @@ xprt_transmit(struct rpc_task *task) } else if (!req->rq_bytes_sent) return; - /* Continue transmitting the packet/record. We must be careful - * to cope with writespace callbacks arriving _after_ we have - * called xprt_sendmsg(). - */ - while (1) { - req->rq_xtime = jiffies; - status = xprt_sendmsg(xprt, req); - - if (status < 0) - break; - - if (xprt->stream) { - req->rq_bytes_sent += status; - - /* If we've sent the entire packet, immediately - * reset the count of bytes sent. 
*/ - if (req->rq_bytes_sent >= req->rq_slen) { - req->rq_bytes_sent = 0; - goto out_receive; - } - } else { - if (status >= req->rq_slen) - goto out_receive; - status = -EAGAIN; - break; - } - - dprintk("RPC: %4d xmit incomplete (%d left of %d)\n", - task->tk_pid, req->rq_slen - req->rq_bytes_sent, - req->rq_slen); - - status = -EAGAIN; - if (retry++ > 50) - break; + status = xprt->ops->send_request(task); + if (status == 0) { + dprintk("RPC: %4d xmit complete\n", task->tk_pid); + spin_lock_bh(&xprt->transport_lock); + xprt->ops->set_retrans_timeout(task); + /* Don't race with disconnect */ + if (!xprt_connected(xprt)) + task->tk_status = -ENOTCONN; + else if (!req->rq_received) + rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer); + xprt->ops->release_xprt(xprt, task); + spin_unlock_bh(&xprt->transport_lock); + return; } /* Note: at this point, task->tk_sleeping has not yet been set, @@ -1289,60 +770,19 @@ xprt_transmit(struct rpc_task *task) task->tk_status = status; switch (status) { - case -EAGAIN: - if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) { - /* Protect against races with xprt_write_space */ - spin_lock_bh(&xprt->sock_lock); - /* Don't race with disconnect */ - if (!xprt_connected(xprt)) - task->tk_status = -ENOTCONN; - else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags)) { - task->tk_timeout = req->rq_timeout; - rpc_sleep_on(&xprt->pending, task, NULL, NULL); - } - spin_unlock_bh(&xprt->sock_lock); - return; - } - /* Keep holding the socket if it is blocked */ - rpc_delay(task, HZ>>4); - return; case -ECONNREFUSED: - task->tk_timeout = RPC_REESTABLISH_TIMEOUT; rpc_sleep_on(&xprt->sending, task, NULL, NULL); + case -EAGAIN: case -ENOTCONN: return; default: - if (xprt->stream) - xprt_disconnect(xprt); + break; } xprt_release_write(xprt, task); return; - out_receive: - dprintk("RPC: %4d xmit complete\n", task->tk_pid); - /* Set the task's receive timeout value */ - spin_lock_bh(&xprt->sock_lock); - if (!xprt->nocong) { - int timer = task->tk_msg.rpc_proc->p_timer; - task->tk_timeout = rpc_calc_rto(clnt->cl_rtt, timer); - task->tk_timeout <<= rpc_ntimeo(clnt->cl_rtt, timer) + req->rq_retries; - if (task->tk_timeout > xprt->timeout.to_maxval || task->tk_timeout == 0) - task->tk_timeout = xprt->timeout.to_maxval; - } else - task->tk_timeout = req->rq_timeout; - /* Don't race with disconnect */ - if (!xprt_connected(xprt)) - task->tk_status = -ENOTCONN; - else if (!req->rq_received) - rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer); - __xprt_release_write(xprt, task); - spin_unlock_bh(&xprt->sock_lock); } -/* - * Reserve an RPC call slot. - */ -static inline void -do_xprt_reserve(struct rpc_task *task) +static inline void do_xprt_reserve(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_xprt; @@ -1362,22 +802,25 @@ do_xprt_reserve(struct rpc_task *task) rpc_sleep_on(&xprt->backlog, task, NULL, NULL); } -void -xprt_reserve(struct rpc_task *task) +/** + * xprt_reserve - allocate an RPC request slot + * @task: RPC task requesting a slot allocation + * + * If no more slots are available, place the task on the transport's + * backlog queue. 
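+ *
+ * Illustrative sketch, not code from this patch: over the life of a single
+ * RPC the slot moves through roughly
+ *
+ *	xprt_reserve(task);		take a slot, or sleep on the backlog
+ *	xprt_prepare_transmit(task);	serialize access to the transport
+ *	xprt_transmit(task);		hand the request to ->send_request
+ *	xprt_release(task);		return the slot, wake the backlog
+ *
+ * with xprt->reserve_lock protecting the free list and backlog wakeups.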
+ */ +void xprt_reserve(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_xprt; task->tk_status = -EIO; if (!xprt->shutdown) { - spin_lock(&xprt->xprt_lock); + spin_lock(&xprt->reserve_lock); do_xprt_reserve(task); - spin_unlock(&xprt->xprt_lock); + spin_unlock(&xprt->reserve_lock); } } -/* - * Allocate a 'unique' XID - */ static inline u32 xprt_alloc_xid(struct rpc_xprt *xprt) { return xprt->xid++; @@ -1388,11 +831,7 @@ static inline void xprt_init_xid(struct rpc_xprt *xprt) get_random_bytes(&xprt->xid, sizeof(xprt->xid)); } -/* - * Initialize RPC request - */ -static void -xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) +static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) { struct rpc_rqst *req = task->tk_rqstp; @@ -1400,128 +839,104 @@ xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt) req->rq_task = task; req->rq_xprt = xprt; req->rq_xid = xprt_alloc_xid(xprt); + req->rq_release_snd_buf = NULL; dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid, req, ntohl(req->rq_xid)); } -/* - * Release an RPC call slot +/** + * xprt_release - release an RPC request slot + * @task: task which is finished with the slot + * */ -void -xprt_release(struct rpc_task *task) +void xprt_release(struct rpc_task *task) { struct rpc_xprt *xprt = task->tk_xprt; struct rpc_rqst *req; if (!(req = task->tk_rqstp)) return; - spin_lock_bh(&xprt->sock_lock); - __xprt_release_write(xprt, task); - __xprt_put_cong(xprt, req); + spin_lock_bh(&xprt->transport_lock); + xprt->ops->release_xprt(xprt, task); + if (xprt->ops->release_request) + xprt->ops->release_request(task); if (!list_empty(&req->rq_list)) list_del(&req->rq_list); xprt->last_used = jiffies; if (list_empty(&xprt->recv) && !xprt->shutdown) - mod_timer(&xprt->timer, xprt->last_used + XPRT_IDLE_TIMEOUT); - spin_unlock_bh(&xprt->sock_lock); + mod_timer(&xprt->timer, + xprt->last_used + xprt->idle_timeout); + spin_unlock_bh(&xprt->transport_lock); task->tk_rqstp = NULL; + if (req->rq_release_snd_buf) + req->rq_release_snd_buf(req); memset(req, 0, sizeof(*req)); /* mark unused */ dprintk("RPC: %4d release request %p\n", task->tk_pid, req); - spin_lock(&xprt->xprt_lock); + spin_lock(&xprt->reserve_lock); list_add(&req->rq_list, &xprt->free); - xprt_clear_backlog(xprt); - spin_unlock(&xprt->xprt_lock); + rpc_wake_up_next(&xprt->backlog); + spin_unlock(&xprt->reserve_lock); } -/* - * Set default timeout parameters +/** + * xprt_set_timeout - set constant RPC timeout + * @to: RPC timeout parameters to set up + * @retr: number of retries + * @incr: amount of increase after each retry + * */ -static void -xprt_default_timeout(struct rpc_timeout *to, int proto) -{ - if (proto == IPPROTO_UDP) - xprt_set_timeout(to, 5, 5 * HZ); - else - xprt_set_timeout(to, 5, 60 * HZ); -} - -/* - * Set constant timeout - */ -void -xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr) +void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr) { to->to_initval = to->to_increment = incr; - to->to_maxval = incr * retr; + to->to_maxval = to->to_initval + (incr * retr); to->to_retries = retr; to->to_exponential = 0; } -unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE; -unsigned int xprt_tcp_slot_table_entries = RPC_DEF_SLOT_TABLE; - -/* - * Initialize an RPC client - */ -static struct rpc_xprt * -xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to) +static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to) { + 
int result; struct rpc_xprt *xprt; - unsigned int entries; - size_t slot_table_size; struct rpc_rqst *req; - dprintk("RPC: setting up %s transport...\n", - proto == IPPROTO_UDP? "UDP" : "TCP"); - - entries = (proto == IPPROTO_TCP)? - xprt_tcp_slot_table_entries : xprt_udp_slot_table_entries; - if ((xprt = kmalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL) return ERR_PTR(-ENOMEM); memset(xprt, 0, sizeof(*xprt)); /* Nnnngh! */ - xprt->max_reqs = entries; - slot_table_size = entries * sizeof(xprt->slot[0]); - xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); - if (xprt->slot == NULL) { - kfree(xprt); - return ERR_PTR(-ENOMEM); - } - memset(xprt->slot, 0, slot_table_size); xprt->addr = *ap; - xprt->prot = proto; - xprt->stream = (proto == IPPROTO_TCP)? 1 : 0; - if (xprt->stream) { - xprt->cwnd = RPC_MAXCWND(xprt); - xprt->nocong = 1; - xprt->max_payload = (1U << 31) - 1; - } else { - xprt->cwnd = RPC_INITCWND; - xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); + + switch (proto) { + case IPPROTO_UDP: + result = xs_setup_udp(xprt, to); + break; + case IPPROTO_TCP: + result = xs_setup_tcp(xprt, to); + break; + default: + printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n", + proto); + result = -EIO; + break; } - spin_lock_init(&xprt->sock_lock); - spin_lock_init(&xprt->xprt_lock); - init_waitqueue_head(&xprt->cong_wait); + if (result) { + kfree(xprt); + return ERR_PTR(result); + } + + spin_lock_init(&xprt->transport_lock); + spin_lock_init(&xprt->reserve_lock); INIT_LIST_HEAD(&xprt->free); INIT_LIST_HEAD(&xprt->recv); - INIT_WORK(&xprt->sock_connect, xprt_socket_connect, xprt); - INIT_WORK(&xprt->task_cleanup, xprt_socket_autoclose, xprt); + INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt); init_timer(&xprt->timer); xprt->timer.function = xprt_init_autodisconnect; xprt->timer.data = (unsigned long) xprt; xprt->last_used = jiffies; - xprt->port = XPRT_MAX_RESVPORT; - - /* Set timeout parameters */ - if (to) { - xprt->timeout = *to; - } else - xprt_default_timeout(&xprt->timeout, xprt->prot); + xprt->cwnd = RPC_INITCWND; rpc_init_wait_queue(&xprt->pending, "xprt_pending"); rpc_init_wait_queue(&xprt->sending, "xprt_sending"); @@ -1529,139 +944,25 @@ xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to) rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog"); /* initialize free list */ - for (req = &xprt->slot[entries-1]; req >= &xprt->slot[0]; req--) + for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--) list_add(&req->rq_list, &xprt->free); xprt_init_xid(xprt); - /* Check whether we want to use a reserved port */ - xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; - dprintk("RPC: created transport %p with %u slots\n", xprt, xprt->max_reqs); return xprt; } -/* - * Bind to a reserved port +/** + * xprt_create_proto - create an RPC client transport + * @proto: requested transport protocol + * @sap: remote peer's address + * @to: timeout parameters for new transport + * */ -static inline int xprt_bindresvport(struct rpc_xprt *xprt, struct socket *sock) -{ - struct sockaddr_in myaddr = { - .sin_family = AF_INET, - }; - int err, port; - - /* Were we already bound to a given port? 
Try to reuse it */ - port = xprt->port; - do { - myaddr.sin_port = htons(port); - err = sock->ops->bind(sock, (struct sockaddr *) &myaddr, - sizeof(myaddr)); - if (err == 0) { - xprt->port = port; - return 0; - } - if (--port == 0) - port = XPRT_MAX_RESVPORT; - } while (err == -EADDRINUSE && port != xprt->port); - - printk("RPC: Can't bind to reserved port (%d).\n", -err); - return err; -} - -static void -xprt_bind_socket(struct rpc_xprt *xprt, struct socket *sock) -{ - struct sock *sk = sock->sk; - - if (xprt->inet) - return; - - write_lock_bh(&sk->sk_callback_lock); - sk->sk_user_data = xprt; - xprt->old_data_ready = sk->sk_data_ready; - xprt->old_state_change = sk->sk_state_change; - xprt->old_write_space = sk->sk_write_space; - if (xprt->prot == IPPROTO_UDP) { - sk->sk_data_ready = udp_data_ready; - sk->sk_no_check = UDP_CSUM_NORCV; - xprt_set_connected(xprt); - } else { - tcp_sk(sk)->nonagle = 1; /* disable Nagle's algorithm */ - sk->sk_data_ready = tcp_data_ready; - sk->sk_state_change = tcp_state_change; - xprt_clear_connected(xprt); - } - sk->sk_write_space = xprt_write_space; - - /* Reset to new socket */ - xprt->sock = sock; - xprt->inet = sk; - write_unlock_bh(&sk->sk_callback_lock); - - return; -} - -/* - * Set socket buffer length - */ -void -xprt_sock_setbufsize(struct rpc_xprt *xprt) -{ - struct sock *sk = xprt->inet; - - if (xprt->stream) - return; - if (xprt->rcvsize) { - sk->sk_userlocks |= SOCK_RCVBUF_LOCK; - sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2; - } - if (xprt->sndsize) { - sk->sk_userlocks |= SOCK_SNDBUF_LOCK; - sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2; - sk->sk_write_space(sk); - } -} - -/* - * Datastream sockets are created here, but xprt_connect will create - * and connect stream sockets. - */ -static struct socket * xprt_create_socket(struct rpc_xprt *xprt, int proto, int resvport) -{ - struct socket *sock; - int type, err; - - dprintk("RPC: xprt_create_socket(%s %d)\n", - (proto == IPPROTO_UDP)? "udp" : "tcp", proto); - - type = (proto == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM; - - if ((err = sock_create_kern(PF_INET, type, proto, &sock)) < 0) { - printk("RPC: can't create socket (%d).\n", -err); - return NULL; - } - - /* If the caller has the capability, bind to a reserved port */ - if (resvport && xprt_bindresvport(xprt, sock) < 0) { - printk("RPC: can't bind to reserved port.\n"); - goto failed; - } - - return sock; - -failed: - sock_release(sock); - return NULL; -} - -/* - * Create an RPC client transport given the protocol and peer address. - */ -struct rpc_xprt * -xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to) +struct rpc_xprt *xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to) { struct rpc_xprt *xprt; @@ -1673,46 +974,26 @@ xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to) return xprt; } -/* - * Prepare for transport shutdown. - */ -static void -xprt_shutdown(struct rpc_xprt *xprt) +static void xprt_shutdown(struct rpc_xprt *xprt) { xprt->shutdown = 1; rpc_wake_up(&xprt->sending); rpc_wake_up(&xprt->resend); - rpc_wake_up(&xprt->pending); + xprt_wake_pending_tasks(xprt, -EIO); rpc_wake_up(&xprt->backlog); - wake_up(&xprt->cong_wait); del_timer_sync(&xprt->timer); - - /* synchronously wait for connect worker to finish */ - cancel_delayed_work(&xprt->sock_connect); - flush_scheduled_work(); } -/* - * Clear the xprt backlog queue +/** + * xprt_destroy - destroy an RPC transport, killing off all requests. 
+ * @xprt: transport to destroy + * */ -static int -xprt_clear_backlog(struct rpc_xprt *xprt) { - rpc_wake_up_next(&xprt->backlog); - wake_up(&xprt->cong_wait); - return 1; -} - -/* - * Destroy an RPC transport, killing off all requests. - */ -int -xprt_destroy(struct rpc_xprt *xprt) +int xprt_destroy(struct rpc_xprt *xprt) { dprintk("RPC: destroying transport %p\n", xprt); xprt_shutdown(xprt); - xprt_disconnect(xprt); - xprt_close(xprt); - kfree(xprt->slot); + xprt->ops->destroy(xprt); kfree(xprt); return 0; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c new file mode 100644 index 000000000000..2e1529217e65 --- /dev/null +++ b/net/sunrpc/xprtsock.c @@ -0,0 +1,1252 @@ +/* + * linux/net/sunrpc/xprtsock.c + * + * Client-side transport implementation for sockets. + * + * TCP callback races fixes (C) 1998 Red Hat Software + * TCP send fixes (C) 1998 Red Hat Software + * TCP NFS related read + write fixes + * (C) 1999 Dave Airlie, University of Limerick, Ireland + * + * Rewrite of larges part of the code in order to stabilize TCP stuff. + * Fix behaviour when socket buffer is full. + * (C) 1999 Trond Myklebust + * + * IP socket transport implementation, (C) 2005 Chuck Lever + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* + * How many times to try sending a request on a socket before waiting + * for the socket buffer to clear. + */ +#define XS_SENDMSG_RETRY (10U) + +/* + * Time out for an RPC UDP socket connect. UDP socket connects are + * synchronous, but we set a timeout anyway in case of resource + * exhaustion on the local host. + */ +#define XS_UDP_CONN_TO (5U * HZ) + +/* + * Wait duration for an RPC TCP connection to be established. Solaris + * NFS over TCP uses 60 seconds, for example, which is in line with how + * long a server takes to reboot. + */ +#define XS_TCP_CONN_TO (60U * HZ) + +/* + * Wait duration for a reply from the RPC portmapper. + */ +#define XS_BIND_TO (60U * HZ) + +/* + * Delay if a UDP socket connect error occurs. This is most likely some + * kind of resource problem on the local host. + */ +#define XS_UDP_REEST_TO (2U * HZ) + +/* + * The reestablish timeout allows clients to delay for a bit before attempting + * to reconnect to a server that just dropped our connection. + * + * We implement an exponential backoff when trying to reestablish a TCP + * transport connection with the server. Some servers like to drop a TCP + * connection when they are overworked, so we start with a short timeout and + * increase over time if the server is down or not responding. + */ +#define XS_TCP_INIT_REEST_TO (3U * HZ) +#define XS_TCP_MAX_REEST_TO (5U * 60 * HZ) + +/* + * TCP idle timeout; client drops the transport socket if it is idle + * for this long. Note that we also timeout UDP sockets to prevent + * holding port numbers when there is no RPC traffic. 
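+ *
+ * Illustrative sketch, not code from this patch: the generic code arms this
+ * in xprt_release() with
+ *
+ *	mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
+ *
+ * and when the timer fires with nothing on the receive list,
+ * xprt_init_autodisconnect() schedules task_cleanup, which reaches this
+ * transport's ->close() method via xprt_autoclose(). (This assumes the
+ * setup routines use XS_IDLE_DISC_TO as xprt->idle_timeout.)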
+ */ +#define XS_IDLE_DISC_TO (5U * 60 * HZ) + +#ifdef RPC_DEBUG +# undef RPC_DEBUG_DATA +# define RPCDBG_FACILITY RPCDBG_TRANS +#endif + +#ifdef RPC_DEBUG_DATA +static void xs_pktdump(char *msg, u32 *packet, unsigned int count) +{ + u8 *buf = (u8 *) packet; + int j; + + dprintk("RPC: %s\n", msg); + for (j = 0; j < count && j < 128; j += 4) { + if (!(j & 31)) { + if (j) + dprintk("\n"); + dprintk("0x%04x ", j); + } + dprintk("%02x%02x%02x%02x ", + buf[j], buf[j+1], buf[j+2], buf[j+3]); + } + dprintk("\n"); +} +#else +static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count) +{ + /* NOP */ +} +#endif + +#define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL) + +static inline int xs_send_head(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, unsigned int len) +{ + struct kvec iov = { + .iov_base = xdr->head[0].iov_base + base, + .iov_len = len - base, + }; + struct msghdr msg = { + .msg_name = addr, + .msg_namelen = addrlen, + .msg_flags = XS_SENDMSG_FLAGS, + }; + + if (xdr->len > len) + msg.msg_flags |= MSG_MORE; + + if (likely(iov.iov_len)) + return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); + return kernel_sendmsg(sock, &msg, NULL, 0, 0); +} + +static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int base, unsigned int len) +{ + struct kvec iov = { + .iov_base = xdr->tail[0].iov_base + base, + .iov_len = len - base, + }; + struct msghdr msg = { + .msg_flags = XS_SENDMSG_FLAGS, + }; + + return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); +} + +/** + * xs_sendpages - write pages directly to a socket + * @sock: socket to send on + * @addr: UDP only -- address of destination + * @addrlen: UDP only -- length of destination address + * @xdr: buffer containing this request + * @base: starting position in the buffer + * + */ +static inline int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base) +{ + struct page **ppage = xdr->pages; + unsigned int len, pglen = xdr->page_len; + int err, ret = 0; + ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int); + + if (unlikely(!sock)) + return -ENOTCONN; + + clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); + + len = xdr->head[0].iov_len; + if (base < len || (addr != NULL && base == 0)) { + err = xs_send_head(sock, addr, addrlen, xdr, base, len); + if (ret == 0) + ret = err; + else if (err > 0) + ret += err; + if (err != (len - base)) + goto out; + base = 0; + } else + base -= len; + + if (unlikely(pglen == 0)) + goto copy_tail; + if (unlikely(base >= pglen)) { + base -= pglen; + goto copy_tail; + } + if (base || xdr->page_base) { + pglen -= base; + base += xdr->page_base; + ppage += base >> PAGE_CACHE_SHIFT; + base &= ~PAGE_CACHE_MASK; + } + + sendpage = sock->ops->sendpage ? : sock_no_sendpage; + do { + int flags = XS_SENDMSG_FLAGS; + + len = PAGE_CACHE_SIZE; + if (base) + len -= base; + if (pglen < len) + len = pglen; + + if (pglen != len || xdr->tail[0].iov_len != 0) + flags |= MSG_MORE; + + /* Hmm... 
We might be dealing with highmem pages */ + if (PageHighMem(*ppage)) + sendpage = sock_no_sendpage; + err = sendpage(sock, *ppage, base, len, flags); + if (ret == 0) + ret = err; + else if (err > 0) + ret += err; + if (err != len) + goto out; + base = 0; + ppage++; + } while ((pglen -= len) != 0); +copy_tail: + len = xdr->tail[0].iov_len; + if (base < len) { + err = xs_send_tail(sock, xdr, base, len); + if (ret == 0) + ret = err; + else if (err > 0) + ret += err; + } +out: + return ret; +} + +/** + * xs_nospace - place task on wait queue if transmit was incomplete + * @task: task to put to sleep + * + */ +static void xs_nospace(struct rpc_task *task) +{ + struct rpc_rqst *req = task->tk_rqstp; + struct rpc_xprt *xprt = req->rq_xprt; + + dprintk("RPC: %4d xmit incomplete (%u left of %u)\n", + task->tk_pid, req->rq_slen - req->rq_bytes_sent, + req->rq_slen); + + if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) { + /* Protect against races with write_space */ + spin_lock_bh(&xprt->transport_lock); + + /* Don't race with disconnect */ + if (!xprt_connected(xprt)) + task->tk_status = -ENOTCONN; + else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags)) + xprt_wait_for_buffer_space(task); + + spin_unlock_bh(&xprt->transport_lock); + } else + /* Keep holding the socket if it is blocked */ + rpc_delay(task, HZ>>4); +} + +/** + * xs_udp_send_request - write an RPC request to a UDP socket + * @task: address of RPC task that manages the state of an RPC request + * + * Return values: + * 0: The request has been sent + * EAGAIN: The socket was blocked, please call again later to + * complete the request + * ENOTCONN: Caller needs to invoke connect logic then call again + * other: Some other error occured, the request was not sent + */ +static int xs_udp_send_request(struct rpc_task *task) +{ + struct rpc_rqst *req = task->tk_rqstp; + struct rpc_xprt *xprt = req->rq_xprt; + struct xdr_buf *xdr = &req->rq_snd_buf; + int status; + + xs_pktdump("packet data:", + req->rq_svec->iov_base, + req->rq_svec->iov_len); + + req->rq_xtime = jiffies; + status = xs_sendpages(xprt->sock, (struct sockaddr *) &xprt->addr, + sizeof(xprt->addr), xdr, req->rq_bytes_sent); + + dprintk("RPC: xs_udp_send_request(%u) = %d\n", + xdr->len - req->rq_bytes_sent, status); + + if (likely(status >= (int) req->rq_slen)) + return 0; + + /* Still some bytes left; set up for a retry later. */ + if (status > 0) + status = -EAGAIN; + + switch (status) { + case -ENETUNREACH: + case -EPIPE: + case -ECONNREFUSED: + /* When the server has died, an ICMP port unreachable message + * prompts ECONNREFUSED. */ + break; + case -EAGAIN: + xs_nospace(task); + break; + default: + dprintk("RPC: sendmsg returned unrecognized error %d\n", + -status); + break; + } + + return status; +} + +static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf) +{ + u32 reclen = buf->len - sizeof(rpc_fraghdr); + rpc_fraghdr *base = buf->head[0].iov_base; + *base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen); +} + +/** + * xs_tcp_send_request - write an RPC request to a TCP socket + * @task: address of RPC task that manages the state of an RPC request + * + * Return values: + * 0: The request has been sent + * EAGAIN: The socket was blocked, please call again later to + * complete the request + * ENOTCONN: Caller needs to invoke connect logic then call again + * other: Some other error occured, the request was not sent + * + * XXX: In the case of soft timeouts, should we eventually give up + * if sendmsg is not able to make progress? 
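
[Editorial aside] The return codes spelled out in these kerneldoc comments form the contract between the socket transport and the transport-independent RPC code. A self-contained sketch of how a caller might classify them — illustrative only, not the actual xprt.c logic:

#include <errno.h>

enum send_action { SEND_DONE, SEND_WAIT_FOR_SPACE, SEND_RECONNECT, SEND_FAIL };

/* Map a send_request() status to the action the generic layer would take. */
static enum send_action classify_send_status(int status)
{
	if (status == 0)
		return SEND_DONE;		/* request fully transmitted */
	if (status == -EAGAIN)
		return SEND_WAIT_FOR_SPACE;	/* socket buffer full, retry later */
	if (status == -ENOTCONN)
		return SEND_RECONNECT;		/* run connect logic, then retry */
	return SEND_FAIL;			/* some other error occurred */
}
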
+ */ +static int xs_tcp_send_request(struct rpc_task *task) +{ + struct rpc_rqst *req = task->tk_rqstp; + struct rpc_xprt *xprt = req->rq_xprt; + struct xdr_buf *xdr = &req->rq_snd_buf; + int status, retry = 0; + + xs_encode_tcp_record_marker(&req->rq_snd_buf); + + xs_pktdump("packet data:", + req->rq_svec->iov_base, + req->rq_svec->iov_len); + + /* Continue transmitting the packet/record. We must be careful + * to cope with writespace callbacks arriving _after_ we have + * called sendmsg(). */ + while (1) { + req->rq_xtime = jiffies; + status = xs_sendpages(xprt->sock, NULL, 0, xdr, + req->rq_bytes_sent); + + dprintk("RPC: xs_tcp_send_request(%u) = %d\n", + xdr->len - req->rq_bytes_sent, status); + + if (unlikely(status < 0)) + break; + + /* If we've sent the entire packet, immediately + * reset the count of bytes sent. */ + req->rq_bytes_sent += status; + if (likely(req->rq_bytes_sent >= req->rq_slen)) { + req->rq_bytes_sent = 0; + return 0; + } + + status = -EAGAIN; + if (retry++ > XS_SENDMSG_RETRY) + break; + } + + switch (status) { + case -EAGAIN: + xs_nospace(task); + break; + case -ECONNREFUSED: + case -ECONNRESET: + case -ENOTCONN: + case -EPIPE: + status = -ENOTCONN; + break; + default: + dprintk("RPC: sendmsg returned unrecognized error %d\n", + -status); + xprt_disconnect(xprt); + break; + } + + return status; +} + +/** + * xs_close - close a socket + * @xprt: transport + * + * This is used when all requests are complete; ie, no DRC state remains + * on the server we want to save. + */ +static void xs_close(struct rpc_xprt *xprt) +{ + struct socket *sock = xprt->sock; + struct sock *sk = xprt->inet; + + if (!sk) + return; + + dprintk("RPC: xs_close xprt %p\n", xprt); + + write_lock_bh(&sk->sk_callback_lock); + xprt->inet = NULL; + xprt->sock = NULL; + + sk->sk_user_data = NULL; + sk->sk_data_ready = xprt->old_data_ready; + sk->sk_state_change = xprt->old_state_change; + sk->sk_write_space = xprt->old_write_space; + write_unlock_bh(&sk->sk_callback_lock); + + sk->sk_no_check = 0; + + sock_release(sock); +} + +/** + * xs_destroy - prepare to shutdown a transport + * @xprt: doomed transport + * + */ +static void xs_destroy(struct rpc_xprt *xprt) +{ + dprintk("RPC: xs_destroy xprt %p\n", xprt); + + cancel_delayed_work(&xprt->connect_worker); + flush_scheduled_work(); + + xprt_disconnect(xprt); + xs_close(xprt); + kfree(xprt->slot); +} + +static inline struct rpc_xprt *xprt_from_sock(struct sock *sk) +{ + return (struct rpc_xprt *) sk->sk_user_data; +} + +/** + * xs_udp_data_ready - "data ready" callback for UDP sockets + * @sk: socket with data to read + * @len: how much data to read + * + */ +static void xs_udp_data_ready(struct sock *sk, int len) +{ + struct rpc_task *task; + struct rpc_xprt *xprt; + struct rpc_rqst *rovr; + struct sk_buff *skb; + int err, repsize, copied; + u32 _xid, *xp; + + read_lock(&sk->sk_callback_lock); + dprintk("RPC: xs_udp_data_ready...\n"); + if (!(xprt = xprt_from_sock(sk))) + goto out; + + if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) + goto out; + + if (xprt->shutdown) + goto dropit; + + repsize = skb->len - sizeof(struct udphdr); + if (repsize < 4) { + dprintk("RPC: impossible RPC reply size %d!\n", repsize); + goto dropit; + } + + /* Copy the XID from the skb... 
*/ + xp = skb_header_pointer(skb, sizeof(struct udphdr), + sizeof(_xid), &_xid); + if (xp == NULL) + goto dropit; + + /* Look up and lock the request corresponding to the given XID */ + spin_lock(&xprt->transport_lock); + rovr = xprt_lookup_rqst(xprt, *xp); + if (!rovr) + goto out_unlock; + task = rovr->rq_task; + + if ((copied = rovr->rq_private_buf.buflen) > repsize) + copied = repsize; + + /* Suck it into the iovec, verify checksum if not done by hw. */ + if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) + goto out_unlock; + + /* Something worked... */ + dst_confirm(skb->dst); + + xprt_adjust_cwnd(task, copied); + xprt_update_rtt(task); + xprt_complete_rqst(task, copied); + + out_unlock: + spin_unlock(&xprt->transport_lock); + dropit: + skb_free_datagram(sk, skb); + out: + read_unlock(&sk->sk_callback_lock); +} + +static inline size_t xs_tcp_copy_data(skb_reader_t *desc, void *p, size_t len) +{ + if (len > desc->count) + len = desc->count; + if (skb_copy_bits(desc->skb, desc->offset, p, len)) { + dprintk("RPC: failed to copy %zu bytes from skb. %zu bytes remain\n", + len, desc->count); + return 0; + } + desc->offset += len; + desc->count -= len; + dprintk("RPC: copied %zu bytes from skb. %zu bytes remain\n", + len, desc->count); + return len; +} + +static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc) +{ + size_t len, used; + char *p; + + p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset; + len = sizeof(xprt->tcp_recm) - xprt->tcp_offset; + used = xs_tcp_copy_data(desc, p, len); + xprt->tcp_offset += used; + if (used != len) + return; + + xprt->tcp_reclen = ntohl(xprt->tcp_recm); + if (xprt->tcp_reclen & RPC_LAST_STREAM_FRAGMENT) + xprt->tcp_flags |= XPRT_LAST_FRAG; + else + xprt->tcp_flags &= ~XPRT_LAST_FRAG; + xprt->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK; + + xprt->tcp_flags &= ~XPRT_COPY_RECM; + xprt->tcp_offset = 0; + + /* Sanity check of the record length */ + if (unlikely(xprt->tcp_reclen < 4)) { + dprintk("RPC: invalid TCP record fragment length\n"); + xprt_disconnect(xprt); + return; + } + dprintk("RPC: reading TCP record fragment of length %d\n", + xprt->tcp_reclen); +} + +static void xs_tcp_check_recm(struct rpc_xprt *xprt) +{ + dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n", + xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags); + if (xprt->tcp_offset == xprt->tcp_reclen) { + xprt->tcp_flags |= XPRT_COPY_RECM; + xprt->tcp_offset = 0; + if (xprt->tcp_flags & XPRT_LAST_FRAG) { + xprt->tcp_flags &= ~XPRT_COPY_DATA; + xprt->tcp_flags |= XPRT_COPY_XID; + xprt->tcp_copied = 0; + } + } +} + +static inline void xs_tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc) +{ + size_t len, used; + char *p; + + len = sizeof(xprt->tcp_xid) - xprt->tcp_offset; + dprintk("RPC: reading XID (%Zu bytes)\n", len); + p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset; + used = xs_tcp_copy_data(desc, p, len); + xprt->tcp_offset += used; + if (used != len) + return; + xprt->tcp_flags &= ~XPRT_COPY_XID; + xprt->tcp_flags |= XPRT_COPY_DATA; + xprt->tcp_copied = 4; + dprintk("RPC: reading reply for XID %08x\n", + ntohl(xprt->tcp_xid)); + xs_tcp_check_recm(xprt); +} + +static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc) +{ + struct rpc_rqst *req; + struct xdr_buf *rcvbuf; + size_t len; + ssize_t r; + + /* Find and lock the request corresponding to this xid */ + spin_lock(&xprt->transport_lock); + req = xprt_lookup_rqst(xprt, xprt->tcp_xid); + if (!req) { + 
xprt->tcp_flags &= ~XPRT_COPY_DATA; + dprintk("RPC: XID %08x request not found!\n", + ntohl(xprt->tcp_xid)); + spin_unlock(&xprt->transport_lock); + return; + } + + rcvbuf = &req->rq_private_buf; + len = desc->count; + if (len > xprt->tcp_reclen - xprt->tcp_offset) { + skb_reader_t my_desc; + + len = xprt->tcp_reclen - xprt->tcp_offset; + memcpy(&my_desc, desc, sizeof(my_desc)); + my_desc.count = len; + r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, + &my_desc, xs_tcp_copy_data); + desc->count -= r; + desc->offset += r; + } else + r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, + desc, xs_tcp_copy_data); + + if (r > 0) { + xprt->tcp_copied += r; + xprt->tcp_offset += r; + } + if (r != len) { + /* Error when copying to the receive buffer, + * usually because we weren't able to allocate + * additional buffer pages. All we can do now + * is turn off XPRT_COPY_DATA, so the request + * will not receive any additional updates, + * and time out. + * Any remaining data from this record will + * be discarded. + */ + xprt->tcp_flags &= ~XPRT_COPY_DATA; + dprintk("RPC: XID %08x truncated request\n", + ntohl(xprt->tcp_xid)); + dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", + xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); + goto out; + } + + dprintk("RPC: XID %08x read %Zd bytes\n", + ntohl(xprt->tcp_xid), r); + dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", + xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); + + if (xprt->tcp_copied == req->rq_private_buf.buflen) + xprt->tcp_flags &= ~XPRT_COPY_DATA; + else if (xprt->tcp_offset == xprt->tcp_reclen) { + if (xprt->tcp_flags & XPRT_LAST_FRAG) + xprt->tcp_flags &= ~XPRT_COPY_DATA; + } + +out: + if (!(xprt->tcp_flags & XPRT_COPY_DATA)) + xprt_complete_rqst(req->rq_task, xprt->tcp_copied); + spin_unlock(&xprt->transport_lock); + xs_tcp_check_recm(xprt); +} + +static inline void xs_tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc) +{ + size_t len; + + len = xprt->tcp_reclen - xprt->tcp_offset; + if (len > desc->count) + len = desc->count; + desc->count -= len; + desc->offset += len; + xprt->tcp_offset += len; + dprintk("RPC: discarded %Zu bytes\n", len); + xs_tcp_check_recm(xprt); +} + +static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len) +{ + struct rpc_xprt *xprt = rd_desc->arg.data; + skb_reader_t desc = { + .skb = skb, + .offset = offset, + .count = len, + .csum = 0 + }; + + dprintk("RPC: xs_tcp_data_recv started\n"); + do { + /* Read in a new fragment marker if necessary */ + /* Can we ever really expect to get completely empty fragments? 
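
[Editorial aside] For reference, the 4-byte record marker that xs_tcp_read_fraghdr() above strips off each TCP record carries a last-fragment flag in its top bit and a 31-bit fragment length below it. A standalone userspace sketch of the decoding; the two constants here are assumed mirrors of RPC_LAST_STREAM_FRAGMENT and RPC_FRAGMENT_SIZE_MASK, whose real definitions live outside this file:

#include <stdint.h>
#include <arpa/inet.h>

#define LAST_FRAG	0x80000000U	/* assumed: last-fragment flag */
#define SIZE_MASK	0x7fffffffU	/* assumed: fragment length mask */

/* Decode a record marker exactly as received from the wire. */
static void decode_record_marker(uint32_t wire, int *is_last, uint32_t *frag_len)
{
	uint32_t marker = ntohl(wire);

	*is_last = (marker & LAST_FRAG) != 0;
	*frag_len = marker & SIZE_MASK;
}
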
*/ + if (xprt->tcp_flags & XPRT_COPY_RECM) { + xs_tcp_read_fraghdr(xprt, &desc); + continue; + } + /* Read in the xid if necessary */ + if (xprt->tcp_flags & XPRT_COPY_XID) { + xs_tcp_read_xid(xprt, &desc); + continue; + } + /* Read in the request data */ + if (xprt->tcp_flags & XPRT_COPY_DATA) { + xs_tcp_read_request(xprt, &desc); + continue; + } + /* Skip over any trailing bytes on short reads */ + xs_tcp_read_discard(xprt, &desc); + } while (desc.count); + dprintk("RPC: xs_tcp_data_recv done\n"); + return len - desc.count; +} + +/** + * xs_tcp_data_ready - "data ready" callback for TCP sockets + * @sk: socket with data to read + * @bytes: how much data to read + * + */ +static void xs_tcp_data_ready(struct sock *sk, int bytes) +{ + struct rpc_xprt *xprt; + read_descriptor_t rd_desc; + + read_lock(&sk->sk_callback_lock); + dprintk("RPC: xs_tcp_data_ready...\n"); + if (!(xprt = xprt_from_sock(sk))) + goto out; + if (xprt->shutdown) + goto out; + + /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */ + rd_desc.arg.data = xprt; + rd_desc.count = 65536; + tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv); +out: + read_unlock(&sk->sk_callback_lock); +} + +/** + * xs_tcp_state_change - callback to handle TCP socket state changes + * @sk: socket whose state has changed + * + */ +static void xs_tcp_state_change(struct sock *sk) +{ + struct rpc_xprt *xprt; + + read_lock(&sk->sk_callback_lock); + if (!(xprt = xprt_from_sock(sk))) + goto out; + dprintk("RPC: xs_tcp_state_change client %p...\n", xprt); + dprintk("RPC: state %x conn %d dead %d zapped %d\n", + sk->sk_state, xprt_connected(xprt), + sock_flag(sk, SOCK_DEAD), + sock_flag(sk, SOCK_ZAPPED)); + + switch (sk->sk_state) { + case TCP_ESTABLISHED: + spin_lock_bh(&xprt->transport_lock); + if (!xprt_test_and_set_connected(xprt)) { + /* Reset TCP record info */ + xprt->tcp_offset = 0; + xprt->tcp_reclen = 0; + xprt->tcp_copied = 0; + xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID; + xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; + xprt_wake_pending_tasks(xprt, 0); + } + spin_unlock_bh(&xprt->transport_lock); + break; + case TCP_SYN_SENT: + case TCP_SYN_RECV: + break; + default: + xprt_disconnect(xprt); + break; + } + out: + read_unlock(&sk->sk_callback_lock); +} + +/** + * xs_udp_write_space - callback invoked when socket buffer space + * becomes available + * @sk: socket whose state has changed + * + * Called when more output buffer space is available for this socket. + * We try not to wake our writers until they can make "significant" + * progress, otherwise we'll waste resources thrashing kernel_sendmsg + * with a bunch of small requests. + */ +static void xs_udp_write_space(struct sock *sk) +{ + read_lock(&sk->sk_callback_lock); + + /* from net/core/sock.c:sock_def_write_space */ + if (sock_writeable(sk)) { + struct socket *sock; + struct rpc_xprt *xprt; + + if (unlikely(!(sock = sk->sk_socket))) + goto out; + if (unlikely(!(xprt = xprt_from_sock(sk)))) + goto out; + if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags))) + goto out; + + xprt_write_space(xprt); + } + + out: + read_unlock(&sk->sk_callback_lock); +} + +/** + * xs_tcp_write_space - callback invoked when socket buffer space + * becomes available + * @sk: socket whose state has changed + * + * Called when more output buffer space is available for this socket. + * We try not to wake our writers until they can make "significant" + * progress, otherwise we'll waste resources thrashing kernel_sendmsg + * with a bunch of small requests. 
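
[Editorial aside] Both write_space callbacks pair with xs_nospace() earlier in the file: the sender records that it is waiting when the socket fills, and the callback clears that state and wakes it once space returns. A toy, userspace-style model of that handshake, with all names invented for illustration:

#include <stdbool.h>

static bool nospace;			/* stands in for the SOCK_NOSPACE bit */

static void sender_hits_full_socket(void)
{
	nospace = true;			/* xs_nospace(): note that we are waiting */
	/* ...the task then sleeps in xprt_wait_for_buffer_space()... */
}

static void socket_has_space_again(void)
{
	if (nospace) {			/* test_and_clear_bit(SOCK_NOSPACE, ...) */
		nospace = false;
		/* ...xprt_write_space() wakes the waiting task... */
	}
}
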
+ */ +static void xs_tcp_write_space(struct sock *sk) +{ + read_lock(&sk->sk_callback_lock); + + /* from net/core/stream.c:sk_stream_write_space */ + if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) { + struct socket *sock; + struct rpc_xprt *xprt; + + if (unlikely(!(sock = sk->sk_socket))) + goto out; + if (unlikely(!(xprt = xprt_from_sock(sk)))) + goto out; + if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags))) + goto out; + + xprt_write_space(xprt); + } + + out: + read_unlock(&sk->sk_callback_lock); +} + +static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt) +{ + struct sock *sk = xprt->inet; + + if (xprt->rcvsize) { + sk->sk_userlocks |= SOCK_RCVBUF_LOCK; + sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2; + } + if (xprt->sndsize) { + sk->sk_userlocks |= SOCK_SNDBUF_LOCK; + sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2; + sk->sk_write_space(sk); + } +} + +/** + * xs_udp_set_buffer_size - set send and receive limits + * @xprt: generic transport + * @sndsize: requested size of send buffer, in bytes + * @rcvsize: requested size of receive buffer, in bytes + * + * Set socket send and receive buffer size limits. + */ +static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize) +{ + xprt->sndsize = 0; + if (sndsize) + xprt->sndsize = sndsize + 1024; + xprt->rcvsize = 0; + if (rcvsize) + xprt->rcvsize = rcvsize + 1024; + + xs_udp_do_set_buffer_size(xprt); +} + +/** + * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport + * @task: task that timed out + * + * Adjust the congestion window after a retransmit timeout has occurred. + */ +static void xs_udp_timer(struct rpc_task *task) +{ + xprt_adjust_cwnd(task, -ETIMEDOUT); +} + +static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) +{ + struct sockaddr_in myaddr = { + .sin_family = AF_INET, + }; + int err; + unsigned short port = xprt->port; + + do { + myaddr.sin_port = htons(port); + err = sock->ops->bind(sock, (struct sockaddr *) &myaddr, + sizeof(myaddr)); + if (err == 0) { + xprt->port = port; + dprintk("RPC: xs_bindresvport bound to port %u\n", + port); + return 0; + } + if (port <= xprt_min_resvport) + port = xprt_max_resvport; + else + port--; + } while (err == -EADDRINUSE && port != xprt->port); + + dprintk("RPC: can't bind to reserved port (%d).\n", -err); + return err; +} + +/** + * xs_udp_connect_worker - set up a UDP socket + * @args: RPC transport to connect + * + * Invoked by a work queue tasklet. 
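
[Editorial aside] Since xs_udp_connect_worker() below re-applies the limits via xs_udp_do_set_buffer_size(), it is worth seeing how the sizing arithmetic above scales. A worked example, assuming a slot table of 16 entries — a plausible default for xprt_udp_slot_table_entries, not something this hunk guarantees:

/* Worked example: a requested 32 KB write size ends up locking roughly
 * 1 MB of socket send buffer once padded and multiplied per slot. */
static unsigned int example_udp_sndbuf(void)
{
	unsigned int requested = 32 * 1024;		/* e.g. an NFS wsize */
	unsigned int sndsize   = requested + 1024;	/* xs_udp_set_buffer_size() pads by 1K */

	return sndsize * 16 * 2;			/* 1081344 bytes, per xs_udp_do_set_buffer_size() */
}
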
+ */ +static void xs_udp_connect_worker(void *args) +{ + struct rpc_xprt *xprt = (struct rpc_xprt *) args; + struct socket *sock = xprt->sock; + int err, status = -EIO; + + if (xprt->shutdown || xprt->addr.sin_port == 0) + goto out; + + dprintk("RPC: xs_udp_connect_worker for xprt %p\n", xprt); + + /* Start by resetting any existing state */ + xs_close(xprt); + + if ((err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock)) < 0) { + dprintk("RPC: can't create UDP transport socket (%d).\n", -err); + goto out; + } + + if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) { + sock_release(sock); + goto out; + } + + if (!xprt->inet) { + struct sock *sk = sock->sk; + + write_lock_bh(&sk->sk_callback_lock); + + sk->sk_user_data = xprt; + xprt->old_data_ready = sk->sk_data_ready; + xprt->old_state_change = sk->sk_state_change; + xprt->old_write_space = sk->sk_write_space; + sk->sk_data_ready = xs_udp_data_ready; + sk->sk_write_space = xs_udp_write_space; + sk->sk_no_check = UDP_CSUM_NORCV; + + xprt_set_connected(xprt); + + /* Reset to new socket */ + xprt->sock = sock; + xprt->inet = sk; + + write_unlock_bh(&sk->sk_callback_lock); + } + xs_udp_do_set_buffer_size(xprt); + status = 0; +out: + xprt_wake_pending_tasks(xprt, status); + xprt_clear_connecting(xprt); +} + +/* + * We need to preserve the port number so the reply cache on the server can + * find our cached RPC replies when we get around to reconnecting. + */ +static void xs_tcp_reuse_connection(struct rpc_xprt *xprt) +{ + int result; + struct socket *sock = xprt->sock; + struct sockaddr any; + + dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt); + + /* + * Disconnect the transport socket by doing a connect operation + * with AF_UNSPEC. This should return immediately... + */ + memset(&any, 0, sizeof(any)); + any.sa_family = AF_UNSPEC; + result = sock->ops->connect(sock, &any, sizeof(any), 0); + if (result) + dprintk("RPC: AF_UNSPEC connect return code %d\n", + result); +} + +/** + * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint + * @args: RPC transport to connect + * + * Invoked by a work queue tasklet. 
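
[Editorial aside] The xs_tcp_reuse_connection() helper above relies on the long-standing trick that connecting a socket to an AF_UNSPEC address dissolves the existing association while leaving the local port bound, so a later connect() presents the same source port to the server (and hence hits its reply cache). A userspace sketch of the same idea, purely illustrative and with error handling omitted:

#include <string.h>
#include <sys/socket.h>

static int dissolve_association(int fd)
{
	struct sockaddr any;

	memset(&any, 0, sizeof(any));
	any.sa_family = AF_UNSPEC;
	/* Tears down the current connection; the bound local port survives. */
	return connect(fd, &any, sizeof(any));
}
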
+ */ +static void xs_tcp_connect_worker(void *args) +{ + struct rpc_xprt *xprt = (struct rpc_xprt *)args; + struct socket *sock = xprt->sock; + int err, status = -EIO; + + if (xprt->shutdown || xprt->addr.sin_port == 0) + goto out; + + dprintk("RPC: xs_tcp_connect_worker for xprt %p\n", xprt); + + if (!xprt->sock) { + /* start from scratch */ + if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) { + dprintk("RPC: can't create TCP transport socket (%d).\n", -err); + goto out; + } + + if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) { + sock_release(sock); + goto out; + } + } else + /* "close" the socket, preserving the local port */ + xs_tcp_reuse_connection(xprt); + + if (!xprt->inet) { + struct sock *sk = sock->sk; + + write_lock_bh(&sk->sk_callback_lock); + + sk->sk_user_data = xprt; + xprt->old_data_ready = sk->sk_data_ready; + xprt->old_state_change = sk->sk_state_change; + xprt->old_write_space = sk->sk_write_space; + sk->sk_data_ready = xs_tcp_data_ready; + sk->sk_state_change = xs_tcp_state_change; + sk->sk_write_space = xs_tcp_write_space; + + /* socket options */ + sk->sk_userlocks |= SOCK_BINDPORT_LOCK; + sock_reset_flag(sk, SOCK_LINGER); + tcp_sk(sk)->linger2 = 0; + tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF; + + xprt_clear_connected(xprt); + + /* Reset to new socket */ + xprt->sock = sock; + xprt->inet = sk; + + write_unlock_bh(&sk->sk_callback_lock); + } + + /* Tell the socket layer to start connecting... */ + status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr, + sizeof(xprt->addr), O_NONBLOCK); + dprintk("RPC: %p connect status %d connected %d sock state %d\n", + xprt, -status, xprt_connected(xprt), sock->sk->sk_state); + if (status < 0) { + switch (status) { + case -EINPROGRESS: + case -EALREADY: + goto out_clear; + case -ECONNREFUSED: + case -ECONNRESET: + /* retry with existing socket, after a delay */ + break; + default: + /* get rid of existing socket, and retry */ + xs_close(xprt); + break; + } + } +out: + xprt_wake_pending_tasks(xprt, status); +out_clear: + xprt_clear_connecting(xprt); +} + +/** + * xs_connect - connect a socket to a remote endpoint + * @task: address of RPC task that manages state of connect request + * + * TCP: If the remote end dropped the connection, delay reconnecting. + * + * UDP socket connects are synchronous, but we use a work queue anyway + * to guarantee that even unprivileged user processes can set up a + * socket on a privileged port. + * + * If a UDP socket connect fails, the delay behavior here prevents + * retry floods (hard mounts). + */ +static void xs_connect(struct rpc_task *task) +{ + struct rpc_xprt *xprt = task->tk_xprt; + + if (xprt_test_and_set_connecting(xprt)) + return; + + if (xprt->sock != NULL) { + dprintk("RPC: xs_connect delayed xprt %p for %lu seconds\n", + xprt, xprt->reestablish_timeout / HZ); + schedule_delayed_work(&xprt->connect_worker, + xprt->reestablish_timeout); + xprt->reestablish_timeout <<= 1; + if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) + xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; + } else { + dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); + schedule_work(&xprt->connect_worker); + + /* flush_scheduled_work can sleep... 
*/ + if (!RPC_IS_ASYNC(task)) + flush_scheduled_work(); + } +} + +static struct rpc_xprt_ops xs_udp_ops = { + .set_buffer_size = xs_udp_set_buffer_size, + .reserve_xprt = xprt_reserve_xprt_cong, + .release_xprt = xprt_release_xprt_cong, + .connect = xs_connect, + .send_request = xs_udp_send_request, + .set_retrans_timeout = xprt_set_retrans_timeout_rtt, + .timer = xs_udp_timer, + .release_request = xprt_release_rqst_cong, + .close = xs_close, + .destroy = xs_destroy, +}; + +static struct rpc_xprt_ops xs_tcp_ops = { + .reserve_xprt = xprt_reserve_xprt, + .release_xprt = xprt_release_xprt, + .connect = xs_connect, + .send_request = xs_tcp_send_request, + .set_retrans_timeout = xprt_set_retrans_timeout_def, + .close = xs_close, + .destroy = xs_destroy, +}; + +/** + * xs_setup_udp - Set up transport to use a UDP socket + * @xprt: transport to set up + * @to: timeout parameters + * + */ +int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) +{ + size_t slot_table_size; + + dprintk("RPC: setting up udp-ipv4 transport...\n"); + + xprt->max_reqs = xprt_udp_slot_table_entries; + slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); + xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); + if (xprt->slot == NULL) + return -ENOMEM; + memset(xprt->slot, 0, slot_table_size); + + xprt->prot = IPPROTO_UDP; + xprt->port = xprt_max_resvport; + xprt->tsh_size = 0; + xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0; + /* XXX: header size can vary due to auth type, IPv6, etc. */ + xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); + + INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt); + xprt->bind_timeout = XS_BIND_TO; + xprt->connect_timeout = XS_UDP_CONN_TO; + xprt->reestablish_timeout = XS_UDP_REEST_TO; + xprt->idle_timeout = XS_IDLE_DISC_TO; + + xprt->ops = &xs_udp_ops; + + if (to) + xprt->timeout = *to; + else + xprt_set_timeout(&xprt->timeout, 5, 5 * HZ); + + return 0; +} + +/** + * xs_setup_tcp - Set up transport to use a TCP socket + * @xprt: transport to set up + * @to: timeout parameters + * + */ +int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) +{ + size_t slot_table_size; + + dprintk("RPC: setting up tcp-ipv4 transport...\n"); + + xprt->max_reqs = xprt_tcp_slot_table_entries; + slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); + xprt->slot = kmalloc(slot_table_size, GFP_KERNEL); + if (xprt->slot == NULL) + return -ENOMEM; + memset(xprt->slot, 0, slot_table_size); + + xprt->prot = IPPROTO_TCP; + xprt->port = xprt_max_resvport; + xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); + xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 
1 : 0; + xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; + + INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt); + xprt->bind_timeout = XS_BIND_TO; + xprt->connect_timeout = XS_TCP_CONN_TO; + xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; + xprt->idle_timeout = XS_IDLE_DISC_TO; + + xprt->ops = &xs_tcp_ops; + + if (to) + xprt->timeout = *to; + else + xprt_set_timeout(&xprt->timeout, 2, 60 * HZ); + + return 0; +} diff --git a/net/sysctl_net.c b/net/sysctl_net.c index c5241fcbb966..55538f6b60ff 100644 --- a/net/sysctl_net.c +++ b/net/sysctl_net.c @@ -16,6 +16,8 @@ #include #include +#include + #ifdef CONFIG_INET #include #endif diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index fda737d77edc..0db9e57013fd 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -163,7 +163,7 @@ static void xfrm_policy_timer(unsigned long data) if (xp->dead) goto out; - dir = xp->index & 7; + dir = xfrm_policy_id2dir(xp->index); if (xp->lft.hard_add_expires_seconds) { long tmo = xp->lft.hard_add_expires_seconds + @@ -225,7 +225,7 @@ expired: * SPD calls. */ -struct xfrm_policy *xfrm_policy_alloc(int gfp) +struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp) { struct xfrm_policy *policy; @@ -417,7 +417,7 @@ struct xfrm_policy *xfrm_policy_byid(int dir, u32 id, int delete) struct xfrm_policy *pol, **p; write_lock_bh(&xfrm_policy_lock); - for (p = &xfrm_policy_list[id & 7]; (pol=*p)!=NULL; p = &pol->next) { + for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) { if (pol->index == id) { xfrm_pol_hold(pol); if (delete) @@ -1192,46 +1192,6 @@ int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family) EXPORT_SYMBOL(xfrm_bundle_ok); -/* Well... that's _TASK_. We need to scan through transformation - * list and figure out what mss tcp should generate in order to - * final datagram fit to mtu. Mama mia... :-) - * - * Apparently, some easy way exists, but we used to choose the most - * bizarre ones. :-) So, raising Kalashnikov... tra-ta-ta. - * - * Consider this function as something like dark humour. 
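
[Editorial aside] The xfrm_get_mss() routine deleted just below implemented a fixed-point search: guess a payload size, ask each transform how large the resulting packet would become, and shave off the overshoot until it fits the MTU. A standalone restatement of that loop, for illustration only — the per-transform sizing responsibility now rests with xfrm_state_mtu(), touched later in this patch:

/* expanded_size() stands in for walking the dst/xfrm chain and adding each
 * transform's overhead; 88 mirrors the sanity floor in the removed code. */
static int fit_payload_to_mtu(int mtu, int header_len,
			      int (*expanded_size)(int payload))
{
	int res = mtu - header_len;

	for (;;) {
		int m = expanded_size(res);

		if (m <= mtu)
			break;
		res -= m - mtu;
		if (res < 88)
			return mtu;
	}
	return res + header_len;
}
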
:-) - */ -static int xfrm_get_mss(struct dst_entry *dst, u32 mtu) -{ - int res = mtu - dst->header_len; - - for (;;) { - struct dst_entry *d = dst; - int m = res; - - do { - struct xfrm_state *x = d->xfrm; - if (x) { - spin_lock_bh(&x->lock); - if (x->km.state == XFRM_STATE_VALID && - x->type && x->type->get_max_size) - m = x->type->get_max_size(d->xfrm, m); - else - m += x->props.header_len; - spin_unlock_bh(&x->lock); - } - } while ((d = d->child) != NULL); - - if (m <= mtu) - break; - res -= (m - mtu); - if (res < 88) - return mtu; - } - - return res + dst->header_len; -} - int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) { int err = 0; @@ -1252,8 +1212,6 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) dst_ops->negative_advice = xfrm_negative_advice; if (likely(dst_ops->link_failure == NULL)) dst_ops->link_failure = xfrm_link_failure; - if (likely(dst_ops->get_mss == NULL)) - dst_ops->get_mss = xfrm_get_mss; if (likely(afinfo->garbage_collect == NULL)) afinfo->garbage_collect = __xfrm_garbage_collect; xfrm_policy_afinfo[afinfo->family] = afinfo; @@ -1281,7 +1239,6 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo) dst_ops->check = NULL; dst_ops->negative_advice = NULL; dst_ops->link_failure = NULL; - dst_ops->get_mss = NULL; afinfo->garbage_collect = NULL; } } diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 9d206c282cf1..8b9a4747417d 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1026,6 +1026,12 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x) } EXPORT_SYMBOL(xfrm_state_delete_tunnel); +/* + * This function is NOT optimal. For example, with ESP it will give an + * MTU that's usually two bytes short of being optimal. However, it will + * usually give an answer that's a multiple of 4 provided the input is + * also a multiple of 4. 
+ */ int xfrm_state_mtu(struct xfrm_state *x, int mtu) { int res = mtu; diff --git a/scripts/.gitignore b/scripts/.gitignore new file mode 100644 index 000000000000..b46d68bb9e17 --- /dev/null +++ b/scripts/.gitignore @@ -0,0 +1,4 @@ +conmakehash +kallsyms +pnmtologo + diff --git a/scripts/basic/.gitignore b/scripts/basic/.gitignore new file mode 100644 index 000000000000..7304e19782c7 --- /dev/null +++ b/scripts/basic/.gitignore @@ -0,0 +1,3 @@ +fixdep +split-include +docproc diff --git a/scripts/kconfig/.gitignore b/scripts/kconfig/.gitignore new file mode 100644 index 000000000000..2dac3442e0ac --- /dev/null +++ b/scripts/kconfig/.gitignore @@ -0,0 +1,16 @@ +# +# Generated files +# +config* +lex.*.c +*.tab.c +*.tab.h + +# +# configuration programs +# +conf +mconf +qconf +gconf +kxgettext diff --git a/scripts/mod/.gitignore b/scripts/mod/.gitignore new file mode 100644 index 000000000000..e9b7abe7b95b --- /dev/null +++ b/scripts/mod/.gitignore @@ -0,0 +1,4 @@ +elfconfig.h +mk_elfconfig +modpost + diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index d8ee38aede26..f2ee673329a7 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -295,11 +295,13 @@ static int do_pcmcia_entry(const char *filename, { unsigned int i; + id->match_flags = TO_NATIVE(id->match_flags); id->manf_id = TO_NATIVE(id->manf_id); id->card_id = TO_NATIVE(id->card_id); id->func_id = TO_NATIVE(id->func_id); id->function = TO_NATIVE(id->function); id->device_no = TO_NATIVE(id->device_no); + for (i=0; i<4; i++) { id->prod_id_hash[i] = TO_NATIVE(id->prod_id_hash[i]); } diff --git a/security/dummy.c b/security/dummy.c index 9623a61dfc76..3d34f3de7e82 100644 --- a/security/dummy.c +++ b/security/dummy.c @@ -768,7 +768,7 @@ static int dummy_socket_getpeersec(struct socket *sock, char __user *optval, return -ENOPROTOOPT; } -static inline int dummy_sk_alloc_security (struct sock *sk, int family, int priority) +static inline int dummy_sk_alloc_security (struct sock *sk, int family, gfp_t priority) { return 0; } diff --git a/security/keys/Makefile b/security/keys/Makefile index c392d750b208..5145adfb6a05 100644 --- a/security/keys/Makefile +++ b/security/keys/Makefile @@ -6,6 +6,7 @@ obj-y := \ key.o \ keyring.o \ keyctl.o \ + permission.o \ process_keys.o \ request_key.o \ request_key_auth.o \ diff --git a/security/keys/internal.h b/security/keys/internal.h index 46c8602661c9..db99ed434f3a 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h @@ -71,26 +71,26 @@ extern void keyring_publish_name(struct key *keyring); extern int __key_link(struct key *keyring, struct key *key); -extern struct key *__keyring_search_one(struct key *keyring, - const struct key_type *type, - const char *description, - key_perm_t perm); +extern key_ref_t __keyring_search_one(key_ref_t keyring_ref, + const struct key_type *type, + const char *description, + key_perm_t perm); extern struct key *keyring_search_instkey(struct key *keyring, key_serial_t target_id); typedef int (*key_match_func_t)(const struct key *, const void *); -extern struct key *keyring_search_aux(struct key *keyring, - struct task_struct *tsk, - struct key_type *type, - const void *description, - key_match_func_t match); +extern key_ref_t keyring_search_aux(key_ref_t keyring_ref, + struct task_struct *tsk, + struct key_type *type, + const void *description, + key_match_func_t match); -extern struct key *search_process_keyrings(struct key_type *type, - const void *description, - key_match_func_t match, - struct task_struct *tsk); +extern 
key_ref_t search_process_keyrings(struct key_type *type, + const void *description, + key_match_func_t match, + struct task_struct *tsk); extern struct key *find_keyring_by_name(const char *name, key_serial_t bound); diff --git a/security/keys/key.c b/security/keys/key.c index fb89f9844465..2182be9e9309 100644 --- a/security/keys/key.c +++ b/security/keys/key.c @@ -693,14 +693,15 @@ void key_type_put(struct key_type *ktype) * - the key has an incremented refcount * - we need to put the key if we get an error */ -static inline struct key *__key_update(struct key *key, const void *payload, - size_t plen) +static inline key_ref_t __key_update(key_ref_t key_ref, + const void *payload, size_t plen) { + struct key *key = key_ref_to_ptr(key_ref); int ret; /* need write permission on the key to update it */ ret = -EACCES; - if (!key_permission(key, KEY_WRITE)) + if (!key_permission(key_ref, KEY_WRITE)) goto error; ret = -EEXIST; @@ -719,12 +720,12 @@ static inline struct key *__key_update(struct key *key, const void *payload, if (ret < 0) goto error; - out: - return key; +out: + return key_ref; - error: +error: key_put(key); - key = ERR_PTR(ret); + key_ref = ERR_PTR(ret); goto out; } /* end __key_update() */ @@ -734,52 +735,56 @@ static inline struct key *__key_update(struct key *key, const void *payload, * search the specified keyring for a key of the same description; if one is * found, update it, otherwise add a new one */ -struct key *key_create_or_update(struct key *keyring, - const char *type, - const char *description, - const void *payload, - size_t plen, - int not_in_quota) +key_ref_t key_create_or_update(key_ref_t keyring_ref, + const char *type, + const char *description, + const void *payload, + size_t plen, + int not_in_quota) { struct key_type *ktype; - struct key *key = NULL; + struct key *keyring, *key = NULL; key_perm_t perm; + key_ref_t key_ref; int ret; - key_check(keyring); - /* look up the key type to see if it's one of the registered kernel * types */ ktype = key_type_lookup(type); if (IS_ERR(ktype)) { - key = ERR_PTR(-ENODEV); + key_ref = ERR_PTR(-ENODEV); goto error; } - ret = -EINVAL; + key_ref = ERR_PTR(-EINVAL); if (!ktype->match || !ktype->instantiate) goto error_2; + keyring = key_ref_to_ptr(keyring_ref); + + key_check(keyring); + + down_write(&keyring->sem); + + /* if we're going to allocate a new key, we're going to have + * to modify the keyring */ + key_ref = ERR_PTR(-EACCES); + if (!key_permission(keyring_ref, KEY_WRITE)) + goto error_3; + /* search for an existing key of the same type and description in the * destination keyring */ - down_write(&keyring->sem); - - key = __keyring_search_one(keyring, ktype, description, 0); - if (!IS_ERR(key)) + key_ref = __keyring_search_one(keyring_ref, ktype, description, 0); + if (!IS_ERR(key_ref)) goto found_matching_key; - /* if we're going to allocate a new key, we're going to have to modify - * the keyring */ - ret = -EACCES; - if (!key_permission(keyring, KEY_WRITE)) - goto error_3; - /* decide on the permissions we want */ - perm = KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK; + perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK; + perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK; if (ktype->read) - perm |= KEY_USR_READ; + perm |= KEY_POS_READ | KEY_USR_READ; if (ktype == &key_type_keyring || ktype->update) perm |= KEY_USR_WRITE; @@ -788,7 +793,7 @@ struct key *key_create_or_update(struct key *keyring, key = key_alloc(ktype, description, current->fsuid, current->fsgid, perm, not_in_quota); if (IS_ERR(key)) { - ret = 
PTR_ERR(key); + key_ref = ERR_PTR(PTR_ERR(key)); goto error_3; } @@ -796,15 +801,18 @@ struct key *key_create_or_update(struct key *keyring, ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL); if (ret < 0) { key_put(key); - key = ERR_PTR(ret); + key_ref = ERR_PTR(ret); + goto error_3; } + key_ref = make_key_ref(key, is_key_possessed(keyring_ref)); + error_3: up_write(&keyring->sem); error_2: key_type_put(ktype); error: - return key; + return key_ref; found_matching_key: /* we found a matching key, so we're going to try to update it @@ -813,7 +821,7 @@ struct key *key_create_or_update(struct key *keyring, up_write(&keyring->sem); key_type_put(ktype); - key = __key_update(key, payload, plen); + key_ref = __key_update(key_ref, payload, plen); goto error; } /* end key_create_or_update() */ @@ -824,15 +832,16 @@ EXPORT_SYMBOL(key_create_or_update); /* * update a key */ -int key_update(struct key *key, const void *payload, size_t plen) +int key_update(key_ref_t key_ref, const void *payload, size_t plen) { + struct key *key = key_ref_to_ptr(key_ref); int ret; key_check(key); /* the key must be writable */ ret = -EACCES; - if (!key_permission(key, KEY_WRITE)) + if (!key_permission(key_ref, KEY_WRITE)) goto error; /* attempt to update it if supported */ diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index a6516a64b297..4c670ee6acf9 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -34,7 +34,7 @@ asmlinkage long sys_add_key(const char __user *_type, size_t plen, key_serial_t ringid) { - struct key *keyring, *key; + key_ref_t keyring_ref, key_ref; char type[32], *description; void *payload; long dlen, ret; @@ -86,25 +86,25 @@ asmlinkage long sys_add_key(const char __user *_type, } /* find the target keyring (which must be writable) */ - keyring = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE); - if (IS_ERR(keyring)) { - ret = PTR_ERR(keyring); + keyring_ref = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE); + if (IS_ERR(keyring_ref)) { + ret = PTR_ERR(keyring_ref); goto error3; } /* create or update the requested key and add it to the target * keyring */ - key = key_create_or_update(keyring, type, description, - payload, plen, 0); - if (!IS_ERR(key)) { - ret = key->serial; - key_put(key); + key_ref = key_create_or_update(keyring_ref, type, description, + payload, plen, 0); + if (!IS_ERR(key_ref)) { + ret = key_ref_to_ptr(key_ref)->serial; + key_ref_put(key_ref); } else { - ret = PTR_ERR(key); + ret = PTR_ERR(key_ref); } - key_put(keyring); + key_ref_put(keyring_ref); error3: kfree(payload); error2: @@ -131,7 +131,8 @@ asmlinkage long sys_request_key(const char __user *_type, key_serial_t destringid) { struct key_type *ktype; - struct key *key, *dest; + struct key *key; + key_ref_t dest_ref; char type[32], *description, *callout_info; long dlen, ret; @@ -187,11 +188,11 @@ asmlinkage long sys_request_key(const char __user *_type, } /* get the destination keyring if specified */ - dest = NULL; + dest_ref = NULL; if (destringid) { - dest = lookup_user_key(NULL, destringid, 1, 0, KEY_WRITE); - if (IS_ERR(dest)) { - ret = PTR_ERR(dest); + dest_ref = lookup_user_key(NULL, destringid, 1, 0, KEY_WRITE); + if (IS_ERR(dest_ref)) { + ret = PTR_ERR(dest_ref); goto error3; } } @@ -204,7 +205,8 @@ asmlinkage long sys_request_key(const char __user *_type, } /* do the search */ - key = request_key_and_link(ktype, description, callout_info, dest); + key = request_key_and_link(ktype, description, callout_info, + key_ref_to_ptr(dest_ref)); if (IS_ERR(key)) { ret = 
PTR_ERR(key); goto error5; @@ -216,7 +218,7 @@ asmlinkage long sys_request_key(const char __user *_type, error5: key_type_put(ktype); error4: - key_put(dest); + key_ref_put(dest_ref); error3: kfree(callout_info); error2: @@ -234,17 +236,17 @@ asmlinkage long sys_request_key(const char __user *_type, */ long keyctl_get_keyring_ID(key_serial_t id, int create) { - struct key *key; + key_ref_t key_ref; long ret; - key = lookup_user_key(NULL, id, create, 0, KEY_SEARCH); - if (IS_ERR(key)) { - ret = PTR_ERR(key); + key_ref = lookup_user_key(NULL, id, create, 0, KEY_SEARCH); + if (IS_ERR(key_ref)) { + ret = PTR_ERR(key_ref); goto error; } - ret = key->serial; - key_put(key); + ret = key_ref_to_ptr(key_ref)->serial; + key_ref_put(key_ref); error: return ret; @@ -302,7 +304,7 @@ long keyctl_update_key(key_serial_t id, const void __user *_payload, size_t plen) { - struct key *key; + key_ref_t key_ref; void *payload; long ret; @@ -324,16 +326,16 @@ long keyctl_update_key(key_serial_t id, } /* find the target key (which must be writable) */ - key = lookup_user_key(NULL, id, 0, 0, KEY_WRITE); - if (IS_ERR(key)) { - ret = PTR_ERR(key); + key_ref = lookup_user_key(NULL, id, 0, 0, KEY_WRITE); + if (IS_ERR(key_ref)) { + ret = PTR_ERR(key_ref); goto error2; } /* update the key */ - ret = key_update(key, payload, plen); + ret = key_update(key_ref, payload, plen); - key_put(key); + key_ref_put(key_ref); error2: kfree(payload); error: @@ -349,19 +351,19 @@ long keyctl_update_key(key_serial_t id, */ long keyctl_revoke_key(key_serial_t id) { - struct key *key; + key_ref_t key_ref; long ret; - key = lookup_user_key(NULL, id, 0, 0, KEY_WRITE); - if (IS_ERR(key)) { - ret = PTR_ERR(key); + key_ref = lookup_user_key(NULL, id, 0, 0, KEY_WRITE); + if (IS_ERR(key_ref)) { + ret = PTR_ERR(key_ref); goto error; } - key_revoke(key); + key_revoke(key_ref_to_ptr(key_ref)); ret = 0; - key_put(key); + key_ref_put(key_ref); error: return ret; @@ -375,18 +377,18 @@ long keyctl_revoke_key(key_serial_t id) */ long keyctl_keyring_clear(key_serial_t ringid) { - struct key *keyring; + key_ref_t keyring_ref; long ret; - keyring = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE); - if (IS_ERR(keyring)) { - ret = PTR_ERR(keyring); + keyring_ref = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE); + if (IS_ERR(keyring_ref)) { + ret = PTR_ERR(keyring_ref); goto error; } - ret = keyring_clear(keyring); + ret = keyring_clear(key_ref_to_ptr(keyring_ref)); - key_put(keyring); + key_ref_put(keyring_ref); error: return ret; @@ -401,26 +403,26 @@ long keyctl_keyring_clear(key_serial_t ringid) */ long keyctl_keyring_link(key_serial_t id, key_serial_t ringid) { - struct key *keyring, *key; + key_ref_t keyring_ref, key_ref; long ret; - keyring = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE); - if (IS_ERR(keyring)) { - ret = PTR_ERR(keyring); + keyring_ref = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE); + if (IS_ERR(keyring_ref)) { + ret = PTR_ERR(keyring_ref); goto error; } - key = lookup_user_key(NULL, id, 1, 0, KEY_LINK); - if (IS_ERR(key)) { - ret = PTR_ERR(key); + key_ref = lookup_user_key(NULL, id, 1, 0, KEY_LINK); + if (IS_ERR(key_ref)) { + ret = PTR_ERR(key_ref); goto error2; } - ret = key_link(keyring, key); + ret = key_link(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref)); - key_put(key); + key_ref_put(key_ref); error2: - key_put(keyring); + key_ref_put(keyring_ref); error: return ret; @@ -435,26 +437,26 @@ long keyctl_keyring_link(key_serial_t id, key_serial_t ringid) */ long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid) { 
- struct key *keyring, *key; + key_ref_t keyring_ref, key_ref; long ret; - keyring = lookup_user_key(NULL, ringid, 0, 0, KEY_WRITE); - if (IS_ERR(keyring)) { - ret = PTR_ERR(keyring); + keyring_ref = lookup_user_key(NULL, ringid, 0, 0, KEY_WRITE); + if (IS_ERR(keyring_ref)) { + ret = PTR_ERR(keyring_ref); goto error; } - key = lookup_user_key(NULL, id, 0, 0, 0); - if (IS_ERR(key)) { - ret = PTR_ERR(key); + key_ref = lookup_user_key(NULL, id, 0, 0, 0); + if (IS_ERR(key_ref)) { + ret = PTR_ERR(key_ref); goto error2; } - ret = key_unlink(keyring, key); + ret = key_unlink(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref)); - key_put(key); + key_ref_put(key_ref); error2: - key_put(keyring); + key_ref_put(keyring_ref); error: return ret; @@ -476,24 +478,26 @@ long keyctl_describe_key(key_serial_t keyid, size_t buflen) { struct key *key, *instkey; + key_ref_t key_ref; char *tmpbuf; long ret; - key = lookup_user_key(NULL, keyid, 0, 1, KEY_VIEW); - if (IS_ERR(key)) { + key_ref = lookup_user_key(NULL, keyid, 0, 1, KEY_VIEW); + if (IS_ERR(key_ref)) { /* viewing a key under construction is permitted if we have the * authorisation token handy */ - if (PTR_ERR(key) == -EACCES) { + if (PTR_ERR(key_ref) == -EACCES) { instkey = key_get_instantiation_authkey(keyid); if (!IS_ERR(instkey)) { key_put(instkey); - key = lookup_user_key(NULL, keyid, 0, 1, 0); - if (!IS_ERR(key)) + key_ref = lookup_user_key(NULL, keyid, + 0, 1, 0); + if (!IS_ERR(key_ref)) goto okay; } } - ret = PTR_ERR(key); + ret = PTR_ERR(key_ref); goto error; } @@ -504,13 +508,16 @@ okay: if (!tmpbuf) goto error2; + key = key_ref_to_ptr(key_ref); + ret = snprintf(tmpbuf, PAGE_SIZE - 1, - "%s;%d;%d;%06x;%s", - key->type->name, - key->uid, - key->gid, - key->perm, - key->description ? key->description :"" + "%s;%d;%d;%08x;%s", + key_ref_to_ptr(key_ref)->type->name, + key_ref_to_ptr(key_ref)->uid, + key_ref_to_ptr(key_ref)->gid, + key_ref_to_ptr(key_ref)->perm, + key_ref_to_ptr(key_ref)->description ? 
+ key_ref_to_ptr(key_ref)->description : "" ); /* include a NUL char at the end of the data */ @@ -530,7 +537,7 @@ okay: kfree(tmpbuf); error2: - key_put(key); + key_ref_put(key_ref); error: return ret; @@ -552,7 +559,7 @@ long keyctl_keyring_search(key_serial_t ringid, key_serial_t destringid) { struct key_type *ktype; - struct key *keyring, *key, *dest; + key_ref_t keyring_ref, key_ref, dest_ref; char type[32], *description; long dlen, ret; @@ -581,18 +588,18 @@ long keyctl_keyring_search(key_serial_t ringid, goto error2; /* get the keyring at which to begin the search */ - keyring = lookup_user_key(NULL, ringid, 0, 0, KEY_SEARCH); - if (IS_ERR(keyring)) { - ret = PTR_ERR(keyring); + keyring_ref = lookup_user_key(NULL, ringid, 0, 0, KEY_SEARCH); + if (IS_ERR(keyring_ref)) { + ret = PTR_ERR(keyring_ref); goto error2; } /* get the destination keyring if specified */ - dest = NULL; + dest_ref = NULL; if (destringid) { - dest = lookup_user_key(NULL, destringid, 1, 0, KEY_WRITE); - if (IS_ERR(dest)) { - ret = PTR_ERR(dest); + dest_ref = lookup_user_key(NULL, destringid, 1, 0, KEY_WRITE); + if (IS_ERR(dest_ref)) { + ret = PTR_ERR(dest_ref); goto error3; } } @@ -605,9 +612,9 @@ long keyctl_keyring_search(key_serial_t ringid, } /* do the search */ - key = keyring_search(keyring, ktype, description); - if (IS_ERR(key)) { - ret = PTR_ERR(key); + key_ref = keyring_search(keyring_ref, ktype, description); + if (IS_ERR(key_ref)) { + ret = PTR_ERR(key_ref); /* treat lack or presence of a negative key the same */ if (ret == -EAGAIN) @@ -616,26 +623,26 @@ long keyctl_keyring_search(key_serial_t ringid, } /* link the resulting key to the destination keyring if we can */ - if (dest) { + if (dest_ref) { ret = -EACCES; - if (!key_permission(key, KEY_LINK)) + if (!key_permission(key_ref, KEY_LINK)) goto error6; - ret = key_link(dest, key); + ret = key_link(key_ref_to_ptr(dest_ref), key_ref_to_ptr(key_ref)); if (ret < 0) goto error6; } - ret = key->serial; + ret = key_ref_to_ptr(key_ref)->serial; error6: - key_put(key); + key_ref_put(key_ref); error5: key_type_put(ktype); error4: - key_put(dest); + key_ref_put(dest_ref); error3: - key_put(keyring); + key_ref_put(keyring_ref); error2: kfree(description); error: @@ -643,16 +650,6 @@ long keyctl_keyring_search(key_serial_t ringid, } /* end keyctl_keyring_search() */ -/*****************************************************************************/ -/* - * see if the key we're looking at is the target key - */ -static int keyctl_read_key_same(const struct key *key, const void *target) -{ - return key == target; - -} /* end keyctl_read_key_same() */ - /*****************************************************************************/ /* * read a user key's payload @@ -665,38 +662,33 @@ static int keyctl_read_key_same(const struct key *key, const void *target) */ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) { - struct key *key, *skey; + struct key *key; + key_ref_t key_ref; long ret; /* find the key first */ - key = lookup_user_key(NULL, keyid, 0, 0, 0); - if (!IS_ERR(key)) { - /* see if we can read it directly */ - if (key_permission(key, KEY_READ)) - goto can_read_key; + key_ref = lookup_user_key(NULL, keyid, 0, 0, 0); + if (IS_ERR(key_ref)) { + ret = -ENOKEY; + goto error; + } - /* we can't; see if it's searchable from this process's - * keyrings - * - we automatically take account of the fact that it may be - * dangling off an instantiation key - */ - skey = search_process_keyrings(key->type, key, - keyctl_read_key_same, current); - if 
(!IS_ERR(skey)) - goto can_read_key2; + key = key_ref_to_ptr(key_ref); - ret = PTR_ERR(skey); - if (ret == -EAGAIN) - ret = -EACCES; + /* see if we can read it directly */ + if (key_permission(key_ref, KEY_READ)) + goto can_read_key; + + /* we can't; see if it's searchable from this process's keyrings + * - we automatically take account of the fact that it may be + * dangling off an instantiation key + */ + if (!is_key_possessed(key_ref)) { + ret = -EACCES; goto error2; } - ret = -ENOKEY; - goto error; - /* the key is probably readable - now try to read it */ - can_read_key2: - key_put(skey); can_read_key: ret = key_validate(key); if (ret == 0) { @@ -727,18 +719,21 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid) { struct key *key; + key_ref_t key_ref; long ret; ret = 0; if (uid == (uid_t) -1 && gid == (gid_t) -1) goto error; - key = lookup_user_key(NULL, id, 1, 1, 0); - if (IS_ERR(key)) { - ret = PTR_ERR(key); + key_ref = lookup_user_key(NULL, id, 1, 1, 0); + if (IS_ERR(key_ref)) { + ret = PTR_ERR(key_ref); goto error; } + key = key_ref_to_ptr(key_ref); + /* make the changes with the locks held to prevent chown/chown races */ ret = -EACCES; down_write(&key->sem); @@ -784,18 +779,21 @@ long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid) long keyctl_setperm_key(key_serial_t id, key_perm_t perm) { struct key *key; + key_ref_t key_ref; long ret; ret = -EINVAL; - if (perm & ~(KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL)) + if (perm & ~(KEY_POS_ALL | KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL)) goto error; - key = lookup_user_key(NULL, id, 1, 1, 0); - if (IS_ERR(key)) { - ret = PTR_ERR(key); + key_ref = lookup_user_key(NULL, id, 1, 1, 0); + if (IS_ERR(key_ref)) { + ret = PTR_ERR(key_ref); goto error; } + key = key_ref_to_ptr(key_ref); + /* make the changes with the locks held to prevent chown/chmod races */ ret = -EACCES; down_write(&key->sem); @@ -824,7 +822,8 @@ long keyctl_instantiate_key(key_serial_t id, key_serial_t ringid) { struct request_key_auth *rka; - struct key *instkey, *keyring; + struct key *instkey; + key_ref_t keyring_ref; void *payload; long ret; @@ -857,21 +856,21 @@ long keyctl_instantiate_key(key_serial_t id, /* find the destination keyring amongst those belonging to the * requesting task */ - keyring = NULL; + keyring_ref = NULL; if (ringid) { - keyring = lookup_user_key(rka->context, ringid, 1, 0, - KEY_WRITE); - if (IS_ERR(keyring)) { - ret = PTR_ERR(keyring); + keyring_ref = lookup_user_key(rka->context, ringid, 1, 0, + KEY_WRITE); + if (IS_ERR(keyring_ref)) { + ret = PTR_ERR(keyring_ref); goto error3; } } /* instantiate the key and link it into a keyring */ ret = key_instantiate_and_link(rka->target_key, payload, plen, - keyring, instkey); + key_ref_to_ptr(keyring_ref), instkey); - key_put(keyring); + key_ref_put(keyring_ref); error3: key_put(instkey); error2: @@ -889,7 +888,8 @@ long keyctl_instantiate_key(key_serial_t id, long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid) { struct request_key_auth *rka; - struct key *instkey, *keyring; + struct key *instkey; + key_ref_t keyring_ref; long ret; /* find the instantiation authorisation key */ @@ -903,19 +903,20 @@ long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid) /* find the destination keyring if present (which must also be * writable) */ - keyring = NULL; + keyring_ref = NULL; if (ringid) { - keyring = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE); - if 
(IS_ERR(keyring)) { - ret = PTR_ERR(keyring); + keyring_ref = lookup_user_key(NULL, ringid, 1, 0, KEY_WRITE); + if (IS_ERR(keyring_ref)) { + ret = PTR_ERR(keyring_ref); goto error2; } } /* instantiate the key and link it into a keyring */ - ret = key_negate_and_link(rka->target_key, timeout, keyring, instkey); + ret = key_negate_and_link(rka->target_key, timeout, + key_ref_to_ptr(keyring_ref), instkey); - key_put(keyring); + key_ref_put(keyring_ref); error2: key_put(instkey); error: diff --git a/security/keys/keyring.c b/security/keys/keyring.c index 9c208c756df8..0639396dd441 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c @@ -309,7 +309,7 @@ struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid, int ret; keyring = key_alloc(&key_type_keyring, description, - uid, gid, KEY_USR_ALL, not_in_quota); + uid, gid, KEY_POS_ALL | KEY_USR_ALL, not_in_quota); if (!IS_ERR(keyring)) { ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL); @@ -333,12 +333,13 @@ struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid, * - we rely on RCU to prevent the keyring lists from disappearing on us * - we return -EAGAIN if we didn't find any matching key * - we return -ENOKEY if we only found negative matching keys + * - we propagate the possession attribute from the keyring ref to the key ref */ -struct key *keyring_search_aux(struct key *keyring, - struct task_struct *context, - struct key_type *type, - const void *description, - key_match_func_t match) +key_ref_t keyring_search_aux(key_ref_t keyring_ref, + struct task_struct *context, + struct key_type *type, + const void *description, + key_match_func_t match) { struct { struct keyring_list *keylist; @@ -347,29 +348,33 @@ struct key *keyring_search_aux(struct key *keyring, struct keyring_list *keylist; struct timespec now; - struct key *key; + unsigned long possessed; + struct key *keyring, *key; + key_ref_t key_ref; long err; int sp, kix; + keyring = key_ref_to_ptr(keyring_ref); + possessed = is_key_possessed(keyring_ref); key_check(keyring); - rcu_read_lock(); - /* top keyring must have search permission to begin the search */ - key = ERR_PTR(-EACCES); - if (!key_task_permission(keyring, context, KEY_SEARCH)) + key_ref = ERR_PTR(-EACCES); + if (!key_task_permission(keyring_ref, context, KEY_SEARCH)) goto error; - key = ERR_PTR(-ENOTDIR); + key_ref = ERR_PTR(-ENOTDIR); if (keyring->type != &key_type_keyring) goto error; + rcu_read_lock(); + now = current_kernel_time(); err = -EAGAIN; sp = 0; /* start processing a new keyring */ - descend: +descend: if (test_bit(KEY_FLAG_REVOKED, &keyring->flags)) goto not_this_keyring; @@ -397,7 +402,8 @@ struct key *keyring_search_aux(struct key *keyring, continue; /* key must have search permissions */ - if (!key_task_permission(key, context, KEY_SEARCH)) + if (!key_task_permission(make_key_ref(key, possessed), + context, KEY_SEARCH)) continue; /* we set a different error code if we find a negative key */ @@ -411,7 +417,7 @@ struct key *keyring_search_aux(struct key *keyring, /* search through the keyrings nested in this one */ kix = 0; - ascend: +ascend: for (; kix < keylist->nkeys; kix++) { key = keylist->keys[kix]; if (key->type != &key_type_keyring) @@ -423,7 +429,8 @@ struct key *keyring_search_aux(struct key *keyring, if (sp >= KEYRING_SEARCH_MAX_DEPTH) continue; - if (!key_task_permission(key, context, KEY_SEARCH)) + if (!key_task_permission(make_key_ref(key, possessed), + context, KEY_SEARCH)) continue; /* stack the current position */ @@ -438,7 +445,7 @@ 
struct key *keyring_search_aux(struct key *keyring, /* the keyring we're looking at was disqualified or didn't contain a * matching key */ - not_this_keyring: +not_this_keyring: if (sp > 0) { /* resume the processing of a keyring higher up in the tree */ sp--; @@ -447,16 +454,18 @@ struct key *keyring_search_aux(struct key *keyring, goto ascend; } - key = ERR_PTR(err); - goto error; + key_ref = ERR_PTR(err); + goto error_2; /* we found a viable match */ - found: +found: atomic_inc(&key->usage); key_check(key); - error: + key_ref = make_key_ref(key, possessed); +error_2: rcu_read_unlock(); - return key; +error: + return key_ref; } /* end keyring_search_aux() */ @@ -469,9 +478,9 @@ struct key *keyring_search_aux(struct key *keyring, * - we return -EAGAIN if we didn't find any matching key * - we return -ENOKEY if we only found negative matching keys */ -struct key *keyring_search(struct key *keyring, - struct key_type *type, - const char *description) +key_ref_t keyring_search(key_ref_t keyring, + struct key_type *type, + const char *description) { if (!type->match) return ERR_PTR(-ENOKEY); @@ -488,15 +497,19 @@ EXPORT_SYMBOL(keyring_search); * search the given keyring only (no recursion) * - keyring must be locked by caller */ -struct key *__keyring_search_one(struct key *keyring, - const struct key_type *ktype, - const char *description, - key_perm_t perm) +key_ref_t __keyring_search_one(key_ref_t keyring_ref, + const struct key_type *ktype, + const char *description, + key_perm_t perm) { struct keyring_list *klist; - struct key *key; + unsigned long possessed; + struct key *keyring, *key; int loop; + keyring = key_ref_to_ptr(keyring_ref); + possessed = is_key_possessed(keyring_ref); + rcu_read_lock(); klist = rcu_dereference(keyring->payload.subscriptions); @@ -507,21 +520,21 @@ struct key *__keyring_search_one(struct key *keyring, if (key->type == ktype && (!key->type->match || key->type->match(key, description)) && - key_permission(key, perm) && + key_permission(make_key_ref(key, possessed), + perm) && !test_bit(KEY_FLAG_REVOKED, &key->flags) ) goto found; } } - key = ERR_PTR(-ENOKEY); - goto error; + rcu_read_unlock(); + return ERR_PTR(-ENOKEY); found: atomic_inc(&key->usage); - error: rcu_read_unlock(); - return key; + return make_key_ref(key, possessed); } /* end __keyring_search_one() */ @@ -603,7 +616,8 @@ struct key *find_keyring_by_name(const char *name, key_serial_t bound) if (strcmp(keyring->description, name) != 0) continue; - if (!key_permission(keyring, KEY_SEARCH)) + if (!key_permission(make_key_ref(keyring, 0), + KEY_SEARCH)) continue; /* found a potential candidate, but we still need to diff --git a/security/keys/permission.c b/security/keys/permission.c new file mode 100644 index 000000000000..03db073ba45c --- /dev/null +++ b/security/keys/permission.c @@ -0,0 +1,70 @@ +/* permission.c: key permission determination + * + * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include "internal.h" + +/*****************************************************************************/ +/* + * check to see whether permission is granted to use a key in the desired way, + * but permit the security modules to override + */ +int key_task_permission(const key_ref_t key_ref, + struct task_struct *context, + key_perm_t perm) +{ + struct key *key; + key_perm_t kperm; + int ret; + + key = key_ref_to_ptr(key_ref); + + /* use the second 8-bits of permissions for keys the caller owns */ + if (key->uid == context->fsuid) { + kperm = key->perm >> 16; + goto use_these_perms; + } + + /* use the third 8-bits of permissions for keys the caller has a group + * membership in common with */ + if (key->gid != -1 && key->perm & KEY_GRP_ALL) { + if (key->gid == context->fsgid) { + kperm = key->perm >> 8; + goto use_these_perms; + } + + task_lock(context); + ret = groups_search(context->group_info, key->gid); + task_unlock(context); + + if (ret) { + kperm = key->perm >> 8; + goto use_these_perms; + } + } + + /* otherwise use the least-significant 8-bits */ + kperm = key->perm; + +use_these_perms: + /* use the top 8-bits of permissions for keys the caller possesses + * - possessor permissions are additive with other permissions + */ + if (is_key_possessed(key_ref)) + kperm |= key->perm >> 24; + + kperm = kperm & perm & KEY_ALL; + + return kperm == perm; + +} /* end key_task_permission() */ + +EXPORT_SYMBOL(key_task_permission); diff --git a/security/keys/proc.c b/security/keys/proc.c index c55cf1fd0826..12b750e51fbf 100644 --- a/security/keys/proc.c +++ b/security/keys/proc.c @@ -167,7 +167,7 @@ static int proc_keys_show(struct seq_file *m, void *v) #define showflag(KEY, LETTER, FLAG) \ (test_bit(FLAG, &(KEY)->flags) ? LETTER : '-') - seq_printf(m, "%08x %c%c%c%c%c%c %5d %4s %06x %5d %5d %-9.9s ", + seq_printf(m, "%08x %c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ", key->serial, showflag(key, 'I', KEY_FLAG_INSTANTIATED), showflag(key, 'R', KEY_FLAG_REVOKED), diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index c089f78fb94e..d42d2158ce13 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c @@ -39,7 +39,7 @@ struct key root_user_keyring = { .type = &key_type_keyring, .user = &root_key_user, .sem = __RWSEM_INITIALIZER(root_user_keyring.sem), - .perm = KEY_USR_ALL, + .perm = KEY_POS_ALL | KEY_USR_ALL, .flags = 1 << KEY_FLAG_INSTANTIATED, .description = "_uid.0", #ifdef KEY_DEBUGGING @@ -54,7 +54,7 @@ struct key root_session_keyring = { .type = &key_type_keyring, .user = &root_key_user, .sem = __RWSEM_INITIALIZER(root_session_keyring.sem), - .perm = KEY_USR_ALL, + .perm = KEY_POS_ALL | KEY_USR_ALL, .flags = 1 << KEY_FLAG_INSTANTIATED, .description = "_uid_ses.0", #ifdef KEY_DEBUGGING @@ -98,7 +98,7 @@ int alloc_uid_keyring(struct user_struct *user) user->session_keyring = session_keyring; ret = 0; - error: +error: return ret; } /* end alloc_uid_keyring() */ @@ -156,7 +156,7 @@ int install_thread_keyring(struct task_struct *tsk) ret = 0; key_put(old); - error: +error: return ret; } /* end install_thread_keyring() */ @@ -193,7 +193,7 @@ int install_process_keyring(struct task_struct *tsk) } ret = 0; - error: +error: return ret; } /* end install_process_keyring() */ @@ -236,7 +236,7 @@ static int install_session_keyring(struct task_struct *tsk, /* we're using RCU on the pointer */ synchronize_rcu(); key_put(old); - error: +error: return ret; } /* end install_session_keyring() */ @@ -376,13 +376,13 @@ void key_fsgid_changed(struct 
task_struct *tsk) * - we return -EAGAIN if we didn't find any matching key * - we return -ENOKEY if we found only negative matching keys */ -struct key *search_process_keyrings(struct key_type *type, - const void *description, - key_match_func_t match, - struct task_struct *context) +key_ref_t search_process_keyrings(struct key_type *type, + const void *description, + key_match_func_t match, + struct task_struct *context) { struct request_key_auth *rka; - struct key *key, *ret, *err, *instkey; + key_ref_t key_ref, ret, err, instkey_ref; /* we want to return -EAGAIN or -ENOKEY if any of the keyrings were * searchable, but we failed to find a key or we found a negative key; @@ -391,46 +391,48 @@ struct key *search_process_keyrings(struct key_type *type, * * in terms of priority: success > -ENOKEY > -EAGAIN > other error */ - key = NULL; + key_ref = NULL; ret = NULL; err = ERR_PTR(-EAGAIN); /* search the thread keyring first */ if (context->thread_keyring) { - key = keyring_search_aux(context->thread_keyring, - context, type, description, match); - if (!IS_ERR(key)) + key_ref = keyring_search_aux( + make_key_ref(context->thread_keyring, 1), + context, type, description, match); + if (!IS_ERR(key_ref)) goto found; - switch (PTR_ERR(key)) { + switch (PTR_ERR(key_ref)) { case -EAGAIN: /* no key */ if (ret) break; case -ENOKEY: /* negative key */ - ret = key; + ret = key_ref; break; default: - err = key; + err = key_ref; break; } } /* search the process keyring second */ if (context->signal->process_keyring) { - key = keyring_search_aux(context->signal->process_keyring, - context, type, description, match); - if (!IS_ERR(key)) + key_ref = keyring_search_aux( + make_key_ref(context->signal->process_keyring, 1), + context, type, description, match); + if (!IS_ERR(key_ref)) goto found; - switch (PTR_ERR(key)) { + switch (PTR_ERR(key_ref)) { case -EAGAIN: /* no key */ if (ret) break; case -ENOKEY: /* negative key */ - ret = key; + ret = key_ref; break; default: - err = key; + err = key_ref; break; } } @@ -438,23 +440,25 @@ struct key *search_process_keyrings(struct key_type *type, /* search the session keyring */ if (context->signal->session_keyring) { rcu_read_lock(); - key = keyring_search_aux( - rcu_dereference(context->signal->session_keyring), + key_ref = keyring_search_aux( + make_key_ref(rcu_dereference( + context->signal->session_keyring), + 1), context, type, description, match); rcu_read_unlock(); - if (!IS_ERR(key)) + if (!IS_ERR(key_ref)) goto found; - switch (PTR_ERR(key)) { + switch (PTR_ERR(key_ref)) { case -EAGAIN: /* no key */ if (ret) break; case -ENOKEY: /* negative key */ - ret = key; + ret = key_ref; break; default: - err = key; + err = key_ref; break; } @@ -465,51 +469,54 @@ struct key *search_process_keyrings(struct key_type *type, goto no_key; rcu_read_lock(); - instkey = __keyring_search_one( - rcu_dereference(context->signal->session_keyring), + instkey_ref = __keyring_search_one( + make_key_ref(rcu_dereference( + context->signal->session_keyring), + 1), &key_type_request_key_auth, NULL, 0); rcu_read_unlock(); - if (IS_ERR(instkey)) + if (IS_ERR(instkey_ref)) goto no_key; - rka = instkey->payload.data; + rka = key_ref_to_ptr(instkey_ref)->payload.data; - key = search_process_keyrings(type, description, match, - rka->context); - key_put(instkey); + key_ref = search_process_keyrings(type, description, match, + rka->context); + key_ref_put(instkey_ref); - if (!IS_ERR(key)) + if (!IS_ERR(key_ref)) goto found; - switch (PTR_ERR(key)) { + switch (PTR_ERR(key_ref)) { case -EAGAIN: 
/* no key */ if (ret) break; case -ENOKEY: /* negative key */ - ret = key; + ret = key_ref; break; default: - err = key; + err = key_ref; break; } } /* or search the user-session keyring */ else { - key = keyring_search_aux(context->user->session_keyring, - context, type, description, match); - if (!IS_ERR(key)) + key_ref = keyring_search_aux( + make_key_ref(context->user->session_keyring, 1), + context, type, description, match); + if (!IS_ERR(key_ref)) goto found; - switch (PTR_ERR(key)) { + switch (PTR_ERR(key_ref)) { case -EAGAIN: /* no key */ if (ret) break; case -ENOKEY: /* negative key */ - ret = key; + ret = key_ref; break; default: - err = key; + err = key_ref; break; } } @@ -517,29 +524,40 @@ struct key *search_process_keyrings(struct key_type *type, no_key: /* no key - decide on the error we're going to go for */ - key = ret ? ret : err; + key_ref = ret ? ret : err; found: - return key; + return key_ref; } /* end search_process_keyrings() */ +/*****************************************************************************/ +/* + * see if the key we're looking at is the target key + */ +static int lookup_user_key_possessed(const struct key *key, const void *target) +{ + return key == target; + +} /* end lookup_user_key_possessed() */ + /*****************************************************************************/ /* * lookup a key given a key ID from userspace with a given permissions mask * - don't create special keyrings unless so requested * - partially constructed keys aren't found unless requested */ -struct key *lookup_user_key(struct task_struct *context, key_serial_t id, - int create, int partial, key_perm_t perm) +key_ref_t lookup_user_key(struct task_struct *context, key_serial_t id, + int create, int partial, key_perm_t perm) { + key_ref_t key_ref, skey_ref; struct key *key; int ret; if (!context) context = current; - key = ERR_PTR(-ENOKEY); + key_ref = ERR_PTR(-ENOKEY); switch (id) { case KEY_SPEC_THREAD_KEYRING: @@ -556,6 +574,7 @@ struct key *lookup_user_key(struct task_struct *context, key_serial_t id, key = context->thread_keyring; atomic_inc(&key->usage); + key_ref = make_key_ref(key, 1); break; case KEY_SPEC_PROCESS_KEYRING: @@ -572,6 +591,7 @@ struct key *lookup_user_key(struct task_struct *context, key_serial_t id, key = context->signal->process_keyring; atomic_inc(&key->usage); + key_ref = make_key_ref(key, 1); break; case KEY_SPEC_SESSION_KEYRING: @@ -579,7 +599,7 @@ struct key *lookup_user_key(struct task_struct *context, key_serial_t id, /* always install a session keyring upon access if one * doesn't exist yet */ ret = install_session_keyring( - context, context->user->session_keyring); + context, context->user->session_keyring); if (ret < 0) goto error; } @@ -588,16 +608,19 @@ struct key *lookup_user_key(struct task_struct *context, key_serial_t id, key = rcu_dereference(context->signal->session_keyring); atomic_inc(&key->usage); rcu_read_unlock(); + key_ref = make_key_ref(key, 1); break; case KEY_SPEC_USER_KEYRING: key = context->user->uid_keyring; atomic_inc(&key->usage); + key_ref = make_key_ref(key, 1); break; case KEY_SPEC_USER_SESSION_KEYRING: key = context->user->session_keyring; atomic_inc(&key->usage); + key_ref = make_key_ref(key, 1); break; case KEY_SPEC_GROUP_KEYRING: @@ -606,13 +629,28 @@ struct key *lookup_user_key(struct task_struct *context, key_serial_t id, goto error; default: - key = ERR_PTR(-EINVAL); + key_ref = ERR_PTR(-EINVAL); if (id < 1) goto error; key = key_lookup(id); - if (IS_ERR(key)) + if (IS_ERR(key)) { + key_ref = 
ERR_PTR(PTR_ERR(key)); goto error; + } + + key_ref = make_key_ref(key, 0); + + /* check to see if we possess the key */ + skey_ref = search_process_keyrings(key->type, key, + lookup_user_key_possessed, + current); + + if (!IS_ERR(skey_ref)) { + key_put(key); + key_ref = skey_ref; + } + break; } @@ -630,15 +668,15 @@ struct key *lookup_user_key(struct task_struct *context, key_serial_t id, /* check the permissions */ ret = -EACCES; - if (!key_task_permission(key, context, perm)) + if (!key_task_permission(key_ref, context, perm)) goto invalid_key; - error: - return key; +error: + return key_ref; - invalid_key: - key_put(key); - key = ERR_PTR(ret); +invalid_key: + key_ref_put(key_ref); + key_ref = ERR_PTR(ret); goto error; } /* end lookup_user_key() */ @@ -694,9 +732,9 @@ long join_session_keyring(const char *name) ret = keyring->serial; key_put(keyring); - error2: +error2: up(&key_session_sem); - error: +error: return ret; } /* end join_session_keyring() */ diff --git a/security/keys/request_key.c b/security/keys/request_key.c index 90c1506d007c..5cc4bba70db6 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c @@ -7,6 +7,8 @@ * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. + * + * See Documentation/keys-request-key.txt */ #include @@ -129,7 +131,7 @@ static struct key *__request_key_construction(struct key_type *type, /* create a key and add it to the queue */ key = key_alloc(type, description, - current->fsuid, current->fsgid, KEY_USR_ALL, 0); + current->fsuid, current->fsgid, KEY_POS_ALL, 0); if (IS_ERR(key)) goto alloc_failed; @@ -365,14 +367,24 @@ struct key *request_key_and_link(struct key_type *type, { struct key_user *user; struct key *key; + key_ref_t key_ref; kenter("%s,%s,%s,%p", type->name, description, callout_info, dest_keyring); /* search all the process keyrings for a key */ - key = search_process_keyrings(type, description, type->match, current); + key_ref = search_process_keyrings(type, description, type->match, + current); - if (PTR_ERR(key) == -EAGAIN) { + kdebug("search 1: %p", key_ref); + + if (!IS_ERR(key_ref)) { + key = key_ref_to_ptr(key_ref); + } + else if (PTR_ERR(key_ref) != -EAGAIN) { + key = ERR_PTR(PTR_ERR(key_ref)); + } + else { /* the search failed, but the keyrings were searchable, so we * should consult userspace if we can */ key = ERR_PTR(-ENOKEY); @@ -384,7 +396,7 @@ struct key *request_key_and_link(struct key_type *type, if (!user) goto nomem; - do { + for (;;) { if (signal_pending(current)) goto interrupted; @@ -397,10 +409,22 @@ struct key *request_key_and_link(struct key_type *type, /* someone else made the key we want, so we need to * search again as it might now be available to us */ - key = search_process_keyrings(type, description, - type->match, current); + key_ref = search_process_keyrings(type, description, + type->match, + current); - } while (PTR_ERR(key) == -EAGAIN); + kdebug("search 2: %p", key_ref); + + if (!IS_ERR(key_ref)) { + key = key_ref_to_ptr(key_ref); + break; + } + + if (PTR_ERR(key_ref) != -EAGAIN) { + key = ERR_PTR(PTR_ERR(key_ref)); + break; + } + } key_user_put(user); diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c index f22264632229..a8e4069d48cb 100644 --- a/security/keys/request_key_auth.c +++ b/security/keys/request_key_auth.c @@ -7,6 +7,8 @@ * modify it under the terms of the GNU General Public License * as published by the Free 
Software Foundation; either version * 2 of the License, or (at your option) any later version. + * + * See Documentation/keys-request-key.txt */ #include @@ -96,6 +98,7 @@ static void request_key_auth_destroy(struct key *key) kenter("{%d}", key->serial); key_put(rka->target_key); + kfree(rka); } /* end request_key_auth_destroy() */ @@ -126,7 +129,7 @@ struct key *request_key_auth_new(struct key *target, struct key **_rkakey) rkakey = key_alloc(&key_type_request_key_auth, desc, current->fsuid, current->fsgid, - KEY_USR_VIEW, 1); + KEY_POS_VIEW | KEY_USR_VIEW, 1); if (IS_ERR(rkakey)) { key_put(keyring); kleave("= %ld", PTR_ERR(rkakey)); diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 6e4937fe062b..447a1e0f48cb 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -262,7 +262,7 @@ static void superblock_free_security(struct super_block *sb) } #ifdef CONFIG_SECURITY_NETWORK -static int sk_alloc_security(struct sock *sk, int family, int priority) +static int sk_alloc_security(struct sock *sk, int family, gfp_t priority) { struct sk_security_struct *ssec; @@ -630,6 +630,16 @@ static inline u16 inode_mode_to_security_class(umode_t mode) return SECCLASS_FILE; } +static inline int default_protocol_stream(int protocol) +{ + return (protocol == IPPROTO_IP || protocol == IPPROTO_TCP); +} + +static inline int default_protocol_dgram(int protocol) +{ + return (protocol == IPPROTO_IP || protocol == IPPROTO_UDP); +} + static inline u16 socket_type_to_security_class(int family, int type, int protocol) { switch (family) { @@ -646,10 +656,16 @@ static inline u16 socket_type_to_security_class(int family, int type, int protoc case PF_INET6: switch (type) { case SOCK_STREAM: - return SECCLASS_TCP_SOCKET; + if (default_protocol_stream(protocol)) + return SECCLASS_TCP_SOCKET; + else + return SECCLASS_RAWIP_SOCKET; case SOCK_DGRAM: - return SECCLASS_UDP_SOCKET; - case SOCK_RAW: + if (default_protocol_dgram(protocol)) + return SECCLASS_UDP_SOCKET; + else + return SECCLASS_RAWIP_SOCKET; + default: return SECCLASS_RAWIP_SOCKET; } break; @@ -2970,6 +2986,8 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in /* * If PF_INET or PF_INET6, check name_bind permission for the port. + * Multiple address binding for SCTP is not supported yet: we just + * check the first address now. 
*/ family = sock->sk->sk_family; if (family == PF_INET || family == PF_INET6) { @@ -3014,12 +3032,12 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in goto out; } - switch(sk->sk_protocol) { - case IPPROTO_TCP: + switch(isec->sclass) { + case SECCLASS_TCP_SOCKET: node_perm = TCP_SOCKET__NODE_BIND; break; - case IPPROTO_UDP: + case SECCLASS_UDP_SOCKET: node_perm = UDP_SOCKET__NODE_BIND; break; @@ -3362,7 +3380,7 @@ out: return err; } -static int selinux_sk_alloc_security(struct sock *sk, int family, int priority) +static int selinux_sk_alloc_security(struct sock *sk, int family, gfp_t priority) { return sk_alloc_security(sk, family, priority); } diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 8eb140dd2e4b..a45cc971e735 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -879,7 +879,7 @@ static ssize_t sel_commit_bools_write(struct file *filep, if (sscanf(page, "%d", &new_value) != 1) goto out; - if (new_value) { + if (new_value && bool_pending_values) { security_set_bools(bool_num, bool_pending_values); } @@ -952,6 +952,7 @@ static int sel_make_bools(void) /* remove any existing files */ kfree(bool_pending_values); + bool_pending_values = NULL; sel_remove_bools(dir); @@ -1002,6 +1003,7 @@ out: } return ret; err: + kfree(values); d_genocide(dir); ret = -ENOMEM; goto out; diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index 0a758323a9cf..8e6262d12aa9 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c @@ -650,8 +650,10 @@ void policydb_destroy(struct policydb *p) } if (lrt) kfree(lrt); - for (i = 0; i < p->p_types.nprim; i++) - ebitmap_destroy(&p->type_attr_map[i]); + if (p->type_attr_map) { + for (i = 0; i < p->p_types.nprim; i++) + ebitmap_destroy(&p->type_attr_map[i]); + } kfree(p->type_attr_map); return; diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c index 29450befb5da..38b20efc9c0b 100644 --- a/sound/arm/pxa2xx-ac97.c +++ b/sound/arm/pxa2xx-ac97.c @@ -245,7 +245,7 @@ static pxa2xx_pcm_client_t pxa2xx_ac97_pcm_client = { #ifdef CONFIG_PM -static int pxa2xx_ac97_do_suspend(snd_card_t *card, unsigned int state) +static int pxa2xx_ac97_do_suspend(snd_card_t *card, pm_message_t state) { if (card->power_state != SNDRV_CTL_POWER_D3cold) { pxa2xx_audio_ops_t *platform_ops = card->dev->platform_data; diff --git a/sound/core/init.c b/sound/core/init.c index a5702014a704..c72a79115cca 100644 --- a/sound/core/init.c +++ b/sound/core/init.c @@ -828,7 +828,8 @@ static int snd_generic_suspend(struct device *dev, pm_message_t state, u32 level card = get_snd_generic_card(dev); if (card->power_state == SNDRV_CTL_POWER_D3hot) return 0; - card->pm_suspend(card, PMSG_SUSPEND); + if (card->pm_suspend) + card->pm_suspend(card, PMSG_SUSPEND); snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); return 0; } @@ -843,7 +844,8 @@ static int snd_generic_resume(struct device *dev, u32 level) card = get_snd_generic_card(dev); if (card->power_state == SNDRV_CTL_POWER_D0) return 0; - card->pm_resume(card); + if (card->pm_suspend) + card->pm_resume(card); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c index 91124ddbdda9..129abab5ce98 100644 --- a/sound/core/memalloc.c +++ b/sound/core/memalloc.c @@ -106,7 +106,7 @@ struct snd_mem_list { static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, - unsigned int __nocast flags) + gfp_t flags) { 
void *ret; u64 dma_mask, coherent_dma_mask; @@ -190,7 +190,7 @@ static void unmark_pages(struct page *page, int order) * * Returns the pointer of the buffer, or NULL if no enoguh memory. */ -void *snd_malloc_pages(size_t size, unsigned int gfp_flags) +void *snd_malloc_pages(size_t size, gfp_t gfp_flags) { int pg; void *res; @@ -235,7 +235,7 @@ static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *d { int pg; void *res; - unsigned int gfp_flags; + gfp_t gfp_flags; snd_assert(size > 0, return NULL); snd_assert(dma != NULL, return NULL); diff --git a/sound/core/memory.c b/sound/core/memory.c index 8fa888fc53a0..7d8e2eebba51 100644 --- a/sound/core/memory.c +++ b/sound/core/memory.c @@ -89,7 +89,7 @@ void snd_memory_done(void) } } -static void *__snd_kmalloc(size_t size, unsigned int __nocast flags, void *caller) +static void *__snd_kmalloc(size_t size, gfp_t flags, void *caller) { unsigned long cpu_flags; struct snd_alloc_track *t; @@ -111,12 +111,12 @@ static void *__snd_kmalloc(size_t size, unsigned int __nocast flags, void *calle } #define _snd_kmalloc(size, flags) __snd_kmalloc((size), (flags), __builtin_return_address(0)); -void *snd_hidden_kmalloc(size_t size, unsigned int __nocast flags) +void *snd_hidden_kmalloc(size_t size, gfp_t flags) { return _snd_kmalloc(size, flags); } -void *snd_hidden_kzalloc(size_t size, unsigned int __nocast flags) +void *snd_hidden_kzalloc(size_t size, gfp_t flags) { void *ret = _snd_kmalloc(size, flags); if (ret) @@ -125,7 +125,7 @@ void *snd_hidden_kzalloc(size_t size, unsigned int __nocast flags) } EXPORT_SYMBOL(snd_hidden_kzalloc); -void *snd_hidden_kcalloc(size_t n, size_t size, unsigned int __nocast flags) +void *snd_hidden_kcalloc(size_t n, size_t size, gfp_t flags) { void *ret = NULL; if (n != 0 && size > INT_MAX / n) @@ -190,7 +190,7 @@ void snd_hidden_vfree(void *obj) snd_wrapper_vfree(obj); } -char *snd_hidden_kstrdup(const char *s, unsigned int __nocast flags) +char *snd_hidden_kstrdup(const char *s, gfp_t flags) { int len; char *buf; diff --git a/sound/core/seq/instr/ainstr_gf1.c b/sound/core/seq/instr/ainstr_gf1.c index 207c2c54bf1d..0e4df8826eed 100644 --- a/sound/core/seq/instr/ainstr_gf1.c +++ b/sound/core/seq/instr/ainstr_gf1.c @@ -51,7 +51,7 @@ static int snd_seq_gf1_copy_wave_from_stream(snd_gf1_ops_t *ops, gf1_wave_t *wp, *prev; gf1_xwave_t xp; int err; - unsigned int gfp_mask; + gfp_t gfp_mask; unsigned int real_size; gfp_mask = atomic ? GFP_ATOMIC : GFP_KERNEL; @@ -144,7 +144,8 @@ static int snd_seq_gf1_put(void *private_data, snd_seq_kinstr_t *instr, snd_gf1_ops_t *ops = (snd_gf1_ops_t *)private_data; gf1_instrument_t *ip; gf1_xinstrument_t ix; - int err, gfp_mask; + int err; + gfp_t gfp_mask; if (cmd != SNDRV_SEQ_INSTR_PUT_CMD_CREATE) return -EINVAL; diff --git a/sound/core/seq/instr/ainstr_iw.c b/sound/core/seq/instr/ainstr_iw.c index b3cee092b1a4..7c19fbbc5d0f 100644 --- a/sound/core/seq/instr/ainstr_iw.c +++ b/sound/core/seq/instr/ainstr_iw.c @@ -58,7 +58,7 @@ static int snd_seq_iwffff_copy_env_from_stream(__u32 req_stype, iwffff_xenv_t *ex, char __user **data, long *len, - unsigned int __nocast gfp_mask) + gfp_t gfp_mask) { __u32 stype; iwffff_env_record_t *rp, *rp_last; @@ -129,7 +129,7 @@ static int snd_seq_iwffff_copy_wave_from_stream(snd_iwffff_ops_t *ops, iwffff_wave_t *wp, *prev; iwffff_xwave_t xp; int err; - unsigned int gfp_mask; + gfp_t gfp_mask; unsigned int real_size; gfp_mask = atomic ? 
GFP_ATOMIC : GFP_KERNEL; @@ -236,7 +236,7 @@ static int snd_seq_iwffff_put(void *private_data, snd_seq_kinstr_t *instr, iwffff_layer_t *lp, *prev_lp; iwffff_xlayer_t lx; int err; - unsigned int gfp_mask; + gfp_t gfp_mask; if (cmd != SNDRV_SEQ_INSTR_PUT_CMD_CREATE) return -EINVAL; diff --git a/sound/core/seq/instr/ainstr_simple.c b/sound/core/seq/instr/ainstr_simple.c index 6183d2151034..17ab94e76073 100644 --- a/sound/core/seq/instr/ainstr_simple.c +++ b/sound/core/seq/instr/ainstr_simple.c @@ -57,7 +57,8 @@ static int snd_seq_simple_put(void *private_data, snd_seq_kinstr_t *instr, snd_simple_ops_t *ops = (snd_simple_ops_t *)private_data; simple_instrument_t *ip; simple_xinstrument_t ix; - int err, gfp_mask; + int err; + gfp_t gfp_mask; unsigned int real_size; if (cmd != SNDRV_SEQ_INSTR_PUT_CMD_CREATE) diff --git a/sound/core/wrappers.c b/sound/core/wrappers.c index 508e6d67ee19..296b716f1376 100644 --- a/sound/core/wrappers.c +++ b/sound/core/wrappers.c @@ -27,7 +27,7 @@ #include #ifdef CONFIG_SND_DEBUG_MEMORY -void *snd_wrapper_kmalloc(size_t size, unsigned int __nocast flags) +void *snd_wrapper_kmalloc(size_t size, gfp_t flags) { return kmalloc(size, flags); } diff --git a/sound/isa/opl3sa2.c b/sound/isa/opl3sa2.c index e2d2babcd20b..4ba268f251e3 100644 --- a/sound/isa/opl3sa2.c +++ b/sound/isa/opl3sa2.c @@ -914,6 +914,7 @@ static int __init alsa_card_opl3sa2_init(void) #endif #ifdef CONFIG_PNP pnp_unregister_card_driver(&opl3sa2_pnpc_driver); + pnp_unregister_driver(&opl3sa2_pnp_driver); #endif return -ENODEV; } @@ -927,6 +928,7 @@ static void __exit alsa_card_opl3sa2_exit(void) #ifdef CONFIG_PNP /* PnP cards first */ pnp_unregister_card_driver(&opl3sa2_pnpc_driver); + pnp_unregister_driver(&opl3sa2_pnp_driver); #endif for (idx = 0; idx < SNDRV_CARDS; idx++) snd_card_free(snd_opl3sa2_legacy[idx]); diff --git a/sound/oss/au1000.c b/sound/oss/au1000.c index 4491733c9e4e..2c2ae2ee01ac 100644 --- a/sound/oss/au1000.c +++ b/sound/oss/au1000.c @@ -1295,7 +1295,7 @@ static int au1000_mmap(struct file *file, struct vm_area_struct *vma) unsigned long size; int ret = 0; - dbg(__FUNCTION__); + dbg("%s", __FUNCTION__); lock_kernel(); down(&s->sem); diff --git a/sound/oss/dmasound/dmasound.h b/sound/oss/dmasound/dmasound.h index 9a2f50f0b184..222014cafc1a 100644 --- a/sound/oss/dmasound/dmasound.h +++ b/sound/oss/dmasound/dmasound.h @@ -116,7 +116,7 @@ typedef struct { const char *name; const char *name2; struct module *owner; - void *(*dma_alloc)(unsigned int, int); + void *(*dma_alloc)(unsigned int, gfp_t); void (*dma_free)(void *, unsigned int); int (*irqinit)(void); #ifdef MODULE diff --git a/sound/oss/dmasound/dmasound_atari.c b/sound/oss/dmasound/dmasound_atari.c index 8daaf87664ba..59eb53f89318 100644 --- a/sound/oss/dmasound/dmasound_atari.c +++ b/sound/oss/dmasound/dmasound_atari.c @@ -114,7 +114,7 @@ static ssize_t ata_ctx_u16le(const u_char *userPtr, size_t userCount, /*** Low level stuff *********************************************************/ -static void *AtaAlloc(unsigned int size, int flags); +static void *AtaAlloc(unsigned int size, gfp_t flags); static void AtaFree(void *, unsigned int size); static int AtaIrqInit(void); #ifdef MODULE @@ -810,7 +810,7 @@ static TRANS transFalconExpanding = { * Atari (TT/Falcon) */ -static void *AtaAlloc(unsigned int size, int flags) +static void *AtaAlloc(unsigned int size, gfp_t flags) { return atari_stram_alloc(size, "dmasound"); } diff --git a/sound/oss/dmasound/dmasound_awacs.c b/sound/oss/dmasound/dmasound_awacs.c index 
2ceb46f1d40f..b2bf8bac842d 100644 --- a/sound/oss/dmasound/dmasound_awacs.c +++ b/sound/oss/dmasound/dmasound_awacs.c @@ -271,7 +271,7 @@ int expand_read_bal; /* Balance factor for expanding reads (not volume!) */ /*** Low level stuff *********************************************************/ -static void *PMacAlloc(unsigned int size, int flags); +static void *PMacAlloc(unsigned int size, gfp_t flags); static void PMacFree(void *ptr, unsigned int size); static int PMacIrqInit(void); #ifdef MODULE @@ -614,7 +614,7 @@ tas_init_frame_rates(unsigned int *prop, unsigned int l) /* * PCI PowerMac, with AWACS, Screamer, Burgundy, DACA or Tumbler and DBDMA. */ -static void *PMacAlloc(unsigned int size, int flags) +static void *PMacAlloc(unsigned int size, gfp_t flags) { return kmalloc(size, flags); } diff --git a/sound/oss/dmasound/dmasound_paula.c b/sound/oss/dmasound/dmasound_paula.c index 558db5311e06..d59f60b26410 100644 --- a/sound/oss/dmasound/dmasound_paula.c +++ b/sound/oss/dmasound/dmasound_paula.c @@ -69,7 +69,7 @@ static int write_sq_block_size_half, write_sq_block_size_quarter; /*** Low level stuff *********************************************************/ -static void *AmiAlloc(unsigned int size, int flags); +static void *AmiAlloc(unsigned int size, gfp_t flags); static void AmiFree(void *obj, unsigned int size); static int AmiIrqInit(void); #ifdef MODULE @@ -317,7 +317,7 @@ static inline void StopDMA(void) enable_heartbeat(); } -static void *AmiAlloc(unsigned int size, int flags) +static void *AmiAlloc(unsigned int size, gfp_t flags) { return amiga_chip_alloc((long)size, "dmasound [Paula]"); } diff --git a/sound/oss/dmasound/dmasound_q40.c b/sound/oss/dmasound/dmasound_q40.c index 92c25a0174db..1ddaa6284b08 100644 --- a/sound/oss/dmasound/dmasound_q40.c +++ b/sound/oss/dmasound/dmasound_q40.c @@ -36,7 +36,7 @@ static int expand_data; /* Data for expanding */ /*** Low level stuff *********************************************************/ -static void *Q40Alloc(unsigned int size, int flags); +static void *Q40Alloc(unsigned int size, gfp_t flags); static void Q40Free(void *, unsigned int); static int Q40IrqInit(void); #ifdef MODULE @@ -358,7 +358,7 @@ static TRANS transQ40Compressing = { /*** Low level stuff *********************************************************/ -static void *Q40Alloc(unsigned int size, int flags) +static void *Q40Alloc(unsigned int size, gfp_t flags) { return kmalloc(size, flags); /* change to vmalloc */ } diff --git a/sound/oss/ite8172.c b/sound/oss/ite8172.c index 58f879fda975..26e5944b6ba8 100644 --- a/sound/oss/ite8172.c +++ b/sound/oss/ite8172.c @@ -1859,7 +1859,7 @@ static int it8172_release(struct inode *inode, struct file *file) struct it8172_state *s = (struct it8172_state *)file->private_data; #ifdef IT8172_VERBOSE_DEBUG - dbg(__FUNCTION__); + dbg("%s", __FUNCTION__); #endif lock_kernel(); if (file->f_mode & FMODE_WRITE) diff --git a/sound/pci/ac97/ac97_bus.c b/sound/pci/ac97/ac97_bus.c index 227f8b9f67ce..becbc420ba41 100644 --- a/sound/pci/ac97/ac97_bus.c +++ b/sound/pci/ac97/ac97_bus.c @@ -17,25 +17,21 @@ #include /* - * Codec families have names seperated by commas, so we search for an - * individual codec name within the family string. + * Let drivers decide whether they want to support given codec from their + * probe method. Drivers have direct access to the ac97_t structure and may + * decide based on the id field amongst other things. 
*/ static int ac97_bus_match(struct device *dev, struct device_driver *drv) { - return (strstr(dev->bus_id, drv->name) != NULL); + return 1; } static int ac97_bus_suspend(struct device *dev, pm_message_t state) { int ret = 0; - if (dev->driver && dev->driver->suspend) { - ret = dev->driver->suspend(dev, state, SUSPEND_DISABLE); - if (ret == 0) - ret = dev->driver->suspend(dev, state, SUSPEND_SAVE_STATE); - if (ret == 0) - ret = dev->driver->suspend(dev, state, SUSPEND_POWER_DOWN); - } + if (dev->driver && dev->driver->suspend) + ret = dev->driver->suspend(dev, state, SUSPEND_POWER_DOWN); return ret; } @@ -43,13 +39,8 @@ static int ac97_bus_resume(struct device *dev) { int ret = 0; - if (dev->driver && dev->driver->resume) { + if (dev->driver && dev->driver->resume) ret = dev->driver->resume(dev, RESUME_POWER_ON); - if (ret == 0) - ret = dev->driver->resume(dev, RESUME_RESTORE_STATE); - if (ret == 0) - ret = dev->driver->resume(dev, RESUME_ENABLE); - } return ret; } diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c index e64cb07a39c2..41fc290149ed 100644 --- a/sound/pci/ac97/ac97_codec.c +++ b/sound/pci/ac97/ac97_codec.c @@ -1557,7 +1557,7 @@ static int snd_ac97_modem_build(snd_card_t * card, ac97_t * ac97) /* build modem switches */ for (idx = 0; idx < ARRAY_SIZE(snd_ac97_controls_modem_switches); idx++) - if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_modem_switches[idx], ac97))) < 0) + if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_ac97_controls_modem_switches[idx], ac97))) < 0) return err; /* build chip specific controls */ @@ -1828,7 +1828,6 @@ static int snd_ac97_dev_register(snd_device_t *device) ac97->dev.bus = &ac97_bus_type; ac97->dev.parent = ac97->bus->card->dev; - ac97->dev.platform_data = ac97; ac97->dev.release = ac97_device_release; snprintf(ac97->dev.bus_id, BUS_ID_SIZE, "card%d-%d", ac97->bus->card->number, ac97->num); if ((err = device_register(&ac97->dev)) < 0) { diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c index 045ddc743edc..0238cc65d32a 100644 --- a/sound/pci/ac97/ac97_patch.c +++ b/sound/pci/ac97/ac97_patch.c @@ -2752,7 +2752,11 @@ AC97_DOUBLE("Modem Speaker Volume", 0x5c, 14, 12, 3, 1) static int patch_si3036_specific(ac97_t * ac97) { - return patch_build_controls(ac97, snd_ac97_controls_si3036, ARRAY_SIZE(snd_ac97_controls_si3036)); + int idx, err; + for (idx = 0; idx < ARRAY_SIZE(snd_ac97_controls_si3036); idx++) + if ((err = snd_ctl_add(ac97->bus->card, snd_ctl_new1(&snd_ac97_controls_si3036[idx], ac97))) < 0) + return err; + return 0; } static struct snd_ac97_build_ops patch_si3036_ops = { diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c index d683f7736a63..f35b558c29b2 100644 --- a/sound/pci/ali5451/ali5451.c +++ b/sound/pci/ali5451/ali5451.c @@ -1993,8 +1993,10 @@ static int __devinit snd_ali_mixer(ali_t * codec) if ((err = snd_ac97_mixer(codec->ac97_bus, &ac97, &codec->ac97[i])) < 0) { snd_printk("ali mixer %d creating error.\n", i); if(i == 0) - return err; - } + return err; + codec->num_of_codecs = 1; + break; + } } if (codec->spdif_support) { diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c index 8a59598167f9..c1a239a4dac6 100644 --- a/sound/pci/atiixp_modem.c +++ b/sound/pci/atiixp_modem.c @@ -405,7 +405,7 @@ static int snd_atiixp_acquire_codec(atiixp_t *chip) while (atiixp_read(chip, PHYS_OUT_ADDR) & ATI_REG_PHYS_OUT_ADDR_EN) { if (! 
timeout--) { - snd_printk(KERN_WARNING "atiixp: codec acquire timeout\n"); + snd_printk(KERN_WARNING "atiixp-modem: codec acquire timeout\n"); return -EBUSY; } udelay(1); @@ -436,7 +436,7 @@ static unsigned short snd_atiixp_codec_read(atiixp_t *chip, unsigned short codec } while (--timeout); /* time out may happen during reset */ if (reg < 0x7c) - snd_printk(KERN_WARNING "atiixp: codec read timeout (reg %x)\n", reg); + snd_printk(KERN_WARNING "atiixp-modem: codec read timeout (reg %x)\n", reg); return 0xffff; } @@ -498,7 +498,7 @@ static int snd_atiixp_aclink_reset(atiixp_t *chip) do_delay(); atiixp_update(chip, CMD, ATI_REG_CMD_AC_RESET, ATI_REG_CMD_AC_RESET); if (--timeout) { - snd_printk(KERN_ERR "atiixp: codec reset timeout\n"); + snd_printk(KERN_ERR "atiixp-modem: codec reset timeout\n"); break; } } @@ -552,7 +552,7 @@ static int snd_atiixp_codec_detect(atiixp_t *chip) atiixp_write(chip, IER, 0); /* disable irqs */ if ((chip->codec_not_ready_bits & ALL_CODEC_NOT_READY) == ALL_CODEC_NOT_READY) { - snd_printk(KERN_ERR "atiixp: no codec detected!\n"); + snd_printk(KERN_ERR "atiixp-modem: no codec detected!\n"); return -ENXIO; } return 0; @@ -635,7 +635,7 @@ static void snd_atiixp_xrun_dma(atiixp_t *chip, atiixp_dma_t *dma) { if (! dma->substream || ! dma->running) return; - snd_printdd("atiixp: XRUN detected (DMA %d)\n", dma->ops->type); + snd_printdd("atiixp-modem: XRUN detected (DMA %d)\n", dma->ops->type); snd_pcm_stop(dma->substream, SNDRV_PCM_STATE_XRUN); } @@ -1081,14 +1081,14 @@ static int __devinit snd_atiixp_mixer_new(atiixp_t *chip, int clock) ac97.scaps = AC97_SCAP_SKIP_AUDIO; if ((err = snd_ac97_mixer(pbus, &ac97, &chip->ac97[i])) < 0) { chip->ac97[i] = NULL; /* to be sure */ - snd_printdd("atiixp: codec %d not available for modem\n", i); + snd_printdd("atiixp-modem: codec %d not available for modem\n", i); continue; } codec_count++; } if (! codec_count) { - snd_printk(KERN_ERR "atiixp: no codec available\n"); + snd_printk(KERN_ERR "atiixp-modem: no codec available\n"); return -ENODEV; } @@ -1159,7 +1159,7 @@ static void __devinit snd_atiixp_proc_init(atiixp_t *chip) { snd_info_entry_t *entry; - if (! snd_card_proc_new(chip->card, "atiixp", &entry)) + if (! snd_card_proc_new(chip->card, "atiixp-modem", &entry)) snd_info_set_text_ops(entry, chip, 1024, snd_atiixp_proc_read); } diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c index e87e8427f25f..e9cd8e054f25 100644 --- a/sound/pci/emu10k1/emu10k1_main.c +++ b/sound/pci/emu10k1/emu10k1_main.c @@ -756,9 +756,12 @@ static emu_chip_details_t emu_chip_details[] = { .sblive51 = 1} , /* Tested by alsa bugtrack user "hus" bug #1297 12th Aug 2005 */ {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80611102, - .driver = "EMU10K1", .name = "SBLive! Platinum 5.1 [SB0060]", + .driver = "EMU10K1", .name = "SBLive 5.1 [SB0060]", .id = "Live", .emu10k1_chip = 1, + .ac97_chip = 2, /* ac97 is optional; both SBLive 5.1 and platinum + * share the same IDs! + */ .sblive51 = 1} , {.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80511102, .driver = "EMU10K1", .name = "SBLive! 
Value [CT4850]", diff --git a/sound/pci/emu10k1/emumixer.c b/sound/pci/emu10k1/emumixer.c index d71a72e84bcc..7cc831ccd0cb 100644 --- a/sound/pci/emu10k1/emumixer.c +++ b/sound/pci/emu10k1/emumixer.c @@ -810,8 +810,14 @@ int __devinit snd_emu10k1_mixer(emu10k1_t *emu, ac97.private_data = emu; ac97.private_free = snd_emu10k1_mixer_free_ac97; ac97.scaps = AC97_SCAP_NO_SPDIF; - if ((err = snd_ac97_mixer(pbus, &ac97, &emu->ac97)) < 0) - return err; + if ((err = snd_ac97_mixer(pbus, &ac97, &emu->ac97)) < 0) { + if (emu->card_capabilities->ac97_chip == 1) + return err; + snd_printd(KERN_INFO "emu10k1: AC97 is optional on this board\n"); + snd_printd(KERN_INFO" Proceeding without ac97 mixers...\n"); + snd_device_free(emu->card, pbus); + goto no_ac97; /* FIXME: get rid of ugly gotos.. */ + } if (emu->audigy) { /* set master volume to 0 dB */ snd_ac97_write(emu->ac97, AC97_MASTER, 0x0000); @@ -836,6 +842,7 @@ int __devinit snd_emu10k1_mixer(emu10k1_t *emu, for (; *c; c++) remove_ctl(card, *c); } else { + no_ac97: if (emu->card_capabilities->ecard) strcpy(emu->card->mixername, "EMU APS"); else if (emu->audigy) diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 5b829a1a4c60..d0eb9f2250aa 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -881,10 +881,8 @@ int snd_hda_parse_generic_codec(struct hda_codec *codec) struct hda_gspec *spec; int err; - if(!codec->afg) { - snd_printdd("hda_generic: no generic modem yet\n"); - return -ENODEV; - } + if(!codec->afg) + return 0; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (spec == NULL) { diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 9590ece2099d..6fe696e53ea6 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -1137,6 +1137,7 @@ static snd_pcm_uframes_t azx_pcm_pointer(snd_pcm_substream_t *substream) pos = azx_sd_readl(azx_dev, SD_LPIB); if (chip->position_fix == POS_FIX_FIFO) pos += azx_dev->fifo_size; +#if 0 /* disabled temprarily, auto-correction doesn't work well... 
*/ else if (chip->position_fix == POS_FIX_AUTO && azx_dev->period_updating) { /* check the validity of DMA position */ unsigned int diff = 0; @@ -1157,6 +1158,10 @@ static snd_pcm_uframes_t azx_pcm_pointer(snd_pcm_substream_t *substream) } azx_dev->period_updating = 0; } +#else + else if (chip->position_fix == POS_FIX_AUTO) + pos += azx_dev->fifo_size; +#endif } if (pos >= azx_dev->bufsize) pos = 0; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 849b5b50c921..7327deb6df9f 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -1385,8 +1385,8 @@ static snd_kcontrol_new_t alc880_test_mixer[] = { HDA_CODEC_VOLUME("Side Playback Volume", 0x0f, 0x0, HDA_OUTPUT), ALC_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), ALC_BIND_MUTE("Surround Playback Switch", 0x0d, 2, HDA_INPUT), - ALC_BIND_MUTE("CLFE Playback Volume", 0x0e, 2, HDA_INPUT), - ALC_BIND_MUTE("Side Playback Volume", 0x0f, 2, HDA_INPUT), + ALC_BIND_MUTE("CLFE Playback Switch", 0x0e, 2, HDA_INPUT), + ALC_BIND_MUTE("Side Playback Switch", 0x0f, 2, HDA_INPUT), PIN_CTL_TEST("Front Pin Mode", 0x14), PIN_CTL_TEST("Surround Pin Mode", 0x15), PIN_CTL_TEST("CLFE Pin Mode", 0x16), @@ -1409,18 +1409,6 @@ static snd_kcontrol_new_t alc880_test_mixer[] = { HDA_CODEC_MUTE("In-4 Playback Switch", 0x0b, 0x3, HDA_INPUT), HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x4, HDA_INPUT), HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x4, HDA_INPUT), - HDA_CODEC_VOLUME("Capture Volume", 0x08, 0x0, HDA_INPUT), - HDA_CODEC_MUTE("Capture Switch", 0x08, 0x0, HDA_INPUT), - HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x09, 0x0, HDA_INPUT), - HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x09, 0x0, HDA_INPUT), - { - .iface = SNDRV_CTL_ELEM_IFACE_MIXER, - .name = "Input Source", - .count = 2, - .info = alc_mux_enum_info, - .get = alc_mux_enum_get, - .put = alc_mux_enum_put, - }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Channel Mode", @@ -2243,7 +2231,7 @@ static snd_kcontrol_new_t alc260_base_mixer[] = { HDA_CODEC_VOLUME("Headphone Playback Volume", 0x09, 0x0, HDA_OUTPUT), ALC_BIND_MUTE("Headphone Playback Switch", 0x09, 2, HDA_INPUT), HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x0a, 1, 0x0, HDA_OUTPUT), - ALC_BIND_MUTE_MONO("Mono Playback Switch", 0x0a, 1, 2, HDA_OUTPUT), + ALC_BIND_MUTE_MONO("Mono Playback Switch", 0x0a, 1, 2, HDA_INPUT), HDA_CODEC_VOLUME("Capture Volume", 0x04, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Capture Switch", 0x04, 0x0, HDA_INPUT), { @@ -2270,7 +2258,7 @@ static snd_kcontrol_new_t alc260_hp_mixer[] = { HDA_CODEC_VOLUME("Headphone Playback Volume", 0x09, 0x0, HDA_OUTPUT), ALC_BIND_MUTE("Headphone Playback Switch", 0x09, 2, HDA_INPUT), HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x0a, 1, 0x0, HDA_OUTPUT), - ALC_BIND_MUTE_MONO("Mono Playback Switch", 0x0a, 1, 2, HDA_OUTPUT), + ALC_BIND_MUTE_MONO("Mono Playback Switch", 0x0a, 1, 2, HDA_INPUT), HDA_CODEC_VOLUME("Capture Volume", 0x05, 0x0, HDA_INPUT), HDA_CODEC_MUTE("Capture Switch", 0x05, 0x0, HDA_INPUT), { @@ -2501,7 +2489,7 @@ static snd_kcontrol_new_t alc882_base_mixer[] = { HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x0e, 1, 0x0, HDA_OUTPUT), HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x0e, 2, 0x0, HDA_OUTPUT), ALC_BIND_MUTE_MONO("Center Playback Switch", 0x0e, 1, 2, HDA_INPUT), - ALC_BIND_MUTE_MONO("LFE Playback Switch", 0x0e, 2, 2, HDA_OUTPUT), + ALC_BIND_MUTE_MONO("LFE Playback Switch", 0x0e, 2, 2, HDA_INPUT), HDA_CODEC_VOLUME("Side Playback Volume", 0x0f, 0x0, HDA_OUTPUT), ALC_BIND_MUTE("Side Playback Switch", 0x0f, 2, 
HDA_INPUT), HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT), diff --git a/sound/pci/korg1212/korg1212.c b/sound/pci/korg1212/korg1212.c index 09f9cbe116a3..5561fd4091e8 100644 --- a/sound/pci/korg1212/korg1212.c +++ b/sound/pci/korg1212/korg1212.c @@ -442,7 +442,7 @@ static char* stateName[] = { "Setup for play", "Playing", "Monitor mode on", - "Calibrating" + "Calibrating", "Invalid" }; diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c index 6db7de6b9719..3c0205b91e10 100644 --- a/sound/pci/via82xx.c +++ b/sound/pci/via82xx.c @@ -2147,11 +2147,13 @@ static int __devinit check_dxs_list(struct pci_dev *pci) { .subvendor = 0x1019, .subdevice = 0x0996, .action = VIA_DXS_48K }, { .subvendor = 0x1019, .subdevice = 0x0a81, .action = VIA_DXS_NO_VRA }, /* ECS K7VTA3 v8.0 */ { .subvendor = 0x1019, .subdevice = 0x0a85, .action = VIA_DXS_NO_VRA }, /* ECS L7VMM2 */ + { .subvendor = 0x1019, .subdevice = 0xa101, .action = VIA_DXS_SRC }, { .subvendor = 0x1025, .subdevice = 0x0033, .action = VIA_DXS_NO_VRA }, /* Acer Inspire 1353LM */ { .subvendor = 0x1025, .subdevice = 0x0046, .action = VIA_DXS_SRC }, /* Acer Aspire 1524 WLMi */ { .subvendor = 0x1043, .subdevice = 0x8095, .action = VIA_DXS_NO_VRA }, /* ASUS A7V8X (FIXME: possibly VIA_DXS_ENABLE?)*/ { .subvendor = 0x1043, .subdevice = 0x80a1, .action = VIA_DXS_NO_VRA }, /* ASUS A7V8-X */ { .subvendor = 0x1043, .subdevice = 0x80b0, .action = VIA_DXS_NO_VRA }, /* ASUS A7V600 & K8V*/ + { .subvendor = 0x1043, .subdevice = 0x810d, .action = VIA_DXS_SRC }, /* ASUS */ { .subvendor = 0x1043, .subdevice = 0x812a, .action = VIA_DXS_SRC }, /* ASUS A8V Deluxe */ { .subvendor = 0x1071, .subdevice = 0x8375, .action = VIA_DXS_NO_VRA }, /* Vobis/Yakumo/Mitac notebook */ { .subvendor = 0x1071, .subdevice = 0x8399, .action = VIA_DXS_NO_VRA }, /* Umax AB 595T (VIA K8N800A - VT8237) */ diff --git a/sound/ppc/pmac.c b/sound/ppc/pmac.c index e35b48d29c45..392b2abd9f13 100644 --- a/sound/ppc/pmac.c +++ b/sound/ppc/pmac.c @@ -988,6 +988,7 @@ static int __init snd_pmac_detect(pmac_t *chip) case 0x33: case 0x29: case 0x24: + case 0x50: case 0x5c: chip->num_freqs = ARRAY_SIZE(tumbler_freqs); chip->model = PMAC_SNAPPER; diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c index d5ae2055b896..2ead878bcb8f 100644 --- a/sound/usb/usbaudio.c +++ b/sound/usb/usbaudio.c @@ -1444,9 +1444,9 @@ static snd_pcm_hardware_t snd_usb_playback = SNDRV_PCM_INFO_BATCH | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER, - .buffer_bytes_max = (256*1024), + .buffer_bytes_max = 1024 * 1024, .period_bytes_min = 64, - .period_bytes_max = (128*1024), + .period_bytes_max = 512 * 1024, .periods_min = 2, .periods_max = 1024, }; @@ -1458,9 +1458,9 @@ static snd_pcm_hardware_t snd_usb_capture = SNDRV_PCM_INFO_BATCH | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER, - .buffer_bytes_max = (256*1024), + .buffer_bytes_max = 1024 * 1024, .period_bytes_min = 64, - .period_bytes_max = (128*1024), + .period_bytes_max = 512 * 1024, .periods_min = 2, .periods_max = 1024, }; diff --git a/sound/usb/usbmidi.c b/sound/usb/usbmidi.c index e0d0365453b3..f1a2e2c2e02f 100644 --- a/sound/usb/usbmidi.c +++ b/sound/usb/usbmidi.c @@ -163,7 +163,7 @@ static const uint8_t snd_usbmidi_cin_length[] = { /* * Submits the URB, with error handling. 
*/ -static int snd_usbmidi_submit_urb(struct urb* urb, int flags) +static int snd_usbmidi_submit_urb(struct urb* urb, gfp_t flags) { int err = usb_submit_urb(urb, flags); if (err < 0 && err != -ENODEV) diff --git a/sound/usb/usbmixer_maps.c b/sound/usb/usbmixer_maps.c index f05500b05ec0..c1264434e50a 100644 --- a/sound/usb/usbmixer_maps.c +++ b/sound/usb/usbmixer_maps.c @@ -237,6 +237,16 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = { .map = audigy2nx_map, .selector_map = audigy2nx_selectors, }, + { + /* Hercules DJ Console (Windows Edition) */ + .id = USB_ID(0x06f8, 0xb000), + .ignore_ctl_error = 1, + }, + { + /* Hercules DJ Console (Macintosh Edition) */ + .id = USB_ID(0x06f8, 0xd002), + .ignore_ctl_error = 1, + }, { .id = USB_ID(0x08bb, 0x2702), .map = linex_map, diff --git a/sound/usb/usbquirks.h b/sound/usb/usbquirks.h index f74e652a1e51..948759da6563 100644 --- a/sound/usb/usbquirks.h +++ b/sound/usb/usbquirks.h @@ -117,6 +117,10 @@ YAMAHA_DEVICE(0x103a, NULL), YAMAHA_DEVICE(0x103b, NULL), YAMAHA_DEVICE(0x103c, NULL), YAMAHA_DEVICE(0x103d, NULL), +YAMAHA_DEVICE(0x103e, NULL), +YAMAHA_DEVICE(0x103f, NULL), +YAMAHA_DEVICE(0x1040, NULL), +YAMAHA_DEVICE(0x1041, NULL), YAMAHA_DEVICE(0x2000, "DGP-7"), YAMAHA_DEVICE(0x2001, "DGP-5"), YAMAHA_DEVICE(0x2002, NULL), @@ -1010,6 +1014,40 @@ YAMAHA_DEVICE(0x7010, "UB99"), } } }, +{ + USB_DEVICE_VENDOR_SPEC(0x0582, 0x007a), + .driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) { + .vendor_name = "Roland", + /* RD-700SX, RD-300SX */ + .ifnum = 0, + .type = QUIRK_MIDI_FIXED_ENDPOINT, + .data = & (const snd_usb_midi_endpoint_info_t) { + .out_cables = 0x0003, + .in_cables = 0x0003 + } + } +}, + +/* Guillemot devices */ +{ + /* + * This is for the "Windows Edition" where the external MIDI ports are + * the only MIDI ports; the control data is reported through HID + * interfaces. The "Macintosh Edition" has ID 0xd002 and uses standard + * compliant USB MIDI ports for external MIDI and controls. + */ + USB_DEVICE_VENDOR_SPEC(0x06f8, 0xb000), + .driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) { + .vendor_name = "Hercules", + .product_name = "DJ Console (WE)", + .ifnum = 4, + .type = QUIRK_MIDI_FIXED_ENDPOINT, + .data = & (const snd_usb_midi_endpoint_info_t) { + .out_cables = 0x0001, + .in_cables = 0x0001 + } + } +}, /* Midiman/M-Audio devices */ { @@ -1339,10 +1377,20 @@ YAMAHA_DEVICE(0x7010, "UB99"), } }, +/* TerraTec devices */ +{ + USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0012), + .driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) { + .vendor_name = "TerraTec", + .product_name = "PHASE 26", + .ifnum = 3, + .type = QUIRK_MIDI_STANDARD_INTERFACE + } +}, { USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0013), .driver_info = (unsigned long) & (const snd_usb_audio_quirk_t) { - .vendor_name = "Terratec", + .vendor_name = "TerraTec", .product_name = "PHASE 26", .ifnum = 3, .type = QUIRK_MIDI_STANDARD_INTERFACE diff --git a/usr/.gitignore b/usr/.gitignore new file mode 100644 index 000000000000..be186a82e8d0 --- /dev/null +++ b/usr/.gitignore @@ -0,0 +1,7 @@ +# +# Generated files +# +gen_init_cpio +initramfs_data.cpio +initramfs_data.cpio.gz +initramfs_list
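
The new security/keys/permission.c above splits a key's 32-bit permission mask into four 8-bit sets (possessor, user, group and other) and ORs the possessor set on top of whichever of the other three applies, which is why the patch also changes the KEY_USR_ALL initialisers to KEY_POS_ALL | KEY_USR_ALL. The following stand-alone user-space sketch mirrors that selection logic only; the KPERM_* constants, the 0xff mask and the simplified group check are illustrative assumptions, not the kernel's actual KEY_* macros or its groups_search() handling.

#include <stdio.h>
#include <stdint.h>

#define KPERM_VIEW   0x01u   /* illustrative per-set bits, not the kernel macros */
#define KPERM_SEARCH 0x08u
#define KPERM_MASK   0xffu

static int sketch_key_permission(uint32_t key_perm,
                                 unsigned int key_uid, unsigned int key_gid,
                                 unsigned int fsuid, unsigned int fsgid,
                                 int possessed, uint32_t want)
{
    uint32_t kperm;

    /* pick the 8-bit set that applies to this caller */
    if (key_uid == fsuid)
        kperm = key_perm >> 16;   /* user set, bits 16-23 */
    else if (key_gid == fsgid)    /* supplementary groups omitted in this sketch */
        kperm = key_perm >> 8;    /* group set, bits 8-15 */
    else
        kperm = key_perm;         /* other set, bits 0-7 */

    /* the possessor set (bits 24-31) is additive on top of the above */
    if (possessed)
        kperm |= key_perm >> 24;

    kperm &= want & KPERM_MASK;
    return kperm == want;
}

int main(void)
{
    /* a key that grants VIEW and SEARCH to its possessor only */
    uint32_t perm = (KPERM_VIEW | KPERM_SEARCH) << 24;

    printf("possessed, search:     %d\n",
           sketch_key_permission(perm, 0, 0, 1000, 1000, 1, KPERM_SEARCH));
    printf("not possessed, search: %d\n",
           sketch_key_permission(perm, 0, 0, 1000, 1000, 0, KPERM_SEARCH));
    return 0;
}

Built as an ordinary C program, the first call reports the search bit granted (possession supplies it) while the second reports it denied, which is the behaviour the patch threads through lookup_user_key() and keyring_search_aux() by carrying possession in key_ref_t.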