
Merge branch 'linux-2.6' into for-2.6.24

Paul Mackerras 17 years ago
parent
commit
70f227d884
100 changed files with 1547 additions and 956 deletions
  1. 219 0
      Documentation/crypto/async-tx-api.txt
  2. 2 0
      Documentation/devices.txt
  3. 254 254
      Documentation/input/iforce-protocol.txt
  4. 1 1
      Documentation/lguest/lguest.c
  5. 3 3
      MAINTAINERS
  6. 2 2
      Makefile
  7. 2 2
      arch/arm/kernel/bios32.c
  8. 1 1
      arch/arm/mach-ep93xx/core.c
  9. 11 1
      arch/arm/mm/cache-l2x0.c
  10. 1 1
      arch/i386/boot/header.S
  11. 29 10
      arch/i386/boot/memory.c
  12. 10 4
      arch/i386/boot/video.c
  13. 10 31
      arch/i386/kernel/acpi/wakeup.S
  14. 4 1
      arch/i386/xen/mmu.c
  15. 1 4
      arch/mips/kernel/i8259.c
  16. 2 8
      arch/mips/kernel/irq-msc01.c
  17. 1 9
      arch/mips/kernel/irq.c
  18. 1 1
      arch/mips/kernel/scall64-o32.S
  19. 4 1
      arch/mips/kernel/smtc.c
  20. 2 0
      arch/mips/kernel/vmlinux.lds.S
  21. 2 2
      arch/mips/sgi-ip32/ip32-platform.c
  22. 2 0
      arch/mips/sibyte/bcm1480/setup.c
  23. 1 0
      arch/powerpc/boot/dts/mpc8349emitx.dts
  24. 7 0
      arch/powerpc/kernel/process.c
  25. 2 2
      arch/powerpc/platforms/83xx/usb.c
  26. 2 2
      arch/powerpc/platforms/cell/spufs/file.c
  27. 1 1
      arch/powerpc/platforms/pseries/xics.c
  28. 1 1
      arch/powerpc/sysdev/commproc.c
  29. 1 1
      arch/ppc/8xx_io/commproc.c
  30. 2 0
      arch/sparc/kernel/ebus.c
  31. 2 2
      arch/sparc64/kernel/binfmt_aout32.c
  32. 4 1
      arch/sparc64/kernel/ebus.c
  33. 4 4
      arch/sparc64/lib/NGcopy_from_user.S
  34. 4 4
      arch/sparc64/lib/NGcopy_to_user.S
  35. 213 158
      arch/sparc64/lib/NGmemcpy.S
  36. 0 8
      arch/x86_64/Kconfig
  37. 15 3
      arch/x86_64/ia32/ia32entry.S
  38. 13 34
      arch/x86_64/kernel/acpi/wakeup.S
  39. 0 1
      arch/x86_64/kernel/process.c
  40. 0 4
      arch/x86_64/kernel/ptrace.c
  41. 1 1
      arch/x86_64/kernel/smp.c
  42. 1 1
      arch/x86_64/vdso/voffset.h
  43. 10 2
      crypto/async_tx/async_tx.c
  44. 2 0
      drivers/acpi/processor_core.c
  45. 18 1
      drivers/acpi/processor_idle.c
  46. 2 2
      drivers/acpi/sleep/Makefile
  47. 55 6
      drivers/acpi/sleep/main.c
  48. 0 75
      drivers/acpi/sleep/poweroff.c
  49. 1 2
      drivers/acpi/video.c
  50. 6 4
      drivers/ata/ahci.c
  51. 7 0
      drivers/ata/ata_piix.c
  52. 4 0
      drivers/ata/libata-core.c
  53. 4 1
      drivers/ata/libata-sff.c
  54. 2 1
      drivers/ata/pata_sis.c
  55. 12 4
      drivers/ata/sata_sil24.c
  56. 1 0
      drivers/base/core.c
  57. 4 0
      drivers/cdrom/cdrom.c
  58. 6 0
      drivers/char/drm/i915_drv.h
  59. 12 0
      drivers/char/drm/i915_irq.c
  60. 6 3
      drivers/char/hpet.c
  61. 8 18
      drivers/char/mspec.c
  62. 6 4
      drivers/char/random.c
  63. 10 5
      drivers/char/vt_ioctl.c
  64. 1 1
      drivers/ieee1394/ieee1394_core.c
  65. 1 3
      drivers/ieee1394/ohci1394.c
  66. 49 13
      drivers/infiniband/hw/mlx4/qp.c
  67. 1 1
      drivers/input/joystick/Kconfig
  68. 4 2
      drivers/input/mouse/appletouch.c
  69. 2 1
      drivers/kvm/Kconfig
  70. 3 3
      drivers/lguest/lguest_asm.S
  71. 7 10
      drivers/md/raid5.c
  72. 4 2
      drivers/media/video/ivtv/ivtv-fileops.c
  73. 2 3
      drivers/media/video/usbvision/usbvision-video.c
  74. 4 3
      drivers/net/bnx2.c
  75. 1 0
      drivers/net/e1000/e1000_ethtool.c
  76. 1 0
      drivers/net/e1000/e1000_hw.c
  77. 1 0
      drivers/net/e1000/e1000_hw.h
  78. 2 0
      drivers/net/e1000/e1000_main.c
  79. 1 4
      drivers/net/mv643xx_eth.c
  80. 3 1
      drivers/net/mv643xx_eth.h
  81. 3 0
      drivers/net/myri10ge/myri10ge.c
  82. 1 1
      drivers/net/pcmcia/3c589_cs.c
  83. 1 0
      drivers/net/phy/phy.c
  84. 6 8
      drivers/net/ppp_mppe.c
  85. 1 2
      drivers/net/pppoe.c
  86. 53 65
      drivers/net/pppol2tp.c
  87. 7 0
      drivers/net/qla3xxx.c
  88. 13 1
      drivers/net/r8169.c
  89. 293 115
      drivers/net/sky2.c
  90. 33 8
      drivers/net/sky2.h
  91. 1 1
      drivers/net/usb/dm9601.c
  92. 1 1
      drivers/net/wireless/Makefile
  93. 3 4
      drivers/pci/quirks.c
  94. 1 0
      drivers/power/power_supply_sysfs.c
  95. 2 2
      drivers/scsi/aic94xx/aic94xx_task.c
  96. 2 1
      drivers/scsi/esp_scsi.c
  97. 22 6
      drivers/scsi/scsi_transport_spi.c
  98. 1 1
      drivers/serial/cpm_uart/cpm_uart_cpm1.h
  99. 1 1
      drivers/serial/sunsab.c
  100. 1 0
      drivers/w1/w1.c

+ 219 - 0
Documentation/crypto/async-tx-api.txt

@@ -0,0 +1,219 @@
+		 Asynchronous Transfers/Transforms API
+
+1 INTRODUCTION
+
+2 GENEALOGY
+
+3 USAGE
+3.1 General format of the API
+3.2 Supported operations
+3.3 Descriptor management
+3.4 When does the operation execute?
+3.5 When does the operation complete?
+3.6 Constraints
+3.7 Example
+
+4 DRIVER DEVELOPER NOTES
+4.1 Conformance points
+4.2 "My application needs finer control of hardware channels"
+
+5 SOURCE
+
+---
+
+1 INTRODUCTION
+
+The async_tx API provides methods for describing a chain of asynchronous
+bulk memory transfers/transforms with support for inter-transactional
+dependencies.  It is implemented as a dmaengine client that smooths over
+the details of different hardware offload engine implementations.  Code
+that is written to the API can optimize for asynchronous operation and
+the API will fit the chain of operations to the available offload
+resources.
+
+2 GENEALOGY
+
+The API was initially designed to offload the memory copy and
+xor-parity-calculations of the md-raid5 driver using the offload engines
+present in the Intel(R) Xscale series of I/O processors.  It also built
+on the 'dmaengine' layer developed for offloading memory copies in the
+network stack using Intel(R) I/OAT engines.  The following design
+features surfaced as a result:
+1/ implicit synchronous path: users of the API do not need to know if
+   the platform they are running on has offload capabilities.  The
+   operation will be offloaded when an engine is available and carried out
+   in software otherwise.
+2/ cross channel dependency chains: the API allows a chain of dependent
+   operations to be submitted, like xor->copy->xor in the raid5 case.  The
+   API automatically handles cases where the transition from one operation
+   to another implies a hardware channel switch.
+3/ dmaengine extensions to support multiple clients and operation types
+   beyond 'memcpy'
+
+3 USAGE
+
+3.1 General format of the API:
+struct dma_async_tx_descriptor *
+async_<operation>(<op specific parameters>,
+		  enum async_tx_flags flags,
+        	  struct dma_async_tx_descriptor *dependency,
+        	  dma_async_tx_callback callback_routine,
+		  void *callback_parameter);
+
+3.2 Supported operations:
+memcpy       - memory copy between a source and a destination buffer
+memset       - fill a destination buffer with a byte value
+xor          - xor a series of source buffers and write the result to a
+	       destination buffer
+xor_zero_sum - xor a series of source buffers and set a flag if the
+	       result is zero.  The implementation attempts to prevent
+	       writes to memory
+
+3.3 Descriptor management:
+The return value is non-NULL and points to a 'descriptor' when the operation
+has been queued to execute asynchronously.  Descriptors are recycled
+resources, under control of the offload engine driver, to be reused as
+operations complete.  When an application needs to submit a chain of
+operations it must guarantee that the descriptor is not automatically recycled
+before the dependency is submitted.  This requires that all descriptors be
+acknowledged by the application before the offload engine driver is allowed to
+recycle (or free) the descriptor.  A descriptor can be acked by one of the
+following methods:
+1/ setting the ASYNC_TX_ACK flag if no child operations are to be submitted
+2/ setting the ASYNC_TX_DEP_ACK flag to acknowledge the parent
+   descriptor of a new operation.
+3/ calling async_tx_ack() on the descriptor.
+
+3.4 When does the operation execute?
+Operations do not immediately issue after return from the
+async_<operation> call.  Offload engine drivers batch operations to
+improve performance by reducing the number of mmio cycles needed to
+manage the channel.  Once a driver-specific threshold is met the driver
+automatically issues pending operations.  An application can force this
+event by calling async_tx_issue_pending_all().  This operates on all
+channels since the application has no knowledge of channel to operation
+mapping.
+
+3.5 When does the operation complete?
+There are two methods for an application to learn about the completion
+of an operation.
+1/ Call dma_wait_for_async_tx().  This call causes the CPU to spin while
+   it polls for the completion of the operation.  It handles dependency
+   chains and issuing pending operations.
+2/ Specify a completion callback.  The callback routine runs in tasklet
+   context if the offload engine driver supports interrupts, or it is
+   called in application context if the operation is carried out
+   synchronously in software.  The callback can be set in the call to
+   async_<operation>, or when the application needs to submit a chain of
+   unknown length it can use the async_trigger_callback() routine to set a
+   completion interrupt/callback at the end of the chain.
+
+3.6 Constraints:
+1/ Calls to async_<operation> are not permitted in IRQ context.  Other
+   contexts are permitted provided constraint #2 is not violated.
+2/ Completion callback routines cannot submit new operations.  This
+   results in recursion in the synchronous case and spin_locks being
+   acquired twice in the asynchronous case.
+
+3.7 Example:
+Perform a xor->copy->xor operation where each operation depends on the
+result from the previous operation:
+
+void complete_xor_copy_xor(void *param)
+{
+	printk("complete\n");
+}
+
+int run_xor_copy_xor(struct page **xor_srcs,
+		     int xor_src_cnt,
+		     struct page *xor_dest,
+		     size_t xor_len,
+		     struct page *copy_src,
+		     struct page *copy_dest,
+		     size_t copy_len)
+{
+	struct dma_async_tx_descriptor *tx;
+
+	tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
+		       ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL);
+	tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len,
+			  ASYNC_TX_DEP_ACK, tx, NULL, NULL);
+	tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
+		       ASYNC_TX_XOR_DROP_DST | ASYNC_TX_DEP_ACK | ASYNC_TX_ACK,
+		       tx, complete_xor_copy_xor, NULL);
+
+	async_tx_issue_pending_all();
+}
+
+See include/linux/async_tx.h for more information on the flags.  See the
+ops_run_* and ops_complete_* routines in drivers/md/raid5.c for more
+implementation examples.
+
+4 DRIVER DEVELOPMENT NOTES
+4.1 Conformance points:
+There are a few conformance points required in dmaengine drivers to
+accommodate assumptions made by applications using the async_tx API:
+1/ Completion callbacks are expected to happen in tasklet context
+2/ dma_async_tx_descriptor fields are never manipulated in IRQ context
+3/ Use async_tx_run_dependencies() in the descriptor clean up path to
+   handle submission of dependent operations
+
+4.2 "My application needs finer control of hardware channels"
+This requirement seems to arise from cases where a DMA engine driver is
+trying to support device-to-memory DMA.  The dmaengine and async_tx
+implementations were designed for offloading memory-to-memory
+operations; however, there are some capabilities of the dmaengine layer
+that can be used for platform-specific channel management.
+Platform-specific constraints can be handled by registering the
+application as a 'dma_client' and implementing a 'dma_event_callback' to
+apply a filter to the available channels in the system.  Before showing
+how to implement a custom dma_event callback some background of
+dmaengine's client support is required.
+
+The following routines in dmaengine support multiple clients requesting
+use of a channel:
+- dma_async_client_register(struct dma_client *client)
+- dma_async_client_chan_request(struct dma_client *client)
+
+dma_async_client_register takes a pointer to an initialized dma_client
+structure.  It expects that the 'event_callback' and 'cap_mask' fields
+are already initialized.
+
+dma_async_client_chan_request triggers dmaengine to notify the client of
+all channels that satisfy the capability mask.  It is up to the client's
+event_callback routine to track how many channels the client needs and
+how many it is currently using.  The dma_event_callback routine returns a
+dma_state_client code to let dmaengine know the status of the
+allocation.
+
+Below is the example of how to extend this functionality for
+platform-specific filtering of the available channels beyond the
+standard capability mask:
+
+static enum dma_state_client
+my_dma_client_callback(struct dma_client *client,
+			struct dma_chan *chan, enum dma_state state)
+{
+	struct dma_device *dma_dev;
+	struct my_platform_specific_dma *plat_dma_dev;
+	
+	dma_dev = chan->device;
+	plat_dma_dev = container_of(dma_dev,
+				    struct my_platform_specific_dma,
+				    dma_dev);
+
+	if (!plat_dma_dev->platform_specific_capability)
+		return DMA_DUP;
+
+	. . .
+}
+
+5 SOURCE
+include/linux/dmaengine.h: core header file for DMA drivers and clients
+drivers/dma/dmaengine.c: offload engine channel management routines
+drivers/dma/: location for offload engine drivers
+include/linux/async_tx.h: core header file for the async_tx api
+crypto/async_tx/async_tx.c: async_tx interface to dmaengine and common code
+crypto/async_tx/async_memcpy.c: copy offload
+crypto/async_tx/async_memset.c: memory fill offload
+crypto/async_tx/async_xor.c: xor and xor zero sum offload
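
A minimal sketch of the descriptor rules from sections 3.3-3.5 above, using only calls this document already names (async_memcpy, ASYNC_TX_ACK, async_tx_issue_pending_all, dma_wait_for_async_tx); it is an illustration, not part of the patch. A standalone operation that will never gain a dependent child can be acknowledged at submission time and then waited on synchronously:

#include <linux/async_tx.h>

/* Copy one page and wait for completion.  The descriptor is acked up
 * front because no dependent operation will ever be chained to it. */
static void copy_page_and_wait(struct page *dest, struct page *src, size_t len)
{
	struct dma_async_tx_descriptor *tx;

	tx = async_memcpy(dest, src, 0, 0, len, ASYNC_TX_ACK,
			  NULL, NULL, NULL);

	/* Push any batched descriptors out to the hardware channels. */
	async_tx_issue_pending_all();

	/* A NULL return means the copy was already carried out synchronously
	 * in software; otherwise spin until the channel reports completion. */
	if (tx)
		dma_wait_for_async_tx(tx);
}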

+ 2 - 0
Documentation/devices.txt

@@ -94,6 +94,8 @@ Your cooperation is appreciated.
 		  9 = /dev/urandom	Faster, less secure random number gen.
 		 10 = /dev/aio		Asynchronous I/O notification interface
 		 11 = /dev/kmsg		Writes to this come out as printk's
+		 12 = /dev/oldmem	Used by crashdump kernels to access
+					the memory of the kernel that crashed.
 
   1 block	RAM disk
 		  0 = /dev/ram0		First RAM disk

+ 254 - 254
Documentation/input/iforce-protocol.txt

@@ -1,254 +1,254 @@
-** Introduction
-This document describes what I managed to discover about the protocol used to
-specify force effects to I-Force 2.0 devices.  None of this information comes
-from Immerse. That's why you should not trust what is written in this
-document. This document is intended to help understanding the protocol.
-This is not a reference. Comments and corrections are welcome.  To contact me,
-send an email to: deneux@ifrance.com
-
-** WARNING **
-I may not be held responsible for any dammage or harm caused if you try to
-send data to your I-Force device based on what you read in this document.
-
-** Preliminary Notes:
-All values are hexadecimal with big-endian encoding (msb on the left). Beware,
-values inside packets are encoded using little-endian.  Bytes whose roles are
-unknown are marked ???  Information that needs deeper inspection is marked (?)
-
-** General form of a packet **
-This is how packets look when the device uses the rs232 to communicate.
-2B OP LEN DATA CS
-CS is the checksum. It is equal to the exclusive or of all bytes.
-
-When using USB:
-OP DATA
-The 2B, LEN and CS fields have disappeared, probably because USB handles frames and
-data corruption is handled or unsignificant.
-
-First, I describe effects that are sent by the device to the computer
-
-** Device input state
-This packet is used to indicate the state of each button and the value of each
-axis
-OP= 01 for a joystick, 03 for a wheel
-LEN= Varies from device to device
-00 X-Axis lsb
-01 X-Axis msb
-02 Y-Axis lsb, or gas pedal for a wheel
-03 Y-Axis msb, or brake pedal for a wheel
-04 Throttle
-05 Buttons
-06 Lower 4 bits: Buttons
-   Upper 4 bits: Hat
-07 Rudder
-
-** Device effects states
-OP= 02
-LEN= Varies
-00 ? Bit 1 (Value 2) is the value of the deadman switch
-01 Bit 8 is set if the effect is playing. Bits 0 to 7 are the effect id.
-02 ??
-03 Address of parameter block changed (lsb)
-04 Address of parameter block changed (msb)
-05 Address of second parameter block changed (lsb)
-... depending on the number of parameter blocks updated
-
-** Force effect **
-OP=  01
-LEN= 0e
-00 Channel (when playing several effects at the same time, each must be assigned a channel)
-01 Wave form
-	Val 00 Constant
-	Val 20 Square
-	Val 21 Triangle
-	Val 22 Sine
-	Val 23 Sawtooth up
-	Val 24 Sawtooth down
-	Val 40 Spring (Force = f(pos))
-	Val 41 Friction (Force = f(velocity)) and Inertia (Force = f(acceleration))
-
-	
-02 Axes affected and trigger
-	Bits 4-7: Val 2 = effect along one axis. Byte 05 indicates direction
-	          Val 4 = X axis only. Byte 05 must contain 5a
-	          Val 8 = Y axis only. Byte 05 must contain b4
-	          Val c = X and Y axes. Bytes 05 must contain 60
-	Bits 0-3: Val 0 = No trigger
-	          Val x+1 = Button x triggers the effect
-	When the whole byte is 0, cancel the previously set trigger
-
-03-04 Duration of effect (little endian encoding, in ms)
-
-05 Direction of effect, if applicable. Else, see 02 for value to assign.
-
-06-07 Minimum time between triggering.
-
-08-09 Address of periodicity or magnitude parameters
-0a-0b Address of attack and fade parameters, or ffff if none.
-*or*
-08-09 Address of interactive parameters for X-axis, or ffff if not applicable
-0a-0b Address of interactive parameters for Y-axis, or ffff if not applicable
-
-0c-0d Delay before execution of effect (little endian encoding, in ms)
-
-
-** Time based parameters **
-
-*** Attack and fade ***
-OP=  02
-LEN= 08
-00-01 Address where to store the parameteres
-02-03 Duration of attack (little endian encoding, in ms)
-04 Level at end of attack. Signed byte.
-05-06 Duration of fade.
-07 Level at end of fade.
-
-*** Magnitude ***
-OP=  03
-LEN= 03
-00-01 Address
-02 Level. Signed byte.
-
-*** Periodicity ***
-OP=  04
-LEN= 07
-00-01 Address
-02 Magnitude. Signed byte.
-03 Offset. Signed byte.
-04 Phase. Val 00 = 0 deg, Val 40 = 90 degs.
-05-06 Period (little endian encoding, in ms)
-
-** Interactive parameters **
-OP=  05
-LEN= 0a
-00-01 Address
-02 Positive Coeff
-03 Negative Coeff
-04+05 Offset (center)
-06+07 Dead band (Val 01F4 = 5000 (decimal))
-08 Positive saturation (Val 0a = 1000 (decimal) Val 64 = 10000 (decimal))
-09 Negative saturation
-
-The encoding is a bit funny here: For coeffs, these are signed values. The
-maximum value is 64 (100 decimal), the min is 9c.
-For the offset, the minimum value is FE0C, the maximum value is 01F4.
-For the deadband, the minimum value is 0, the max is 03E8.
-
-** Controls **
-OP=  41
-LEN= 03
-00 Channel
-01 Start/Stop
-	Val 00: Stop
-	Val 01: Start and play once.
-	Val 41: Start and play n times (See byte 02 below)
-02 Number of iterations n.
-
-** Init **
-
-*** Querying features ***
-OP=  ff
-Query command. Length varies according to the query type.
-The general format of this packet is:
-ff 01 QUERY [INDEX] CHECKSUM
-reponses are of the same form:
-FF LEN QUERY VALUE_QUERIED CHECKSUM2
-where LEN = 1 + length(VALUE_QUERIED)
-
-**** Query ram size ****
-QUERY = 42 ('B'uffer size)
-The device should reply with the same packet plus two additionnal bytes
-containing the size of the memory:
-ff 03 42 03 e8 CS would mean that the device has 1000 bytes of ram available.
-
-**** Query number of effects ****
-QUERY = 4e ('N'umber of effects)
-The device should respond by sending the number of effects that can be played
-at the same time (one byte)
-ff 02 4e 14 CS would stand for 20 effects.
-
-**** Vendor's id ****
-QUERY = 4d ('M'anufacturer)
-Query the vendors'id (2 bytes)
-
-**** Product id *****
-QUERY = 50 ('P'roduct)
-Query the product id (2 bytes)
-
-**** Open device ****
-QUERY = 4f ('O'pen) 
-No data returned.
-
-**** Close device *****
-QUERY = 43 ('C')lose
-No data returned.
-
-**** Query effect ****
-QUERY = 45 ('E') 
-Send effect type.
-Returns nonzero if supported (2 bytes)
-
-**** Firmware Version ****
-QUERY = 56 ('V'ersion)
-Sends back 3 bytes - major, minor, subminor
-
-*** Initialisation of the device ***
-
-**** Set Control ****
-!!! Device dependent, can be different on different models !!!
-OP=  40 <idx> <val> [<val>]
-LEN= 2 or 3
-00 Idx
-   Idx 00 Set dead zone (0..2048) 
-   Idx 01 Ignore Deadman sensor (0..1)     
-   Idx 02 Enable comm watchdog (0..1)     
-   Idx 03 Set the strength of the spring (0..100)   
-   Idx 04 Enable or disable the spring (0/1)
-   Idx 05 Set axis saturation threshold (0..2048) 
-
-**** Set Effect State ****
-OP=  42 <val>
-LEN= 1
-00 State
-   Bit 3 Pause force feedback
-   Bit 2 Enable force feedback
-   Bit 0 Stop all effects
-
-**** Set overall gain ****
-OP=  43 <val>
-LEN= 1
-00 Gain
-   Val 00 = 0%
-   Val 40 = 50%
-   Val 80 = 100%
-
-** Parameter memory **
-
-Each device has a certain amount of memory to store parameters of effects.
-The amount of RAM may vary, I encountered values from 200 to 1000 bytes. Below
-is the amount of memory apparently needed for every set of parameters:
- - period : 0c
- - magnitude : 02
- - attack and fade : 0e
- - interactive : 08
-
-** Appendix: How to study the protocol ? **
-
-1. Generate effects using the force editor provided with the DirectX SDK, or use Immersion Studio (freely available at their web site in the developer section: www.immersion.com)
-2. Start a soft spying RS232 or USB (depending on where you connected your joystick/wheel). I used ComPortSpy from fCoder (alpha version!)
-3. Play the effect, and watch what happens on the spy screen.
-
-A few words about ComPortSpy:
-At first glance, this soft seems, hum, well... buggy. In fact, data appear with a few seconds latency. Personnaly, I restart it every time I play an effect.
-Remember it's free (as in free beer) and alpha!
-
-** URLS **
-Check www.immerse.com for Immersion Studio, and www.fcoder.com for ComPortSpy.
-
-** Author of this document **
-Johann Deneux <deneux@ifrance.com>
-Home page at http://www.esil.univ-mrs.fr/~jdeneux/projects/ff/
-
-Additions by Vojtech Pavlik.
-
-I-Force is trademark of Immersion Corp.
+** Introduction
+This document describes what I managed to discover about the protocol used to
+specify force effects to I-Force 2.0 devices.  None of this information comes
+from Immerse. That's why you should not trust what is written in this
+document. This document is intended to help understanding the protocol.
+This is not a reference. Comments and corrections are welcome.  To contact me,
+send an email to: deneux@ifrance.com
+
+** WARNING **
+I may not be held responsible for any dammage or harm caused if you try to
+send data to your I-Force device based on what you read in this document.
+
+** Preliminary Notes:
+All values are hexadecimal with big-endian encoding (msb on the left). Beware,
+values inside packets are encoded using little-endian.  Bytes whose roles are
+unknown are marked ???  Information that needs deeper inspection is marked (?)
+
+** General form of a packet **
+This is how packets look when the device uses the rs232 to communicate.
+2B OP LEN DATA CS
+CS is the checksum. It is equal to the exclusive or of all bytes.
+
+When using USB:
+OP DATA
+The 2B, LEN and CS fields have disappeared, probably because USB handles frames and
+data corruption is handled or unsignificant.
+
+First, I describe effects that are sent by the device to the computer
+
+** Device input state
+This packet is used to indicate the state of each button and the value of each
+axis
+OP= 01 for a joystick, 03 for a wheel
+LEN= Varies from device to device
+00 X-Axis lsb
+01 X-Axis msb
+02 Y-Axis lsb, or gas pedal for a wheel
+03 Y-Axis msb, or brake pedal for a wheel
+04 Throttle
+05 Buttons
+06 Lower 4 bits: Buttons
+   Upper 4 bits: Hat
+07 Rudder
+
+** Device effects states
+OP= 02
+LEN= Varies
+00 ? Bit 1 (Value 2) is the value of the deadman switch
+01 Bit 8 is set if the effect is playing. Bits 0 to 7 are the effect id.
+02 ??
+03 Address of parameter block changed (lsb)
+04 Address of parameter block changed (msb)
+05 Address of second parameter block changed (lsb)
+... depending on the number of parameter blocks updated
+
+** Force effect **
+OP=  01
+LEN= 0e
+00 Channel (when playing several effects at the same time, each must be assigned a channel)
+01 Wave form
+	Val 00 Constant
+	Val 20 Square
+	Val 21 Triangle
+	Val 22 Sine
+	Val 23 Sawtooth up
+	Val 24 Sawtooth down
+	Val 40 Spring (Force = f(pos))
+	Val 41 Friction (Force = f(velocity)) and Inertia (Force = f(acceleration))
+
+
+02 Axes affected and trigger
+	Bits 4-7: Val 2 = effect along one axis. Byte 05 indicates direction
+	          Val 4 = X axis only. Byte 05 must contain 5a
+	          Val 8 = Y axis only. Byte 05 must contain b4
+	          Val c = X and Y axes. Bytes 05 must contain 60
+	Bits 0-3: Val 0 = No trigger
+	          Val x+1 = Button x triggers the effect
+	When the whole byte is 0, cancel the previously set trigger
+
+03-04 Duration of effect (little endian encoding, in ms)
+
+05 Direction of effect, if applicable. Else, see 02 for value to assign.
+
+06-07 Minimum time between triggering.
+
+08-09 Address of periodicity or magnitude parameters
+0a-0b Address of attack and fade parameters, or ffff if none.
+*or*
+08-09 Address of interactive parameters for X-axis, or ffff if not applicable
+0a-0b Address of interactive parameters for Y-axis, or ffff if not applicable
+
+0c-0d Delay before execution of effect (little endian encoding, in ms)
+
+
+** Time based parameters **
+
+*** Attack and fade ***
+OP=  02
+LEN= 08
+00-01 Address where to store the parameteres
+02-03 Duration of attack (little endian encoding, in ms)
+04 Level at end of attack. Signed byte.
+05-06 Duration of fade.
+07 Level at end of fade.
+
+*** Magnitude ***
+OP=  03
+LEN= 03
+00-01 Address
+02 Level. Signed byte.
+
+*** Periodicity ***
+OP=  04
+LEN= 07
+00-01 Address
+02 Magnitude. Signed byte.
+03 Offset. Signed byte.
+04 Phase. Val 00 = 0 deg, Val 40 = 90 degs.
+05-06 Period (little endian encoding, in ms)
+
+** Interactive parameters **
+OP=  05
+LEN= 0a
+00-01 Address
+02 Positive Coeff
+03 Negative Coeff
+04+05 Offset (center)
+06+07 Dead band (Val 01F4 = 5000 (decimal))
+08 Positive saturation (Val 0a = 1000 (decimal) Val 64 = 10000 (decimal))
+09 Negative saturation
+
+The encoding is a bit funny here: For coeffs, these are signed values. The
+maximum value is 64 (100 decimal), the min is 9c.
+For the offset, the minimum value is FE0C, the maximum value is 01F4.
+For the deadband, the minimum value is 0, the max is 03E8.
+
+** Controls **
+OP=  41
+LEN= 03
+00 Channel
+01 Start/Stop
+	Val 00: Stop
+	Val 01: Start and play once.
+	Val 41: Start and play n times (See byte 02 below)
+02 Number of iterations n.
+
+** Init **
+
+*** Querying features ***
+OP=  ff
+Query command. Length varies according to the query type.
+The general format of this packet is:
+ff 01 QUERY [INDEX] CHECKSUM
+reponses are of the same form:
+FF LEN QUERY VALUE_QUERIED CHECKSUM2
+where LEN = 1 + length(VALUE_QUERIED)
+
+**** Query ram size ****
+QUERY = 42 ('B'uffer size)
+The device should reply with the same packet plus two additionnal bytes
+containing the size of the memory:
+ff 03 42 03 e8 CS would mean that the device has 1000 bytes of ram available.
+
+**** Query number of effects ****
+QUERY = 4e ('N'umber of effects)
+The device should respond by sending the number of effects that can be played
+at the same time (one byte)
+ff 02 4e 14 CS would stand for 20 effects.
+
+**** Vendor's id ****
+QUERY = 4d ('M'anufacturer)
+Query the vendors'id (2 bytes)
+
+**** Product id *****
+QUERY = 50 ('P'roduct)
+Query the product id (2 bytes)
+
+**** Open device ****
+QUERY = 4f ('O'pen)
+No data returned.
+
+**** Close device *****
+QUERY = 43 ('C')lose
+No data returned.
+
+**** Query effect ****
+QUERY = 45 ('E')
+Send effect type.
+Returns nonzero if supported (2 bytes)
+
+**** Firmware Version ****
+QUERY = 56 ('V'ersion)
+Sends back 3 bytes - major, minor, subminor
+
+*** Initialisation of the device ***
+
+**** Set Control ****
+!!! Device dependent, can be different on different models !!!
+OP=  40 <idx> <val> [<val>]
+LEN= 2 or 3
+00 Idx
+   Idx 00 Set dead zone (0..2048)
+   Idx 01 Ignore Deadman sensor (0..1)
+   Idx 02 Enable comm watchdog (0..1)
+   Idx 03 Set the strength of the spring (0..100)
+   Idx 04 Enable or disable the spring (0/1)
+   Idx 05 Set axis saturation threshold (0..2048)
+
+**** Set Effect State ****
+OP=  42 <val>
+LEN= 1
+00 State
+   Bit 3 Pause force feedback
+   Bit 2 Enable force feedback
+   Bit 0 Stop all effects
+
+**** Set overall gain ****
+OP=  43 <val>
+LEN= 1
+00 Gain
+   Val 00 = 0%
+   Val 40 = 50%
+   Val 80 = 100%
+
+** Parameter memory **
+
+Each device has a certain amount of memory to store parameters of effects.
+The amount of RAM may vary, I encountered values from 200 to 1000 bytes. Below
+is the amount of memory apparently needed for every set of parameters:
+ - period : 0c
+ - magnitude : 02
+ - attack and fade : 0e
+ - interactive : 08
+
+** Appendix: How to study the protocol ? **
+
+1. Generate effects using the force editor provided with the DirectX SDK, or use Immersion Studio (freely available at their web site in the developer section: www.immersion.com)
+2. Start a soft spying RS232 or USB (depending on where you connected your joystick/wheel). I used ComPortSpy from fCoder (alpha version!)
+3. Play the effect, and watch what happens on the spy screen.
+
+A few words about ComPortSpy:
+At first glance, this soft seems, hum, well... buggy. In fact, data appear with a few seconds latency. Personnaly, I restart it every time I play an effect.
+Remember it's free (as in free beer) and alpha!
+
+** URLS **
+Check www.immerse.com for Immersion Studio, and www.fcoder.com for ComPortSpy.
+
+** Author of this document **
+Johann Deneux <deneux@ifrance.com>
+Home page at http://www.esil.univ-mrs.fr/~jdeneux/projects/ff/
+
+Additions by Vojtech Pavlik.
+
+I-Force is trademark of Immersion Corp.
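
The framing described in "General form of a packet" above (2B OP LEN DATA CS, with CS the exclusive or of the bytes) amounts to very little code. The sketch below is an illustration only, not part of this commit, and it assumes the 0x2b start byte participates in the XOR, which the text does not state explicitly:

#include <stddef.h>
#include <stdint.h>

/* Assemble an RS-232 I-Force packet: 2B OP LEN DATA CS.
 * Returns the total number of bytes written to 'out' (len + 4). */
static size_t iforce_build_packet(uint8_t op, const uint8_t *data,
				  uint8_t len, uint8_t *out)
{
	uint8_t cs = 0;
	size_t i, n = 0;

	out[n++] = 0x2b;		/* start byte */
	out[n++] = op;			/* OP */
	out[n++] = len;			/* LEN */
	for (i = 0; i < len; i++)
		out[n++] = data[i];	/* DATA */

	for (i = 0; i < n; i++)		/* CS = XOR of the bytes so far */
		cs ^= out[i];
	out[n++] = cs;

	return n;
}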

+ 1 - 1
Documentation/lguest/lguest.c

@@ -882,7 +882,7 @@ static u32 handle_block_output(int fd, const struct iovec *iov,
 		 * of the block file (possibly extending it). */
 		if (off + len > device_len) {
 			/* Trim it back to the correct length */
-			ftruncate(dev->fd, device_len);
+			ftruncate64(dev->fd, device_len);
 			/* Die, bad Guest, die. */
 			errx(1, "Write past end %llu+%u", off, len);
 		}

+ 3 - 3
MAINTAINERS

@@ -2624,8 +2624,8 @@ P:	Harald Welte
 P:	Jozsef Kadlecsik
 P:	Patrick McHardy
 M:	kaber@trash.net
-L:	netfilter-devel@lists.netfilter.org
-L:	netfilter@lists.netfilter.org (subscribers-only)
+L:	netfilter-devel@vger.kernel.org
+L:	netfilter@vger.kernel.org
 L:	coreteam@netfilter.org
 W:	http://www.netfilter.org/
 W:	http://www.iptables.org/
@@ -2678,7 +2678,7 @@ M:	jmorris@namei.org
 P:	Hideaki YOSHIFUJI
 M:	yoshfuji@linux-ipv6.org
 P:	Patrick McHardy
-M:	kaber@coreworks.de
+M:	kaber@trash.net
 L:	netdev@vger.kernel.org
 T:	git kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6.git
 S:	Maintained

+ 2 - 2
Makefile

@@ -1,8 +1,8 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 23
-EXTRAVERSION =-rc6
-NAME = Pink Farting Weasel
+EXTRAVERSION =-rc9
+NAME = Arr Matey! A Hairy Bilge Rat!
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"

+ 2 - 2
arch/arm/kernel/bios32.c

@@ -338,7 +338,7 @@ pbus_assign_bus_resources(struct pci_bus *bus, struct pci_sys_data *root)
  * pcibios_fixup_bus - Called after each bus is probed,
  * but before its children are examined.
  */
-void __devinit pcibios_fixup_bus(struct pci_bus *bus)
+void pcibios_fixup_bus(struct pci_bus *bus)
 {
 	struct pci_sys_data *root = bus->sysdata;
 	struct pci_dev *dev;
@@ -419,7 +419,7 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus)
 /*
  * Convert from Linux-centric to bus-centric addresses for bridge devices.
  */
-void __devinit
+void
 pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
 			 struct resource *res)
 {

+ 1 - 1
arch/arm/mach-ep93xx/core.c

@@ -336,7 +336,7 @@ static int ep93xx_gpio_irq_type(unsigned int irq, unsigned int type)
 	if (line >= 0 && line < 16) {
 		gpio_line_config(line, GPIO_IN);
 	} else {
-		gpio_line_config(EP93XX_GPIO_LINE_F(line), GPIO_IN);
+		gpio_line_config(EP93XX_GPIO_LINE_F(line-16), GPIO_IN);
 	}
 
 	port = line >> 3;

+ 11 - 1
arch/arm/mm/cache-l2x0.c

@@ -57,7 +57,17 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
 {
 	unsigned long addr;
 
-	start &= ~(CACHE_LINE_SIZE - 1);
+	if (start & (CACHE_LINE_SIZE - 1)) {
+		start &= ~(CACHE_LINE_SIZE - 1);
+		sync_writel(start, L2X0_CLEAN_INV_LINE_PA, 1);
+		start += CACHE_LINE_SIZE;
+	}
+
+	if (end & (CACHE_LINE_SIZE - 1)) {
+		end &= ~(CACHE_LINE_SIZE - 1);
+		sync_writel(end, L2X0_CLEAN_INV_LINE_PA, 1);
+	}
+
 	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
 		sync_writel(addr, L2X0_INV_LINE_PA, 1);
 	cache_sync();

+ 1 - 1
arch/i386/boot/header.S

@@ -275,7 +275,7 @@ die:
 	hlt
 	jmp	die
 
-	.size	die, .-due
+	.size	die, .-die
 
 	.section ".initdata", "a"
 setup_corrupt:

+ 29 - 10
arch/i386/boot/memory.c

@@ -20,6 +20,7 @@
 
 
 static int detect_memory_e820(void)
 {
+	int count = 0;
 	u32 next = 0;
 	u32 next = 0;
 	u32 size, id;
 	u32 size, id;
 	u8 err;
 	u8 err;
@@ -27,20 +28,33 @@ static int detect_memory_e820(void)
 
 
 	do {
 		size = sizeof(struct e820entry);
-		id = SMAP;
+
+		/* Important: %edx is clobbered by some BIOSes,
+		   so it must be either used for the error output
+		   or explicitly marked clobbered. */
 		asm("int $0x15; setc %0"
-		    : "=am" (err), "+b" (next), "+d" (id), "+c" (size),
+		    : "=d" (err), "+b" (next), "=a" (id), "+c" (size),
 		      "=m" (*desc)
-		    : "D" (desc), "a" (0xe820));
+		    : "D" (desc), "d" (SMAP), "a" (0xe820));
+
+		/* Some BIOSes stop returning SMAP in the middle of
+		   the search loop.  We don't know exactly how the BIOS
+		   screwed up the map at that point, we might have a
+		   partial map, the full map, or complete garbage, so
+		   just return failure. */
+		if (id != SMAP) {
+			count = 0;
+			break;
+		}
 
-		if (err || id != SMAP)
+		if (err)
 			break;
 
-		boot_params.e820_entries++;
+		count++;
 		desc++;
-	} while (next && boot_params.e820_entries < E820MAX);
+	} while (next && count < E820MAX);
 
-	return boot_params.e820_entries;
+	return boot_params.e820_entries = count;
 }
 
 static int detect_memory_e801(void)
@@ -89,11 +103,16 @@ static int detect_memory_88(void)
 
 
 int detect_memory(void)
 {
+	int err = -1;
+
 	if (detect_memory_e820() > 0)
-		return 0;
+		err = 0;
 
 	if (!detect_memory_e801())
-		return 0;
+		err = 0;
+
+	if (!detect_memory_88())
+		err = 0;
 
-	return detect_memory_88();
+	return err;
 }
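
The constraint change in the hunk above is easier to follow with each operand annotated. The statement below is the patched version with only the comments added; the register roles they describe follow the usual INT 0x15/E820 calling convention rather than anything stated in the diff itself:

		asm("int $0x15; setc %0"
		    : "=d" (err),	/* setc writes the carry flag into %dl;
					   making %edx an output also tells GCC
					   the BIOS may clobber it */
		      "+b" (next),	/* %ebx: continuation value, updated by the BIOS */
		      "=a" (id),	/* %eax: returns the SMAP signature on success */
		      "+c" (size),	/* %ecx: buffer size in, entry size out */
		      "=m" (*desc)	/* the e820 entry the BIOS writes at %di */
		    : "D" (desc), "d" (SMAP), "a" (0xe820));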

+ 10 - 4
arch/i386/boot/video.c

@@ -147,7 +147,7 @@ int mode_defined(u16 mode)
 }
 
 /* Set mode (without recalc) */
-static int raw_set_mode(u16 mode)
+static int raw_set_mode(u16 mode, u16 *real_mode)
 {
 	int nmode, i;
 	struct card_info *card;
@@ -165,8 +165,10 @@ static int raw_set_mode(u16 mode)
 
 
 			if ((mode == nmode && visible) ||
 			    mode == mi->mode ||
-			    mode == (mi->y << 8)+mi->x)
+			    mode == (mi->y << 8)+mi->x) {
+				*real_mode = mi->mode;
 				return card->set_mode(mi);
+			}
 
 			if (visible)
 				nmode++;
@@ -178,7 +180,7 @@ static int raw_set_mode(u16 mode)
 		if (mode >= card->xmode_first &&
 		    mode < card->xmode_first+card->xmode_n) {
 			struct mode_info mix;
-			mix.mode = mode;
+			*real_mode = mix.mode = mode;
 			mix.x = mix.y = 0;
 			return card->set_mode(&mix);
 		}
@@ -223,6 +225,7 @@ static void vga_recalc_vertical(void)
 static int set_mode(u16 mode)
 {
 	int rv;
+	u16 real_mode;
 
 	/* Very special mode numbers... */
 	if (mode == VIDEO_CURRENT_MODE)
@@ -232,13 +235,16 @@ static int set_mode(u16 mode)
 	else if (mode == EXTENDED_VGA)
 		mode = VIDEO_8POINT;
 
-	rv = raw_set_mode(mode);
+	rv = raw_set_mode(mode, &real_mode);
 	if (rv)
 		return rv;
 
 	if (mode & VIDEO_RECALC)
 		vga_recalc_vertical();
 
+	/* Save the canonical mode number for the kernel, not
+	   an alias, size specification or menu position */
+	boot_params.hdr.vid_mode = real_mode;
 	return 0;
 }
 

+ 10 - 31
arch/i386/kernel/acpi/wakeup.S

@@ -151,51 +151,30 @@ bogus_real_magic:
 #define VIDEO_FIRST_V7 0x0900
 
 # Setting of user mode (AX=mode ID) => CF=success
+
+# For now, we only handle VESA modes (0x0200..0x03ff).  To handle other
+# modes, we should probably compile in the video code from the boot
+# directory.
 mode_set:
 	movw	%ax, %bx
-#if 0
-	cmpb	$0xff, %ah
-	jz	setalias
-
-	testb	$VIDEO_RECALC>>8, %ah
-	jnz	_setrec
-
-	cmpb	$VIDEO_FIRST_RESOLUTION>>8, %ah
-	jnc	setres
-
-	cmpb	$VIDEO_FIRST_SPECIAL>>8, %ah
-	jz	setspc
-
-	cmpb	$VIDEO_FIRST_V7>>8, %ah
-	jz	setv7
-#endif
-
-	cmpb	$VIDEO_FIRST_VESA>>8, %ah
-	jnc	check_vesa
-#if 0
-	orb	%ah, %ah
-	jz	setmenu
-#endif
-
-	decb	%ah
-#	jz	setbios				  Add bios modes later
+	subb	$VIDEO_FIRST_VESA>>8, %bh
+	cmpb	$2, %bh
+	jb	check_vesa
 
-setbad:	clc
+setbad:
+	clc
 	ret
 
 check_vesa:
-	subb	$VIDEO_FIRST_VESA>>8, %bh
 	orw	$0x4000, %bx			# Use linear frame buffer
 	movw	$0x4f02, %ax			# VESA BIOS mode set call
 	int	$0x10
 	cmpw	$0x004f, %ax			# AL=4f if implemented
-	jnz	_setbad				# AH=0 if OK
+	jnz	setbad				# AH=0 if OK
 
 	stc
 	ret
 
-_setbad: jmp setbad
-
 	.code32
 	ALIGN
 

+ 4 - 1
arch/i386/xen/mmu.c

@@ -559,6 +559,9 @@ void xen_exit_mmap(struct mm_struct *mm)
 	put_cpu();
 
 	spin_lock(&mm->page_table_lock);
-	xen_pgd_unpin(mm->pgd);
+
+	/* pgd may not be pinned in the error exit path of execve */
+	if (PagePinned(virt_to_page(mm->pgd)))
+		xen_pgd_unpin(mm->pgd);
 	spin_unlock(&mm->page_table_lock);
 }

+ 1 - 4
arch/mips/kernel/i8259.c

@@ -177,10 +177,7 @@ handle_real_irq:
 		outb(cached_master_mask, PIC_MASTER_IMR);
 		outb(0x60+irq,PIC_MASTER_CMD);	/* 'Specific EOI to master */
 	}
-#ifdef CONFIG_MIPS_MT_SMTC
-	if (irq_hwmask[irq] & ST0_IM)
-		set_c0_status(irq_hwmask[irq] & ST0_IM);
-#endif /* CONFIG_MIPS_MT_SMTC */
+	smtc_im_ack_irq(irq);
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 	return;
 
 

+ 2 - 8
arch/mips/kernel/irq-msc01.c

@@ -52,11 +52,8 @@ static void level_mask_and_ack_msc_irq(unsigned int irq)
 	mask_msc_irq(irq);
 	if (!cpu_has_veic)
 		MSCIC_WRITE(MSC01_IC_EOI, 0);
-#ifdef CONFIG_MIPS_MT_SMTC
 	/* This actually needs to be a call into platform code */
-	if (irq_hwmask[irq] & ST0_IM)
-		set_c0_status(irq_hwmask[irq] & ST0_IM);
-#endif /* CONFIG_MIPS_MT_SMTC */
+	smtc_im_ack_irq(irq);
 }
 
 /*
@@ -73,10 +70,7 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq)
 		MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
 		MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
 	}
-#ifdef CONFIG_MIPS_MT_SMTC
-	if (irq_hwmask[irq] & ST0_IM)
-		set_c0_status(irq_hwmask[irq] & ST0_IM);
-#endif /* CONFIG_MIPS_MT_SMTC */
+	smtc_im_ack_irq(irq);
 }
 
 /*

+ 1 - 9
arch/mips/kernel/irq.c

@@ -74,20 +74,12 @@ EXPORT_SYMBOL_GPL(free_irqno);
  */
 void ack_bad_irq(unsigned int irq)
 {
+	smtc_im_ack_irq(irq);
 	printk("unexpected IRQ # %d\n", irq);
 }
 
 atomic_t irq_err_count;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-/*
- * SMTC Kernel needs to manipulate low-level CPU interrupt mask
- * in do_IRQ. These are passed in setup_irq_smtc() and stored
- * in this table.
- */
-unsigned long irq_hwmask[NR_IRQS];
-#endif /* CONFIG_MIPS_MT_SMTC */
-
 /*
  * Generic, controller-independent functions:
  */

+ 1 - 1
arch/mips/kernel/scall64-o32.S

@@ -525,5 +525,5 @@ sys_call_table:
 	PTR	compat_sys_signalfd
 	PTR	compat_sys_timerfd
 	PTR	sys_eventfd
-	PTR	sys_fallocate			/* 4320 */
+	PTR	sys32_fallocate			/* 4320 */
 	.size	sys_call_table,.-sys_call_table

+ 4 - 1
arch/mips/kernel/smtc.c

@@ -25,8 +25,11 @@
 #include <asm/smtc_proc.h>
 
 /*
- * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
+ * SMTC Kernel needs to manipulate low-level CPU interrupt mask
+ * in do_IRQ. These are passed in setup_irq_smtc() and stored
+ * in this table.
  */
+unsigned long irq_hwmask[NR_IRQS];
 
 #define LOCK_MT_PRA() \
 	local_irq_save(flags); \

+ 2 - 0
arch/mips/kernel/vmlinux.lds.S

@@ -45,6 +45,8 @@ SECTIONS
   __dbe_table : { *(__dbe_table) }
   __stop___dbe_table = .;
 
+  NOTES
+
   RODATA
 
   /* writeable */

+ 2 - 2
arch/mips/sgi-ip32/ip32-platform.c

@@ -41,8 +41,8 @@ static struct platform_device uart8250_device = {
 
 
 static int __init uart8250_init(void)
 {
-	uart8250_data[0].iobase = (unsigned long) &mace->isa.serial1;
-	uart8250_data[1].iobase = (unsigned long) &mace->isa.serial1;
+	uart8250_data[0].membase = (void __iomem *) &mace->isa.serial1;
+	uart8250_data[1].membase = (void __iomem *) &mace->isa.serial1;
 
 	return platform_device_register(&uart8250_device);
 }

+ 2 - 0
arch/mips/sibyte/bcm1480/setup.c

@@ -15,6 +15,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
  */
+#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/reboot.h>
@@ -35,6 +36,7 @@ unsigned int soc_type;
 EXPORT_SYMBOL(soc_type);
 unsigned int periph_rev;
 unsigned int zbbus_mhz;
+EXPORT_SYMBOL(zbbus_mhz);
 
 static unsigned int part_type;
 
 

+ 1 - 0
arch/powerpc/boot/dts/mpc8349emitx.dts

@@ -97,6 +97,7 @@
 			#size-cells = <0>;
 			interrupt-parent = < &ipic >;
 			interrupts = <26 8>;
+			dr_mode = "peripheral";
 			phy_type = "ulpi";
 		};
 

+ 7 - 0
arch/powerpc/kernel/process.c

@@ -613,6 +613,13 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	regs->ccr = 0;
 	regs->gpr[1] = sp;
 
+	/*
+	 * We have just cleared all the nonvolatile GPRs, so make
+	 * FULL_REGS(regs) return true.  This is necessary to allow
+	 * ptrace to examine the thread immediately after exec.
+	 */
+	regs->trap &= ~1UL;
+
 #ifdef CONFIG_PPC32
 	regs->mq = 0;
 	regs->nip = start;

+ 2 - 2
arch/powerpc/platforms/83xx/usb.c

@@ -76,14 +76,14 @@ int mpc834x_usb_cfg(void)
 			if (port0_is_dr)
 				printk(KERN_WARNING
 					"834x USB port0 can't be used by both DR and MPH!\n");
-			sicrl |= MPC834X_SICRL_USB0;
+			sicrl &= ~MPC834X_SICRL_USB0;
 		}
 		prop = of_get_property(np, "port1", NULL);
 		if (prop) {
 			if (port1_is_dr)
 				printk(KERN_WARNING
 					"834x USB port1 can't be used by both DR and MPH!\n");
-			sicrl |= MPC834X_SICRL_USB1;
+			sicrl &= ~MPC834X_SICRL_USB1;
 		}
 		of_node_put(np);
 	}

+ 2 - 2
arch/powerpc/platforms/cell/spufs/file.c

@@ -2110,8 +2110,8 @@ struct tree_descr spufs_dir_contents[] = {
 	{ "mbox_stat", &spufs_mbox_stat_fops, 0444, },
 	{ "ibox_stat", &spufs_ibox_stat_fops, 0444, },
 	{ "wbox_stat", &spufs_wbox_stat_fops, 0444, },
-	{ "signal1", &spufs_signal1_nosched_fops, 0222, },
-	{ "signal2", &spufs_signal2_nosched_fops, 0222, },
+	{ "signal1", &spufs_signal1_fops, 0666, },
+	{ "signal2", &spufs_signal2_fops, 0666, },
 	{ "signal1_type", &spufs_signal1_type, 0666, },
 	{ "signal2_type", &spufs_signal2_type, 0666, },
 	{ "cntl", &spufs_cntl_fops,  0666, },

+ 1 - 1
arch/powerpc/platforms/pseries/xics.c

@@ -419,7 +419,7 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
 	 * For the moment only implement delivery to all cpus or one cpu.
 	 * Get current irq_server for the given irq
 	 */
-	irq_server = get_irq_server(irq, 1);
+	irq_server = get_irq_server(virq, 1);
 	if (irq_server == -1) {
 		char cpulist[128];
 		cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);

+ 1 - 1
arch/powerpc/sysdev/commproc.c

@@ -387,4 +387,4 @@ uint cpm_dpram_phys(u8* addr)
 {
 	return (dpram_pbase + (uint)(addr - dpram_vbase));
 }
-EXPORT_SYMBOL(cpm_dpram_addr);
+EXPORT_SYMBOL(cpm_dpram_phys);

+ 1 - 1
arch/ppc/8xx_io/commproc.c

@@ -459,7 +459,7 @@ EXPORT_SYMBOL(cpm_dpdump);
 
 
 void *cpm_dpram_addr(unsigned long offset)
 {
-	return ((immap_t *)IMAP_ADDR)->im_cpm.cp_dpmem + offset;
+	return (void *)(dpram_vbase + offset);
 }
 EXPORT_SYMBOL(cpm_dpram_addr);
 
 

+ 2 - 0
arch/sparc/kernel/ebus.c

@@ -156,6 +156,8 @@ void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_device *d
 	dev->prom_node = dp;
 
 	regs = of_get_property(dp, "reg", &len);
+	if (!regs)
+		len = 0;
 	if (len % sizeof(struct linux_prom_registers)) {
 		prom_printf("UGH: proplen for %s was %d, need multiple of %d\n",
 			    dev->prom_node->name, len,

+ 2 - 2
arch/sparc64/kernel/binfmt_aout32.c

@@ -177,7 +177,7 @@ static u32 __user *create_aout32_tables(char __user *p, struct linux_binprm *bpr
 			get_user(c,p++);
 		} while (c);
 	}
-	put_user(NULL,argv);
+	put_user(0,argv);
 	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
 	while (envc-->0) {
 		char c;
@@ -186,7 +186,7 @@ static u32 __user *create_aout32_tables(char __user *p, struct linux_binprm *bpr
 			get_user(c,p++);
 		} while (c);
 	}
-	put_user(NULL,envp);
+	put_user(0,envp);
 	current->mm->env_end = (unsigned long) p;
 	return sp;
 }

+ 4 - 1
arch/sparc64/kernel/ebus.c

@@ -375,7 +375,10 @@ static void __init fill_ebus_device(struct device_node *dp, struct linux_ebus_de
 		dev->num_addrs = 0;
 		dev->num_irqs = 0;
 	} else {
-		(void) of_get_property(dp, "reg", &len);
+		const int *regs = of_get_property(dp, "reg", &len);
+
+		if (!regs)
+			len = 0;
 		dev->num_addrs = len / sizeof(struct linux_prom_registers);
 
 		for (i = 0; i < dev->num_addrs; i++)

+ 4 - 4
arch/sparc64/lib/NGcopy_from_user.S

@@ -1,6 +1,6 @@
 /* NGcopy_from_user.S: Niagara optimized copy from userspace.
  *
- * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
  */
 
 #define EX_LD(x)		\
@@ -8,8 +8,8 @@
 	.section .fixup;	\
 	.align 4;		\
 99:	wr	%g0, ASI_AIUS, %asi;\
-	retl;			\
-	 mov	1, %o0;		\
+	ret;			\
+	 restore %g0, 1, %o0;	\
 	.section __ex_table,"a";\
 	.align 4;		\
 	.word 98b, 99b;		\
@@ -24,7 +24,7 @@
 #define LOAD(type,addr,dest)	type##a [addr] ASI_AIUS, dest
 #define LOAD_TWIN(addr_reg,dest0,dest1)	\
 	ldda [addr_reg] ASI_BLK_INIT_QUAD_LDD_AIUS, dest0
-#define EX_RETVAL(x)		0
+#define EX_RETVAL(x)		%g0
 
 #ifdef __KERNEL__
 #define PREAMBLE					\

+ 4 - 4
arch/sparc64/lib/NGcopy_to_user.S

@@ -1,6 +1,6 @@
 /* NGcopy_to_user.S: Niagara optimized copy to userspace.
  *
- * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
  */
 
 #define EX_ST(x)		\
@@ -8,8 +8,8 @@
 	.section .fixup;	\
 	.align 4;		\
 99:	wr	%g0, ASI_AIUS, %asi;\
-	retl;			\
-	 mov	1, %o0;		\
+	ret;			\
+	 restore %g0, 1, %o0;	\
 	.section __ex_table,"a";\
 	.align 4;		\
 	.word 98b, 99b;		\
@@ -23,7 +23,7 @@
 #define FUNC_NAME		NGcopy_to_user
 #define STORE(type,src,addr)	type##a src, [addr] ASI_AIUS
 #define STORE_ASI		ASI_BLK_INIT_QUAD_LDD_AIUS
-#define EX_RETVAL(x)		0
+#define EX_RETVAL(x)		%g0
 
 #ifdef __KERNEL__
 	/* Writing to %asi is _expensive_ so we hardcode it.

+ 213 - 158
arch/sparc64/lib/NGmemcpy.S

@@ -1,6 +1,6 @@
 /* NGmemcpy.S: Niagara optimized memcpy.
  *
- * Copyright (C) 2006 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
  */
 
 #ifdef __KERNEL__
@@ -16,6 +16,12 @@
 	wr	%g0, ASI_PNF, %asi
 #endif
 
+#ifdef __sparc_v9__
+#define SAVE_AMOUNT	128
+#else
+#define SAVE_AMOUNT	64
+#endif
+
 #ifndef STORE_ASI
 #define STORE_ASI	ASI_BLK_INIT_QUAD_LDD_P
 #endif
@@ -50,7 +56,11 @@
 #endif
 
 #ifndef STORE_INIT
+#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA
 #define STORE_INIT(src,addr)	stxa src, [addr] %asi
+#else
+#define STORE_INIT(src,addr)	stx src, [addr + 0x00]
+#endif
 #endif
 
 #ifndef FUNC_NAME
@@ -73,18 +83,19 @@
 
 
 	.globl	FUNC_NAME
 	.type	FUNC_NAME,#function
-FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
-	srlx		%o2, 31, %g2
+FUNC_NAME:	/* %i0=dst, %i1=src, %i2=len */
+	PREAMBLE
+	save		%sp, -SAVE_AMOUNT, %sp
+	srlx		%i2, 31, %g2
 	cmp		%g2, 0
 	tne		%xcc, 5
-	PREAMBLE
-	mov		%o0, GLOBAL_SPARE
-	cmp		%o2, 0
+	mov		%i0, %o0
+	cmp		%i2, 0
 	be,pn		%XCC, 85f
-	 or		%o0, %o1, %o3
-	cmp		%o2, 16
+	 or		%o0, %i1, %i3
+	cmp		%i2, 16
 	blu,a,pn	%XCC, 80f
-	 or		%o3, %o2, %o3
+	 or		%i3, %i2, %i3
 
 	/* 2 blocks (128 bytes) is the minimum we can do the block
 	 * copy with.  We need to ensure that we'll iterate at least
@@ -93,31 +104,31 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
 	 * to (64 - 1) bytes from the length before we perform the
 	 * block copy loop.
 	 */
-	cmp		%o2, (2 * 64)
+	cmp		%i2, (2 * 64)
 	blu,pt		%XCC, 70f
-	 andcc		%o3, 0x7, %g0
+	 andcc		%i3, 0x7, %g0
 
 	/* %o0:	dst
-	 * %o1:	src
-	 * %o2:	len  (known to be >= 128)
+	 * %i1:	src
+	 * %i2:	len  (known to be >= 128)
 	 *
-	 * The block copy loops will use %o4/%o5,%g2/%g3 as
+	 * The block copy loops will use %i4/%i5,%g2/%g3 as
 	 * temporaries while copying the data.
 	 */
 
-	LOAD(prefetch, %o1, #one_read)
+	LOAD(prefetch, %i1, #one_read)
 	wr		%g0, STORE_ASI, %asi
 
 	/* Align destination on 64-byte boundary.  */
-	andcc		%o0, (64 - 1), %o4
+	andcc		%o0, (64 - 1), %i4
 	be,pt		%XCC, 2f
-	 sub		%o4, 64, %o4
-	sub		%g0, %o4, %o4	! bytes to align dst
-	sub		%o2, %o4, %o2
-1:	subcc		%o4, 1, %o4
-	EX_LD(LOAD(ldub, %o1, %g1))
+	 sub		%i4, 64, %i4
+	sub		%g0, %i4, %i4	! bytes to align dst
+	sub		%i2, %i4, %i2
+1:	subcc		%i4, 1, %i4
+	EX_LD(LOAD(ldub, %i1, %g1))
 	EX_ST(STORE(stb, %g1, %o0))
-	add		%o1, 1, %o1
+	add		%i1, 1, %i1
 	bne,pt		%XCC, 1b
 	add		%o0, 1, %o0
 
 
@@ -136,111 +147,155 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
 	 * aligned store data at a time, this is easy to ensure.
 	 * aligned store data at a time, this is easy to ensure.
 	 */
 	 */
 2:
 2:
-	andcc		%o1, (16 - 1), %o4
-	andn		%o2, (64 - 1), %g1	! block copy loop iterator
-	sub		%o2, %g1, %o2		! final sub-block copy bytes
+	andcc		%i1, (16 - 1), %i4
+	andn		%i2, (64 - 1), %g1	! block copy loop iterator
 	be,pt		%XCC, 50f
 	be,pt		%XCC, 50f
-	 cmp		%o4, 8
-	be,a,pt		%XCC, 10f
-	 sub		%o1, 0x8, %o1
+	 sub		%i2, %g1, %i2		! final sub-block copy bytes
+
+	cmp		%i4, 8
+	be,pt		%XCC, 10f
+	 sub		%i1, %i4, %i1
 
 
 	/* Neither 8-byte nor 16-byte aligned, shift and mask.  */
 	/* Neither 8-byte nor 16-byte aligned, shift and mask.  */
-	mov		%g1, %o4
-	and		%o1, 0x7, %g1
-	sll		%g1, 3, %g1
-	mov		64, %o3
-	andn		%o1, 0x7, %o1
-	EX_LD(LOAD(ldx, %o1, %g2))
-	sub		%o3, %g1, %o3
-	sllx		%g2, %g1, %g2
+	and		%i4, 0x7, GLOBAL_SPARE
+	sll		GLOBAL_SPARE, 3, GLOBAL_SPARE
+	mov		64, %i5
+	EX_LD(LOAD_TWIN(%i1, %g2, %g3))
+	sub		%i5, GLOBAL_SPARE, %i5
+	mov		16, %o4
+	mov		32, %o5
+	mov		48, %o7
+	mov		64, %i3
+
+	bg,pn	   	%XCC, 9f
+	 nop
 
 
-#define SWIVEL_ONE_DWORD(SRC, TMP1, TMP2, PRE_VAL, PRE_SHIFT, POST_SHIFT, DST)\
-	EX_LD(LOAD(ldx, SRC, TMP1)); \
-	srlx		TMP1, PRE_SHIFT, TMP2; \
-	or		TMP2, PRE_VAL, TMP2; \
-	EX_ST(STORE_INIT(TMP2, DST)); \
-	sllx		TMP1, POST_SHIFT, PRE_VAL;
-
-1:	add		%o1, 0x8, %o1
-	SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x00)
-	add		%o1, 0x8, %o1
-	SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x08)
-	add		%o1, 0x8, %o1
-	SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x10)
-	add		%o1, 0x8, %o1
-	SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x18)
-	add		%o1, 32, %o1
-	LOAD(prefetch, %o1, #one_read)
-	sub		%o1, 32 - 8, %o1
-	SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x20)
-	add		%o1, 8, %o1
-	SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x28)
-	add		%o1, 8, %o1
-	SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x30)
-	add		%o1, 8, %o1
-	SWIVEL_ONE_DWORD(%o1, %g3, %o5, %g2, %o3, %g1, %o0 + 0x38)
-	subcc		%o4, 64, %o4
-	bne,pt		%XCC, 1b
+#define MIX_THREE_WORDS(WORD1, WORD2, WORD3, PRE_SHIFT, POST_SHIFT, TMP) \
+	sllx		WORD1, POST_SHIFT, WORD1; \
+	srlx		WORD2, PRE_SHIFT, TMP; \
+	sllx		WORD2, POST_SHIFT, WORD2; \
+	or		WORD1, TMP, WORD1; \
+	srlx		WORD3, PRE_SHIFT, TMP; \
+	or		WORD2, TMP, WORD2;
+
+8:	EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3))
+	MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
+	LOAD(prefetch, %i1 + %i3, #one_read)
+
+	EX_ST(STORE_INIT(%g2, %o0 + 0x00))
+	EX_ST(STORE_INIT(%g3, %o0 + 0x08))
+
+	EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3))
+	MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
+
+	EX_ST(STORE_INIT(%o2, %o0 + 0x10))
+	EX_ST(STORE_INIT(%o3, %o0 + 0x18))
+
+	EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+	MIX_THREE_WORDS(%g2, %g3, %o2, %i5, GLOBAL_SPARE, %o1)
+
+	EX_ST(STORE_INIT(%g2, %o0 + 0x20))
+	EX_ST(STORE_INIT(%g3, %o0 + 0x28))
+
+	EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3))
+	add		%i1, 64, %i1
+	MIX_THREE_WORDS(%o2, %o3, %g2, %i5, GLOBAL_SPARE, %o1)
+
+	EX_ST(STORE_INIT(%o2, %o0 + 0x30))
+	EX_ST(STORE_INIT(%o3, %o0 + 0x38))
+
+	subcc		%g1, 64, %g1
+	bne,pt		%XCC, 8b
 	 add		%o0, 64, %o0
 	 add		%o0, 64, %o0
 
 
-#undef SWIVEL_ONE_DWORD
+	ba,pt		%XCC, 60f
+	 add		%i1, %i4, %i1
+
+9:	EX_LD(LOAD_TWIN(%i1 + %o4, %o2, %o3))
+	MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
+	LOAD(prefetch, %i1 + %i3, #one_read)
+
+	EX_ST(STORE_INIT(%g3, %o0 + 0x00))
+	EX_ST(STORE_INIT(%o2, %o0 + 0x08))
+
+	EX_LD(LOAD_TWIN(%i1 + %o5, %g2, %g3))
+	MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
+
+	EX_ST(STORE_INIT(%o3, %o0 + 0x10))
+	EX_ST(STORE_INIT(%g2, %o0 + 0x18))
+
+	EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+	MIX_THREE_WORDS(%g3, %o2, %o3, %i5, GLOBAL_SPARE, %o1)
+
+	EX_ST(STORE_INIT(%g3, %o0 + 0x20))
+	EX_ST(STORE_INIT(%o2, %o0 + 0x28))
+
+	EX_LD(LOAD_TWIN(%i1 + %i3, %g2, %g3))
+	add		%i1, 64, %i1
+	MIX_THREE_WORDS(%o3, %g2, %g3, %i5, GLOBAL_SPARE, %o1)
+
+	EX_ST(STORE_INIT(%o3, %o0 + 0x30))
+	EX_ST(STORE_INIT(%g2, %o0 + 0x38))
+
+	subcc		%g1, 64, %g1
+	bne,pt		%XCC, 9b
+	 add		%o0, 64, %o0
 
 
-	srl		%g1, 3, %g1
 	ba,pt		%XCC, 60f
 	ba,pt		%XCC, 60f
-	 add		%o1, %g1, %o1
+	 add		%i1, %i4, %i1
 
 
 10:	/* Destination is 64-byte aligned, source was only 8-byte
 10:	/* Destination is 64-byte aligned, source was only 8-byte
 	 * aligned but it has been subtracted by 8 and we perform
 	 * aligned but it has been subtracted by 8 and we perform
 	 * one twin load ahead, then add 8 back into source when
 	 * one twin load ahead, then add 8 back into source when
 	 * we finish the loop.
 	 * we finish the loop.
 	 */
 	 */
-	EX_LD(LOAD_TWIN(%o1, %o4, %o5))
-1:	add		%o1, 16, %o1
-	EX_LD(LOAD_TWIN(%o1, %g2, %g3))
-	add		%o1, 16 + 32, %o1
-	LOAD(prefetch, %o1, #one_read)
-	sub		%o1, 32, %o1
+	EX_LD(LOAD_TWIN(%i1, %o4, %o5))
+	mov	16, %o7
+	mov	32, %g2
+	mov	48, %g3
+	mov	64, %o1
+1:	EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+	LOAD(prefetch, %i1 + %o1, #one_read)
 	EX_ST(STORE_INIT(%o5, %o0 + 0x00))	! initializes cache line
 	EX_ST(STORE_INIT(%o5, %o0 + 0x00))	! initializes cache line
-	EX_ST(STORE_INIT(%g2, %o0 + 0x08))
-	EX_LD(LOAD_TWIN(%o1, %o4, %o5))
-	add		%o1, 16, %o1
-	EX_ST(STORE_INIT(%g3, %o0 + 0x10))
+	EX_ST(STORE_INIT(%o2, %o0 + 0x08))
+	EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5))
+	EX_ST(STORE_INIT(%o3, %o0 + 0x10))
 	EX_ST(STORE_INIT(%o4, %o0 + 0x18))
 	EX_ST(STORE_INIT(%o4, %o0 + 0x18))
-	EX_LD(LOAD_TWIN(%o1, %g2, %g3))
-	add		%o1, 16, %o1
+	EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3))
 	EX_ST(STORE_INIT(%o5, %o0 + 0x20))
 	EX_ST(STORE_INIT(%o5, %o0 + 0x20))
-	EX_ST(STORE_INIT(%g2, %o0 + 0x28))
-	EX_LD(LOAD_TWIN(%o1, %o4, %o5))
-	EX_ST(STORE_INIT(%g3, %o0 + 0x30))
+	EX_ST(STORE_INIT(%o2, %o0 + 0x28))
+	EX_LD(LOAD_TWIN(%i1 + %o1, %o4, %o5))
+	add		%i1, 64, %i1
+	EX_ST(STORE_INIT(%o3, %o0 + 0x30))
 	EX_ST(STORE_INIT(%o4, %o0 + 0x38))
 	EX_ST(STORE_INIT(%o4, %o0 + 0x38))
 	subcc		%g1, 64, %g1
 	subcc		%g1, 64, %g1
 	bne,pt		%XCC, 1b
 	bne,pt		%XCC, 1b
 	 add		%o0, 64, %o0
 	 add		%o0, 64, %o0
 
 
 	ba,pt		%XCC, 60f
 	ba,pt		%XCC, 60f
-	 add		%o1, 0x8, %o1
+	 add		%i1, 0x8, %i1
 
 
 50:	/* Destination is 64-byte aligned, and source is 16-byte
 50:	/* Destination is 64-byte aligned, and source is 16-byte
 	 * aligned.
 	 * aligned.
 	 */
 	 */
-1:	EX_LD(LOAD_TWIN(%o1, %o4, %o5))
-	add	%o1, 16, %o1
-	EX_LD(LOAD_TWIN(%o1, %g2, %g3))
-	add	%o1, 16 + 32, %o1
-	LOAD(prefetch, %o1, #one_read)
-	sub	%o1, 32, %o1
+	mov	16, %o7
+	mov	32, %g2
+	mov	48, %g3
+	mov	64, %o1
+1:	EX_LD(LOAD_TWIN(%i1 + %g0, %o4, %o5))
+	EX_LD(LOAD_TWIN(%i1 + %o7, %o2, %o3))
+	LOAD(prefetch, %i1 + %o1, #one_read)
 	EX_ST(STORE_INIT(%o4, %o0 + 0x00))	! initializes cache line
 	EX_ST(STORE_INIT(%o4, %o0 + 0x00))	! initializes cache line
 	EX_ST(STORE_INIT(%o5, %o0 + 0x08))
 	EX_ST(STORE_INIT(%o5, %o0 + 0x08))
-	EX_LD(LOAD_TWIN(%o1, %o4, %o5))
-	add	%o1, 16, %o1
-	EX_ST(STORE_INIT(%g2, %o0 + 0x10))
-	EX_ST(STORE_INIT(%g3, %o0 + 0x18))
-	EX_LD(LOAD_TWIN(%o1, %g2, %g3))
-	add	%o1, 16, %o1
+	EX_LD(LOAD_TWIN(%i1 + %g2, %o4, %o5))
+	EX_ST(STORE_INIT(%o2, %o0 + 0x10))
+	EX_ST(STORE_INIT(%o3, %o0 + 0x18))
+	EX_LD(LOAD_TWIN(%i1 + %g3, %o2, %o3))
+	add	%i1, 64, %i1
 	EX_ST(STORE_INIT(%o4, %o0 + 0x20))
 	EX_ST(STORE_INIT(%o4, %o0 + 0x20))
 	EX_ST(STORE_INIT(%o5, %o0 + 0x28))
 	EX_ST(STORE_INIT(%o5, %o0 + 0x28))
-	EX_ST(STORE_INIT(%g2, %o0 + 0x30))
-	EX_ST(STORE_INIT(%g3, %o0 + 0x38))
+	EX_ST(STORE_INIT(%o2, %o0 + 0x30))
+	EX_ST(STORE_INIT(%o3, %o0 + 0x38))
 	subcc	%g1, 64, %g1
 	subcc	%g1, 64, %g1
 	bne,pt	%XCC, 1b
 	bne,pt	%XCC, 1b
 	 add	%o0, 64, %o0
 	 add	%o0, 64, %o0
@@ -249,47 +304,47 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
 60:	
 60:	
 	membar		#Sync
 	membar		#Sync
 
 
-	/* %o2 contains any final bytes still needed to be copied
+	/* %i2 contains any final bytes still needed to be copied
 	 * over. If anything is left, we copy it one byte at a time.
 	 * over. If anything is left, we copy it one byte at a time.
 	 */
 	 */
-	RESTORE_ASI(%o3)
-	brz,pt		%o2, 85f
-	 sub		%o0, %o1, %o3
+	RESTORE_ASI(%i3)
+	brz,pt		%i2, 85f
+	 sub		%o0, %i1, %i3
 	ba,a,pt		%XCC, 90f
 	ba,a,pt		%XCC, 90f
 
 
 	.align		64
 	.align		64
 70: /* 16 < len <= 64 */
 70: /* 16 < len <= 64 */
 	bne,pn		%XCC, 75f
 	bne,pn		%XCC, 75f
-	 sub		%o0, %o1, %o3
+	 sub		%o0, %i1, %i3
 
 
 72:
 72:
-	andn		%o2, 0xf, %o4
-	and		%o2, 0xf, %o2
-1:	subcc		%o4, 0x10, %o4
-	EX_LD(LOAD(ldx, %o1, %o5))
-	add		%o1, 0x08, %o1
-	EX_LD(LOAD(ldx, %o1, %g1))
-	sub		%o1, 0x08, %o1
-	EX_ST(STORE(stx, %o5, %o1 + %o3))
-	add		%o1, 0x8, %o1
-	EX_ST(STORE(stx, %g1, %o1 + %o3))
+	andn		%i2, 0xf, %i4
+	and		%i2, 0xf, %i2
+1:	subcc		%i4, 0x10, %i4
+	EX_LD(LOAD(ldx, %i1, %i5))
+	add		%i1, 0x08, %i1
+	EX_LD(LOAD(ldx, %i1, %g1))
+	sub		%i1, 0x08, %i1
+	EX_ST(STORE(stx, %i5, %i1 + %i3))
+	add		%i1, 0x8, %i1
+	EX_ST(STORE(stx, %g1, %i1 + %i3))
 	bgu,pt		%XCC, 1b
 	bgu,pt		%XCC, 1b
-	 add		%o1, 0x8, %o1
-73:	andcc		%o2, 0x8, %g0
+	 add		%i1, 0x8, %i1
+73:	andcc		%i2, 0x8, %g0
 	be,pt		%XCC, 1f
 	be,pt		%XCC, 1f
 	 nop
 	 nop
-	sub		%o2, 0x8, %o2
-	EX_LD(LOAD(ldx, %o1, %o5))
-	EX_ST(STORE(stx, %o5, %o1 + %o3))
-	add		%o1, 0x8, %o1
-1:	andcc		%o2, 0x4, %g0
+	sub		%i2, 0x8, %i2
+	EX_LD(LOAD(ldx, %i1, %i5))
+	EX_ST(STORE(stx, %i5, %i1 + %i3))
+	add		%i1, 0x8, %i1
+1:	andcc		%i2, 0x4, %g0
 	be,pt		%XCC, 1f
 	be,pt		%XCC, 1f
 	 nop
 	 nop
-	sub		%o2, 0x4, %o2
-	EX_LD(LOAD(lduw, %o1, %o5))
-	EX_ST(STORE(stw, %o5, %o1 + %o3))
-	add		%o1, 0x4, %o1
-1:	cmp		%o2, 0
+	sub		%i2, 0x4, %i2
+	EX_LD(LOAD(lduw, %i1, %i5))
+	EX_ST(STORE(stw, %i5, %i1 + %i3))
+	add		%i1, 0x4, %i1
+1:	cmp		%i2, 0
 	be,pt		%XCC, 85f
 	be,pt		%XCC, 85f
 	 nop
 	 nop
 	ba,pt		%xcc, 90f
 	ba,pt		%xcc, 90f
@@ -300,71 +355,71 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
 	sub		%g1, 0x8, %g1
 	sub		%g1, 0x8, %g1
 	be,pn		%icc, 2f
 	be,pn		%icc, 2f
 	 sub		%g0, %g1, %g1
 	 sub		%g0, %g1, %g1
-	sub		%o2, %g1, %o2
+	sub		%i2, %g1, %i2
 
 
 1:	subcc		%g1, 1, %g1
 1:	subcc		%g1, 1, %g1
-	EX_LD(LOAD(ldub, %o1, %o5))
-	EX_ST(STORE(stb, %o5, %o1 + %o3))
+	EX_LD(LOAD(ldub, %i1, %i5))
+	EX_ST(STORE(stb, %i5, %i1 + %i3))
 	bgu,pt		%icc, 1b
 	bgu,pt		%icc, 1b
-	 add		%o1, 1, %o1
+	 add		%i1, 1, %i1
 
 
-2:	add		%o1, %o3, %o0
-	andcc		%o1, 0x7, %g1
+2:	add		%i1, %i3, %o0
+	andcc		%i1, 0x7, %g1
 	bne,pt		%icc, 8f
 	bne,pt		%icc, 8f
 	 sll		%g1, 3, %g1
 	 sll		%g1, 3, %g1
 
 
-	cmp		%o2, 16
+	cmp		%i2, 16
 	bgeu,pt		%icc, 72b
 	bgeu,pt		%icc, 72b
 	 nop
 	 nop
 	ba,a,pt		%xcc, 73b
 	ba,a,pt		%xcc, 73b
 
 
-8:	mov		64, %o3
-	andn		%o1, 0x7, %o1
-	EX_LD(LOAD(ldx, %o1, %g2))
-	sub		%o3, %g1, %o3
-	andn		%o2, 0x7, %o4
+8:	mov		64, %i3
+	andn		%i1, 0x7, %i1
+	EX_LD(LOAD(ldx, %i1, %g2))
+	sub		%i3, %g1, %i3
+	andn		%i2, 0x7, %i4
 	sllx		%g2, %g1, %g2
 	sllx		%g2, %g1, %g2
-1:	add		%o1, 0x8, %o1
-	EX_LD(LOAD(ldx, %o1, %g3))
-	subcc		%o4, 0x8, %o4
-	srlx		%g3, %o3, %o5
-	or		%o5, %g2, %o5
-	EX_ST(STORE(stx, %o5, %o0))
+1:	add		%i1, 0x8, %i1
+	EX_LD(LOAD(ldx, %i1, %g3))
+	subcc		%i4, 0x8, %i4
+	srlx		%g3, %i3, %i5
+	or		%i5, %g2, %i5
+	EX_ST(STORE(stx, %i5, %o0))
 	add		%o0, 0x8, %o0
 	add		%o0, 0x8, %o0
 	bgu,pt		%icc, 1b
 	bgu,pt		%icc, 1b
 	 sllx		%g3, %g1, %g2
 	 sllx		%g3, %g1, %g2
 
 
 	srl		%g1, 3, %g1
 	srl		%g1, 3, %g1
-	andcc		%o2, 0x7, %o2
+	andcc		%i2, 0x7, %i2
 	be,pn		%icc, 85f
 	be,pn		%icc, 85f
-	 add		%o1, %g1, %o1
+	 add		%i1, %g1, %i1
 	ba,pt		%xcc, 90f
 	ba,pt		%xcc, 90f
-	 sub		%o0, %o1, %o3
+	 sub		%o0, %i1, %i3
 
 
 	.align		64
 	.align		64
 80: /* 0 < len <= 16 */
 80: /* 0 < len <= 16 */
-	andcc		%o3, 0x3, %g0
+	andcc		%i3, 0x3, %g0
 	bne,pn		%XCC, 90f
 	bne,pn		%XCC, 90f
-	 sub		%o0, %o1, %o3
+	 sub		%o0, %i1, %i3
 
 
 1:
 1:
-	subcc		%o2, 4, %o2
-	EX_LD(LOAD(lduw, %o1, %g1))
-	EX_ST(STORE(stw, %g1, %o1 + %o3))
+	subcc		%i2, 4, %i2
+	EX_LD(LOAD(lduw, %i1, %g1))
+	EX_ST(STORE(stw, %g1, %i1 + %i3))
 	bgu,pt		%XCC, 1b
 	bgu,pt		%XCC, 1b
-	 add		%o1, 4, %o1
+	 add		%i1, 4, %i1
 
 
-85:	retl
-	 mov		EX_RETVAL(GLOBAL_SPARE), %o0
+85:	ret
+	 restore	EX_RETVAL(%i0), %g0, %o0
 
 
 	.align		32
 	.align		32
 90:
 90:
-	subcc		%o2, 1, %o2
-	EX_LD(LOAD(ldub, %o1, %g1))
-	EX_ST(STORE(stb, %g1, %o1 + %o3))
+	subcc		%i2, 1, %i2
+	EX_LD(LOAD(ldub, %i1, %g1))
+	EX_ST(STORE(stb, %g1, %i1 + %i3))
 	bgu,pt		%XCC, 90b
 	bgu,pt		%XCC, 90b
-	 add		%o1, 1, %o1
-	retl
-	 mov		EX_RETVAL(GLOBAL_SPARE), %o0
+	 add		%i1, 1, %i1
+	ret
+	 restore	EX_RETVAL(%i0), %g0, %o0
 
 
 	.size		FUNC_NAME, .-FUNC_NAME
 	.size		FUNC_NAME, .-FUNC_NAME
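For readers following the misaligned-source path above: the rewritten code never loads from an unaligned address; it loads neighbouring 8-byte-aligned doublewords and rebuilds each output word from two of them with a shift pair (this is what MIX_THREE_WORDS does, three words at a time to keep the twin loads busy). Below is a rough, stand-alone C model of that shift-and-merge idea; the function name, the big-endian (SPARC) shift direction and the small read-ahead past the source buffer are assumptions of this sketch, not kernel API.

/* copy_shifted: copy nwords 8-byte words to dst when src is misaligned
 * by 1..7 bytes.  Loads are issued only at 8-byte-aligned addresses,
 * mirroring the assembly above; like the assembly, it may read a few
 * bytes past the end of the source buffer, within the last aligned
 * doubleword.  Byte order modelled here is big-endian, as on SPARC. */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

static void copy_shifted(uint64_t *dst, const unsigned char *src, size_t nwords)
{
	size_t off = (uintptr_t)src & 7;	/* source misalignment in bytes */

	if (off == 0) {				/* already aligned: nothing to merge */
		memcpy(dst, src, nwords * 8);
		return;
	}

	const uint64_t *s = (const uint64_t *)(src - off);	/* aligned base */
	unsigned lshift = (unsigned)off * 8;	/* bits discarded from the first word */
	unsigned rshift = 64 - lshift;		/* bits taken from the following word */
	uint64_t prev = *s++;

	for (size_t i = 0; i < nwords; i++) {
		uint64_t next = *s++;
		dst[i] = (prev << lshift) | (next >> rshift);
		prev = next;
	}
}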

+ 0 - 8
arch/x86_64/Kconfig

@@ -60,14 +60,6 @@ config ZONE_DMA
 	bool
 	default y
 
-config QUICKLIST
-	bool
-	default y
-
-config NR_QUICK
-	int
-	default 2
-
 config ISA
 	bool
 

+ 15 - 3
arch/x86_64/ia32/ia32entry.S

@@ -38,6 +38,18 @@
 	movq	%rax,R8(%rsp)
 	movq	%rax,R8(%rsp)
 	.endm
 	.endm
 
 
+	.macro LOAD_ARGS32 offset
+	movl \offset(%rsp),%r11d
+	movl \offset+8(%rsp),%r10d
+	movl \offset+16(%rsp),%r9d
+	movl \offset+24(%rsp),%r8d
+	movl \offset+40(%rsp),%ecx
+	movl \offset+48(%rsp),%edx
+	movl \offset+56(%rsp),%esi
+	movl \offset+64(%rsp),%edi
+	movl \offset+72(%rsp),%eax
+	.endm
+	
 	.macro CFI_STARTPROC32 simple
 	.macro CFI_STARTPROC32 simple
 	CFI_STARTPROC	\simple
 	CFI_STARTPROC	\simple
 	CFI_UNDEFINED	r8
 	CFI_UNDEFINED	r8
@@ -152,7 +164,7 @@ sysenter_tracesys:
 	movq	$-ENOSYS,RAX(%rsp)	/* really needed? */
 	movq	$-ENOSYS,RAX(%rsp)	/* really needed? */
 	movq	%rsp,%rdi        /* &pt_regs -> arg1 */
 	movq	%rsp,%rdi        /* &pt_regs -> arg1 */
 	call	syscall_trace_enter
 	call	syscall_trace_enter
-	LOAD_ARGS ARGOFFSET  /* reload args from stack in case ptrace changed it */
+	LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
 	RESTORE_REST
 	RESTORE_REST
 	movl	%ebp, %ebp
 	movl	%ebp, %ebp
 	/* no need to do an access_ok check here because rbp has been
 	/* no need to do an access_ok check here because rbp has been
@@ -255,7 +267,7 @@ cstar_tracesys:
 	movq $-ENOSYS,RAX(%rsp)	/* really needed? */
 	movq $-ENOSYS,RAX(%rsp)	/* really needed? */
 	movq %rsp,%rdi        /* &pt_regs -> arg1 */
 	movq %rsp,%rdi        /* &pt_regs -> arg1 */
 	call syscall_trace_enter
 	call syscall_trace_enter
-	LOAD_ARGS ARGOFFSET  /* reload args from stack in case ptrace changed it */
+	LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
 	RESTORE_REST
 	RESTORE_REST
 	movl RSP-ARGOFFSET(%rsp), %r8d
 	movl RSP-ARGOFFSET(%rsp), %r8d
 	/* no need to do an access_ok check here because r8 has been
 	/* no need to do an access_ok check here because r8 has been
@@ -334,7 +346,7 @@ ia32_tracesys:
 	movq $-ENOSYS,RAX(%rsp)	/* really needed? */
 	movq $-ENOSYS,RAX(%rsp)	/* really needed? */
 	movq %rsp,%rdi        /* &pt_regs -> arg1 */
 	movq %rsp,%rdi        /* &pt_regs -> arg1 */
 	call syscall_trace_enter
 	call syscall_trace_enter
-	LOAD_ARGS ARGOFFSET  /* reload args from stack in case ptrace changed it */
+	LOAD_ARGS32 ARGOFFSET  /* reload args from stack in case ptrace changed it */
 	RESTORE_REST
 	RESTORE_REST
 	jmp ia32_do_syscall
 	jmp ia32_do_syscall
 END(ia32_syscall)
 END(ia32_syscall)

+ 13 - 34
arch/x86_64/kernel/acpi/wakeup.S

@@ -81,7 +81,7 @@ wakeup_code:
 	testl	$2, realmode_flags - wakeup_code
 	testl	$2, realmode_flags - wakeup_code
 	jz	1f
 	jz	1f
 	mov	video_mode - wakeup_code, %ax
 	mov	video_mode - wakeup_code, %ax
-	call	mode_seta
+	call	mode_set
 1:
 1:
 
 
  	movw	$0xb800, %ax
  	movw	$0xb800, %ax
@@ -291,52 +291,31 @@ no_longmode:
 #define VIDEO_FIRST_V7 0x0900
 #define VIDEO_FIRST_V7 0x0900
 
 
 # Setting of user mode (AX=mode ID) => CF=success
 # Setting of user mode (AX=mode ID) => CF=success
+
+# For now, we only handle VESA modes (0x0200..0x03ff).  To handle other
+# modes, we should probably compile in the video code from the boot
+# directory.
 .code16
 .code16
-mode_seta:
+mode_set:
 	movw	%ax, %bx
 	movw	%ax, %bx
-#if 0
-	cmpb	$0xff, %ah
-	jz	setalias
-
-	testb	$VIDEO_RECALC>>8, %ah
-	jnz	_setrec
-
-	cmpb	$VIDEO_FIRST_RESOLUTION>>8, %ah
-	jnc	setres
-	
-	cmpb	$VIDEO_FIRST_SPECIAL>>8, %ah
-	jz	setspc
-
-	cmpb	$VIDEO_FIRST_V7>>8, %ah
-	jz	setv7
-#endif
-	
-	cmpb	$VIDEO_FIRST_VESA>>8, %ah
-	jnc	check_vesaa
-#if 0	
-	orb	%ah, %ah
-	jz	setmenu
-#endif
-	
-	decb	%ah
-#	jz	setbios				  Add bios modes later
+	subb	$VIDEO_FIRST_VESA>>8, %bh
+	cmpb	$2, %bh
+	jb	check_vesa
 
 
-setbada:	clc
+setbad:
+	clc
 	ret
 	ret
 
 
-check_vesaa:
-	subb	$VIDEO_FIRST_VESA>>8, %bh
+check_vesa:
 	orw	$0x4000, %bx			# Use linear frame buffer
 	orw	$0x4000, %bx			# Use linear frame buffer
 	movw	$0x4f02, %ax			# VESA BIOS mode set call
 	movw	$0x4f02, %ax			# VESA BIOS mode set call
 	int	$0x10
 	int	$0x10
 	cmpw	$0x004f, %ax			# AL=4f if implemented
 	cmpw	$0x004f, %ax			# AL=4f if implemented
-	jnz	_setbada				# AH=0 if OK
+	jnz	setbad				# AH=0 if OK
 
 
 	stc
 	stc
 	ret
 	ret
 
 
-_setbada: jmp setbada
-
 wakeup_stack_begin:	# Stack grows down
 wakeup_stack_begin:	# Stack grows down
 
 
 .org	0xff0
 .org	0xff0

+ 0 - 1
arch/x86_64/kernel/process.c

@@ -208,7 +208,6 @@ void cpu_idle (void)
 			if (__get_cpu_var(cpu_idle_state))
 				__get_cpu_var(cpu_idle_state) = 0;
 
-			check_pgt_cache();
 			rmb();
 			idle = pm_idle;
 			if (!idle)

+ 0 - 4
arch/x86_64/kernel/ptrace.c

@@ -232,10 +232,6 @@ static int putreg(struct task_struct *child,
 {
 	unsigned long tmp; 
 	
-	/* Some code in the 64bit emulation may not be 64bit clean.
-	   Don't take any chances. */
-	if (test_tsk_thread_flag(child, TIF_IA32))
-		value &= 0xffffffff;
 	switch (regno) {
 		case offsetof(struct user_regs_struct,fs):
 			if (value && (value & 3) != 3)

+ 1 - 1
arch/x86_64/kernel/smp.c

@@ -241,7 +241,7 @@ void flush_tlb_mm (struct mm_struct * mm)
 	}
 	if (!cpus_empty(cpu_mask))
 		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
-	check_pgt_cache();
+
 	preempt_enable();
 }
 EXPORT_SYMBOL(flush_tlb_mm);

+ 1 - 1
arch/x86_64/vdso/voffset.h

@@ -1 +1 @@
-#define VDSO_TEXT_OFFSET 0x500
+#define VDSO_TEXT_OFFSET 0x600

+ 10 - 2
crypto/async_tx/async_tx.c

@@ -80,6 +80,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
 	enum dma_status status;
 	struct dma_async_tx_descriptor *iter;
+	struct dma_async_tx_descriptor *parent;
 
 	if (!tx)
 		return DMA_SUCCESS;
@@ -87,8 +88,15 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 	/* poll through the dependency chain, return when tx is complete */
 	do {
 		iter = tx;
-		while (iter->cookie == -EBUSY)
-			iter = iter->parent;
+
+		/* find the root of the unsubmitted dependency chain */
+		while (iter->cookie == -EBUSY) {
+			parent = iter->parent;
+			if (parent && parent->cookie == -EBUSY)
+				iter = iter->parent;
+			else
+				break;
+		}
 
 		status = dma_sync_wait(iter->chan, iter->cookie);
 	} while (status == DMA_IN_PROGRESS || (iter != tx));

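The hunk above changes the polling loop so that it stops at the deepest descriptor that is still unsubmitted but whose parent has already been submitted (or that has no parent), rather than walking past the head of the chain. A stand-alone sketch of that walk with a simplified descriptor type follows; the struct and function names are illustrative, only the cookie == -EBUSY convention is taken from the code above.

#include <errno.h>

/* A descriptor that has not been submitted yet carries cookie == -EBUSY. */
struct tx_desc {
	int cookie;
	struct tx_desc *parent;
};

/* Return the root of the unsubmitted part of the dependency chain: the
 * walk stops as soon as the parent is missing or already carries a real
 * cookie, so it never steps past the submitted portion of the chain. */
static struct tx_desc *find_unsubmitted_root(struct tx_desc *tx)
{
	struct tx_desc *iter = tx;

	while (iter->cookie == -EBUSY) {
		struct tx_desc *parent = iter->parent;

		if (parent && parent->cookie == -EBUSY)
			iter = parent;
		else
			break;
	}
	return iter;
}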
+ 2 - 0
drivers/acpi/processor_core.c

@@ -102,6 +102,8 @@ static struct acpi_driver acpi_processor_driver = {
 		.add = acpi_processor_add,
 		.remove = acpi_processor_remove,
 		.start = acpi_processor_start,
+		.suspend = acpi_processor_suspend,
+		.resume = acpi_processor_resume,
 		},
 };
 

+ 18 - 1
drivers/acpi/processor_idle.c

@@ -325,6 +325,23 @@ static void acpi_state_timer_broadcast(struct acpi_processor *pr,
 
 
 #endif
 
 
+/*
+ * Suspend / resume control
+ */
+static int acpi_idle_suspend;
+
+int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
+{
+	acpi_idle_suspend = 1;
+	return 0;
+}
+
+int acpi_processor_resume(struct acpi_device * device)
+{
+	acpi_idle_suspend = 0;
+	return 0;
+}
+
 static void acpi_processor_idle(void)
 {
 	struct acpi_processor *pr = NULL;
@@ -355,7 +372,7 @@ static void acpi_processor_idle(void)
 	}
 
 	cx = pr->power.state;
-	if (!cx) {
+	if (!cx || acpi_idle_suspend) {
 		if (pm_idle_save)
 			pm_idle_save();
 		else

+ 2 - 2
drivers/acpi/sleep/Makefile

@@ -1,5 +1,5 @@
-obj-y					:= poweroff.o wakeup.o
-obj-$(CONFIG_ACPI_SLEEP)		+= main.o
+obj-y					:= wakeup.o
+obj-y					+= main.o
 obj-$(CONFIG_ACPI_SLEEP)		+= proc.o
 
 EXTRA_CFLAGS += $(ACPI_CFLAGS)

+ 55 - 6
drivers/acpi/sleep/main.c

@@ -15,13 +15,39 @@
 #include <linux/dmi.h>
 #include <linux/dmi.h>
 #include <linux/device.h>
 #include <linux/device.h>
 #include <linux/suspend.h>
 #include <linux/suspend.h>
+
+#include <asm/io.h>
+
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
 #include <acpi/acpi_drivers.h>
 #include "sleep.h"
 #include "sleep.h"
 
 
 u8 sleep_states[ACPI_S_STATE_COUNT];
 u8 sleep_states[ACPI_S_STATE_COUNT];
 
 
+#ifdef CONFIG_PM_SLEEP
 static u32 acpi_target_sleep_state = ACPI_STATE_S0;
 static u32 acpi_target_sleep_state = ACPI_STATE_S0;
+#endif
+
+int acpi_sleep_prepare(u32 acpi_state)
+{
+#ifdef CONFIG_ACPI_SLEEP
+	/* do we have a wakeup address for S2 and S3? */
+	if (acpi_state == ACPI_STATE_S3) {
+		if (!acpi_wakeup_address) {
+			return -EFAULT;
+		}
+		acpi_set_firmware_waking_vector((acpi_physical_address)
+						virt_to_phys((void *)
+							     acpi_wakeup_address));
+
+	}
+	ACPI_FLUSH_CPU_CACHE();
+	acpi_enable_wakeup_device_prep(acpi_state);
+#endif
+	acpi_gpe_sleep_prepare(acpi_state);
+	acpi_enter_sleep_state_prep(acpi_state);
+	return 0;
+}
 
 
 #ifdef CONFIG_SUSPEND
 #ifdef CONFIG_SUSPEND
 static struct pm_ops acpi_pm_ops;
 static struct pm_ops acpi_pm_ops;
@@ -275,6 +301,7 @@ int acpi_suspend(u32 acpi_state)
 	return -EINVAL;
 	return -EINVAL;
 }
 }
 
 
+#ifdef CONFIG_PM_SLEEP
 /**
 /**
  *	acpi_pm_device_sleep_state - return preferred power state of ACPI device
  *	acpi_pm_device_sleep_state - return preferred power state of ACPI device
  *		in the system sleep state given by %acpi_target_sleep_state
  *		in the system sleep state given by %acpi_target_sleep_state
@@ -349,6 +376,21 @@ int acpi_pm_device_sleep_state(struct device *dev, int wake, int *d_min_p)
 		*d_min_p = d_min;
 		*d_min_p = d_min;
 	return d_max;
 	return d_max;
 }
 }
+#endif
+
+static void acpi_power_off_prepare(void)
+{
+	/* Prepare to power off the system */
+	acpi_sleep_prepare(ACPI_STATE_S5);
+}
+
+static void acpi_power_off(void)
+{
+	/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
+	printk("%s called\n", __FUNCTION__);
+	local_irq_disable();
+	acpi_enter_sleep_state(ACPI_STATE_S5);
+}
 
 
 int __init acpi_sleep_init(void)
 int __init acpi_sleep_init(void)
 {
 {
@@ -363,16 +405,17 @@ int __init acpi_sleep_init(void)
 	if (acpi_disabled)
 	if (acpi_disabled)
 		return 0;
 		return 0;
 
 
+	sleep_states[ACPI_STATE_S0] = 1;
+	printk(KERN_INFO PREFIX "(supports S0");
+
 #ifdef CONFIG_SUSPEND
 #ifdef CONFIG_SUSPEND
-	printk(KERN_INFO PREFIX "(supports");
-	for (i = ACPI_STATE_S0; i < ACPI_STATE_S4; i++) {
+	for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) {
 		status = acpi_get_sleep_type_data(i, &type_a, &type_b);
 		status = acpi_get_sleep_type_data(i, &type_a, &type_b);
 		if (ACPI_SUCCESS(status)) {
 		if (ACPI_SUCCESS(status)) {
 			sleep_states[i] = 1;
 			sleep_states[i] = 1;
 			printk(" S%d", i);
 			printk(" S%d", i);
 		}
 		}
 	}
 	}
-	printk(")\n");
 
 
 	pm_set_ops(&acpi_pm_ops);
 	pm_set_ops(&acpi_pm_ops);
 #endif
 #endif
@@ -382,10 +425,16 @@ int __init acpi_sleep_init(void)
 	if (ACPI_SUCCESS(status)) {
 	if (ACPI_SUCCESS(status)) {
 		hibernation_set_ops(&acpi_hibernation_ops);
 		hibernation_set_ops(&acpi_hibernation_ops);
 		sleep_states[ACPI_STATE_S4] = 1;
 		sleep_states[ACPI_STATE_S4] = 1;
+		printk(" S4");
 	}
 	}
-#else
-	sleep_states[ACPI_STATE_S4] = 0;
 #endif
 #endif
-
+	status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
+	if (ACPI_SUCCESS(status)) {
+		sleep_states[ACPI_STATE_S5] = 1;
+		printk(" S5");
+		pm_power_off_prepare = acpi_power_off_prepare;
+		pm_power_off = acpi_power_off;
+	}
+	printk(")\n");
 	return 0;
 	return 0;
 }
 }

+ 0 - 75
drivers/acpi/sleep/poweroff.c

@@ -1,75 +0,0 @@
-/*
- * poweroff.c - ACPI handler for powering off the system.
- *
- * AKA S5, but it is independent of whether or not the kernel supports
- * any other sleep support in the system.
- *
- * Copyright (c) 2005 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
- *
- * This file is released under the GPLv2.
- */
-
-#include <linux/pm.h>
-#include <linux/init.h>
-#include <acpi/acpi_bus.h>
-#include <linux/sysdev.h>
-#include <asm/io.h>
-#include "sleep.h"
-
-int acpi_sleep_prepare(u32 acpi_state)
-{
-#ifdef CONFIG_ACPI_SLEEP
-	/* do we have a wakeup address for S2 and S3? */
-	if (acpi_state == ACPI_STATE_S3) {
-		if (!acpi_wakeup_address) {
-			return -EFAULT;
-		}
-		acpi_set_firmware_waking_vector((acpi_physical_address)
-						virt_to_phys((void *)
-							     acpi_wakeup_address));
-
-	}
-	ACPI_FLUSH_CPU_CACHE();
-	acpi_enable_wakeup_device_prep(acpi_state);
-#endif
-	acpi_gpe_sleep_prepare(acpi_state);
-	acpi_enter_sleep_state_prep(acpi_state);
-	return 0;
-}
-
-#ifdef CONFIG_PM
-
-static void acpi_power_off_prepare(void)
-{
-	/* Prepare to power off the system */
-	acpi_sleep_prepare(ACPI_STATE_S5);
-}
-
-static void acpi_power_off(void)
-{
-	/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
-	printk("%s called\n", __FUNCTION__);
-	local_irq_disable();
-	/* Some SMP machines only can poweroff in boot CPU */
-	acpi_enter_sleep_state(ACPI_STATE_S5);
-}
-
-static int acpi_poweroff_init(void)
-{
-	if (!acpi_disabled) {
-		u8 type_a, type_b;
-		acpi_status status;
-
-		status =
-		    acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
-		if (ACPI_SUCCESS(status)) {
-			pm_power_off_prepare = acpi_power_off_prepare;
-			pm_power_off = acpi_power_off;
-		}
-	}
-	return 0;
-}
-
-late_initcall(acpi_poweroff_init);
-
-#endif				/* CONFIG_PM */

+ 1 - 2
drivers/acpi/video.c

@@ -417,7 +417,6 @@ acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level)
 	arg0.integer.value = level;
 	status = acpi_evaluate_object(device->dev->handle, "_BCM", &args, NULL);
 
-	printk(KERN_DEBUG "set_level status: %x\n", status);
 	return status;
 }
 
@@ -1754,7 +1753,7 @@ static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
 
 static int acpi_video_bus_start_devices(struct acpi_video_bus *video)
 {
-	return acpi_video_bus_DOS(video, 1, 0);
+	return acpi_video_bus_DOS(video, 0, 0);
 }
 
 static int acpi_video_bus_stop_devices(struct acpi_video_bus *video)

+ 6 - 4
drivers/ata/ahci.c

@@ -418,10 +418,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 
 
 	/* ATI */
 	{ PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
-	{ PCI_VDEVICE(ATI, 0x4390), board_ahci_sb600 }, /* ATI SB700 IDE */
-	{ PCI_VDEVICE(ATI, 0x4391), board_ahci_sb600 }, /* ATI SB700 AHCI */
-	{ PCI_VDEVICE(ATI, 0x4392), board_ahci_sb600 }, /* ATI SB700 nraid5 */
-	{ PCI_VDEVICE(ATI, 0x4393), board_ahci_sb600 }, /* ATI SB700 raid5 */
+	{ PCI_VDEVICE(ATI, 0x4390), board_ahci_sb600 }, /* ATI SB700/800 */
+	{ PCI_VDEVICE(ATI, 0x4391), board_ahci_sb600 }, /* ATI SB700/800 */
+	{ PCI_VDEVICE(ATI, 0x4392), board_ahci_sb600 }, /* ATI SB700/800 */
+	{ PCI_VDEVICE(ATI, 0x4393), board_ahci_sb600 }, /* ATI SB700/800 */
+	{ PCI_VDEVICE(ATI, 0x4394), board_ahci_sb600 }, /* ATI SB700/800 */
+	{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb600 }, /* ATI SB700/800 */
 
 	/* VIA */
 	{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */

+ 7 - 0
drivers/ata/ata_piix.c

@@ -920,6 +920,13 @@ static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev)
 static int piix_broken_suspend(void)
 {
 	static struct dmi_system_id sysids[] = {
+		{
+			.ident = "TECRA M3",
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+				DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M3"),
+			},
+		},
 		{
 			.ident = "TECRA M5",
 			.matches = {

+ 4 - 0
drivers/ata/libata-core.c

@@ -3778,6 +3778,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "Maxtor 6L250S0",     "BANC1G10",     ATA_HORKAGE_NONCQ },
 	{ "Maxtor 6L250S0",     "BANC1G10",     ATA_HORKAGE_NONCQ },
 	{ "Maxtor 6B200M0",	"BANC1BM0",	ATA_HORKAGE_NONCQ },
 	{ "Maxtor 6B200M0",	"BANC1BM0",	ATA_HORKAGE_NONCQ },
 	{ "Maxtor 6B200M0",	"BANC1B10",	ATA_HORKAGE_NONCQ },
 	{ "Maxtor 6B200M0",	"BANC1B10",	ATA_HORKAGE_NONCQ },
+	{ "Maxtor 7B250S0",	"BANC1B70",	ATA_HORKAGE_NONCQ, },
+	{ "Maxtor 7B300S0",	"BANC1B70",	ATA_HORKAGE_NONCQ },
+	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
 	{ "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI",
 	{ "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI",
 	 ATA_HORKAGE_NONCQ },
 	 ATA_HORKAGE_NONCQ },
 	/* NCQ hard hangs device under heavier load, needs hard power cycle */
 	/* NCQ hard hangs device under heavier load, needs hard power cycle */
@@ -3794,6 +3797,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
 	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
 	{ "FUJITSU MHV2080BH",	"00840028",	ATA_HORKAGE_NONCQ, },
 	{ "FUJITSU MHV2080BH",	"00840028",	ATA_HORKAGE_NONCQ, },
 	{ "ST9160821AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
 	{ "ST9160821AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
+	{ "ST3160812AS",	"3.AD",		ATA_HORKAGE_NONCQ, },
 	{ "SAMSUNG HD401LJ",	"ZZ100-15",	ATA_HORKAGE_NONCQ, },
 	{ "SAMSUNG HD401LJ",	"ZZ100-15",	ATA_HORKAGE_NONCQ, },
 
 
 	/* devices which puke on READ_NATIVE_MAX */
 	/* devices which puke on READ_NATIVE_MAX */

+ 4 - 1
drivers/ata/libata-sff.c

@@ -297,7 +297,7 @@ void ata_bmdma_start (struct ata_queued_cmd *qc)
 	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
 	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
 
-	/* Strictly, one may wish to issue a readb() here, to
+	/* Strictly, one may wish to issue an ioread8() here, to
 	 * flush the mmio write.  However, control also passes
 	 * to the hardware at this point, and it will interrupt
 	 * us when we are to resume control.  So, in effect,
@@ -307,6 +307,9 @@ void ata_bmdma_start (struct ata_queued_cmd *qc)
 	 * is expected, so I think it is best to not add a readb()
 	 * without first all the MMIO ATA cards/mobos.
 	 * Or maybe I'm just being paranoid.
+	 *
+	 * FIXME: The posting of this write means I/O starts are
+	 * unneccessarily delayed for MMIO
 	 */
 }
 

+ 2 - 1
drivers/ata/pata_sis.c

@@ -375,8 +375,9 @@ static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev)
 	int drive_pci = sis_old_port_base(adev);
 	u16 timing;
 
+	/* MWDMA 0-2 and UDMA 0-5 */
 	const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
-	const u16 udma_bits[]  = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000};
+	const u16 udma_bits[]  = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000, 0x8000 };
 
 	pci_read_config_word(pdev, drive_pci, &timing);
 

+ 12 - 4
drivers/ata/sata_sil24.c

@@ -888,6 +888,16 @@ static inline void sil24_host_intr(struct ata_port *ap)
 	u32 slot_stat, qc_active;
 	int rc;
 
+	/* If PCIX_IRQ_WOC, there's an inherent race window between
+	 * clearing IRQ pending status and reading PORT_SLOT_STAT
+	 * which may cause spurious interrupts afterwards.  This is
+	 * unavoidable and much better than losing interrupts which
+	 * happens if IRQ pending is cleared after reading
+	 * PORT_SLOT_STAT.
+	 */
+	if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
+		writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
+
 	slot_stat = readl(port + PORT_SLOT_STAT);
 
 	if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
@@ -895,9 +905,6 @@ static inline void sil24_host_intr(struct ata_port *ap)
 		return;
 	}
 
-	if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
-		writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);
-
 	qc_active = slot_stat & ~HOST_SSTAT_ATTN;
 	rc = ata_qc_complete_multiple(ap, qc_active, sil24_finish_qc);
 	if (rc > 0)
@@ -910,7 +917,8 @@ static inline void sil24_host_intr(struct ata_port *ap)
 		return;
 	}
 
-	if (ata_ratelimit())
+	/* spurious interrupts are expected if PCIX_IRQ_WOC */
+	if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit())
 		ata_port_printk(ap, KERN_INFO, "spurious interrupt "
 			"(slot_stat 0x%x active_tag %d sactive 0x%x)\n",
 			slot_stat, ap->active_tag, ap->sactive);

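The reordering above acknowledges the port interrupt before PORT_SLOT_STAT is read: acknowledging afterwards could clear a completion that arrived in between and lose an interrupt, whereas acknowledging first can at worst produce a harmless spurious interrupt (which the last hunk stops logging). A stand-alone model of that ordering follows; the register offsets, bit value and accessor helpers are invented for the sketch and do not reflect the real sil24 layout.

#include <stdint.h>

#define REG_IRQ_STAT	0x08u		/* write-one-to-clear status (assumed) */
#define REG_SLOT_STAT	0x0cu		/* completed-slot bitmap (assumed) */
#define IRQ_COMPLETE	(1u << 0)

static inline uint32_t mmio_read32(volatile uint32_t *base, uint32_t off)
{
	return base[off / 4];
}

static inline void mmio_write32(volatile uint32_t *base, uint32_t off, uint32_t val)
{
	base[off / 4] = val;
}

static uint32_t handle_port_irq(volatile uint32_t *port, int write_one_to_clear)
{
	/* Ack first: an event raised between the slot-status read and a
	 * later ack would be wiped without ever being handled.  Acking
	 * first merely risks one extra, empty pass through the handler. */
	if (write_one_to_clear)
		mmio_write32(port, REG_IRQ_STAT, IRQ_COMPLETE);

	return mmio_read32(port, REG_SLOT_STAT);
}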
+ 1 - 0
drivers/base/core.c

@@ -284,6 +284,7 @@ static ssize_t show_uevent(struct device *dev, struct device_attribute *attr,
 
 
 	/* let the kset specific function add its keys */
 	pos = data;
+	memset(envp, 0, sizeof(envp));
 	retval = kset->uevent_ops->uevent(kset, &dev->kobj,
 					  envp, ARRAY_SIZE(envp),
 					  pos, PAGE_SIZE);

+ 4 - 0
drivers/cdrom/cdrom.c

@@ -1032,6 +1032,10 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
 	check_disk_change(ip->i_bdev);
 	return 0;
 err_release:
+	if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) {
+		cdi->ops->lock_door(cdi, 0);
+		cdinfo(CD_OPEN, "door unlocked.\n");
+	}
 	cdi->ops->release(cdi);
 err:
 	cdi->use_count--;

+ 6 - 0
drivers/char/drm/i915_drv.h

@@ -210,6 +210,12 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define I915REG_INT_MASK_R 	0x020a8
 #define I915REG_INT_ENABLE_R	0x020a0
 
+#define I915REG_PIPEASTAT	0x70024
+#define I915REG_PIPEBSTAT	0x71024
+
+#define I915_VBLANK_INTERRUPT_ENABLE	(1UL<<17)
+#define I915_VBLANK_CLEAR		(1UL<<1)
+
 #define SRX_INDEX		0x3c4
 #define SRX_DATA		0x3c5
 #define SR01			1

+ 12 - 0
drivers/char/drm/i915_irq.c

@@ -214,6 +214,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 	struct drm_device *dev = (struct drm_device *) arg;
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	u16 temp;
 	u16 temp;
+	u32 pipea_stats, pipeb_stats;
+
+	pipea_stats = I915_READ(I915REG_PIPEASTAT);
+	pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
 
 
 	temp = I915_READ16(I915REG_INT_IDENTITY_R);
 	temp = I915_READ16(I915REG_INT_IDENTITY_R);
 
 
@@ -225,6 +229,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 		return IRQ_NONE;
 		return IRQ_NONE;
 
 
 	I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
 	I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
+	(void) I915_READ16(I915REG_INT_IDENTITY_R);
+	DRM_READMEMORYBARRIER();
 
 
 	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 
 
@@ -252,6 +258,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 
 
 		if (dev_priv->swaps_pending > 0)
 		if (dev_priv->swaps_pending > 0)
 			drm_locked_tasklet(dev, i915_vblank_tasklet);
 			drm_locked_tasklet(dev, i915_vblank_tasklet);
+		I915_WRITE(I915REG_PIPEASTAT,
+			pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
+			I915_VBLANK_CLEAR);
+		I915_WRITE(I915REG_PIPEBSTAT,
+			pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
+			I915_VBLANK_CLEAR);
 	}
 	}
 
 
 	return IRQ_HANDLED;
 	return IRQ_HANDLED;

+ 6 - 3
drivers/char/hpet.c

@@ -62,6 +62,8 @@
 
 
 static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
 static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
 
 
+/* This clocksource driver currently only works on ia64 */
+#ifdef CONFIG_IA64
 static void __iomem *hpet_mctr;
 static void __iomem *hpet_mctr;
 
 
 static cycle_t read_hpet(void)
 static cycle_t read_hpet(void)
@@ -79,6 +81,7 @@ static struct clocksource clocksource_hpet = {
         .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
         .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 };
 static struct clocksource *hpet_clocksource;
 static struct clocksource *hpet_clocksource;
+#endif
 
 
 /* A lock for concurrent access by app and isr hpet activity. */
 /* A lock for concurrent access by app and isr hpet activity. */
 static DEFINE_SPINLOCK(hpet_lock);
 static DEFINE_SPINLOCK(hpet_lock);
@@ -943,14 +946,14 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
 			printk(KERN_DEBUG "%s: 0x%lx is busy\n",
 			printk(KERN_DEBUG "%s: 0x%lx is busy\n",
 				__FUNCTION__, hdp->hd_phys_address);
 				__FUNCTION__, hdp->hd_phys_address);
 			iounmap(hdp->hd_address);
 			iounmap(hdp->hd_address);
-			return -EBUSY;
+			return AE_ALREADY_EXISTS;
 		}
 		}
 	} else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
 	} else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
 		struct acpi_resource_fixed_memory32 *fixmem32;
 		struct acpi_resource_fixed_memory32 *fixmem32;
 
 
 		fixmem32 = &res->data.fixed_memory32;
 		fixmem32 = &res->data.fixed_memory32;
 		if (!fixmem32)
 		if (!fixmem32)
-			return -EINVAL;
+			return AE_NO_MEMORY;
 
 
 		hdp->hd_phys_address = fixmem32->address;
 		hdp->hd_phys_address = fixmem32->address;
 		hdp->hd_address = ioremap(fixmem32->address,
 		hdp->hd_address = ioremap(fixmem32->address,
@@ -960,7 +963,7 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
 			printk(KERN_DEBUG "%s: 0x%lx is busy\n",
 			printk(KERN_DEBUG "%s: 0x%lx is busy\n",
 				__FUNCTION__, hdp->hd_phys_address);
 				__FUNCTION__, hdp->hd_phys_address);
 			iounmap(hdp->hd_address);
 			iounmap(hdp->hd_address);
-			return -EBUSY;
+			return AE_ALREADY_EXISTS;
 		}
 		}
 	} else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) {
 	} else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) {
 		struct acpi_resource_extended_irq *irqp;
 		struct acpi_resource_extended_irq *irqp;

+ 8 - 18
drivers/char/mspec.c

@@ -155,23 +155,22 @@ mspec_open(struct vm_area_struct *vma)
  * mspec_close
  * mspec_close
  *
  *
  * Called when unmapping a device mapping. Frees all mspec pages
  * Called when unmapping a device mapping. Frees all mspec pages
- * belonging to the vma.
+ * belonging to all the vma's sharing this vma_data structure.
  */
  */
 static void
 static void
 mspec_close(struct vm_area_struct *vma)
 mspec_close(struct vm_area_struct *vma)
 {
 {
 	struct vma_data *vdata;
 	struct vma_data *vdata;
-	int index, last_index, result;
+	int index, last_index;
 	unsigned long my_page;
 	unsigned long my_page;
 
 
 	vdata = vma->vm_private_data;
 	vdata = vma->vm_private_data;
 
 
-	BUG_ON(vma->vm_start < vdata->vm_start || vma->vm_end > vdata->vm_end);
+	if (!atomic_dec_and_test(&vdata->refcnt))
+		return;
 
 
-	spin_lock(&vdata->lock);
-	index = (vma->vm_start - vdata->vm_start) >> PAGE_SHIFT;
-	last_index = (vma->vm_end - vdata->vm_start) >> PAGE_SHIFT;
-	for (; index < last_index; index++) {
+	last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
+	for (index = 0; index < last_index; index++) {
 		if (vdata->maddr[index] == 0)
 		if (vdata->maddr[index] == 0)
 			continue;
 			continue;
 		/*
 		/*
@@ -180,20 +179,12 @@ mspec_close(struct vm_area_struct *vma)
 		 */
 		 */
 		my_page = vdata->maddr[index];
 		my_page = vdata->maddr[index];
 		vdata->maddr[index] = 0;
 		vdata->maddr[index] = 0;
-		spin_unlock(&vdata->lock);
-		result = mspec_zero_block(my_page, PAGE_SIZE);
-		if (!result)
+		if (!mspec_zero_block(my_page, PAGE_SIZE))
 			uncached_free_page(my_page);
 			uncached_free_page(my_page);
 		else
 		else
 			printk(KERN_WARNING "mspec_close(): "
 			printk(KERN_WARNING "mspec_close(): "
-			       "failed to zero page %i\n",
-			       result);
-		spin_lock(&vdata->lock);
+			       "failed to zero page %ld\n", my_page);
 	}
 	}
-	spin_unlock(&vdata->lock);
-
-	if (!atomic_dec_and_test(&vdata->refcnt))
-		return;
 
 
 	if (vdata->flags & VMD_VMALLOCED)
 	if (vdata->flags & VMD_VMALLOCED)
 		vfree(vdata);
 		vfree(vdata);
@@ -201,7 +192,6 @@ mspec_close(struct vm_area_struct *vma)
 		kfree(vdata);
 		kfree(vdata);
 }
 }
 
 
-
 /*
 /*
  * mspec_nopfn
  * mspec_nopfn
  *
  *

+ 6 - 4
drivers/char/random.c

@@ -1550,11 +1550,13 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
 	 *	As close as possible to RFC 793, which
 	 *	suggests using a 250 kHz clock.
 	 *	Further reading shows this assumes 2 Mb/s networks.
-	 *	For 10 Gb/s Ethernet, a 1 GHz clock is appropriate.
-	 *	That's funny, Linux has one built in!  Use it!
-	 *	(Networks are faster now - should this be increased?)
+	 *	For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
+	 *	For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
+	 *	we also need to limit the resolution so that the u32 seq
+	 *	overlaps less than one time per MSL (2 minutes).
+	 *	Choosing a clock of 64 ns period is OK. (period of 274 s)
 	 */
-	seq += ktime_get_real().tv64;
+	seq += ktime_get_real().tv64 >> 6;
 #if 0
 	printk("init_seq(%lx, %lx, %d, %d) = %d\n",
 	       saddr, daddr, sport, dport, seq);

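The period claimed in the new comment can be checked with a little arithmetic: shifting the nanosecond clock right by 6 gives 64 ns ticks, and a 32-bit sequence space then wraps after roughly 2^32 x 64 ns, about 275 s, comfortably above the 2 minute MSL. A trivial program reproduces the number; nothing in it comes from the kernel.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	double tick_ns = 64.0;				/* ktime >> 6  ->  64 ns per tick */
	double wrap_s = (double)UINT32_MAX * tick_ns / 1e9;

	printf("32-bit ISN space wraps every %.1f s\n", wrap_s);	/* ~274.9 s */
	return 0;
}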
+ 10 - 5
drivers/char/vt_ioctl.c

@@ -770,6 +770,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
 		/*
 		/*
 		 * Switching-from response
 		 * Switching-from response
 		 */
 		 */
+		acquire_console_sem();
 		if (vc->vt_newvt >= 0) {
 		if (vc->vt_newvt >= 0) {
 			if (arg == 0)
 			if (arg == 0)
 				/*
 				/*
@@ -784,7 +785,6 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
 				 * complete the switch.
 				 * complete the switch.
 				 */
 				 */
 				int newvt;
 				int newvt;
-				acquire_console_sem();
 				newvt = vc->vt_newvt;
 				newvt = vc->vt_newvt;
 				vc->vt_newvt = -1;
 				vc->vt_newvt = -1;
 				i = vc_allocate(newvt);
 				i = vc_allocate(newvt);
@@ -798,7 +798,6 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
 				 * other console switches..
 				 * other console switches..
 				 */
 				 */
 				complete_change_console(vc_cons[newvt].d);
 				complete_change_console(vc_cons[newvt].d);
-				release_console_sem();
 			}
 			}
 		}
 		}
 
 
@@ -810,9 +809,12 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
 			/*
 			/*
 			 * If it's just an ACK, ignore it
 			 * If it's just an ACK, ignore it
 			 */
 			 */
-			if (arg != VT_ACKACQ)
+			if (arg != VT_ACKACQ) {
+				release_console_sem();
 				return -EINVAL;
 				return -EINVAL;
+			}
 		}
 		}
+		release_console_sem();
 
 
 		return 0;
 		return 0;
 
 
@@ -1208,15 +1210,18 @@ void change_console(struct vc_data *new_vc)
 		/*
 		/*
 		 * Send the signal as privileged - kill_pid() will
 		 * Send the signal as privileged - kill_pid() will
 		 * tell us if the process has gone or something else
 		 * tell us if the process has gone or something else
-		 * is awry
+		 * is awry.
+		 *
+		 * We need to set vt_newvt *before* sending the signal or we
+		 * have a race.
 		 */
 		 */
+		vc->vt_newvt = new_vc->vc_num;
 		if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) {
 		if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) {
 			/*
 			/*
 			 * It worked. Mark the vt to switch to and
 			 * It worked. Mark the vt to switch to and
 			 * return. The process needs to send us a
 			 * return. The process needs to send us a
 			 * VT_RELDISP ioctl to complete the switch.
 			 * VT_RELDISP ioctl to complete the switch.
 			 */
 			 */
-			vc->vt_newvt = new_vc->vc_num;
 			return;
 			return;
 		}
 		}
 
 

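The key point of the change_console() hunk is ordering: vt_newvt must be published before the release signal is sent, otherwise the controlling process can act on the signal, issue VT_RELDISP, and find vt_newvt still at -1. A stand-alone sketch of the same publish-then-notify rule follows; it uses a pthread mutex and condition variable purely to make the ordering visible, whereas the real code notifies with kill_pid() and serialises with the console semaphore.

#include <pthread.h>

struct vt_switch {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int vt_newvt;			/* -1 while no switch is pending */
};

static void request_switch(struct vt_switch *vt, int target)
{
	pthread_mutex_lock(&vt->lock);
	vt->vt_newvt = target;		/* publish the target first...        */
	pthread_cond_signal(&vt->cond);	/* ...then wake whoever completes it, */
	pthread_mutex_unlock(&vt->lock);/* so the waiter can never observe -1 */
}

static int wait_for_switch(struct vt_switch *vt)
{
	int target;

	pthread_mutex_lock(&vt->lock);
	while (vt->vt_newvt < 0)
		pthread_cond_wait(&vt->cond, &vt->lock);
	target = vt->vt_newvt;
	vt->vt_newvt = -1;
	pthread_mutex_unlock(&vt->lock);
	return target;
}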
+ 1 - 1
drivers/ieee1394/ieee1394_core.c

@@ -1273,7 +1273,7 @@ static void __exit ieee1394_cleanup(void)
 	unregister_chrdev_region(IEEE1394_CORE_DEV, 256);
 }
 
-fs_initcall(ieee1394_init); /* same as ohci1394 */
+module_init(ieee1394_init);
 module_exit(ieee1394_cleanup);
 
 /* Exported symbols */

+ 1 - 3
drivers/ieee1394/ohci1394.c

@@ -3537,7 +3537,5 @@ static int __init ohci1394_init(void)
 	return pci_register_driver(&ohci1394_pci_driver);
 }
 
-/* Register before most other device drivers.
- * Useful for remote debugging via physical DMA, e.g. using firescope. */
-fs_initcall(ohci1394_init);
+module_init(ohci1394_init);
 module_exit(ohci1394_cleanup);

+ 49 - 13
drivers/infiniband/hw/mlx4/qp.c

@@ -1211,12 +1211,42 @@ static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
 	dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
 	dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
 }
 }
 
 
-static void set_data_seg(struct mlx4_wqe_data_seg *dseg,
-			 struct ib_sge *sg)
+static void set_mlx_icrc_seg(void *dseg)
+{
+	u32 *t = dseg;
+	struct mlx4_wqe_inline_seg *iseg = dseg;
+
+	t[1] = 0;
+
+	/*
+	 * Need a barrier here before writing the byte_count field to
+	 * make sure that all the data is visible before the
+	 * byte_count field is set.  Otherwise, if the segment begins
+	 * a new cacheline, the HCA prefetcher could grab the 64-byte
+	 * chunk and get a valid (!= * 0xffffffff) byte count but
+	 * stale data, and end up sending the wrong data.
+	 */
+	wmb();
+
+	iseg->byte_count = cpu_to_be32((1 << 31) | 4);
+}
+
+static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
 {
 {
-	dseg->byte_count = cpu_to_be32(sg->length);
 	dseg->lkey       = cpu_to_be32(sg->lkey);
 	dseg->lkey       = cpu_to_be32(sg->lkey);
 	dseg->addr       = cpu_to_be64(sg->addr);
 	dseg->addr       = cpu_to_be64(sg->addr);
+
+	/*
+	 * Need a barrier here before writing the byte_count field to
+	 * make sure that all the data is visible before the
+	 * byte_count field is set.  Otherwise, if the segment begins
+	 * a new cacheline, the HCA prefetcher could grab the 64-byte
+	 * chunk and get a valid (!= * 0xffffffff) byte count but
+	 * stale data, and end up sending the wrong data.
+	 */
+	wmb();
+
+	dseg->byte_count = cpu_to_be32(sg->length);
 }
 }
 
 
 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
@@ -1225,6 +1255,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
 	void *wqe;
 	void *wqe;
 	struct mlx4_wqe_ctrl_seg *ctrl;
 	struct mlx4_wqe_ctrl_seg *ctrl;
+	struct mlx4_wqe_data_seg *dseg;
 	unsigned long flags;
 	unsigned long flags;
 	int nreq;
 	int nreq;
 	int err = 0;
 	int err = 0;
@@ -1324,22 +1355,27 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			break;
 			break;
 		}
 		}
 
 
-		for (i = 0; i < wr->num_sge; ++i) {
-			set_data_seg(wqe, wr->sg_list + i);
+		/*
+		 * Write data segments in reverse order, so as to
+		 * overwrite cacheline stamp last within each
+		 * cacheline.  This avoids issues with WQE
+		 * prefetching.
+		 */
 
 
-			wqe  += sizeof (struct mlx4_wqe_data_seg);
-			size += sizeof (struct mlx4_wqe_data_seg) / 16;
-		}
+		dseg = wqe;
+		dseg += wr->num_sge - 1;
+		size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);
 
 
 		/* Add one more inline data segment for ICRC for MLX sends */
 		/* Add one more inline data segment for ICRC for MLX sends */
-		if (qp->ibqp.qp_type == IB_QPT_SMI || qp->ibqp.qp_type == IB_QPT_GSI) {
-			((struct mlx4_wqe_inline_seg *) wqe)->byte_count =
-				cpu_to_be32((1 << 31) | 4);
-			((u32 *) wqe)[1] = 0;
-			wqe  += sizeof (struct mlx4_wqe_data_seg);
+		if (unlikely(qp->ibqp.qp_type == IB_QPT_SMI ||
+			     qp->ibqp.qp_type == IB_QPT_GSI)) {
+			set_mlx_icrc_seg(dseg + 1);
 			size += sizeof (struct mlx4_wqe_data_seg) / 16;
 			size += sizeof (struct mlx4_wqe_data_seg) / 16;
 		}
 		}
 
 
+		for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
+			set_data_seg(dseg, wr->sg_list + i);
+
 		ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
 		ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
 				    MLX4_WQE_CTRL_FENCE : 0) | size;
 				    MLX4_WQE_CTRL_FENCE : 0) | size;
 
 

+ 1 - 1
drivers/input/joystick/Kconfig

@@ -277,7 +277,7 @@ config JOYSTICK_XPAD_FF
 
 
 config JOYSTICK_XPAD_LEDS
 	bool "LED Support for Xbox360 controller 'BigX' LED"
-	depends on LEDS_CLASS && JOYSTICK_XPAD
+	depends on JOYSTICK_XPAD && (LEDS_CLASS=y || LEDS_CLASS=JOYSTICK_XPAD)
 	---help---
 	  This option enables support for the LED which surrounds the Big X on
 	  XBox 360 controller.

+ 4 - 2
drivers/input/mouse/appletouch.c

@@ -328,6 +328,7 @@ static void atp_complete(struct urb* urb)
 {
 {
 	int x, y, x_z, y_z, x_f, y_f;
 	int x, y, x_z, y_z, x_f, y_f;
 	int retval, i, j;
 	int retval, i, j;
+	int key;
 	struct atp *dev = urb->context;
 	struct atp *dev = urb->context;
 
 
 	switch (urb->status) {
 	switch (urb->status) {
@@ -468,6 +469,7 @@ static void atp_complete(struct urb* urb)
 			      ATP_XFACT, &x_z, &x_f);
 			      ATP_XFACT, &x_z, &x_f);
 	y = atp_calculate_abs(dev->xy_acc + ATP_XSENSORS, ATP_YSENSORS,
 	y = atp_calculate_abs(dev->xy_acc + ATP_XSENSORS, ATP_YSENSORS,
 			      ATP_YFACT, &y_z, &y_f);
 			      ATP_YFACT, &y_z, &y_f);
+	key = dev->data[dev->datalen - 1] & 1;
 
 
 	if (x && y) {
 	if (x && y) {
 		if (dev->x_old != -1) {
 		if (dev->x_old != -1) {
@@ -505,7 +507,7 @@ static void atp_complete(struct urb* urb)
 		   the first touch unless reinitialised. Do so if it's been
 		   the first touch unless reinitialised. Do so if it's been
 		   idle for a while in order to avoid waking the kernel up
 		   idle for a while in order to avoid waking the kernel up
 		   several hundred times a second */
 		   several hundred times a second */
-		if (atp_is_geyser_3(dev)) {
+		if (!key && atp_is_geyser_3(dev)) {
 			dev->idlecount++;
 			dev->idlecount++;
 			if (dev->idlecount == 10) {
 			if (dev->idlecount == 10) {
 				dev->valid = 0;
 				dev->valid = 0;
@@ -514,7 +516,7 @@ static void atp_complete(struct urb* urb)
 		}
 		}
 	}
 	}
 
 
-	input_report_key(dev->input, BTN_LEFT, dev->data[dev->datalen - 1] & 1);
+	input_report_key(dev->input, BTN_LEFT, key);
 	input_sync(dev->input);
 	input_sync(dev->input);
 
 
 exit:
 exit:

+ 2 - 1
drivers/kvm/Kconfig

@@ -6,7 +6,8 @@ menuconfig VIRTUALIZATION
 	depends on X86
 	default y
 	---help---
-	  Say Y here to get to see options for virtualization guest drivers.
+	  Say Y here to get to see options for using your Linux host to run other
+	  operating systems inside virtual machines (guests).
 	  This option alone does not add any kernel code.
 
 	  If you say N, all options in this submenu will be skipped and disabled.

+ 3 - 3
drivers/lguest/lguest_asm.S

@@ -22,8 +22,9 @@
 	jmp lguest_init
 
 /*G:055 We create a macro which puts the assembler code between lgstart_ and
- * lgend_ markers.  These templates end up in the .init.text section, so they
- * are discarded after boot. */
+ * lgend_ markers.  These templates are put in the .text section: they can't be
+ * discarded after boot as we may need to patch modules, too. */
+.text
 #define LGUEST_PATCH(name, insns...)			\
 	lgstart_##name:	insns; lgend_##name:;		\
 	.globl lgstart_##name; .globl lgend_##name
@@ -34,7 +35,6 @@ LGUEST_PATCH(popf, movl %eax, lguest_data+LGUEST_DATA_irq_enabled)
 LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax)
 /*:*/
 
-.text
 /* These demark the EIP range where host should never deliver interrupts. */
 .global lguest_noirq_start
 .global lguest_noirq_end

+ 7 - 10
drivers/md/raid5.c

@@ -514,7 +514,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
 	struct stripe_head *sh = stripe_head_ref;
 	struct bio *return_bi = NULL;
 	raid5_conf_t *conf = sh->raid_conf;
-	int i, more_to_read = 0;
+	int i;
 
 	pr_debug("%s: stripe %llu\n", __FUNCTION__,
 		(unsigned long long)sh->sector);
@@ -522,16 +522,14 @@ static void ops_complete_biofill(void *stripe_head_ref)
 	/* clear completed biofills */
 	for (i = sh->disks; i--; ) {
 		struct r5dev *dev = &sh->dev[i];
-		/* check if this stripe has new incoming reads */
-		if (dev->toread)
-			more_to_read++;
 
 		/* acknowledge completion of a biofill operation */
-		/* and check if we need to reply to a read request
-		*/
-		if (test_bit(R5_Wantfill, &dev->flags) && !dev->toread) {
+		/* and check if we need to reply to a read request,
+		 * new R5_Wantfill requests are held off until
+		 * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)
+		 */
+		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
 			struct bio *rbi, *rbi2;
-			clear_bit(R5_Wantfill, &dev->flags);
 
 			/* The access to dev->read is outside of the
 			 * spin_lock_irq(&conf->device_lock), but is protected
@@ -558,8 +556,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
 
 	return_io(return_bi);
 
-	if (more_to_read)
-		set_bit(STRIPE_HANDLE, &sh->state);
+	set_bit(STRIPE_HANDLE, &sh->state);
 	release_stripe(sh);
 }
 

+ 4 - 2
drivers/media/video/ivtv/ivtv-fileops.c

@@ -754,9 +754,11 @@ static void ivtv_stop_decoding(struct ivtv_open_id *id, int flags, u64 pts)
 		ivtv_yuv_close(itv);
 	}
 	if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_YUV)
-	    itv->output_mode = OUT_NONE;
+		itv->output_mode = OUT_NONE;
+	else if (s->type == IVTV_DEC_STREAM_TYPE_YUV && itv->output_mode == OUT_UDMA_YUV)
+		itv->output_mode = OUT_NONE;
 	else if (s->type == IVTV_DEC_STREAM_TYPE_MPG && itv->output_mode == OUT_MPG)
-	    itv->output_mode = OUT_NONE;
+		itv->output_mode = OUT_NONE;
 
 	itv->speed = 0;
 	clear_bit(IVTV_F_I_DEC_PAUSED, &itv->i_flags);

+ 2 - 3
drivers/media/video/usbvision/usbvision-video.c

@@ -1387,7 +1387,6 @@ static const struct file_operations usbvision_fops = {
 	.ioctl		= video_ioctl2,
 	.llseek		= no_llseek,
 /* 	.poll          = video_poll, */
-	.mmap	       = usbvision_v4l2_mmap,
 	.compat_ioctl  = v4l_compat_ioctl32,
 };
 static struct video_device usbvision_video_template = {
@@ -1413,7 +1412,7 @@ static struct video_device usbvision_video_template = {
 	.vidioc_s_input       = vidioc_s_input,
 	.vidioc_queryctrl     = vidioc_queryctrl,
 	.vidioc_g_audio       = vidioc_g_audio,
-	.vidioc_g_audio       = vidioc_s_audio,
+	.vidioc_s_audio       = vidioc_s_audio,
 	.vidioc_g_ctrl        = vidioc_g_ctrl,
 	.vidioc_s_ctrl        = vidioc_s_ctrl,
 	.vidioc_streamon      = vidioc_streamon,
@@ -1459,7 +1458,7 @@ static struct video_device usbvision_radio_template=
 	.vidioc_s_input       = vidioc_s_input,
 	.vidioc_queryctrl     = vidioc_queryctrl,
 	.vidioc_g_audio       = vidioc_g_audio,
-	.vidioc_g_audio       = vidioc_s_audio,
+	.vidioc_s_audio       = vidioc_s_audio,
 	.vidioc_g_ctrl        = vidioc_g_ctrl,
 	.vidioc_s_ctrl        = vidioc_s_ctrl,
 	.vidioc_g_tuner       = vidioc_g_tuner,

+ 4 - 3
drivers/net/bnx2.c

@@ -54,8 +54,8 @@
 
 #define DRV_MODULE_NAME		"bnx2"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"1.6.4"
-#define DRV_MODULE_RELDATE	"August 3, 2007"
+#define DRV_MODULE_VERSION	"1.6.5"
+#define DRV_MODULE_RELDATE	"September 20, 2007"
 
 #define RUN_AT(x) (jiffies + (x))
 
@@ -6727,7 +6727,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
 		   CHIP_NUM(bp) == CHIP_NUM_5708)
 		bp->phy_flags |= PHY_CRC_FIX_FLAG;
-	else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
+	else if (CHIP_ID(bp) == CHIP_ID_5709_A0 ||
+		 CHIP_ID(bp) == CHIP_ID_5709_A1)
 		bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
 
 	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||

+ 1 - 0
drivers/net/e1000/e1000_ethtool.c

@@ -1726,6 +1726,7 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wol
 	case E1000_DEV_ID_82571EB_QUAD_COPPER:
 	case E1000_DEV_ID_82571EB_QUAD_FIBER:
 	case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
+	case E1000_DEV_ID_82571PT_QUAD_COPPER:
 	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
 		/* quad port adapters only support WoL on port A */
 		if (!adapter->quad_port_a) {

+ 1 - 0
drivers/net/e1000/e1000_hw.c

@@ -387,6 +387,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
 	case E1000_DEV_ID_82571EB_SERDES_DUAL:
 	case E1000_DEV_ID_82571EB_SERDES_QUAD:
 	case E1000_DEV_ID_82571EB_QUAD_COPPER:
+	case E1000_DEV_ID_82571PT_QUAD_COPPER:
 	case E1000_DEV_ID_82571EB_QUAD_FIBER:
 	case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
 		hw->mac_type = e1000_82571;

+ 1 - 0
drivers/net/e1000/e1000_hw.h

@@ -475,6 +475,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
 #define E1000_DEV_ID_82571EB_FIBER       0x105F
 #define E1000_DEV_ID_82571EB_SERDES      0x1060
 #define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
+#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5
 #define E1000_DEV_ID_82571EB_QUAD_FIBER  0x10A5
 #define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE  0x10BC
 #define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9

+ 2 - 0
drivers/net/e1000/e1000_main.c

@@ -108,6 +108,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
 	INTEL_E1000_ETHERNET_DEVICE(0x10BC),
 	INTEL_E1000_ETHERNET_DEVICE(0x10C4),
 	INTEL_E1000_ETHERNET_DEVICE(0x10C5),
+	INTEL_E1000_ETHERNET_DEVICE(0x10D5),
 	INTEL_E1000_ETHERNET_DEVICE(0x10D9),
 	INTEL_E1000_ETHERNET_DEVICE(0x10DA),
 	/* required last entry */
@@ -1101,6 +1102,7 @@ e1000_probe(struct pci_dev *pdev,
 	case E1000_DEV_ID_82571EB_QUAD_COPPER:
 	case E1000_DEV_ID_82571EB_QUAD_FIBER:
 	case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
+	case E1000_DEV_ID_82571PT_QUAD_COPPER:
 		/* if quad port adapter, disable WoL on all but port A */
 		if (global_quad_port_a != 0)
 			adapter->eeprom_wol = 0;

+ 1 - 4
drivers/net/mv643xx_eth.c

@@ -534,7 +534,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
 	}
 
 	/* PHY status changed */
-	if (eth_int_cause_ext & ETH_INT_CAUSE_PHY) {
+	if (eth_int_cause_ext & (ETH_INT_CAUSE_PHY | ETH_INT_CAUSE_STATE)) {
 		struct ethtool_cmd cmd;
 
 		if (mii_link_ok(&mp->mii)) {
@@ -1357,7 +1357,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 #endif
 
 	dev->watchdog_timeo = 2 * HZ;
-	dev->tx_queue_len = mp->tx_ring_size;
 	dev->base_addr = 0;
 	dev->change_mtu = mv643xx_eth_change_mtu;
 	dev->do_ioctl = mv643xx_eth_do_ioctl;
@@ -2768,8 +2767,6 @@ static const struct ethtool_ops mv643xx_ethtool_ops = {
 	.get_stats_count        = mv643xx_get_stats_count,
 	.get_ethtool_stats      = mv643xx_get_ethtool_stats,
 	.get_strings            = mv643xx_get_strings,
-	.get_stats_count        = mv643xx_get_stats_count,
-	.get_ethtool_stats      = mv643xx_get_ethtool_stats,
 	.nway_reset		= mv643xx_eth_nway_restart,
 };
 

+ 3 - 1
drivers/net/mv643xx_eth.h

@@ -64,7 +64,9 @@
 #define ETH_INT_CAUSE_TX_ERROR	(ETH_TX_QUEUES_ENABLED << 8)
 #define ETH_INT_CAUSE_TX	(ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR)
 #define ETH_INT_CAUSE_PHY	0x00010000
-#define ETH_INT_UNMASK_ALL_EXT	(ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY)
+#define ETH_INT_CAUSE_STATE	0x00100000
+#define ETH_INT_UNMASK_ALL_EXT	(ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY | \
+					ETH_INT_CAUSE_STATE)
 
 #define ETH_INT_MASK_ALL	0x00000000
 #define ETH_INT_MASK_ALL_EXT	0x00000000

+ 3 - 0
drivers/net/myri10ge/myri10ge.c

@@ -3094,9 +3094,12 @@ static void myri10ge_remove(struct pci_dev *pdev)
 }
 
 #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 	0x0008
+#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9	0x0009
 
 static struct pci_device_id myri10ge_pci_tbl[] = {
 	{PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
+	{PCI_DEVICE
+	 (PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)},
 	{0},
 };
 

+ 1 - 1
drivers/net/pcmcia/3c589_cs.c

@@ -116,7 +116,7 @@ struct el3_private {
     spinlock_t		lock;
 };
 
-static const char *if_names[] = { "auto", "10base2", "10baseT", "AUI" };
+static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" };
 
 /*====================================================================*/
 

+ 1 - 0
drivers/net/phy/phy.c

@@ -409,6 +409,7 @@ int phy_mii_ioctl(struct phy_device *phydev,
 
 	return 0;
 }
+EXPORT_SYMBOL(phy_mii_ioctl);
 
 /**
  * phy_start_aneg - start auto-negotiation for this PHY device

+ 6 - 8
drivers/net/ppp_mppe.c

@@ -136,7 +136,7 @@ struct ppp_mppe_state {
  * Key Derivation, from RFC 3078, RFC 3079.
  * Equivalent to Get_Key() for MS-CHAP as described in RFC 3079.
  */
-static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *InterimKey)
+static void get_new_key_from_sha(struct ppp_mppe_state * state)
 {
 	struct hash_desc desc;
 	struct scatterlist sg[4];
@@ -153,8 +153,6 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *I
 	desc.flags = 0;
 
 	crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest);
-
-	memcpy(InterimKey, state->sha1_digest, state->keylen);
 }
 
 /*
@@ -163,21 +161,21 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state, unsigned char *I
  */
 static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
 {
-	unsigned char InterimKey[MPPE_MAX_KEY_LEN];
 	struct scatterlist sg_in[1], sg_out[1];
 	struct blkcipher_desc desc = { .tfm = state->arc4 };
 
-	get_new_key_from_sha(state, InterimKey);
+	get_new_key_from_sha(state);
 	if (!initial_key) {
-		crypto_blkcipher_setkey(state->arc4, InterimKey, state->keylen);
-		setup_sg(sg_in, InterimKey, state->keylen);
+		crypto_blkcipher_setkey(state->arc4, state->sha1_digest,
+					state->keylen);
+		setup_sg(sg_in, state->sha1_digest, state->keylen);
 		setup_sg(sg_out, state->session_key, state->keylen);
 		if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
 					     state->keylen) != 0) {
     		    printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n");
 		}
 	} else {
-		memcpy(state->session_key, InterimKey, state->keylen);
+		memcpy(state->session_key, state->sha1_digest, state->keylen);
 	}
 	if (state->keylen == 8) {
 		/* See RFC 3078 */

+ 1 - 2
drivers/net/pppoe.c

@@ -879,8 +879,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
 	dev->hard_header(skb, dev, ETH_P_PPP_SES,
 			 po->pppoe_pa.remote, NULL, data_len);
 
-	if (dev_queue_xmit(skb) < 0)
-		goto abort;
+	dev_queue_xmit(skb);
 
 	return 1;
 

+ 53 - 65
drivers/net/pppol2tp.c

@@ -491,44 +491,46 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb)
 	u16 hdrflags;
 	u16 tunnel_id, session_id;
 	int length;
-	struct udphdr *uh;
+	int offset;
 
 	tunnel = pppol2tp_sock_to_tunnel(sock);
 	if (tunnel == NULL)
 		goto error;
 
+	/* UDP always verifies the packet length. */
+	__skb_pull(skb, sizeof(struct udphdr));
+
 	/* Short packet? */
-	if (skb->len < sizeof(struct udphdr)) {
+	if (!pskb_may_pull(skb, 12)) {
 		PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
 		       "%s: recv short packet (len=%d)\n", tunnel->name, skb->len);
 		goto error;
 	}
 
 	/* Point to L2TP header */
-	ptr = skb->data + sizeof(struct udphdr);
+	ptr = skb->data;
 
 	/* Get L2TP header flags */
 	hdrflags = ntohs(*(__be16*)ptr);
 
 	/* Trace packet contents, if enabled */
 	if (tunnel->debug & PPPOL2TP_MSG_DATA) {
+		length = min(16u, skb->len);
+		if (!pskb_may_pull(skb, length))
+			goto error;
+
 		printk(KERN_DEBUG "%s: recv: ", tunnel->name);
 
-		for (length = 0; length < 16; length++)
-			printk(" %02X", ptr[length]);
+		offset = 0;
+		do {
+			printk(" %02X", ptr[offset]);
+		} while (++offset < length);
+
 		printk("\n");
 	}
 
 	/* Get length of L2TP packet */
-	uh = (struct udphdr *) skb_transport_header(skb);
-	length = ntohs(uh->len) - sizeof(struct udphdr);
-
-	/* Too short? */
-	if (length < 12) {
-		PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
-		       "%s: recv short L2TP packet (len=%d)\n", tunnel->name, length);
-		goto error;
-	}
+	length = skb->len;
 
 	/* If type is control packet, it is handled by userspace. */
 	if (hdrflags & L2TP_HDRFLAG_T) {
@@ -606,7 +608,6 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb)
 			       "%s: recv data has no seq numbers when required. "
 			       "Discarding\n", session->name);
 			session->stats.rx_seq_discards++;
-			session->stats.rx_errors++;
 			goto discard;
 		}
 
@@ -625,7 +626,6 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb)
 			       "%s: recv data has no seq numbers when required. "
 			       "Discarding\n", session->name);
 			session->stats.rx_seq_discards++;
-			session->stats.rx_errors++;
 			goto discard;
 		}
 
@@ -634,10 +634,14 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb)
 	}
 
 	/* If offset bit set, skip it. */
-	if (hdrflags & L2TP_HDRFLAG_O)
-		ptr += 2 + ntohs(*(__be16 *) ptr);
+	if (hdrflags & L2TP_HDRFLAG_O) {
+		offset = ntohs(*(__be16 *)ptr);
+		skb->transport_header += 2 + offset;
+		if (!pskb_may_pull(skb, skb_transport_offset(skb) + 2))
+			goto discard;
+	}
 
-	skb_pull(skb, ptr - skb->data);
+	__skb_pull(skb, skb_transport_offset(skb));
 
 	/* Skip PPP header, if present.	 In testing, Microsoft L2TP clients
 	 * don't send the PPP header (PPP header compression enabled), but
@@ -673,7 +677,6 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb)
 			 */
 			if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
 				session->stats.rx_seq_discards++;
-				session->stats.rx_errors++;
 				PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
 				       "%s: oos pkt %hu len %d discarded, "
 				       "waiting for %hu, reorder_q_len=%d\n",
@@ -698,6 +701,7 @@ static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb)
 	return 0;
 
 discard:
+	session->stats.rx_errors++;
 	kfree_skb(skb);
 	sock_put(session->sock);
 
@@ -958,7 +962,6 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 	int data_len = skb->len;
 	struct inet_sock *inet;
 	__wsum csum = 0;
-	struct sk_buff *skb2 = NULL;
 	struct udphdr *uh;
 	unsigned int len;
 
@@ -989,41 +992,30 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 	 */
 	headroom = NET_SKB_PAD + sizeof(struct iphdr) +
 		sizeof(struct udphdr) + hdr_len + sizeof(ppph);
-	if (skb_headroom(skb) < headroom) {
-		skb2 = skb_realloc_headroom(skb, headroom);
-		if (skb2 == NULL)
-			goto abort;
-	} else
-		skb2 = skb;
-
-	/* Check that the socket has room */
-	if (atomic_read(&sk_tun->sk_wmem_alloc) < sk_tun->sk_sndbuf)
-		skb_set_owner_w(skb2, sk_tun);
-	else
-		goto discard;
+	if (skb_cow_head(skb, headroom))
+		goto abort;
 
 	/* Setup PPP header */
-	skb_push(skb2, sizeof(ppph));
-	skb2->data[0] = ppph[0];
-	skb2->data[1] = ppph[1];
+	__skb_push(skb, sizeof(ppph));
+	skb->data[0] = ppph[0];
+	skb->data[1] = ppph[1];
 
 	/* Setup L2TP header */
-	skb_push(skb2, hdr_len);
-	pppol2tp_build_l2tp_header(session, skb2->data);
+	pppol2tp_build_l2tp_header(session, __skb_push(skb, hdr_len));
 
 	/* Setup UDP header */
 	inet = inet_sk(sk_tun);
-	skb_push(skb2, sizeof(struct udphdr));
-	skb_reset_transport_header(skb2);
-	uh = (struct udphdr *) skb2->data;
+	__skb_push(skb, sizeof(*uh));
+	skb_reset_transport_header(skb);
+	uh = udp_hdr(skb);
 	uh->source = inet->sport;
 	uh->dest = inet->dport;
 	uh->len = htons(sizeof(struct udphdr) + hdr_len + sizeof(ppph) + data_len);
 	uh->check = 0;
 
-	/* Calculate UDP checksum if configured to do so */
+	/* *BROKEN* Calculate UDP checksum if configured to do so */
 	if (sk_tun->sk_no_check != UDP_CSUM_NOXMIT)
-		csum = udp_csum_outgoing(sk_tun, skb2);
+		csum = udp_csum_outgoing(sk_tun, skb);
 
 	/* Debug */
 	if (session->send_seq)
@@ -1036,7 +1028,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 
 	if (session->debug & PPPOL2TP_MSG_DATA) {
 		int i;
-		unsigned char *datap = skb2->data;
+		unsigned char *datap = skb->data;
 
 		printk(KERN_DEBUG "%s: xmit:", session->name);
 		for (i = 0; i < data_len; i++) {
@@ -1049,18 +1041,18 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 		printk("\n");
 	}
 
-	memset(&(IPCB(skb2)->opt), 0, sizeof(IPCB(skb2)->opt));
-	IPCB(skb2)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
-			       IPSKB_REROUTED);
-	nf_reset(skb2);
+	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+			      IPSKB_REROUTED);
+	nf_reset(skb);
 
 	/* Get routing info from the tunnel socket */
-	dst_release(skb2->dst);
-	skb2->dst = sk_dst_get(sk_tun);
+	dst_release(skb->dst);
+	skb->dst = sk_dst_get(sk_tun);
 
 	/* Queue the packet to IP for output */
-	len = skb2->len;
-	rc = ip_queue_xmit(skb2, 1);
+	len = skb->len;
+	rc = ip_queue_xmit(skb, 1);
 
 	/* Update stats */
 	if (rc >= 0) {
@@ -1073,17 +1065,12 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 		session->stats.tx_errors++;
 	}
 
-	/* Free the original skb */
-	kfree_skb(skb);
-
 	return 1;
 
-discard:
-	/* Free the new skb. Caller will free original skb. */
-	if (skb2 != skb)
-		kfree_skb(skb2);
 abort:
-	return 0;
+	/* Free the original skb */
+	kfree_skb(skb);
+	return 1;
 }
 
 /*****************************************************************************
@@ -1326,12 +1313,14 @@ static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id,
 		goto err;
 	}
 
+	sk = sock->sk;
+
 	/* Quick sanity checks */
-	err = -ESOCKTNOSUPPORT;
-	if (sock->type != SOCK_DGRAM) {
+	err = -EPROTONOSUPPORT;
+	if (sk->sk_protocol != IPPROTO_UDP) {
 		PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
-		       "tunl %hu: fd %d wrong type, got %d, expected %d\n",
-		       tunnel_id, fd, sock->type, SOCK_DGRAM);
+		       "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
+		       tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
 		goto err;
 	}
 	err = -EAFNOSUPPORT;
@@ -1343,7 +1332,6 @@ static struct sock *pppol2tp_prepare_tunnel_socket(int fd, u16 tunnel_id,
 	}
 
 	err = -ENOTCONN;
-	sk = sock->sk;
 
 	/* Check if this socket has already been prepped */
 	tunnel = (struct pppol2tp_tunnel *)sk->sk_user_data;

+ 7 - 0
drivers/net/qla3xxx.c

@@ -2248,6 +2248,13 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
 		qdev->rsp_consumer_index) && (work_done < work_to_do)) {
 
 		net_rsp = qdev->rsp_current;
+		rmb();
+		/*
+		 * Fix 4032 chipe undocumented "feature" where bit-8 is set if the
+		 * inbound completion is for a VLAN.
+		 */
+		if (qdev->device_id == QL3032_DEVICE_ID)
+			net_rsp->opcode &= 0x7f;
 		switch (net_rsp->opcode) {
 
 		case OPCODE_OB_MAC_IOCB_FN0:

+ 13 - 1
drivers/net/r8169.c

@@ -1228,7 +1228,10 @@ static void rtl8169_hw_phy_config(struct net_device *dev)
 		return;
 	}
 
-	/* phy config for RTL8169s mac_version C chip */
+	if ((tp->mac_version != RTL_GIGA_MAC_VER_02) &&
+	    (tp->mac_version != RTL_GIGA_MAC_VER_03))
+		return;
+
 	mdio_write(ioaddr, 31, 0x0001);			//w 31 2 0 1
 	mdio_write(ioaddr, 21, 0x1000);			//w 21 15 0 1000
 	mdio_write(ioaddr, 24, 0x65c7);			//w 24 15 0 65c7
@@ -2567,6 +2570,15 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
 		    (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
 			netif_wake_queue(dev);
 		}
+		/*
+		 * 8168 hack: TxPoll requests are lost when the Tx packets are
+		 * too close. Let's kick an extra TxPoll request when a burst
+		 * of start_xmit activity is detected (if it is not detected,
+		 * it is slow enough). -- FR
+		 */
+		smp_rmb();
+		if (tp->cur_tx != dirty_tx)
+			RTL_W8(TxPoll, NPQ);
 	}
 }
 

+ 293 - 115
drivers/net/sky2.c

@@ -51,7 +51,7 @@
 #include "sky2.h"
 #include "sky2.h"
 
 
 #define DRV_NAME		"sky2"
 #define DRV_NAME		"sky2"
-#define DRV_VERSION		"1.17"
+#define DRV_VERSION		"1.18"
 #define PFX			DRV_NAME " "
 #define PFX			DRV_NAME " "
 
 
 /*
 /*
@@ -118,12 +118,15 @@ static const struct pci_device_id sky2_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4354) }, /* 88E8040 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x435A) }, /* 88E8048 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) }, /* 88E8070 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
@@ -147,6 +150,7 @@ static const char *yukon2_name[] = {
 	"Extreme",	/* 0xb5 */
 	"Extreme",	/* 0xb5 */
 	"EC",		/* 0xb6 */
 	"EC",		/* 0xb6 */
 	"FE",		/* 0xb7 */
 	"FE",		/* 0xb7 */
+	"FE+",		/* 0xb8 */
 };
 };
 
 
 static void sky2_set_multicast(struct net_device *dev);
 static void sky2_set_multicast(struct net_device *dev);
@@ -217,8 +221,7 @@ static void sky2_power_on(struct sky2_hw *hw)
 	else
 	else
 		sky2_write8(hw, B2_Y2_CLK_GATE, 0);
 		sky2_write8(hw, B2_Y2_CLK_GATE, 0);
 
 
-	if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
-	    hw->chip_id == CHIP_ID_YUKON_EX) {
+	if (hw->flags & SKY2_HW_ADV_POWER_CTL) {
 		u32 reg;
 		u32 reg;
 
 
 		sky2_pci_write32(hw, PCI_DEV_REG3, 0);
 		sky2_pci_write32(hw, PCI_DEV_REG3, 0);
@@ -311,10 +314,8 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 	struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
 	u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg;
 
-	if (sky2->autoneg == AUTONEG_ENABLE
-	    && !(hw->chip_id == CHIP_ID_YUKON_XL
-		 || hw->chip_id == CHIP_ID_YUKON_EC_U
-		 || hw->chip_id == CHIP_ID_YUKON_EX)) {
+	if (sky2->autoneg == AUTONEG_ENABLE &&
+	    !(hw->flags & SKY2_HW_NEWER_PHY)) {
 		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
 
 		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
@@ -334,9 +335,19 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 
 	ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
 	if (sky2_is_copper(hw)) {
-		if (hw->chip_id == CHIP_ID_YUKON_FE) {
+		if (!(hw->flags & SKY2_HW_GIGABIT)) {
 			/* enable automatic crossover */
 			ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
+
+			if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
+			    hw->chip_rev == CHIP_REV_YU_FE2_A0) {
+				u16 spec;
+
+				/* Enable Class A driver for FE+ A0 */
+				spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2);
+				spec |= PHY_M_FESC_SEL_CL_A;
+				gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
+			}
 		} else {
 			/* disable energy detect */
 			ctrl &= ~PHY_M_PC_EN_DET_MSK;
@@ -346,9 +357,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 
 			/* downshift on PHY 88E1112 and 88E1149 is changed */
 			if (sky2->autoneg == AUTONEG_ENABLE
-			    && (hw->chip_id == CHIP_ID_YUKON_XL
-				|| hw->chip_id == CHIP_ID_YUKON_EC_U
-				|| hw->chip_id == CHIP_ID_YUKON_EX)) {
+			    && (hw->flags & SKY2_HW_NEWER_PHY)) {
 				/* set downshift counter to 3x and enable downshift */
 				ctrl &= ~PHY_M_PC_DSC_MSK;
 				ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
@@ -364,7 +373,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 	gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
 
 	/* special setup for PHY 88E1112 Fiber */
-	if (hw->chip_id == CHIP_ID_YUKON_XL && !sky2_is_copper(hw)) {
+	if (hw->chip_id == CHIP_ID_YUKON_XL && (hw->flags & SKY2_HW_FIBRE_PHY)) {
 		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
 
 		/* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
@@ -455,7 +464,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 
 	gma_write16(hw, port, GM_GP_CTRL, reg);
 
-	if (hw->chip_id != CHIP_ID_YUKON_FE)
+	if (hw->flags & SKY2_HW_GIGABIT)
 		gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
 
 	gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
@@ -479,6 +488,23 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 		gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
 		break;
 
+	case CHIP_ID_YUKON_FE_P:
+		/* Enable Link Partner Next Page */
+		ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+		ctrl |= PHY_M_PC_ENA_LIP_NP;
+
+		/* disable Energy Detect and enable scrambler */
+		ctrl &= ~(PHY_M_PC_ENA_ENE_DT | PHY_M_PC_DIS_SCRAMB);
+		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+		/* set LED2 -> ACT, LED1 -> LINK, LED0 -> SPEED */
+		ctrl = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_ACT_BL) |
+			PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_LINK) |
+			PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED);
+
+		gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
+		break;
+
 	case CHIP_ID_YUKON_XL:
 		pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
 
@@ -548,7 +574,13 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 
 		/* set page register to 0 */
 		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
+	} else if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
+		   hw->chip_rev == CHIP_REV_YU_FE2_A0) {
+		/* apply workaround for integrated resistors calibration */
+		gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17);
+		gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60);
 	} else if (hw->chip_id != CHIP_ID_YUKON_EX) {
+		/* no effect on Yukon-XL */
 		gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
 
 		if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
@@ -669,25 +701,25 @@ static void sky2_wol_init(struct sky2_port *sky2)
 
 static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
 {
-	if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev != CHIP_REV_YU_EX_A0) {
+	struct net_device *dev = hw->dev[port];
+
+	if (dev->mtu <= ETH_DATA_LEN)
 		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
-			     TX_STFW_ENA |
-			     (hw->dev[port]->mtu > ETH_DATA_LEN) ? TX_JUMBO_ENA : TX_JUMBO_DIS);
-	} else {
-		if (hw->dev[port]->mtu > ETH_DATA_LEN) {
-			/* set Tx GMAC FIFO Almost Empty Threshold */
-			sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
-				     (ECU_JUMBO_WM << 16) | ECU_AE_THR);
+			     TX_JUMBO_DIS | TX_STFW_ENA);
 
-			sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
-				     TX_JUMBO_ENA | TX_STFW_DIS);
+	else if (hw->chip_id != CHIP_ID_YUKON_EC_U)
+		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+			     TX_STFW_ENA | TX_JUMBO_ENA);
+	else {
+		/* set Tx GMAC FIFO Almost Empty Threshold */
+		sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
+			     (ECU_JUMBO_WM << 16) | ECU_AE_THR);
 
-			/* Can't do offload because of lack of store/forward */
-			hw->dev[port]->features &= ~(NETIF_F_TSO | NETIF_F_SG
-						     | NETIF_F_ALL_CSUM);
-		} else
-			sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
-				     TX_JUMBO_DIS | TX_STFW_ENA);
+		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+			     TX_JUMBO_ENA | TX_STFW_DIS);
+
+		/* Can't do offload because of lack of store/forward */
+		dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | NETIF_F_ALL_CSUM);
 	}
 }
 
@@ -773,7 +805,8 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
 	/* Configure Rx MAC FIFO */
 	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
 	rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
-	if (hw->chip_id == CHIP_ID_YUKON_EX)
+	if (hw->chip_id == CHIP_ID_YUKON_EX ||
+	    hw->chip_id == CHIP_ID_YUKON_FE_P)
 		rx_reg |= GMF_RX_OVER_ON;
 
 	sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg);
@@ -782,13 +815,19 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
 	sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
 
 	/* Set threshold to 0xa (64 bytes) + 1 to workaround pause bug  */
-	sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);
+	reg = RX_GMF_FL_THR_DEF + 1;
+	/* Another magic mystery workaround from sk98lin */
+	if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
+	    hw->chip_rev == CHIP_REV_YU_FE2_A0)
+		reg = 0x178;
+	sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), reg);
 
 	/* Configure Tx MAC FIFO */
 	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
 	sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
 
-	if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) {
+	/* On chips without ram buffer, pause is controled by MAC level */
+	if (sky2_read8(hw, B2_E_0) == 0) {
 		sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
 		sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
 
@@ -871,6 +910,20 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
 	return le;
 }
 
+static void tx_init(struct sky2_port *sky2)
+{
+	struct sky2_tx_le *le;
+
+	sky2->tx_prod = sky2->tx_cons = 0;
+	sky2->tx_tcpsum = 0;
+	sky2->tx_last_mss = 0;
+
+	le = get_tx_le(sky2);
+	le->addr = 0;
+	le->opcode = OP_ADDR64 | HW_OWNER;
+	sky2->tx_addr64 = 0;
+}
+
 static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2,
 					    struct sky2_tx_le *le)
 {
@@ -967,19 +1020,15 @@ static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
  */
 static void rx_set_checksum(struct sky2_port *sky2)
 {
-	struct sky2_rx_le *le;
-
-	if (sky2->hw->chip_id != CHIP_ID_YUKON_EX) {
-		le = sky2_next_rx(sky2);
-		le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN);
-		le->ctrl = 0;
-		le->opcode = OP_TCPSTART | HW_OWNER;
+	struct sky2_rx_le *le = sky2_next_rx(sky2);
 
-		sky2_write32(sky2->hw,
-			     Q_ADDR(rxqaddr[sky2->port], Q_CSR),
-			     sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
-	}
+	le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN);
+	le->ctrl = 0;
+	le->opcode = OP_TCPSTART | HW_OWNER;
 
+	sky2_write32(sky2->hw,
+		     Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+		     sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
 }
 
 /*
@@ -1175,7 +1224,8 @@ static int sky2_rx_start(struct sky2_port *sky2)
 
 	sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
 
-	rx_set_checksum(sky2);
+	if (!(hw->flags & SKY2_HW_NEW_LE))
+		rx_set_checksum(sky2);
 
 	/* Space needed for frame data + headers rounded up */
 	size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
@@ -1246,7 +1296,7 @@ static int sky2_up(struct net_device *dev)
 	struct sky2_port *sky2 = netdev_priv(dev);
 	struct sky2_hw *hw = sky2->hw;
 	unsigned port = sky2->port;
-	u32 ramsize, imask;
+	u32 imask, ramsize;
 	int cap, err = -ENOMEM;
 	struct net_device *otherdev = hw->dev[sky2->port^1];
 
@@ -1284,7 +1334,8 @@ static int sky2_up(struct net_device *dev)
 				GFP_KERNEL);
 	if (!sky2->tx_ring)
 		goto err_out;
-	sky2->tx_prod = sky2->tx_cons = 0;
+
+	tx_init(sky2);
 
 	sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES,
 					   &sky2->rx_le_map);
@@ -1303,11 +1354,10 @@ static int sky2_up(struct net_device *dev)
 
 	/* Register is number of 4K blocks on internal RAM buffer. */
 	ramsize = sky2_read8(hw, B2_E_0) * 4;
-	printk(KERN_INFO PFX "%s: ram buffer %dK\n", dev->name, ramsize);
-
 	if (ramsize > 0) {
 		u32 rxspace;
 
+		pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
 		if (ramsize < 16)
 			rxspace = ramsize / 2;
 		else
@@ -1436,13 +1486,15 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 	/* Check for TCP Segmentation Offload */
 	mss = skb_shinfo(skb)->gso_size;
 	if (mss != 0) {
-		if (hw->chip_id != CHIP_ID_YUKON_EX)
+
+		if (!(hw->flags & SKY2_HW_NEW_LE))
 			mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
 
  		if (mss != sky2->tx_last_mss) {
  			le = get_tx_le(sky2);
  			le->addr = cpu_to_le32(mss);
- 			if (hw->chip_id == CHIP_ID_YUKON_EX)
+
+			if (hw->flags & SKY2_HW_NEW_LE)
 				le->opcode = OP_MSS | HW_OWNER;
 			else
 				le->opcode = OP_LRGLEN | HW_OWNER;
@@ -1468,8 +1520,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 	/* Handle TCP checksum offload */
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		/* On Yukon EX (some versions) encoding change. */
- 		if (hw->chip_id == CHIP_ID_YUKON_EX
-		    && hw->chip_rev != CHIP_REV_YU_EX_B0)
+ 		if (hw->flags & SKY2_HW_AUTO_TX_SUM)
  			ctrl |= CALSUM;	/* auto checksum */
 		else {
 			const unsigned offset = skb_transport_offset(skb);
@@ -1622,9 +1673,6 @@ static int sky2_down(struct net_device *dev)
 	if (netif_msg_ifdown(sky2))
 		printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
 
-	if (netif_carrier_ok(dev) && --hw->active == 0)
-		del_timer(&hw->watchdog_timer);
-
 	/* Stop more packets from being queued */
 	netif_stop_queue(dev);
 
@@ -1708,11 +1756,15 @@ static int sky2_down(struct net_device *dev)
 
 static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
 {
-	if (!sky2_is_copper(hw))
+	if (hw->flags & SKY2_HW_FIBRE_PHY)
 		return SPEED_1000;
 
-	if (hw->chip_id == CHIP_ID_YUKON_FE)
-		return (aux & PHY_M_PS_SPEED_100) ? SPEED_100 : SPEED_10;
+	if (!(hw->flags & SKY2_HW_GIGABIT)) {
+		if (aux & PHY_M_PS_SPEED_100)
+			return SPEED_100;
+		else
+			return SPEED_10;
+	}
 
 	switch (aux & PHY_M_PS_SPEED_MSK) {
 	case PHY_M_PS_SPEED_1000:
@@ -1745,17 +1797,13 @@ static void sky2_link_up(struct sky2_port *sky2)
 
 	netif_carrier_on(sky2->netdev);
 
-	if (hw->active++ == 0)
-		mod_timer(&hw->watchdog_timer, jiffies + 1);
-
+	mod_timer(&hw->watchdog_timer, jiffies + 1);
 
 	/* Turn on link LED */
 	sky2_write8(hw, SK_REG(port, LNK_LED_REG),
 		    LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
 
-	if (hw->chip_id == CHIP_ID_YUKON_XL
-	    || hw->chip_id == CHIP_ID_YUKON_EC_U
-	    || hw->chip_id == CHIP_ID_YUKON_EX) {
+	if (hw->flags & SKY2_HW_NEWER_PHY) {
 		u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
 		u16 led = PHY_M_LEDC_LOS_CTRL(1);	/* link active */
 
@@ -1800,11 +1848,6 @@ static void sky2_link_down(struct sky2_port *sky2)
 
 	netif_carrier_off(sky2->netdev);
 
-	/* Stop watchdog if both ports are not active */
-	if (--hw->active == 0)
-		del_timer(&hw->watchdog_timer);
-
-
 	/* Turn on link LED */
 	sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
 
@@ -1847,7 +1890,7 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
 	/* Since the pause result bits seem to in different positions on
 	 * different chips. look at registers.
 	 */
-	if (!sky2_is_copper(hw)) {
+	if (hw->flags & SKY2_HW_FIBRE_PHY) {
 		/* Shift for bits in fiber PHY */
 		advert &= ~(ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM);
 		lpa &= ~(LPA_PAUSE_CAP|LPA_PAUSE_ASYM);
@@ -1958,7 +2001,9 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
 	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
 		return -EINVAL;
 
-	if (new_mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_FE)
+	if (new_mtu > ETH_DATA_LEN &&
+	    (hw->chip_id == CHIP_ID_YUKON_FE ||
+	     hw->chip_id == CHIP_ID_YUKON_FE_P))
 		return -EINVAL;
 
 	if (!netif_running(dev)) {
@@ -1975,7 +2020,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
 
 	synchronize_irq(hw->pdev->irq);
 
-	if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX)
+	if (sky2_read8(hw, B2_E_0) == 0)
 		sky2_set_tx_stfwd(hw, port);
 
 	ctl = gma_read16(hw, port, GM_GP_CTRL);
@@ -2103,6 +2148,13 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
  	struct sky2_port *sky2 = netdev_priv(dev);
 	struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next;
 	struct sk_buff *skb = NULL;
+	u16 count = (status & GMR_FS_LEN) >> 16;
+
+#ifdef SKY2_VLAN_TAG_USED
+	/* Account for vlan tag */
+	if (sky2->vlgrp && (status & GMR_FS_VLAN))
+		count -= VLAN_HLEN;
+#endif
 
 	if (unlikely(netif_msg_rx_status(sky2)))
 		printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n",
@@ -2111,15 +2163,29 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
 	sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
 	prefetch(sky2->rx_ring + sky2->rx_next);
 
+	if (length < ETH_ZLEN || length > sky2->rx_data_size)
+		goto len_error;
+
+	/* This chip has hardware problems that generates bogus status.
+	 * So do only marginal checking and expect higher level protocols
+	 * to handle crap frames.
+	 */
+	if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
+	    sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 &&
+	    length != count)
+		goto okay;
+
 	if (status & GMR_FS_ANY_ERR)
 		goto error;
 
 	if (!(status & GMR_FS_RX_OK))
 		goto resubmit;
 
-	if (status >> 16 != length)
-		goto len_mismatch;
+	/* if length reported by DMA does not match PHY, packet was truncated */
+	if (length != count)
+		goto len_error;
 
+okay:
 	if (length < copybreak)
 		skb = receive_copy(sky2, re, length);
 	else
@@ -2129,10 +2195,14 @@ resubmit:
 
 	return skb;
 
-len_mismatch:
+len_error:
 	/* Truncation of overlength packets
 	   causes PHY length to not match MAC length */
 	++sky2->net_stats.rx_length_errors;
+	if (netif_msg_rx_err(sky2) && net_ratelimit())
+		pr_info(PFX "%s: rx length error: status %#x length %d\n",
+			dev->name, status, length);
+	goto resubmit;
 
 error:
 	++sky2->net_stats.rx_errors;
@@ -2202,7 +2272,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
 			}
 
 			/* This chip reports checksum status differently */
-			if (hw->chip_id == CHIP_ID_YUKON_EX) {
+			if (hw->flags & SKY2_HW_NEW_LE) {
 				if (sky2->rx_csum &&
 				    (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) &&
 				    (le->css & CSS_TCPUDPCSOK))
@@ -2243,8 +2313,14 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
 			if (!sky2->rx_csum)
 				break;
 
-			if (hw->chip_id == CHIP_ID_YUKON_EX)
+			/* If this happens then driver assuming wrong format */
+			if (unlikely(hw->flags & SKY2_HW_NEW_LE)) {
+				if (net_ratelimit())
+					printk(KERN_NOTICE "%s: unexpected"
+					       " checksum status\n",
+					       dev->name);
 				break;
+			}
 
 			/* Both checksum counters are programmed to start at
 			 * the same offset, so unless there is a problem they
@@ -2436,20 +2512,72 @@ static void sky2_le_error(struct sky2_hw *hw, unsigned port,
 	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK);
 }
 
-/* Check for lost IRQ once a second */
+static int sky2_rx_hung(struct net_device *dev)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	unsigned rxq = rxqaddr[port];
+	u32 mac_rp = sky2_read32(hw, SK_REG(port, RX_GMF_RP));
+	u8 mac_lev = sky2_read8(hw, SK_REG(port, RX_GMF_RLEV));
+	u8 fifo_rp = sky2_read8(hw, Q_ADDR(rxq, Q_RP));
+	u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL));
+
+	/* If idle and MAC or PCI is stuck */
+	if (sky2->check.last == dev->last_rx &&
+	    ((mac_rp == sky2->check.mac_rp &&
+	      mac_lev != 0 && mac_lev >= sky2->check.mac_lev) ||
+	     /* Check if the PCI RX hang */
+	     (fifo_rp == sky2->check.fifo_rp &&
+	      fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) {
+		printk(KERN_DEBUG PFX "%s: hung mac %d:%d fifo %d (%d:%d)\n",
+		       dev->name, mac_lev, mac_rp, fifo_lev, fifo_rp,
+		       sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
+		return 1;
+	} else {
+		sky2->check.last = dev->last_rx;
+		sky2->check.mac_rp = mac_rp;
+		sky2->check.mac_lev = mac_lev;
+		sky2->check.fifo_rp = fifo_rp;
+		sky2->check.fifo_lev = fifo_lev;
+		return 0;
+	}
+}
+
 static void sky2_watchdog(unsigned long arg)
 {
 	struct sky2_hw *hw = (struct sky2_hw *) arg;
+	struct net_device *dev;
 
+	/* Check for lost IRQ once a second */
 	if (sky2_read32(hw, B0_ISRC)) {
-		struct net_device *dev = hw->dev[0];
-
+		dev = hw->dev[0];
 		if (__netif_rx_schedule_prep(dev))
 			__netif_rx_schedule(dev);
+	} else {
+		int i, active = 0;
+
+		for (i = 0; i < hw->ports; i++) {
+			dev = hw->dev[i];
+			if (!netif_running(dev))
+				continue;
+			++active;
+
+			/* For chips with Rx FIFO, check if stuck */
+			if ((hw->flags & SKY2_HW_FIFO_HANG_CHECK) &&
+			     sky2_rx_hung(dev)) {
+				pr_info(PFX "%s: receiver hang detected\n",
+					dev->name);
+				schedule_work(&hw->restart_work);
+				return;
+			}
+		}
+
+		if (active == 0)
+			return;
 	}
 
-	if (hw->active > 0)
-		mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ));
+	mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ));
 }
 
 /* Hardware/software error handling */
@@ -2546,17 +2674,25 @@ static void sky2_netpoll(struct net_device *dev)
 #endif
 
 /* Chip internal frequency for clock calculations */
-static inline u32 sky2_mhz(const struct sky2_hw *hw)
+static u32 sky2_mhz(const struct sky2_hw *hw)
 {
 	switch (hw->chip_id) {
 	case CHIP_ID_YUKON_EC:
 	case CHIP_ID_YUKON_EC_U:
 	case CHIP_ID_YUKON_EX:
-		return 125;	/* 125 Mhz */
+		return 125;
+
 	case CHIP_ID_YUKON_FE:
-		return 100;	/* 100 Mhz */
-	default:		/* YUKON_XL */
-		return 156;	/* 156 Mhz */
+		return 100;
+
+	case CHIP_ID_YUKON_FE_P:
+		return 50;
+
+	case CHIP_ID_YUKON_XL:
+		return 156;
+
+	default:
+		BUG();
 	}
 }
 
@@ -2581,23 +2717,63 @@ static int __devinit sky2_init(struct sky2_hw *hw)
 	sky2_write8(hw, B0_CTST, CS_RST_CLR);
 	sky2_write8(hw, B0_CTST, CS_RST_CLR);
 
 
 	hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
 	hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
-	if (hw->chip_id < CHIP_ID_YUKON_XL || hw->chip_id > CHIP_ID_YUKON_FE) {
+	hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
+
+	switch(hw->chip_id) {
+	case CHIP_ID_YUKON_XL:
+		hw->flags = SKY2_HW_GIGABIT
+			| SKY2_HW_NEWER_PHY;
+		if (hw->chip_rev < 3)
+			hw->flags |= SKY2_HW_FIFO_HANG_CHECK;
+
+		break;
+
+	case CHIP_ID_YUKON_EC_U:
+		hw->flags = SKY2_HW_GIGABIT
+			| SKY2_HW_NEWER_PHY
+			| SKY2_HW_ADV_POWER_CTL;
+		break;
+
+	case CHIP_ID_YUKON_EX:
+		hw->flags = SKY2_HW_GIGABIT
+			| SKY2_HW_NEWER_PHY
+			| SKY2_HW_NEW_LE
+			| SKY2_HW_ADV_POWER_CTL;
+
+		/* New transmit checksum */
+		if (hw->chip_rev != CHIP_REV_YU_EX_B0)
+			hw->flags |= SKY2_HW_AUTO_TX_SUM;
+		break;
+
+	case CHIP_ID_YUKON_EC:
+		/* This rev is really old, and requires untested workarounds */
+		if (hw->chip_rev == CHIP_REV_YU_EC_A1) {
+			dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
+			return -EOPNOTSUPP;
+		}
+		hw->flags = SKY2_HW_GIGABIT | SKY2_HW_FIFO_HANG_CHECK;
+		break;
+
+	case CHIP_ID_YUKON_FE:
+		break;
+
+	case CHIP_ID_YUKON_FE_P:
+		hw->flags = SKY2_HW_NEWER_PHY
+			| SKY2_HW_NEW_LE
+			| SKY2_HW_AUTO_TX_SUM
+			| SKY2_HW_ADV_POWER_CTL;
+		break;
+	default:
 		dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
 		dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
 			hw->chip_id);
 			hw->chip_id);
 		return -EOPNOTSUPP;
 		return -EOPNOTSUPP;
 	}
 	}
 
 
-	hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
+	hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
+	if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P')
+		hw->flags |= SKY2_HW_FIBRE_PHY;
 
 
-	/* This rev is really old, and requires untested workarounds */
-	if (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == CHIP_REV_YU_EC_A1) {
-		dev_err(&hw->pdev->dev, "unsupported revision Yukon-%s (0x%x) rev %d\n",
-			yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL],
-			hw->chip_id, hw->chip_rev);
-		return -EOPNOTSUPP;
-	}
 
-	hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
 	hw->ports = 1;
 	t8 = sky2_read8(hw, B2_Y2_HW_RES);
 	if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
@@ -2791,7 +2967,9 @@ static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 
 	sky2->wol = wol->wolopts;
 
-	if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX)
+	if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
+	    hw->chip_id == CHIP_ID_YUKON_EX ||
+	    hw->chip_id == CHIP_ID_YUKON_FE_P)
 		sky2_write32(hw, B0_CTST, sky2->wol
 			     ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
 
@@ -2809,7 +2987,7 @@ static u32 sky2_supported_modes(const struct sky2_hw *hw)
 			| SUPPORTED_100baseT_Full
 			| SUPPORTED_Autoneg | SUPPORTED_TP;
 
-		if (hw->chip_id != CHIP_ID_YUKON_FE)
+		if (hw->flags & SKY2_HW_GIGABIT)
 			modes |= SUPPORTED_1000baseT_Half
 				| SUPPORTED_1000baseT_Full;
 		return modes;
@@ -2829,13 +3007,6 @@ static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 	ecmd->supported = sky2_supported_modes(hw);
 	ecmd->phy_address = PHY_ADDR_MARV;
 	if (sky2_is_copper(hw)) {
-		ecmd->supported = SUPPORTED_10baseT_Half
-		    | SUPPORTED_10baseT_Full
-		    | SUPPORTED_100baseT_Half
-		    | SUPPORTED_100baseT_Full
-		    | SUPPORTED_1000baseT_Half
-		    | SUPPORTED_1000baseT_Full
-		    | SUPPORTED_Autoneg | SUPPORTED_TP;
 		ecmd->port = PORT_TP;
 		ecmd->speed = sky2->speed;
 	} else {
@@ -3814,8 +3985,12 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
 		dev->features |= NETIF_F_HIGHDMA;
 
 #ifdef SKY2_VLAN_TAG_USED
-	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-	dev->vlan_rx_register = sky2_vlan_rx_register;
+	/* The workaround for FE+ status conflicts with VLAN tag detection. */
+	if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
+	      sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) {
+		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+		dev->vlan_rx_register = sky2_vlan_rx_register;
+	}
 #endif
 
 	/* read the mac address */
@@ -3846,7 +4021,7 @@ static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id)
 		return IRQ_NONE;
 
 	if (status & Y2_IS_IRQ_SW) {
-		hw->msi = 1;
+		hw->flags |= SKY2_HW_USE_MSI;
 		wake_up(&hw->msi_wait);
 		sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
 	}
@@ -3874,9 +4049,9 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
 	sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
 	sky2_read8(hw, B0_CTST);
 
-	wait_event_timeout(hw->msi_wait, hw->msi, HZ/10);
+	wait_event_timeout(hw->msi_wait, (hw->flags & SKY2_HW_USE_MSI), HZ/10);
 
-	if (!hw->msi) {
+	if (!(hw->flags & SKY2_HW_USE_MSI)) {
 		/* MSI test failed, go back to INTx mode */
 		dev_info(&pdev->dev, "No interrupt generated using MSI, "
 			 "switching to INTx mode.\n");
@@ -4009,7 +4184,8 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
 		goto err_out_free_netdev;
 	}
 
-	err = request_irq(pdev->irq,  sky2_intr, hw->msi ? 0 : IRQF_SHARED,
+	err = request_irq(pdev->irq, sky2_intr,
+			  (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED,
 			  dev->name, hw);
 	if (err) {
 		dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
@@ -4042,7 +4218,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
 	return 0;
 
 err_out_unregister:
-	if (hw->msi)
+	if (hw->flags & SKY2_HW_USE_MSI)
 		pci_disable_msi(pdev);
 	unregister_netdev(dev);
 err_out_free_netdev:
@@ -4091,7 +4267,7 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
 	sky2_read8(hw, B0_CTST);
 
 	free_irq(pdev->irq, hw);
-	if (hw->msi)
+	if (hw->flags & SKY2_HW_USE_MSI)
 		pci_disable_msi(pdev);
 	pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
 	pci_release_regions(pdev);
@@ -4159,7 +4335,9 @@ static int sky2_resume(struct pci_dev *pdev)
 	pci_enable_wake(pdev, PCI_D0, 0);
 
 	/* Re-enable all clocks */
-	if (hw->chip_id == CHIP_ID_YUKON_EX || hw->chip_id == CHIP_ID_YUKON_EC_U)
+	if (hw->chip_id == CHIP_ID_YUKON_EX ||
+	    hw->chip_id == CHIP_ID_YUKON_EC_U ||
+	    hw->chip_id == CHIP_ID_YUKON_FE_P)
 		sky2_pci_write32(hw, PCI_DEV_REG3, 0);
 
 	sky2_reset(hw);

+ 33 - 8
drivers/net/sky2.h

@@ -470,18 +470,24 @@ enum {
 	CHIP_ID_YUKON_EX   = 0xb5, /* Chip ID for YUKON-2 Extreme */
 	CHIP_ID_YUKON_EC   = 0xb6, /* Chip ID for YUKON-2 EC */
  	CHIP_ID_YUKON_FE   = 0xb7, /* Chip ID for YUKON-2 FE */
-
+ 	CHIP_ID_YUKON_FE_P = 0xb8, /* Chip ID for YUKON-2 FE+ */
+};
+enum yukon_ec_rev {
 	CHIP_REV_YU_EC_A1    = 0,  /* Chip Rev. for Yukon-EC A1/A0 */
 	CHIP_REV_YU_EC_A2    = 1,  /* Chip Rev. for Yukon-EC A2 */
 	CHIP_REV_YU_EC_A3    = 2,  /* Chip Rev. for Yukon-EC A3 */
-
+};
+enum yukon_ec_u_rev {
 	CHIP_REV_YU_EC_U_A0  = 1,
 	CHIP_REV_YU_EC_U_A1  = 2,
 	CHIP_REV_YU_EC_U_B0  = 3,
-
+};
+enum yukon_fe_rev {
 	CHIP_REV_YU_FE_A1    = 1,
 	CHIP_REV_YU_FE_A2    = 2,
-
+};
+enum yukon_fe_p_rev {
+	CHIP_REV_YU_FE2_A0   = 0,
 };
 enum yukon_ex_rev {
 	CHIP_REV_YU_EX_A0    = 1,
@@ -1668,7 +1674,7 @@ enum {
 
 /* Receive Frame Status Encoding */
 enum {
-	GMR_FS_LEN	= 0xffff<<16, /* Bit 31..16:	Rx Frame Length */
+	GMR_FS_LEN	= 0x7fff<<16, /* Bit 30..16:	Rx Frame Length */
 	GMR_FS_VLAN	= 1<<13, /* VLAN Packet */
 	GMR_FS_JABBER	= 1<<12, /* Jabber Packet */
 	GMR_FS_UN_SIZE	= 1<<11, /* Undersize Packet */
@@ -1729,6 +1735,10 @@ enum {
 	GMF_RX_CTRL_DEF	= GMF_OPER_ON | GMF_RX_F_FL_ON,
 };
 
+/*	TX_GMF_EA		32 bit	Tx GMAC FIFO End Address */
+enum {
+	TX_DYN_WM_ENA	= 3,	/* Yukon-FE+ specific */
+};
 
 /*	TX_GMF_CTRL_T	32 bit	Tx GMAC FIFO Control/Test */
 enum {
@@ -2017,6 +2027,14 @@ struct sky2_port {
 	u16		     rx_tag;
 	struct vlan_group    *vlgrp;
 #endif
+	struct {
+		unsigned long last;
+		u32	mac_rp;
+		u8	mac_lev;
+		u8	fifo_rp;
+		u8	fifo_lev;
+	} check;
+
 
 	dma_addr_t	     rx_le_map;
 	dma_addr_t	     tx_le_map;
@@ -2040,12 +2058,20 @@ struct sky2_hw {
 	void __iomem  	     *regs;
 	struct pci_dev	     *pdev;
 	struct net_device    *dev[2];
+	unsigned long	     flags;
+#define SKY2_HW_USE_MSI		0x00000001
+#define SKY2_HW_FIBRE_PHY	0x00000002
+#define SKY2_HW_GIGABIT		0x00000004
+#define SKY2_HW_NEWER_PHY	0x00000008
+#define SKY2_HW_FIFO_HANG_CHECK	0x00000010
+#define SKY2_HW_NEW_LE		0x00000020	/* new LSOv2 format */
+#define SKY2_HW_AUTO_TX_SUM	0x00000040	/* new IP decode for Tx */
+#define SKY2_HW_ADV_POWER_CTL	0x00000080	/* additional PHY power regs */
 
 	u8	     	     chip_id;
 	u8		     chip_rev;
 	u8		     pmd_type;
 	u8		     ports;
-	u8		     active;
 
 	struct sky2_status_le *st_le;
 	u32		     st_idx;
@@ -2053,13 +2079,12 @@ struct sky2_hw {
 
 	struct timer_list    watchdog_timer;
 	struct work_struct   restart_work;
-	int		     msi;
 	wait_queue_head_t    msi_wait;
 };
 
 static inline int sky2_is_copper(const struct sky2_hw *hw)
 {
-	return !(hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P');
+	return !(hw->flags & SKY2_HW_FIBRE_PHY);
 }
 
 /* Register accessor for memory mapped device */

+ 1 - 1
drivers/net/usb/dm9601.c

@@ -405,7 +405,7 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
 	dev->net->ethtool_ops = &dm9601_ethtool_ops;
 	dev->net->hard_header_len += DM_TX_OVERHEAD;
 	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
-	dev->rx_urb_size = dev->net->mtu + DM_RX_OVERHEAD;
+	dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD;
 
 	dev->mii.dev = dev->net;
 	dev->mii.mdio_read = dm9601_mdio_read;

+ 1 - 1
drivers/net/wireless/Makefile

@@ -43,7 +43,7 @@ obj-$(CONFIG_PCMCIA_RAYCS)	+= ray_cs.o
 obj-$(CONFIG_PCMCIA_WL3501)	+= wl3501_cs.o
 
 obj-$(CONFIG_USB_ZD1201)	+= zd1201.o
-obj-$(CONFIG_LIBERTAS_USB)     += libertas/
+obj-$(CONFIG_LIBERTAS)		+= libertas/
 
 rtl8187-objs		:= rtl8187_dev.o rtl8187_rtl8225.o
 obj-$(CONFIG_RTL8187)	+= rtl8187.o

+ 3 - 4
drivers/pci/quirks.c

@@ -1444,7 +1444,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos);
 static void __devinit quirk_e100_interrupt(struct pci_dev *dev)
 {
 	u16 command;
-	u32 bar;
 	u8 __iomem *csr;
 	u8 cmd_hi;
 
@@ -1476,12 +1475,12 @@ static void __devinit quirk_e100_interrupt(struct pci_dev *dev)
 	 * re-enable them when it's ready.
 	 */
 	pci_read_config_word(dev, PCI_COMMAND, &command);
-	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &bar);
 
-	if (!(command & PCI_COMMAND_MEMORY) || !bar)
+	if (!(command & PCI_COMMAND_MEMORY) || !pci_resource_start(dev, 0))
 		return;
 
-	csr = ioremap(bar, 8);
+	/* Convert from PCI bus to resource space.  */
+	csr = ioremap(pci_resource_start(dev, 0), 8);
 	if (!csr) {
 		printk(KERN_WARNING "PCI: Can't map %s e100 registers\n",
 			pci_name(dev));

+ 1 - 0
drivers/power/power_supply_sysfs.c

@@ -289,6 +289,7 @@ int power_supply_uevent(struct device *dev, char **envp, int num_envp,
 		if (ret)
 			goto out;
 	}
+	envp[i] = NULL;
 
 out:
 	free_page((unsigned long)prop_buf);

+ 2 - 2
drivers/scsi/aic94xx/aic94xx_task.c

@@ -451,7 +451,7 @@ static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task,
 	struct scb *scb;
 
 	pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_req, 1,
-		   PCI_DMA_FROMDEVICE);
+		   PCI_DMA_TODEVICE);
 	pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_resp, 1,
 		   PCI_DMA_FROMDEVICE);
 
@@ -486,7 +486,7 @@ static void asd_unbuild_smp_ascb(struct asd_ascb *a)
 
 	BUG_ON(!task);
 	pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_req, 1,
-		     PCI_DMA_FROMDEVICE);
+		     PCI_DMA_TODEVICE);
 	pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_resp, 1,
 		     PCI_DMA_FROMDEVICE);
 }

+ 2 - 1
drivers/scsi/esp_scsi.c

@@ -2314,6 +2314,7 @@ int __devinit scsi_esp_register(struct esp *esp, struct device *dev)
 	esp->host->transportt = esp_transport_template;
 	esp->host->max_lun = ESP_MAX_LUN;
 	esp->host->cmd_per_lun = 2;
+	esp->host->unique_id = instance;
 
 	esp_set_clock_params(esp);
 
@@ -2337,7 +2338,7 @@ int __devinit scsi_esp_register(struct esp *esp, struct device *dev)
 	if (err)
 		return err;
 
-	esp->host->unique_id = instance++;
+	instance++;
 
 	scsi_scan_host(esp->host);
 

+ 22 - 6
drivers/scsi/scsi_transport_spi.c

@@ -787,10 +787,12 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
 	struct scsi_target *starget = sdev->sdev_target;
 	struct Scsi_Host *shost = sdev->host;
 	int len = sdev->inquiry_len;
+	int min_period = spi_min_period(starget);
+	int max_width = spi_max_width(starget);
 	/* first set us up for narrow async */
 	/* first set us up for narrow async */
 	DV_SET(offset, 0);
 	DV_SET(width, 0);
+
 	if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS)
 	if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS)
 	    != SPI_COMPARE_SUCCESS) {
 		starget_printk(KERN_ERR, starget, "Domain Validation Initial Inquiry Failed\n");
 		return;
 		return;
 	}
 
+		spi_max_width(starget) = 0;
+		max_width = 0;
+	}
+
 	/* test width */
 	/* test width */
-	    scsi_device_wide(sdev)) {
+	if (i->f->set_width && max_width) {
 		i->f->set_width(starget, 1);
 		i->f->set_width(starget, 1);
 
 		if (spi_dv_device_compare_inquiry(sdev, buffer,
 		    != SPI_COMPARE_SUCCESS) {
 		    != SPI_COMPARE_SUCCESS) {
 			starget_printk(KERN_ERR, starget, "Wide Transfers Fail\n");
 			i->f->set_width(starget, 0);
+			 * for a transfer period that requires it */
+			max_width = 0;
+			if (min_period < 10)
+				min_period = 10;
 		}
 		}
 	}
 
 
 
 	/* now set up to the maximum */
 	DV_SET(offset, spi_max_offset(starget));
+	DV_SET(period, min_period);
+
 	/* try QAS requests; this should be harmless to set if the
 	 * target supports it */
 	if (scsi_device_qas(sdev)) {
@@ -837,14 +849,14 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
 		DV_SET(qas, 0);
 	}
 
-	if (scsi_device_ius(sdev) && spi_min_period(starget) < 9) {
+	if (scsi_device_ius(sdev) && min_period < 9) {
 		/* This u320 (or u640). Set IU transfers */
 		DV_SET(iu, 1);
 		/* Then set the optional parameters */
 		DV_SET(rd_strm, 1);
 		DV_SET(wr_flow, 1);
 		DV_SET(rti, 1);
-		if (spi_min_period(starget) == 8)
+		if (min_period == 8)
 			DV_SET(pcomp_en, 1);
 	} else {
 		DV_SET(iu, 0);
@@ -862,6 +874,10 @@ spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer)
 	} else {
 		DV_SET(dt, 1);
 	}
+	/* set width last because it will pull all the other
+	 * parameters down to required values */
+	DV_SET(width, max_width);
+
 	/* Do the read only INQUIRY tests */
 	spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len,
 		       spi_dv_device_compare_inquiry);

+ 1 - 1
drivers/serial/cpm_uart/cpm_uart_cpm1.h

@@ -37,6 +37,6 @@ static inline void cpm_set_smc_fcr(volatile smc_uart_t * up)
 	up->smc_tfcr = SMC_EB;
 }
 
-#define DPRAM_BASE	((unsigned char *)&cpmp->cp_dpmem[0])
+#define DPRAM_BASE	((unsigned char *)cpm_dpram_addr(0))
 
 #endif

+ 1 - 1
drivers/serial/sunsab.c

@@ -38,7 +38,7 @@
 #include <asm/prom.h>
 #include <asm/of_device.h>
 
-#if defined(CONFIG_SERIAL_SUNZILOG_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#if defined(CONFIG_SERIAL_SUNSAB_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
 #define SUPPORT_SYSRQ
 #endif
 

+ 1 - 0
drivers/w1/w1.c

@@ -431,6 +431,7 @@ static int w1_uevent(struct device *dev, char **envp, int num_envp,
 	err = add_uevent_var(envp, num_envp, &cur_index, buffer, buffer_size,
 			&cur_len, "W1_SLAVE_ID=%024LX",
 			(unsigned long long)sl->reg_num.id);
+	envp[cur_index] = NULL;
 	if (err)
 		return err;
 

Some files were not shown because too many files changed in this diff