
Merge branch 'upstream'

Jeff Garzik, 19 years ago
parent
commit
c4052da6f0
100 files changed, with 4064 additions and 1460 deletions
  1. 1072 0
      Documentation/DocBook/libata.tmpl
  2. 1 1
      arch/arm/kernel/sys_arm.c
  3. 1 1
      arch/arm/kernel/traps.c
  4. 12 11
      arch/arm/mach-imx/generic.c
  5. 1 1
      arch/arm/mach-imx/mx1ads.c
  6. 4 4
      arch/arm/mm/Kconfig
  7. 5 0
      arch/ia64/kernel/mca.c
  8. 1 1
      arch/ppc/platforms/pmac_time.c
  9. 0 56
      arch/sparc/Kconfig
  10. 1 1
      arch/sparc/kernel/time.c
  11. 1 1
      arch/sparc/mm/srmmu.c
  12. 8 35
      arch/sparc64/kernel/entry.S
  13. 4 47
      arch/sparc64/kernel/etrap.S
  14. 5 28
      arch/sparc64/kernel/head.S
  15. 2 21
      arch/sparc64/kernel/rtrap.S
  16. 2 6
      arch/sparc64/kernel/setup.c
  17. 4 11
      arch/sparc64/kernel/trampoline.S
  18. 2 31
      arch/sparc64/kernel/winfixup.S
  19. 76 106
      arch/sparc64/mm/init.c
  20. 1 11
      arch/um/include/registers.h
  21. 0 4
      arch/um/include/sysdep-x86_64/ptrace.h
  22. 1 7
      arch/um/kernel/sysrq.c
  23. 9 10
      arch/um/os-Linux/sys-i386/registers.c
  24. 9 10
      arch/um/os-Linux/sys-x86_64/registers.c
  25. 1 12
      arch/um/sys-i386/sysrq.c
  26. 1 1
      arch/um/sys-i386/user-offsets.c
  27. 20 20
      arch/x86_64/kernel/head.S
  28. 4 6
      drivers/atm/fore200e.c
  29. 1 1
      drivers/char/drm/drm_stub.c
  30. 2 1
      drivers/connector/connector.c
  31. 21 24
      drivers/infiniband/hw/mthca/mthca_main.c
  32. 2 2
      drivers/infiniband/ulp/ipoib/ipoib_main.c
  33. 0 2
      drivers/mfd/ucb1x00-core.c
  34. 0 2
      drivers/mfd/ucb1x00.h
  35. 4 4
      drivers/net/Kconfig
  36. 72 213
      drivers/net/bonding/bond_main.c
  37. 2 2
      drivers/net/bonding/bonding.h
  38. 31 0
      drivers/net/ibm_emac/ibm_emac_core.c
  39. 1 1
      drivers/net/ns83820.c
  40. 1 1
      drivers/net/pcmcia/smc91c92_cs.c
  41. 16 8
      drivers/net/skge.c
  42. 24 22
      drivers/net/starfire.c
  43. 2 1
      drivers/net/sungem.h
  44. 0 5
      drivers/net/tokenring/ibmtr.c
  45. 1 1
      drivers/net/tulip/21142.c
  46. 9 5
      drivers/net/wireless/orinoco.c
  47. 2 0
      drivers/s390/net/qeth.h
  48. 17 20
      drivers/s390/net/qeth_main.c
  49. 8 0
      drivers/scsi/Kconfig
  50. 1 0
      drivers/scsi/Makefile
  51. 25 6
      drivers/scsi/ahci.c
  52. 259 174
      drivers/scsi/libata-core.c
  53. 502 187
      drivers/scsi/libata-scsi.c
  54. 6 10
      drivers/scsi/libata.h
  55. 1 0
      drivers/scsi/megaraid/megaraid_sas.c
  56. 687 106
      drivers/scsi/sata_mv.c
  57. 8 8
      drivers/scsi/sata_nv.c
  58. 3 3
      drivers/scsi/sata_promise.c
  59. 875 0
      drivers/scsi/sata_sil24.c
  60. 1 1
      drivers/scsi/sata_sis.c
  61. 1 1
      drivers/scsi/sata_uli.c
  62. 1 1
      drivers/scsi/sata_via.c
  63. 0 4
      drivers/serial/sunsu.c
  64. 1 1
      fs/bfs/dir.c
  65. 31 13
      fs/bfs/inode.c
  66. 3 3
      fs/namei.c
  67. 3 0
      fs/ntfs/ChangeLog
  68. 3 2
      fs/ntfs/bitmap.c
  69. 1 1
      fs/ntfs/layout.h
  70. 2 1
      fs/ntfs/mft.c
  71. 1 1
      fs/ntfs/unistr.c
  72. 5 3
      include/asm-arm/arch-h720x/system.h
  73. 34 12
      include/asm-arm/arch-imx/imx-regs.h
  74. 1 1
      include/asm-arm/arch-ixp4xx/platform.h
  75. 12 12
      include/asm-sparc/btfixup.h
  76. 9 9
      include/asm-sparc/cache.h
  77. 4 4
      include/asm-sparc/cypress.h
  78. 1 1
      include/asm-sparc/delay.h
  79. 1 1
      include/asm-sparc/dma.h
  80. 2 2
      include/asm-sparc/iommu.h
  81. 1 1
      include/asm-sparc/kdebug.h
  82. 2 2
      include/asm-sparc/mbus.h
  83. 1 1
      include/asm-sparc/msi.h
  84. 4 4
      include/asm-sparc/mxcc.h
  85. 15 15
      include/asm-sparc/obio.h
  86. 3 3
      include/asm-sparc/pci.h
  87. 22 22
      include/asm-sparc/pgtable.h
  88. 15 15
      include/asm-sparc/pgtsrmmu.h
  89. 1 1
      include/asm-sparc/processor.h
  90. 3 3
      include/asm-sparc/psr.h
  91. 5 5
      include/asm-sparc/sbi.h
  92. 3 3
      include/asm-sparc/sbus.h
  93. 13 13
      include/asm-sparc/smp.h
  94. 4 4
      include/asm-sparc/smpprim.h
  95. 5 5
      include/asm-sparc/spinlock.h
  96. 1 1
      include/asm-sparc/system.h
  97. 1 1
      include/asm-sparc/traps.h
  98. 10 13
      include/asm-um/processor-generic.h
  99. 4 11
      include/asm-um/processor-i386.h
  100. 3 11
      include/asm-um/processor-x86_64.h

+ 1072 - 0
Documentation/DocBook/libata.tmpl

@@ -415,6 +415,362 @@ and other resources, etc.
      </sect1>
   </chapter>
 
+  <chapter id="libataEH">
+        <title>Error handling</title>
+
+	<para>
+	This chapter describes how errors are handled under libata.
+	Readers are advised to read SCSI EH
+	(Documentation/scsi/scsi_eh.txt) and ATA exceptions doc first.
+	</para>
+
+	<sect1><title>Origins of commands</title>
+	<para>
+	In libata, a command is represented with struct ata_queued_cmd
+	or qc.  qc's are preallocated during port initialization and
+	repetitively used for command executions.  Currently only one
+	qc is allocated per port but yet-to-be-merged NCQ branch
+	allocates one for each tag and maps each qc to NCQ tag 1-to-1.
+	</para>
+	<para>
+	libata commands can originate from two sources - libata itself
+	and SCSI midlayer.  libata internal commands are used for
+	initialization and error handling.  All normal blk requests
+	and commands for SCSI emulation are passed as SCSI commands
+	through queuecommand callback of SCSI host template.
+	</para>
+	</sect1>
+
+	<sect1><title>How commands are issued</title>
+
+	<variablelist>
+
+	<varlistentry><term>Internal commands</term>
+	<listitem>
+	<para>
+	First, qc is allocated and initialized using
+	ata_qc_new_init().  Although ata_qc_new_init() doesn't
+	implement any wait or retry mechanism when qc is not
+	available, internal commands are currently issued only during
+	initialization and error recovery, so no other command is
+	active and allocation is guaranteed to succeed.
+	</para>
+	<para>
+	Once allocated qc's taskfile is initialized for the command to
+	be executed.  qc currently has two mechanisms to notify
+	completion.  One is via qc->complete_fn() callback and the
+	other is completion qc->waiting.  qc->complete_fn() callback
+	is the asynchronous path used by normal SCSI translated
+	commands and qc->waiting is the synchronous (issuer sleeps in
+	process context) path used by internal commands.
+	</para>
+	<para>
+	Once initialization is complete, host_set lock is acquired
+	and the qc is issued.
+	</para>
+	</listitem>
+	</varlistentry>
+
+	<varlistentry><term>SCSI commands</term>
+	<listitem>
+	<para>
+	All libata drivers use ata_scsi_queuecmd() as
+	hostt->queuecommand callback.  scmds can either be simulated
+	or translated.  No qc is involved in processing a simulated
+	scmd.  The result is computed right away and the scmd is
+	completed.
+	</para>
+	<para>
+	For a translated scmd, ata_qc_new_init() is invoked to
+	allocate a qc and the scmd is translated into the qc.  SCSI
+	midlayer's completion notification function pointer is stored
+	into qc->scsidone.
+	</para>
+	<para>
+	qc->complete_fn() callback is used for completion
+	notification.  ATA commands use ata_scsi_qc_complete() while
+	ATAPI commands use atapi_qc_complete().  Both functions end up
+	calling qc->scsidone to notify upper layer when the qc is
+	finished.  After translation is completed, the qc is issued
+	with ata_qc_issue().
+	</para>
+	<para>
+	Note that SCSI midlayer invokes hostt->queuecommand while
+	holding host_set lock, so all above occur while holding
+	host_set lock.
+	</para>
+	</listitem>
+	</varlistentry>
+
+	</variablelist>
+	</sect1>
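The flow above can be condensed into a short sketch.  This is illustrative only: ata_qc_new_init(), ata_qc_issue(), qc->waiting and the host_set lock are the interfaces named in this chapter, while the surrounding glue (function name, field layout, lack of error handling) is assumed rather than taken from the libata source of this period.

/* Sketch: synchronous internal command, following the description above. */
static void issue_internal_command(struct ata_port *ap, struct ata_device *dev,
				   struct ata_taskfile *tf)
{
	DECLARE_COMPLETION(wait);		/* issuer sleeps in process context */
	struct ata_queued_cmd *qc;
	unsigned long flags;

	/* Only init and EH issue internal commands, so allocation won't fail. */
	qc = ata_qc_new_init(ap, dev);
	qc->tf = *tf;				/* taskfile of the command to run */
	qc->waiting = &wait;			/* synchronous completion path */

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ata_qc_issue(qc);			/* issued while holding host_set lock */
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	wait_for_completion(&wait);		/* completed via __ata_qc_complete() */
}

A translated SCSI command takes the asynchronous path instead: the translation fills the qc, stores the midlayer's done function in qc->scsidone, lets qc->complete_fn() point at ata_scsi_qc_complete() or atapi_qc_complete(), and issues the qc while the host_set lock is already held by the midlayer.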
+
+	<sect1><title>How commands are processed</title>
+	<para>
+	Depending on which protocol and which controller are used,
+	commands are processed differently.  For the purpose of
+	discussion, a controller which uses taskfile interface and all
+	standard callbacks is assumed.
+	</para>
+	<para>
+	Currently 6 ATA command protocols are used.  They can be
+	sorted into the following four categories according to how
+	they are processed (a dispatch sketch in C follows the list).
+	</para>
+
+	<variablelist>
+	   <varlistentry><term>ATA NO DATA or DMA</term>
+	   <listitem>
+	   <para>
+	   ATA_PROT_NODATA and ATA_PROT_DMA fall into this category.
+	   These types of commands don't require any software
+	   intervention once issued.  Device will raise interrupt on
+	   completion.
+	   </para>
+	   </listitem>
+	   </varlistentry>
+
+	   <varlistentry><term>ATA PIO</term>
+	   <listitem>
+	   <para>
+	   ATA_PROT_PIO is in this category.  libata currently
+	   implements PIO with polling.  ATA_NIEN bit is set to turn
+	   off interrupt and pio_task on ata_wq performs polling and
+	   IO.
+	   </para>
+	   </listitem>
+	   </varlistentry>
+
+	   <varlistentry><term>ATAPI NODATA or DMA</term>
+	   <listitem>
+	   <para>
+	   ATA_PROT_ATAPI_NODATA and ATA_PROT_ATAPI_DMA are in this
+	   category.  packet_task is used to poll BSY bit after
+	   issuing PACKET command.  Once BSY is turned off by the
+	   device, packet_task transfers CDB and hands off processing
+	   to interrupt handler.
+	   </para>
+	   </listitem>
+	   </varlistentry>
+
+	   <varlistentry><term>ATAPI PIO</term>
+	   <listitem>
+	   <para>
+	   ATA_PROT_ATAPI is in this category.  ATA_NIEN bit is set
+	   and, as in ATAPI NODATA or DMA, packet_task submits cdb.
+	   However, after submitting cdb, further processing (data
+	   transfer) is handed off to pio_task.
+	   </para>
+	   </listitem>
+	   </varlistentry>
+	</variablelist>
+        </sect1>
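As a rough illustration of the four categories, the dispatch below keys off the protocol constants named above; the queue_work() calls and the pio_task/packet_task fields are assumptions standing in for however the polling tasks are actually scheduled, not the real issue path.

/* Illustrative dispatch over the six command protocols. */
static void dispatch_by_protocol(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
	case ATA_PROT_DMA:
		/* no software intervention; the device interrupts on completion */
		break;
	case ATA_PROT_PIO:
		/* ATA_NIEN set; pio_task on ata_wq polls and performs the IO */
		queue_work(ata_wq, &ap->pio_task);
		break;
	case ATA_PROT_ATAPI_NODATA:
	case ATA_PROT_ATAPI_DMA:
		/* packet_task polls BSY, sends the CDB, IRQ handler finishes */
		queue_work(ata_wq, &ap->packet_task);
		break;
	case ATA_PROT_ATAPI:
		/* packet_task sends the CDB, then pio_task does the data transfer */
		queue_work(ata_wq, &ap->packet_task);
		break;
	}
}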
+
+	<sect1><title>How commands are completed</title>
+	<para>
+	Once issued, all qc's are either completed with
+	ata_qc_complete() or time out.  For commands which are handled
+	by interrupts, ata_host_intr() invokes ata_qc_complete(), and,
+	for PIO tasks, pio_task invokes ata_qc_complete().  In error
+	cases, packet_task may also complete commands.
+	</para>
+	<para>
+	ata_qc_complete() does the following.
+	</para>
+
+	<orderedlist>
+
+	<listitem>
+	<para>
+	DMA memory is unmapped.
+	</para>
+	</listitem>
+
+	<listitem>
+	<para>
+	ATA_QCFLAG_ACTIVE is cleared from qc->flags.
+	</para>
+	</listitem>
+
+	<listitem>
+	<para>
+	qc->complete_fn() callback is invoked.  If the return value of
+	the callback is not zero, completion is short-circuited and
+	ata_qc_complete() returns.
+	</para>
+	</listitem>
+
+	<listitem>
+	<para>
+	__ata_qc_complete() is called, which does
+	   <orderedlist>
+
+	   <listitem>
+	   <para>
+	   qc->flags is cleared to zero.
+	   </para>
+	   </listitem>
+
+	   <listitem>
+	   <para>
+	   ap->active_tag and qc->tag are poisoned.
+	   </para>
+	   </listitem>
+
+	   <listitem>
+	   <para>
+	   qc->waiting is cleared &amp; completed (in that order).
+	   </para>
+	   </listitem>
+
+	   <listitem>
+	   <para>
+	   qc is deallocated by clearing appropriate bit in ap->qactive.
+	   </para>
+	   </listitem>
+
+	   </orderedlist>
+	</para>
+	</listitem>
+
+	</orderedlist>
+
+	<para>
+	So, it basically notifies upper layer and deallocates qc.  One
+	exception is short-circuit path in #3 which is used by
+	atapi_qc_complete().
+	</para>
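In code form, the sequence reads roughly as follows; the signature and helper names mirror the description above, but the body is a simplification, not the actual function.

/* Simplified shape of ata_qc_complete() per the steps listed above. */
void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
{
	int rc;

	ata_sg_clean(qc);			/* 1. unmap DMA memory */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;	/* 2. clear the active flag */

	rc = qc->complete_fn(qc, drv_stat);	/* 3. notify; non-zero short-circuits */
	if (rc)
		return;				/*    e.g. atapi_qc_complete() on error */

	__ata_qc_complete(qc);			/* 4. clear flags, poison tags, complete
						 *    qc->waiting, free the qc slot */
}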
+	<para>
+	For all non-ATAPI commands, whether it fails or not, almost
+	the same code path is taken and very little error handling
+	takes place.  A qc is completed with success status if it
+	succeeded, with failed status otherwise.
+	</para>
+	<para>
+	However, failed ATAPI commands require more handling as
+	REQUEST SENSE is needed to acquire sense data.  If an ATAPI
+	command fails, ata_qc_complete() is invoked with error status,
+	which in turn invokes atapi_qc_complete() via
+	qc->complete_fn() callback.
+	</para>
+	<para>
+	This makes atapi_qc_complete() set scmd->result to
+	SAM_STAT_CHECK_CONDITION, complete the scmd and return 1.  As
+	the sense data is empty but scmd->result is CHECK CONDITION,
+	the SCSI midlayer will invoke EH for the scmd, and returning 1
+	makes ata_qc_complete() return without deallocating the qc.
+	This leads us to ata_scsi_error() with a partially completed qc.
+	</para>
+
+	</sect1>
+
+	<sect1><title>ata_scsi_error()</title>
+	<para>
+	ata_scsi_error() is the current hostt->eh_strategy_handler()
+	for libata.  As discussed above, this will be entered in two
+	cases - timeout and ATAPI error completion.  This function
+	calls low level libata driver's eng_timeout() callback, the
+	standard callback for which is ata_eng_timeout().  It checks
+	if a qc is active and calls ata_qc_timeout() on the qc if so.
+	Actual error handling occurs in ata_qc_timeout().
+	</para>
+	<para>
+	If EH is invoked for timeout, ata_qc_timeout() stops BMDMA and
+	completes the qc.  Note that as we're currently in EH, we
+	cannot call scsi_done.  As described in SCSI EH doc, a
+	recovered scmd should be either retried with
+	scsi_queue_insert() or finished with scsi_finish_command().
+	Here, we override qc->scsidone with scsi_finish_command() and
+	call ata_qc_complete().
+	</para>
+	<para>
+	If EH is invoked due to a failed ATAPI qc, the qc here is
+	completed but not deallocated.  The purpose of this
+	half-completion is to use the qc as place holder to make EH
+	code reach this place.  This is a bit hackish, but it works.
+	</para>
+	<para>
+	Once control reaches here, the qc is deallocated by invoking
+	__ata_qc_complete() explicitly.  Then, internal qc for REQUEST
+	SENSE is issued.  Once sense data is acquired, scmd is
+	finished by directly invoking scsi_finish_command() on the
+	scmd.  Note that as we already have completed and deallocated
+	the qc which was associated with the scmd, we don't need
+	to/cannot call ata_qc_complete() again.
+	</para>
+
+	</sect1>
+
+	<sect1><title>Problems with the current EH</title>
+
+	<itemizedlist>
+
+	<listitem>
+	<para>
+	Error representation is too crude.  Currently any and all
+	error conditions are represented with ATA STATUS and ERROR
+	registers.  Errors which aren't ATA device errors are treated
+	as ATA device errors by setting ATA_ERR bit.  Better error
+	descriptor which can properly represent ATA and other
+	errors/exceptions is needed.
+	</para>
+	</listitem>
+
+	<listitem>
+	<para>
+	When handling timeouts, no action is taken to make the device
+	forget about the timed out command and become ready for new commands.
+	</para>
+	</listitem>
+
+	<listitem>
+	<para>
+	EH handling via ata_scsi_error() is not properly protected
+	from usual command processing.  On EH entrance, the device is
+	not in quiescent state.  Timed out commands may succeed or
+	fail any time.  pio_task and atapi_task may still be running.
+	</para>
+	</listitem>
+
+	<listitem>
+	<para>
+	Too weak error recovery.  Devices / controllers causing HSM
+	mismatch errors and other errors quite often require reset to
+	return to known state.  Also, advanced error handling is
+	necessary to support features like NCQ and hotplug.
+	</para>
+	</listitem>
+
+	<listitem>
+	<para>
+	ATA errors are directly handled in the interrupt handler and
+	PIO errors in pio_task.  This is problematic for advanced
+	error handling for the following reasons.
+	</para>
+	<para>
+	First, advanced error handling often requires context and
+	internal qc execution.
+	</para>
+	<para>
+	Second, even a simple failure (say, CRC error) needs
+	information gathering and could trigger complex error handling
+	(say, resetting &amp; reconfiguring).  Having multiple code
+	paths to gather information, enter EH and trigger actions
+	makes life painful.
+	</para>
+	<para>
+	Third, scattered EH code makes implementing low level drivers
+	difficult.  Low level drivers override libata callbacks.  If
+	EH is scattered over several places, each affected callback
+	should perform its part of error handling.  This can be error
+	prone and painful.
+	</para>
+	</listitem>
+
+	</itemizedlist>
+	</sect1>
+  </chapter>

   <chapter id="libataExt">
      <title>libata Library</title>
 !Edrivers/scsi/libata-core.c
@@ -431,6 +787,722 @@ and other resources, etc.
 !Idrivers/scsi/libata-scsi.c
   </chapter>
 
+  <chapter id="ataExceptions">
+     <title>ATA errors &amp; exceptions</title>
+
+  <para>
+  This chapter tries to identify what error/exception conditions exist
+  for ATA/ATAPI devices and describe how they should be handled in
+  implementation-neutral way.
+  </para>
+
+  <para>
+  The term 'error' is used to describe conditions where either an
+  explicit error condition is reported from device or a command has
+  timed out.
+  </para>
+
+  <para>
+  The term 'exception' is either used to describe exceptional
+  conditions which are not errors (say, power or hotplug events), or
+  to describe both errors and non-error exceptional conditions.  Where
+  explicit distinction between error and exception is necessary, the
+  term 'non-error exception' is used.
+  </para>
+
+  <sect1 id="excat">
+     <title>Exception categories</title>
+     <para>
+     Exceptions are described primarily with respect to legacy
+     taskfile + bus master IDE interface.  If a controller provides
+     some other, better mechanism for error reporting, mapping its
+     reports onto the categories described below shouldn't be difficult.
+     </para>
+
+     <para>
+     In the following sections, two recovery actions - reset and
+     reconfiguring transport - are mentioned.  These are described
+     further in <xref linkend="exrec"/>.
+     </para>
+
+     <sect2 id="excatHSMviolation">
+        <title>HSM violation</title>
+        <para>
+        This error is indicated when the STATUS value doesn't match the
+        HSM requirement while issuing or executing any ATA/ATAPI command.
+        </para>
+
+	<itemizedlist>
+	<title>Examples</title>
+
+        <listitem>
+	<para>
+	ATA_STATUS doesn't contain !BSY &amp;&amp; DRDY &amp;&amp; !DRQ while trying
+	to issue a command.
+        </para>
+	</listitem>
+
+        <listitem>
+	<para>
+	!BSY &amp;&amp; !DRQ during PIO data transfer.
+        </para>
+	</listitem>
+
+        <listitem>
+	<para>
+	DRQ on command completion.
+        </para>
+	</listitem>
+
+        <listitem>
+	<para>
+	!BSY &amp;&amp; ERR after CDB transfer starts but before the
+        last byte of CDB is transferred.  ATA/ATAPI standard states
+        that &quot;The device shall not terminate the PACKET command
+        with an error before the last byte of the command packet has
+        been written&quot; in the error outputs description of PACKET
+        command and the state diagram doesn't include such
+        transitions.
+	</para>
+	</listitem>
+
+	</itemizedlist>
+
+	<para>
+	In these cases, HSM is violated and not much information
+	regarding the error can be acquired from STATUS or ERROR
+	register.  IOW, this error can be anything - driver bug,
+	faulty device, controller and/or cable.
+	</para>
+
+	<para>
+	As HSM is violated, reset is necessary to restore known state.
+	Reconfiguring transport for lower speed might be helpful too
+	as transmission errors sometimes cause this kind of error.
+	</para>
+     </sect2>
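The examples above reduce to simple tests on the STATUS register.  A sketch using the generic ATA status bit definitions (ATA_BUSY, ATA_DRDY, ATA_DRQ) might look like this; the helper names are made up for illustration.

/* Illustrative HSM sanity checks on the STATUS register. */
static int hsm_ok_to_issue(u8 status)
{
	/* need !BSY && DRDY && !DRQ before issuing a new command */
	return (status & (ATA_BUSY | ATA_DRDY | ATA_DRQ)) == ATA_DRDY;
}

static int hsm_violation_on_completion(u8 status)
{
	/* DRQ still set when the command claims to be complete */
	return !(status & ATA_BUSY) && (status & ATA_DRQ);
}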
+     
+     <sect2 id="excatDevErr">
+        <title>ATA/ATAPI device error (non-NCQ / non-CHECK CONDITION)</title>
+
+	<para>
+	These are errors detected and reported by ATA/ATAPI devices
+	indicating device problems.  For this type of error, the STATUS
+	and ERROR register values are valid and describe the error
+	condition.  Note that some of ATA bus errors are detected by
+	ATA/ATAPI devices and reported using the same mechanism as
+	device errors.  Those cases are described later in this
+	section.
+	</para>
+
+	<para>
+	For ATA commands, this type of error is indicated by !BSY
+	&amp;&amp; ERR during command execution and on completion.
+	</para>
+
+	<para>For ATAPI commands,</para>
+
+	<itemizedlist>
+
+	<listitem>
+	<para>
+	!BSY &amp;&amp; ERR &amp;&amp; ABRT right after issuing PACKET
+	indicates that PACKET command is not supported and falls in
+	this category.
+	</para>
+	</listitem>
+
+	<listitem>
+	<para>
+	!BSY &amp;&amp; ERR(==CHK) &amp;&amp; !ABRT after the last
+	byte of CDB is transferred indicates CHECK CONDITION and
+	doesn't fall in this category.
+	</para>
+	</listitem>
+
+	<listitem>
+	<para>
+	!BSY &amp;&amp; ERR(==CHK) &amp;&amp; ABRT after the last byte
+        of CDB is transferred *probably* indicates CHECK CONDITION and
+        doesn't fall in this category.
+	</para>
+	</listitem>
+
+	</itemizedlist>
+
+	<para>
+	Of the errors detected as above, the following are not ATA/ATAPI
+	device errors but ATA bus errors and should be handled
+	according to <xref linkend="excatATAbusErr"/>.
+	</para>
+
+	<variablelist>
+
+	   <varlistentry>
+	   <term>CRC error during data transfer</term>
+	   <listitem>
+	   <para>
+	   This is indicated by ICRC bit in the ERROR register and
+	   means that corruption occurred during data transfer.  Up to
+	   ATA/ATAPI-7, the standard specifies that this bit is only
+	   applicable to UDMA transfers but ATA/ATAPI-8 draft revision
+	   1f says that the bit may be applicable to multiword DMA and
+	   PIO.
+	   </para>
+	   </listitem>
+	   </varlistentry>
+
+	   <varlistentry>
+	   <term>ABRT error during data transfer or on completion</term>
+	   <listitem>
+	   <para>
+	   Up to ATA/ATAPI-7, the standard specifies that ABRT could be
+	   set on ICRC errors and in cases where a device is not able
+	   to complete a command.  Combined with the fact that MWDMA
+	   and PIO transfer errors aren't allowed to use the ICRC bit up
+	   to ATA/ATAPI-7, this seems to imply that the ABRT bit alone
+	   could indicate transfer errors.
+	   </para>
+	   <para>
+	   However, ATA/ATAPI-8 draft revision 1f removes the part
+	   that ICRC errors can turn on ABRT.  So, this is kind of
+	   gray area.  Some heuristics are needed here.
+	   </para>
+	   </listitem>
+	   </varlistentry>
+
+	</variablelist>
+
+	<para>
+	ATA/ATAPI device errors can be further categorized as follows
+	(a classification sketch in C follows the list).
+	</para>
+
+	<variablelist>
+
+	   <varlistentry>
+	   <term>Media errors</term>
+	   <listitem>
+	   <para>
+	   This is indicated by the UNC bit in the ERROR register.  ATA
+	   devices report a UNC error only after a certain number of
+	   retries cannot recover the data, so there's not much
+	   else to do other than notifying the upper layer.
+	   </para>
+	   <para>
+	   READ and WRITE commands report CHS or LBA of the first
+	   failed sector but ATA/ATAPI standard specifies that the
+	   amount of transferred data on error completion is
+	   indeterminate, so we cannot assume that sectors preceding
+	   the failed sector have been transferred and thus cannot
+	   complete those sectors successfully as SCSI does.
+	   </para>
+	   </listitem>
+	   </varlistentry>
+
+	   <varlistentry>
+	   <term>Media changed / media change requested error</term>
+	   <listitem>
+	   <para>
+	   &lt;&lt;TODO: fill here&gt;&gt;
+	   </para>
+	   </listitem>
+	   </varlistentry>
+
+	   <varlistentry><term>Address error</term>
+	   <listitem>
+	   <para>
+	   This is indicated by IDNF bit in the ERROR register.
+	   Report to upper layer.
+	   </para>
+	   </listitem>
+	   </varlistentry>
+
+	   <varlistentry><term>Other errors</term>
+	   <listitem>
+	   <para>
+	   This can be invalid command or parameter indicated by ABRT
+	   ERROR bit or some other error condition.  Note that ABRT
+	   bit can indicate a lot of things including ICRC and Address
+	   errors.  Heuristics needed.
+	   </para>
+	   </listitem>
+	   </varlistentry>
+
+	</variablelist>
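A sketch of the classification above, keyed off the conventional ERROR register bit positions (ICRC 0x80, UNC 0x40, MC 0x20, IDNF 0x10, ABRT 0x04); the enum and function are illustrative, not part of libata.

/* Illustrative classification of a device error from the ERROR register. */
enum dev_err_class { ERR_ATA_BUS, ERR_MEDIA, ERR_MEDIA_CHANGED, ERR_ADDRESS, ERR_OTHER };

static enum dev_err_class classify_dev_error(u8 error)
{
	if (error & 0x80)	/* ICRC: corruption on the bus, treat as ATA bus error */
		return ERR_ATA_BUS;
	if (error & 0x40)	/* UNC: uncorrectable media error, report upward */
		return ERR_MEDIA;
	if (error & 0x20)	/* MC: media changed */
		return ERR_MEDIA_CHANGED;
	if (error & 0x10)	/* IDNF: address error, report upward */
		return ERR_ADDRESS;
	/* ABRT (0x04) alone is ambiguous: unsupported command, bad parameter
	 * or a transfer error -- heuristics are needed, as noted above. */
	return ERR_OTHER;
}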
+
+	<para>
+	Depending on commands, not all STATUS/ERROR bits are
+	applicable.  These non-applicable bits are marked with
+	&quot;na&quot; in the output descriptions but up to ATA/ATAPI-7
+	no definition of &quot;na&quot; can be found.  However,
+	ATA/ATAPI-8 draft revision 1f describes &quot;N/A&quot; as
+	follows.
+	</para>
+
+	<blockquote>
+	<variablelist>
+	   <varlistentry><term>3.2.3.3a N/A</term>
+	   <listitem>
+	   <para>
+	   A keyword that indicates a field has no defined value in
+	   this standard and should not be checked by the host or
+	   device. N/A fields should be cleared to zero.
+	   </para>
+	   </listitem>
+	   </varlistentry>
+	</variablelist>
+	</blockquote>
+
+	<para>
+	So, it seems reasonable to assume that &quot;na&quot; bits are
+	cleared to zero by devices and thus need no explicit masking.
+	</para>
+
+     </sect2>
+
+     <sect2 id="excatATAPIcc">
+        <title>ATAPI device CHECK CONDITION</title>
+
+	<para>
+	ATAPI device CHECK CONDITION error is indicated by set CHK bit
+	(ERR bit) in the STATUS register after the last byte of CDB is
+	transferred for a PACKET command.  For this kind of error,
+	sense data should be acquired to gather information regarding
+	the errors.  REQUEST SENSE packet command should be used to
+	acquire sense data.
+	</para>
+
+	<para>
+	Once sense data is acquired, this type of error can be
+	handled similarly to other SCSI errors.  Note that sense data
+	may indicate ATA bus error (e.g. Sense Key 04h HARDWARE ERROR
+	&amp;&amp; ASC/ASCQ 47h/00h SCSI PARITY ERROR).  In such
+	cases, the error should be considered as an ATA bus error and
+	handled according to <xref linkend="excatATAbusErr"/>.
+	</para>
+
+     </sect2>
+
+     <sect2 id="excatNCQerr">
+        <title>ATA device error (NCQ)</title>
+
+	<para>
+	NCQ command error is indicated by cleared BSY and set ERR bit
+	during NCQ command phase (one or more NCQ commands
+	outstanding).  Although STATUS and ERROR registers will
+	contain valid values describing the error, READ LOG EXT is
+	required to clear the error condition, determine which command
+	has failed and acquire more information.
+	</para>
+
+	<para>
+	READ LOG EXT Log Page 10h reports which tag has failed and
+	taskfile register values describing the error.  With this
+	information the failed command can be handled as a normal ATA
+	command error as in <xref linkend="excatDevErr"/> and all
+	other in-flight commands must be retried.  Note that this
+	retry should not be counted - it's likely that commands
+	retried this way would have completed normally if it were not
+	for the failed command.
+	</para>
+
+	<para>
+	Note that ATA bus errors can be reported as ATA device NCQ
+	errors.  This should be handled as described in <xref
+	linkend="excatATAbusErr"/>.
+	</para>
+
+	<para>
+	If READ LOG EXT Log Page 10h fails or reports NQ, we're
+	thoroughly screwed.  This condition should be treated
+	according to <xref linkend="excatHSMviolation"/>.
+	</para>
+
+     </sect2>
+
+     <sect2 id="excatATAbusErr">
+        <title>ATA bus error</title>
+
+	<para>
+	ATA bus error means that data corruption occurred during
+	transmission over the ATA bus (SATA or PATA).  This type of
+	error can be indicated by
+	</para>
+
+	<itemizedlist>
+
+	<listitem>
+	<para>
+	ICRC or ABRT error as described in <xref linkend="excatDevErr"/>.
+	</para>
+	</listitem>
+
+	<listitem>
+	<para>
+	Controller-specific error completion with error information
+	indicating transmission error.
+	</para>
+	</listitem>
+
+	<listitem>
+	<para>
+	On some controllers, command timeout.  In this case, there may
+	be a mechanism to determine that the timeout is due to
+	transmission error.
+	</para>
+	</listitem>
+
+	<listitem>
+	<para>
+	Unknown/random errors, timeouts and all sorts of weirdities.
+	</para>
+	</listitem>
+
+	</itemizedlist>
+
+	<para>
+	As described above, transmission errors can cause wide variety
+	of symptoms ranging from device ICRC error to random device
+	lockup, and, for many cases, there is no way to tell if an
+	error condition is due to transmission error or not;
+	therefore, it's necessary to employ some kind of heuristic
+	when dealing with errors and timeouts.  For example,
+	encountering repetitive ABRT errors for known supported
+	command is likely to indicate ATA bus error.
+	</para>
+
+	<para>
+	Once it's determined that ATA bus errors have possibly
+	occurred, lowering ATA bus transmission speed is one of
+	actions which may alleviate the problem.  See <xref
+	linkend="exrecReconf"/> for more information.
+	</para>
+
+     </sect2>
+
+     <sect2 id="excatPCIbusErr">
+        <title>PCI bus error</title>
+
+	<para>
+	Data corruption or other failures during transmission over PCI
+	(or other system bus).  For standard BMDMA, this is indicated
+	by Error bit in the BMDMA Status register.  This type of
+	errors must be logged as it indicates something is very wrong
+	with the system.  Resetting host controller is recommended.
+	</para>
+
+     </sect2>
+
+     <sect2 id="excatLateCompletion">
+        <title>Late completion</title>
+
+	<para>
+	This occurs when timeout occurs and the timeout handler finds
+	out that the timed out command has completed successfully or
+	with error.  This is usually caused by lost interrupts.  This
+	type of errors must be logged.  Resetting host controller is
+	recommended.
+	</para>
+
+     </sect2>
+
+     <sect2 id="excatUnknown">
+        <title>Unknown error (timeout)</title>
+
+	<para>
+	This is when timeout occurs and the command is still
+	processing or the host and device are in unknown state.  When
+	this occurs, HSM could be in any valid or invalid state.  To
+	bring the device to known state and make it forget about the
+	timed out command, resetting is necessary.  The timed out
+	command may be retried.
+	</para>
+
+	<para>
+	Timeouts can also be caused by transmission errors.  Refer to
+	<xref linkend="excatATAbusErr"/> for more details.
+	</para>
+
+     </sect2>
+
+     <sect2 id="excatHoplugPM">
+        <title>Hotplug and power management exceptions</title>
+
+	<para>
+	&lt;&lt;TODO: fill here&gt;&gt;
+	</para>
+
+     </sect2>
+
+  </sect1>
+
+  <sect1 id="exrec">
+     <title>EH recovery actions</title>
+
+     <para>
+     This section discusses several important recovery actions.
+     </para>
+
+     <sect2 id="exrecClr">
+        <title>Clearing error condition</title>
+
+	<para>
+	Many controllers require their error registers to be cleared by
+	the error handler.  Different controllers may have different
+	requirements.
+	</para>
+
+	<para>
+	For SATA, it's strongly recommended to clear at least SError
+	register during error handling.
+	</para>
+     </sect2>
+
+     <sect2 id="exrecRst">
+        <title>Reset</title>
+
+	<para>
+	During EH, resetting is necessary in the following cases.
+	</para>
+
+	<itemizedlist>
+
+	<listitem>
+	<para>
+	HSM is in unknown or invalid state
+	</para>
+	</listitem>
+
+	<listitem>
+	<para>
+	HBA is in unknown or invalid state
+	</para>
+	</listitem>
+
+	<listitem>
+	<para>
+	EH needs to make HBA/device forget about in-flight commands
+	</para>
+	</listitem>
+
+	<listitem>
+	<para>
+	HBA/device behaves weirdly
+	</para>
+	</listitem>
+
+	</itemizedlist>
+
+	<para>
+	Resetting during EH might be a good idea regardless of error
+	condition to improve EH robustness.  Whether to reset both or
+	either one of HBA and device depends on situation but the
+	following scheme is recommended.
+	</para>
+
+	<itemizedlist>
+
+	<listitem>
+	<para>
+	When it's known that the HBA is in a ready state but the
+	ATA/ATAPI device is in an unknown state, reset only the device.
+	</para>
+	</listitem>
+
+	<listitem>
+	<para>
+	If HBA is in unknown state, reset both HBA and device.
+	</para>
+	</listitem>
+
+	</itemizedlist>
+
+	<para>
+	HBA resetting is implementation specific.  For a controller
+	complying to taskfile/BMDMA PCI IDE, stopping active DMA
+	transaction may be sufficient iff BMDMA state is the only HBA
+	context.  But even mostly taskfile/BMDMA PCI IDE complying
+	controllers may have implementation specific requirements and
+	mechanism to reset themselves.  This must be addressed by
+	specific drivers.
+	</para>
+
+	<para>
+	OTOH, ATA/ATAPI standard describes in detail ways to reset
+	ATA/ATAPI devices.
+	</para>
+
+	<variablelist>
+
+	   <varlistentry><term>PATA hardware reset</term>
+	   <listitem>
+	   <para>
+	   This is hardware initiated device reset signalled with
+	   asserted PATA RESET- signal.  There is no standard way to
+	   initiate hardware reset from software although some
+	   hardware provides registers that allow driver to directly
+	   tweak the RESET- signal.
+	   </para>
+	   </listitem>
+	   </varlistentry>
+
+	   <varlistentry><term>Software reset</term>
+	   <listitem>
+	   <para>
+	   This is achieved by turning CONTROL SRST bit on for at
+	   least 5us.  Both PATA and SATA support it but, in case of
+	   SATA, this may require controller-specific support as the
+	   second Register FIS to clear SRST should be transmitted
+	   while the BSY bit is still set.  Note that on PATA, this resets
+	   both master and slave devices on a channel (a register-level
+	   sketch follows this list).
+	   </para>
+	   </listitem>
+	   </varlistentry>
+
+	   <varlistentry><term>EXECUTE DEVICE DIAGNOSTIC command</term>
+	   <listitem>
+	   <para>
+	   Although ATA/ATAPI standard doesn't describe exactly, EDD
+	   implies some level of resetting, possibly similar level
+	   with software reset.  Host-side EDD protocol can be handled
+	   with normal command processing and most SATA controllers
+	   should be able to handle EDD's just like other commands.
+	   As in software reset, EDD affects both devices on a PATA
+	   bus.
+	   </para>
+	   <para>
+	   Although EDD does reset devices, this doesn't suit error
+	   handling as EDD cannot be issued while BSY is set and it's
+	   unclear how it will act when device is in unknown/weird
+	   state.
+	   </para>
+	   </listitem>
+	   </varlistentry>
+
+	   <varlistentry><term>ATAPI DEVICE RESET command</term>
+	   <listitem>
+	   <para>
+	   This is very similar to software reset except that reset
+	   can be restricted to the selected device without affecting
+	   the other device sharing the cable.
+	   </para>
+	   </listitem>
+	   </varlistentry>
+
+	   <varlistentry><term>SATA phy reset</term>
+	   <listitem>
+	   <para>
+	   This is the preferred way of resetting a SATA device.  In
+	   effect, it's identical to PATA hardware reset.  Note that
+	   this can be done with the standard SCR Control register.
+	   As such, it's usually easier to implement than software
+	   reset.
+	   </para>
+	   </listitem>
+	   </varlistentry>
+
+	</variablelist>
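For the legacy taskfile interface, the software reset mentioned above boils down to toggling SRST in the Device Control register.  The sketch below assumes the usual ATA_SRST/ATA_NIEN bit definitions and an ata_ioports-style structure holding the port addresses; the delays and the crude BSY polling are simplifications, not the exact libata sequence.

/* Sketch: SRST software reset on a legacy taskfile interface (illustrative). */
static void soft_reset_channel(struct ata_ioports *ioaddr)
{
	unsigned int tries = 1000;

	outb(ATA_SRST | ATA_NIEN, ioaddr->ctl_addr);	/* assert SRST for >= 5us */
	udelay(20);
	outb(ATA_NIEN, ioaddr->ctl_addr);		/* deassert; resets both devices on PATA */
	msleep(150);

	/* wait for BSY to clear before touching the devices again */
	while ((inb(ioaddr->status_addr) & ATA_BUSY) && --tries)
		msleep(10);
}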
+
+	<para>
+	One more thing to consider when resetting devices is that
+	resetting clears certain configuration parameters and they
+	need to be set to their previous or newly adjusted values
+	after reset.
+	</para>
+
+	<para>
+	The affected parameters are:
+	</para>
+
+	<itemizedlist>
+
+	<listitem>
+	<para>
+	CHS set up with INITIALIZE DEVICE PARAMETERS (seldom used)
+	</para>
+	</listitem>
+
+	<listitem>
+	<para>
+	Parameters set with SET FEATURES including transfer mode setting
+	</para>
+	</listitem>
+
+	<listitem>
+	<para>
+	Block count set with SET MULTIPLE MODE
+	</para>
+	</listitem>
+
+	<listitem>
+	<para>
+	Other parameters (SET MAX, MEDIA LOCK...)
+	</para>
+	</listitem>
+
+	</itemizedlist>
+
+	<para>
+	ATA/ATAPI standard specifies that some parameters must be
+	maintained across hardware or software reset, but doesn't
+	strictly specify all of them.  Always reconfiguring needed
+	parameters after reset is required for robustness.  Note that
+	this also applies when resuming from deep sleep (power-off).
+	</para>
+
+	<para>
+	Also, ATA/ATAPI standard requires that IDENTIFY DEVICE /
+	IDENTIFY PACKET DEVICE is issued after any configuration
+	parameter is updated or a hardware reset and the result used
+	for further operation.  OS driver is required to implement
+	revalidation mechanism to support this.
+	</para>
+
+     </sect2>
+
+     <sect2 id="exrecReconf">
+        <title>Reconfigure transport</title>
+
+	<para>
+	For both PATA and SATA, a lot of corners are cut for cheap
+	connectors, cables or controllers and it's quite common to see
+	high transmission error rate.  This can be mitigated by
+	lowering transmission speed.
+	</para>
+
+	<para>
+	The following is a possible scheme Jeff Garzik suggested.
+	</para>
+
+	<blockquote>
+	<para>
+	If more than $N (3?) transmission errors happen in 15 minutes,
+	</para>	
+	<itemizedlist>
+	<listitem>
+	<para>
+	if SATA, decrease SATA PHY speed.  if speed cannot be decreased,
+	</para>
+	</listitem>
+	<listitem>
+	<para>
+	decrease UDMA xfer speed.  if at UDMA0, switch to PIO4,
+	</para>
+	</listitem>
+	<listitem>
+	<para>
+	decrease PIO xfer speed.  if at PIO3, complain, but continue
+	</para>
+	</listitem>
+	</itemizedlist>
+	</blockquote>
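A sketch of the quoted scheme as a helper called whenever a transmission error is detected; the threshold, the per-port bookkeeping fields and the *_speed_down() helpers are all hypothetical.

/* Illustrative: step the link down after repeated transmission errors. */
#define XMIT_ERR_LIMIT	3			/* the "$N (3?)" above */
#define XMIT_ERR_WINDOW	(15 * 60 * HZ)		/* 15 minutes */

static void note_xmit_error(struct ata_port *ap)
{
	if (time_after(jiffies, ap->xmit_err_stamp + XMIT_ERR_WINDOW)) {
		ap->xmit_err_stamp = jiffies;		/* hypothetical per-port fields */
		ap->xmit_err_count = 0;
	}
	if (++ap->xmit_err_count <= XMIT_ERR_LIMIT)
		return;
	ap->xmit_err_count = 0;

	if (is_sata(ap) && sata_phy_speed_down(ap))	/* hypothetical helpers */
		return;					/* lowered the SATA PHY speed */
	if (udma_speed_down(ap))
		return;					/* lowered UDMA mode, or fell back to PIO4 */
	if (!pio_speed_down(ap))
		printk(KERN_WARNING "ata: already at PIO3, complaining but continuing\n");
}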
+
+     </sect2>
+
+  </sect1>
+
+  </chapter>
+
   <chapter id="PiixInt">
      <title>ata_piix Internals</title>
 !Idrivers/scsi/ata_piix.c

+ 1 - 1
arch/arm/kernel/sys_arm.c

@@ -305,7 +305,7 @@ long execve(const char *filename, char **argv, char **envp)
 		  "Ir" (THREAD_START_SP - sizeof(regs)),
 		  "r" (&regs),
 		  "Ir" (sizeof(regs))
-		: "r0", "r1", "r2", "r3", "ip", "memory");
+		: "r0", "r1", "r2", "r3", "ip", "lr", "memory");
 
  out:
 	return ret;

+ 1 - 1
arch/arm/kernel/traps.c

@@ -504,7 +504,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 
 		bad_access:
 		spin_unlock(&mm->page_table_lock);
-		/* simulate a read access fault */
+		/* simulate a write access fault */
 		do_DataAbort(addr, 15 + (1 << 11), regs);
 		return -1;
 	}

+ 12 - 11
arch/arm/mach-imx/generic.c

@@ -28,14 +28,15 @@
 #include <linux/module.h>
 #include <asm/arch/imxfb.h>
 #include <asm/hardware.h>
+#include <asm/arch/imx-regs.h>
 
 #include <asm/mach/map.h>
 
 void imx_gpio_mode(int gpio_mode)
 {
 	unsigned int pin = gpio_mode & GPIO_PIN_MASK;
-	unsigned int port = (gpio_mode & GPIO_PORT_MASK) >> 5;
-	unsigned int ocr = (gpio_mode & GPIO_OCR_MASK) >> 10;
+	unsigned int port = (gpio_mode & GPIO_PORT_MASK) >> GPIO_PORT_SHIFT;
+	unsigned int ocr = (gpio_mode & GPIO_OCR_MASK) >> GPIO_OCR_SHIFT;
 	unsigned int tmp;
 
 	/* Pullup enable */
@@ -57,7 +58,7 @@ void imx_gpio_mode(int gpio_mode)
 		GPR(port) &= ~(1<<pin);
 
 	/* use as gpio? */
-	if( ocr == 3 )
+	if(gpio_mode &  GPIO_GIUS)
 		GIUS(port) |= (1<<pin);
 	else
 		GIUS(port) &= ~(1<<pin);
@@ -72,20 +73,20 @@ void imx_gpio_mode(int gpio_mode)
 		tmp |= (ocr << (pin*2));
 		OCR1(port) = tmp;
 
-		if( gpio_mode &	GPIO_AOUT )
-			ICONFA1(port) &= ~( 3<<(pin*2));
-		if( gpio_mode &	GPIO_BOUT )
-			ICONFB1(port) &= ~( 3<<(pin*2));
+		ICONFA1(port) &= ~( 3<<(pin*2));
+		ICONFA1(port) |= ((gpio_mode >> GPIO_AOUT_SHIFT) & 3) << (pin * 2);
+		ICONFB1(port) &= ~( 3<<(pin*2));
+		ICONFB1(port) |= ((gpio_mode >> GPIO_BOUT_SHIFT) & 3) << (pin * 2);
 	} else {
 		tmp = OCR2(port);
 		tmp &= ~( 3<<((pin-16)*2));
 		tmp |= (ocr << ((pin-16)*2));
 		OCR2(port) = tmp;
 
-		if( gpio_mode &	GPIO_AOUT )
-			ICONFA2(port) &= ~( 3<<((pin-16)*2));
-		if( gpio_mode &	GPIO_BOUT )
-			ICONFB2(port) &= ~( 3<<((pin-16)*2));
+		ICONFA2(port) &= ~( 3<<((pin-16)*2));
+		ICONFA2(port) |= ((gpio_mode >> GPIO_AOUT_SHIFT) & 3) << ((pin-16) * 2);
+		ICONFB2(port) &= ~( 3<<((pin-16)*2));
+		ICONFB2(port) |= ((gpio_mode >> GPIO_BOUT_SHIFT) & 3) << ((pin-16) * 2);
 	}
 }
 

+ 1 - 1
arch/arm/mach-imx/mx1ads.c

@@ -55,7 +55,7 @@ static void __init
 mx1ads_init(void)
 {
 #ifdef CONFIG_LEDS
-	imx_gpio_mode(GPIO_PORTA | GPIO_OUT | GPIO_GPIO | 2);
+	imx_gpio_mode(GPIO_PORTA | GPIO_OUT | 2);
 #endif
 	platform_add_devices(devices, ARRAY_SIZE(devices));
 }

+ 4 - 4
arch/arm/mm/Kconfig

@@ -370,21 +370,21 @@ config CPU_BIG_ENDIAN
 
 config CPU_ICACHE_DISABLE
 	bool "Disable I-Cache"
-	depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020
+	depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6
 	help
 	  Say Y here to disable the processor instruction cache. Unless
 	  you have a reason not to or are unsure, say N.
 
 config CPU_DCACHE_DISABLE
 	bool "Disable D-Cache"
-	depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020
+	depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6
 	help
 	  Say Y here to disable the processor data cache. Unless
 	  you have a reason not to or are unsure, say N.
 
 config CPU_DCACHE_WRITETHROUGH
 	bool "Force write through D-cache"
-	depends on (CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020) && !CPU_DCACHE_DISABLE
+	depends on (CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6) && !CPU_DCACHE_DISABLE
 	default y if CPU_ARM925T
 	help
 	  Say Y here to use the data cache in writethrough mode. Unless you
@@ -399,7 +399,7 @@ config CPU_CACHE_ROUND_ROBIN
 
 config CPU_BPREDICT_DISABLE
 	bool "Disable branch prediction"
-	depends on CPU_ARM1020
+	depends on CPU_ARM1020 || CPU_V6
 	help
 	  Say Y here to disable branch prediction.  If unsure, say N.
 

+ 5 - 0
arch/ia64/kernel/mca.c

@@ -1016,6 +1016,11 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 
 			cmc_polling_enabled = 1;
 			spin_unlock(&cmc_history_lock);
+			/* If we're being hit with CMC interrupts, we won't
+			 * ever execute the schedule_work() below.  Need to
+			 * disable CMC interrupts on this processor now.
+			 */
+			ia64_mca_cmc_vector_disable(NULL);
 			schedule_work(&cmc_disable_work);
 			schedule_work(&cmc_disable_work);
 
 			/*
+ 1 - 1
arch/ppc/platforms/pmac_time.c

@@ -195,7 +195,7 @@ via_calibrate_decr(void)
 		;
 	dend = get_dec();
 
-	tb_ticks_per_jiffy = (dstart - dend) / (6 * (HZ/100));
+	tb_ticks_per_jiffy = (dstart - dend) / ((6 * HZ)/100);
 	tb_to_us = mulhwu_scale_factor(dstart - dend, 60000);
 
 	printk(KERN_INFO "via_calibrate_decr: ticks per jiffy = %u (%u ticks)\n",

+ 0 - 56
arch/sparc/Kconfig

@@ -25,62 +25,6 @@ source "init/Kconfig"
 
 menu "General machine setup"
 
-config VT
-	bool
-	select INPUT
-	default y
-	---help---
-	  If you say Y here, you will get support for terminal devices with
-	  display and keyboard devices. These are called "virtual" because you
-	  can run several virtual terminals (also called virtual consoles) on
-	  one physical terminal. This is rather useful, for example one
-	  virtual terminal can collect system messages and warnings, another
-	  one can be used for a text-mode user session, and a third could run
-	  an X session, all in parallel. Switching between virtual terminals
-	  is done with certain key combinations, usually Alt-<function key>.
-
-	  The setterm command ("man setterm") can be used to change the
-	  properties (such as colors or beeping) of a virtual terminal. The
-	  man page console_codes(4) ("man console_codes") contains the special
-	  character sequences that can be used to change those properties
-	  directly. The fonts used on virtual terminals can be changed with
-	  the setfont ("man setfont") command and the key bindings are defined
-	  with the loadkeys ("man loadkeys") command.
-
-	  You need at least one virtual terminal device in order to make use
-	  of your keyboard and monitor. Therefore, only people configuring an
-	  embedded system would want to say N here in order to save some
-	  memory; the only way to log into such a system is then via a serial
-	  or network connection.
-
-	  If unsure, say Y, or else you won't be able to do much with your new
-	  shiny Linux system :-)
-
-config VT_CONSOLE
-	bool
-	default y
-	---help---
-	  The system console is the device which receives all kernel messages
-	  and warnings and which allows logins in single user mode. If you
-	  answer Y here, a virtual terminal (the device used to interact with
-	  a physical terminal) can be used as system console. This is the most
-	  common mode of operations, so you should say Y here unless you want
-	  the kernel messages be output only to a serial port (in which case
-	  you should say Y to "Console on serial port", below).
-
-	  If you do say Y here, by default the currently visible virtual
-	  terminal (/dev/tty0) will be used as system console. You can change
-	  that with a kernel command line option such as "console=tty3" which
-	  would use the third virtual terminal as system console. (Try "man
-	  bootparam" or see the documentation of your boot loader (lilo or
-	  loadlin) about how to pass options to the kernel at boot time.)
-
-	  If unsure, say Y.
-
-config HW_CONSOLE
-	bool
-	default y
-
 config SMP
 	bool "Symmetric multi-processing support (does not work on sun4/sun4c)"
 	depends on BROKEN

+ 1 - 1
arch/sparc/kernel/time.c

@@ -457,7 +457,7 @@ void __init time_init(void)
 	sbus_time_init();
 }
 
-extern __inline__ unsigned long do_gettimeoffset(void)
+static inline unsigned long do_gettimeoffset(void)
 {
 	return (*master_l10_counter >> 10) & 0x1fffff;
 }

+ 1 - 1
arch/sparc/mm/srmmu.c

@@ -260,7 +260,7 @@ static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
 { return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); }
 
 /* to find an entry in a top-level page table... */
-extern inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
+static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
 { return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); }
 
 /* Find an entry in the second-level page table.. */

+ 8 - 35
arch/sparc64/kernel/entry.S

@@ -97,8 +97,8 @@ do_fpdis:
 	faddd		%f0, %f2, %f4
 	fmuld		%f0, %f2, %f6
 	ldxa		[%g3] ASI_DMMU, %g5
-cplus_fptrap_insn_1:
-	sethi		%hi(0), %g2
+	sethi		%hi(sparc64_kern_sec_context), %g2
+	ldx		[%g2 + %lo(sparc64_kern_sec_context)], %g2
 	stxa		%g2, [%g3] ASI_DMMU
 	membar		#Sync
 	add		%g6, TI_FPREGS + 0xc0, %g2
@@ -126,8 +126,8 @@ cplus_fptrap_insn_1:
 	fzero		%f34
 	ldxa		[%g3] ASI_DMMU, %g5
 	add		%g6, TI_FPREGS, %g1
-cplus_fptrap_insn_2:
-	sethi		%hi(0), %g2
+	sethi		%hi(sparc64_kern_sec_context), %g2
+	ldx		[%g2 + %lo(sparc64_kern_sec_context)], %g2
 	stxa		%g2, [%g3] ASI_DMMU
 	membar		#Sync
 	add		%g6, TI_FPREGS + 0x40, %g2
@@ -153,8 +153,8 @@ cplus_fptrap_insn_2:
 3:	mov		SECONDARY_CONTEXT, %g3
 	add		%g6, TI_FPREGS, %g1
 	ldxa		[%g3] ASI_DMMU, %g5
-cplus_fptrap_insn_3:
-	sethi		%hi(0), %g2
+	sethi		%hi(sparc64_kern_sec_context), %g2
+	ldx		[%g2 + %lo(sparc64_kern_sec_context)], %g2
 	stxa		%g2, [%g3] ASI_DMMU
 	membar		#Sync
 	mov		0x40, %g2
@@ -319,8 +319,8 @@ do_fptrap_after_fsr:
 	stx		%g3, [%g6 + TI_GSR]
 	mov		SECONDARY_CONTEXT, %g3
 	ldxa		[%g3] ASI_DMMU, %g5
-cplus_fptrap_insn_4:
-	sethi		%hi(0), %g2
+	sethi		%hi(sparc64_kern_sec_context), %g2
+	ldx		[%g2 + %lo(sparc64_kern_sec_context)], %g2
 	stxa		%g2, [%g3] ASI_DMMU
 	membar		#Sync
 	add		%g6, TI_FPREGS, %g2
@@ -341,33 +341,6 @@ cplus_fptrap_insn_4:
 	ba,pt		%xcc, etrap
 	 wr		%g0, 0, %fprs
 
-cplus_fptrap_1:
-	sethi		%hi(CTX_CHEETAH_PLUS_CTX0), %g2
-
-	.globl		cheetah_plus_patch_fpdis
-cheetah_plus_patch_fpdis:
-	/* We configure the dTLB512_0 for 4MB pages and the
-	 * dTLB512_1 for 8K pages when in context zero.
-	 */
-	sethi			%hi(cplus_fptrap_1), %o0
-	lduw			[%o0 + %lo(cplus_fptrap_1)], %o1
-
-	set			cplus_fptrap_insn_1, %o2
-	stw			%o1, [%o2]
-	flush			%o2
-	set			cplus_fptrap_insn_2, %o2
-	stw			%o1, [%o2]
-	flush			%o2
-	set			cplus_fptrap_insn_3, %o2
-	stw			%o1, [%o2]
-	flush			%o2
-	set			cplus_fptrap_insn_4, %o2
-	stw			%o1, [%o2]
-	flush			%o2
-
-	retl
-	 nop
-
 	/* The registers for cross calls will be:
 	 *
 	 * DATA 0: [low 32-bits]  Address of function to call, jmp to this

+ 4 - 47
arch/sparc64/kernel/etrap.S

@@ -68,12 +68,8 @@ etrap_irq:
 
 		wrpr	%g3, 0, %otherwin
 		wrpr	%g2, 0, %wstate
-cplus_etrap_insn_1:
-		sethi	%hi(0), %g3
-		sllx	%g3, 32, %g3
-cplus_etrap_insn_2:
-		sethi	%hi(0), %g2
-		or	%g3, %g2, %g3
+		sethi	%hi(sparc64_kern_pri_context), %g2
+		ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g3
 		stxa	%g3, [%l4] ASI_DMMU
 		flush	%l6
 		wr	%g0, ASI_AIUS, %asi
@@ -215,12 +211,8 @@ scetrap:	rdpr	%pil, %g2
 		mov	PRIMARY_CONTEXT, %l4
 		wrpr	%g3, 0, %otherwin
 		wrpr	%g2, 0, %wstate
-cplus_etrap_insn_3:
-		sethi	%hi(0), %g3
-		sllx	%g3, 32, %g3
-cplus_etrap_insn_4:
-		sethi	%hi(0), %g2
-		or	%g3, %g2, %g3
+		sethi	%hi(sparc64_kern_pri_context), %g2
+		ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g3
 		stxa	%g3, [%l4] ASI_DMMU
 		flush	%l6
 
@@ -264,38 +256,3 @@ cplus_etrap_insn_4:
 
 #undef TASK_REGOFF
 #undef ETRAP_PSTATE1
-
-cplus_einsn_1:
-		sethi			%uhi(CTX_CHEETAH_PLUS_NUC), %g3
-cplus_einsn_2:
-		sethi			%hi(CTX_CHEETAH_PLUS_CTX0), %g2
-
-		.globl			cheetah_plus_patch_etrap
-cheetah_plus_patch_etrap:
-		/* We configure the dTLB512_0 for 4MB pages and the
-		 * dTLB512_1 for 8K pages when in context zero.
-		 */
-		sethi			%hi(cplus_einsn_1), %o0
-		sethi			%hi(cplus_etrap_insn_1), %o2
-		lduw			[%o0 + %lo(cplus_einsn_1)], %o1
-		or			%o2, %lo(cplus_etrap_insn_1), %o2
-		stw			%o1, [%o2]
-		flush			%o2
-		sethi			%hi(cplus_etrap_insn_3), %o2
-		or			%o2, %lo(cplus_etrap_insn_3), %o2
-		stw			%o1, [%o2]
-		flush			%o2
-
-		sethi			%hi(cplus_einsn_2), %o0
-		sethi			%hi(cplus_etrap_insn_2), %o2
-		lduw			[%o0 + %lo(cplus_einsn_2)], %o1
-		or			%o2, %lo(cplus_etrap_insn_2), %o2
-		stw			%o1, [%o2]
-		flush			%o2
-		sethi			%hi(cplus_etrap_insn_4), %o2
-		or			%o2, %lo(cplus_etrap_insn_4), %o2
-		stw			%o1, [%o2]
-		flush			%o2
-
-		retl
-		 nop

+ 5 - 28
arch/sparc64/kernel/head.S

@@ -325,23 +325,7 @@ cheetah_tlb_fixup:
 1:	sethi	%hi(tlb_type), %g1
 	stw	%g2, [%g1 + %lo(tlb_type)]
 
-	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)
-	ba,pt	%xcc, 2f
-	 nop
-
-1:	/* Patch context register writes to support nucleus page
-	 * size correctly.
-	 */
-	call	cheetah_plus_patch_etrap
-	 nop
-	call	cheetah_plus_patch_rtrap
-	 nop
-	call	cheetah_plus_patch_fpdis
-	 nop
-	call	cheetah_plus_patch_winfixup
-	 nop
-
-2:	/* Patch copy/page operations to cheetah optimized versions. */
+	/* Patch copy/page operations to cheetah optimized versions. */
 	call	cheetah_patch_copyops
 	 nop
 	call	cheetah_patch_copy_page
@@ -484,20 +468,13 @@ spitfire_vpte_base:
 	call	prom_set_trap_table
 	 sethi	%hi(sparc64_ttable_tl0), %o0
 
-	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f)
-	ba,pt	%xcc, 2f
-	 nop
-
-1:	/* Start using proper page size encodings in ctx register.  */
-	sethi	%uhi(CTX_CHEETAH_PLUS_NUC), %g3
+	/* Start using proper page size encodings in ctx register.  */
+	sethi	%hi(sparc64_kern_pri_context), %g3
+	ldx	[%g3 + %lo(sparc64_kern_pri_context)], %g2
 	mov	PRIMARY_CONTEXT, %g1
-	sllx	%g3, 32, %g3
-	sethi	%hi(CTX_CHEETAH_PLUS_CTX0), %g2
-	or	%g3, %g2, %g3
-	stxa	%g3, [%g1] ASI_DMMU
+	stxa	%g2, [%g1] ASI_DMMU
 	membar	#Sync
 
-2:
 	rdpr	%pstate, %o1
 	or	%o1, PSTATE_IE, %o1
 	wrpr	%o1, 0, %pstate

+ 2 - 21
arch/sparc64/kernel/rtrap.S

@@ -256,9 +256,8 @@ rt_continue:	ldx			[%sp + PTREGS_OFF + PT_V9_G1], %g1
 		brnz,pn			%l3, kern_rtt
 		 mov			PRIMARY_CONTEXT, %l7
 		ldxa			[%l7 + %l7] ASI_DMMU, %l0
-cplus_rtrap_insn_1:
-		sethi			%hi(0), %l1
-		sllx			%l1, 32, %l1
+		sethi			%hi(sparc64_kern_pri_nuc_bits), %l1
+		ldx			[%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
 		or			%l0, %l1, %l0
 		stxa			%l0, [%l7] ASI_DMMU
 		flush			%g6
@@ -345,21 +344,3 @@ kern_fpucheck:	ldub			[%g6 + TI_FPDEPTH], %l5
 		wr			%g0, FPRS_DU, %fprs
 		ba,pt			%xcc, rt_continue
 		 stb			%l5, [%g6 + TI_FPDEPTH]
-
-cplus_rinsn_1:
-		sethi			%uhi(CTX_CHEETAH_PLUS_NUC), %l1
-
-		.globl			cheetah_plus_patch_rtrap
-cheetah_plus_patch_rtrap:
-		/* We configure the dTLB512_0 for 4MB pages and the
-		 * dTLB512_1 for 8K pages when in context zero.
-		 */
-		sethi			%hi(cplus_rinsn_1), %o0
-		sethi			%hi(cplus_rtrap_insn_1), %o2
-		lduw			[%o0 + %lo(cplus_rinsn_1)], %o1
-		or			%o2, %lo(cplus_rtrap_insn_1), %o2
-		stw			%o1, [%o2]
-		flush			%o2
-
-		retl
-		 nop

+ 2 - 6
arch/sparc64/kernel/setup.c

@@ -187,17 +187,13 @@ int prom_callback(long *args)
 		}
 
 		if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) {
-			unsigned long kernel_pctx = 0;
-
-			if (tlb_type == cheetah_plus)
-				kernel_pctx |= (CTX_CHEETAH_PLUS_NUC |
-						CTX_CHEETAH_PLUS_CTX0);
+			extern unsigned long sparc64_kern_pri_context;
 
 			/* Spitfire Errata #32 workaround */
 			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
 					     "flush	%%g6"
 					     : /* No outputs */
-					     : "r" (kernel_pctx),
+					     : "r" (sparc64_kern_pri_context),
 					       "r" (PRIMARY_CONTEXT),
 					       "i" (ASI_DMMU));
 

+ 4 - 11
arch/sparc64/kernel/trampoline.S

@@ -336,20 +336,13 @@ do_unlock:
 	call		init_irqwork_curcpu
 	 nop
 
-	BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f)
-	ba,pt	%xcc, 2f
-	 nop
-
-1:	/* Start using proper page size encodings in ctx register.  */
-	sethi	%uhi(CTX_CHEETAH_PLUS_NUC), %g3
+	/* Start using proper page size encodings in ctx register.  */
+	sethi	%hi(sparc64_kern_pri_context), %g3
+	ldx	[%g3 + %lo(sparc64_kern_pri_context)], %g2
 	mov	PRIMARY_CONTEXT, %g1
-	sllx	%g3, 32, %g3
-	sethi	%hi(CTX_CHEETAH_PLUS_CTX0), %g2
-	or	%g3, %g2, %g3
-	stxa	%g3, [%g1] ASI_DMMU
+	stxa	%g2, [%g1] ASI_DMMU
 	membar	#Sync

-2:
 	rdpr		%pstate, %o1
 	or		%o1, PSTATE_IE, %o1
 	wrpr		%o1, 0, %pstate

+ 2 - 31
arch/sparc64/kernel/winfixup.S

@@ -16,23 +16,14 @@
 	.text

 set_pcontext:
-cplus_winfixup_insn_1:
-	sethi	%hi(0), %l1
+	sethi	%hi(sparc64_kern_pri_context), %l1
+	ldx	[%l1 + %lo(sparc64_kern_pri_context)], %l1
 	mov	PRIMARY_CONTEXT, %g1
-	sllx	%l1, 32, %l1
-cplus_winfixup_insn_2:
-	sethi	%hi(0), %g2
-	or	%l1, %g2, %l1
 	stxa	%l1, [%g1] ASI_DMMU
 	flush	%g6
 	retl
 	 nop

-cplus_wfinsn_1:
-	sethi	%uhi(CTX_CHEETAH_PLUS_NUC), %l1
-cplus_wfinsn_2:
-	sethi	%hi(CTX_CHEETAH_PLUS_CTX0), %g2
-
 	.align	32

 	/* Here are the rules, pay attention.
@@ -395,23 +386,3 @@ window_dax_from_user_common:
 	 add		%sp, PTREGS_OFF, %o0
 	ba,pt		%xcc, rtrap
 	 clr		%l6
-	
-
-	.globl		cheetah_plus_patch_winfixup
-cheetah_plus_patch_winfixup:
-	sethi			%hi(cplus_wfinsn_1), %o0
-	sethi			%hi(cplus_winfixup_insn_1), %o2
-	lduw			[%o0 + %lo(cplus_wfinsn_1)], %o1
-	or			%o2, %lo(cplus_winfixup_insn_1), %o2
-	stw			%o1, [%o2]
-	flush			%o2
-
-	sethi			%hi(cplus_wfinsn_2), %o0
-	sethi			%hi(cplus_winfixup_insn_2), %o2
-	lduw			[%o0 + %lo(cplus_wfinsn_2)], %o1
-	or			%o2, %lo(cplus_winfixup_insn_2), %o2
-	stw			%o1, [%o2]
-	flush			%o2
-
-	retl
-	 nop

+ 76 - 106
arch/sparc64/mm/init.c

@@ -133,6 +133,12 @@ extern unsigned int sparc_ramdisk_size;

 struct page *mem_map_zero __read_mostly;

+unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
+
+unsigned long sparc64_kern_pri_context __read_mostly;
+unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
+unsigned long sparc64_kern_sec_context __read_mostly;
+
 int bigkernel = 0;

 /* XXX Tune this... */
@@ -362,6 +368,7 @@ struct linux_prom_translation {
 	unsigned long data;
 };
 static struct linux_prom_translation prom_trans[512] __initdata;
+static unsigned int prom_trans_ents __initdata;

 extern unsigned long prom_boot_page;
 extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
@@ -375,57 +382,7 @@ unsigned long kern_locked_tte_data;
 unsigned long prom_pmd_phys __read_mostly;
 unsigned int swapper_pgd_zero __read_mostly;

-/* Allocate power-of-2 aligned chunks from the end of the
- * kernel image.  Return physical address.
- */
-static inline unsigned long early_alloc_phys(unsigned long size)
-{
-	unsigned long base;
-
-	BUILD_BUG_ON(size & (size - 1));
-
-	kern_size = (kern_size + (size - 1)) & ~(size - 1);
-	base = kern_base + kern_size;
-	kern_size += size;
-
-	return base;
-}
-
-static inline unsigned long load_phys32(unsigned long pa)
-{
-	unsigned long val;
-
-	__asm__ __volatile__("lduwa	[%1] %2, %0"
-			     : "=&r" (val)
-			     : "r" (pa), "i" (ASI_PHYS_USE_EC));
-
-	return val;
-}
-
-static inline unsigned long load_phys64(unsigned long pa)
-{
-	unsigned long val;
-
-	__asm__ __volatile__("ldxa	[%1] %2, %0"
-			     : "=&r" (val)
-			     : "r" (pa), "i" (ASI_PHYS_USE_EC));
-
-	return val;
-}
-
-static inline void store_phys32(unsigned long pa, unsigned long val)
-{
-	__asm__ __volatile__("stwa	%0, [%1] %2"
-			     : /* no outputs */
-			     : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
-}
-
-static inline void store_phys64(unsigned long pa, unsigned long val)
-{
-	__asm__ __volatile__("stxa	%0, [%1] %2"
-			     : /* no outputs */
-			     : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
-}
+static pmd_t *prompmd __read_mostly;

 #define BASE_PAGE_SIZE 8192

@@ -435,34 +392,28 @@ static inline void store_phys64(unsigned long pa, unsigned long val)
  */
 unsigned long prom_virt_to_phys(unsigned long promva, int *error)
 {
-	unsigned long pmd_phys = (prom_pmd_phys +
-				  ((promva >> 23) & 0x7ff) * sizeof(pmd_t));
-	unsigned long pte_phys;
-	pmd_t pmd_ent;
-	pte_t pte_ent;
+	pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff);
+	pte_t *ptep;
 	unsigned long base;

-	pmd_val(pmd_ent) = load_phys32(pmd_phys);
-	if (pmd_none(pmd_ent)) {
+	if (pmd_none(*pmdp)) {
 		if (error)
 			*error = 1;
 		return 0;
 	}
-
-	pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
-	pte_phys += ((promva >> 13) & 0x3ff) * sizeof(pte_t);
-	pte_val(pte_ent) = load_phys64(pte_phys);
-	if (!pte_present(pte_ent)) {
+	ptep = (pte_t *)__pmd_page(*pmdp) + ((promva >> 13) & 0x3ff);
+	if (!pte_present(*ptep)) {
 		if (error)
 			*error = 1;
 		return 0;
 	}
 	if (error) {
 		*error = 0;
-		return pte_val(pte_ent);
+		return pte_val(*ptep);
 	}
-	base = pte_val(pte_ent) & _PAGE_PADDR;
-	return (base + (promva & (BASE_PAGE_SIZE - 1)));
+	base = pte_val(*ptep) & _PAGE_PADDR;
+
+	return base + (promva & (BASE_PAGE_SIZE - 1));
 }

 /* The obp translations are saved based on 8k pagesize, since obp can
@@ -475,25 +426,20 @@ static void __init build_obp_range(unsigned long start, unsigned long end, unsig
 	unsigned long vaddr;

 	for (vaddr = start; vaddr < end; vaddr += BASE_PAGE_SIZE) {
-		unsigned long val, pte_phys, pmd_phys;
-		pmd_t pmd_ent;
-		int i;
-
-		pmd_phys = (prom_pmd_phys +
-			    (((vaddr >> 23) & 0x7ff) * sizeof(pmd_t)));
-		pmd_val(pmd_ent) = load_phys32(pmd_phys);
-		if (pmd_none(pmd_ent)) {
-			pte_phys = early_alloc_phys(BASE_PAGE_SIZE);
-
-			for (i = 0; i < BASE_PAGE_SIZE / sizeof(pte_t); i++)
-				store_phys64(pte_phys+i*sizeof(pte_t),0);
+		unsigned long val;
+		pmd_t *pmd;
+		pte_t *pte;

-			pmd_val(pmd_ent) = pte_phys >> 11UL;
-			store_phys32(pmd_phys, pmd_val(pmd_ent));
+		pmd = prompmd + ((vaddr >> 23) & 0x7ff);
+		if (pmd_none(*pmd)) {
+			pte = __alloc_bootmem(BASE_PAGE_SIZE, BASE_PAGE_SIZE,
+					      PAGE_SIZE);
+			if (!pte)
+				prom_halt();
+			memset(pte, 0, BASE_PAGE_SIZE);
+			pmd_set(pmd, pte);
 		}
-
-		pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
-		pte_phys += (((vaddr >> 13) & 0x3ff) * sizeof(pte_t));
+		pte = (pte_t *) __pmd_page(*pmd) + ((vaddr >> 13) & 0x3ff);

 		val = data;

@@ -501,7 +447,8 @@ static void __init build_obp_range(unsigned long start, unsigned long end, unsig
 		if (tlb_type == spitfire)
 			val &= ~0x0003fe0000000000UL;

-		store_phys64(pte_phys, val | _PAGE_MODIFIED);
+		set_pte_at(&init_mm, vaddr, pte,
+			   __pte(val | _PAGE_MODIFIED));

 		data += BASE_PAGE_SIZE;
 	}
@@ -514,13 +461,17 @@ static inline int in_obp_range(unsigned long vaddr)
 }

 #define OBP_PMD_SIZE 2048
-static void __init build_obp_pgtable(int prom_trans_ents)
+static void __init build_obp_pgtable(void)
 {
 	unsigned long i;

-	prom_pmd_phys = early_alloc_phys(OBP_PMD_SIZE);
-	for (i = 0; i < OBP_PMD_SIZE; i += 4)
-		store_phys32(prom_pmd_phys + i, 0);
+	prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, PAGE_SIZE);
+	if (!prompmd)
+		prom_halt();
+
+	memset(prompmd, 0, OBP_PMD_SIZE);
+
+	prom_pmd_phys = __pa(prompmd);

 	for (i = 0; i < prom_trans_ents; i++) {
 		unsigned long start, end;
@@ -540,7 +491,7 @@ static void __init build_obp_pgtable(int prom_trans_ents)
 /* Read OBP translations property into 'prom_trans[]'.
  * Return the number of entries.
  */
-static int __init read_obp_translations(void)
+static void __init read_obp_translations(void)
 {
 	int n, node;

@@ -561,8 +512,10 @@ static int __init read_obp_translations(void)
 		prom_printf("prom_mappings: Couldn't get property.\n");
 		prom_halt();
 	}
+
 	n = n / sizeof(struct linux_prom_translation);
-	return n;
+
+	prom_trans_ents = n;
 }

 static void __init remap_kernel(void)
@@ -582,28 +535,38 @@ static void __init remap_kernel(void)
 	prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
 	prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
 	if (bigkernel) {
-		prom_dtlb_load(tlb_ent - 1,
+		tlb_ent -= 1;
+		prom_dtlb_load(tlb_ent,
 			       tte_data + 0x400000, 
 			       tte_vaddr + 0x400000);
-		prom_itlb_load(tlb_ent - 1,
+		prom_itlb_load(tlb_ent,
 			       tte_data + 0x400000, 
 			       tte_vaddr + 0x400000);
 	}
+	sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
+	if (tlb_type == cheetah_plus) {
+		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
+					    CTX_CHEETAH_PLUS_NUC);
+		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
+		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
+	}
 }

-static void __init inherit_prom_mappings(void)
-{
-	int n;

-	n = read_obp_translations();
-	build_obp_pgtable(n);
+static void __init inherit_prom_mappings_pre(void)
+{
+	read_obp_translations();

 	/* Now fixup OBP's idea about where we really are mapped. */
 	prom_printf("Remapping the kernel... ");
 	remap_kernel();

 	prom_printf("done.\n");
+}

+static void __init inherit_prom_mappings_post(void)
+{
+	build_obp_pgtable();
 	register_prom_callbacks();
 }

@@ -788,8 +751,8 @@ void inherit_locked_prom_mappings(int save_p)
 		}
 	}
 	if (tlb_type == spitfire) {
-		int high = SPITFIRE_HIGHEST_LOCKED_TLBENT - bigkernel;
-		for (i = 0; i < high; i++) {
+		int high = sparc64_highest_unlocked_tlb_ent;
+		for (i = 0; i <= high; i++) {
 			unsigned long data;

 			/* Spitfire Errata #32 workaround */
@@ -877,9 +840,9 @@ void inherit_locked_prom_mappings(int save_p)
 			}
 		}
 	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-		int high = CHEETAH_HIGHEST_LOCKED_TLBENT - bigkernel;
+		int high = sparc64_highest_unlocked_tlb_ent;

-		for (i = 0; i < high; i++) {
+		for (i = 0; i <= high; i++) {
 			unsigned long data;

 			data = cheetah_get_ldtlb_data(i);
@@ -1556,8 +1519,7 @@ void __init paging_init(void)
 	
 	swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);
 	
-	/* Inherit non-locked OBP mappings. */
-	inherit_prom_mappings();
+	inherit_prom_mappings_pre();
 	
 	/* Ok, we can use our TLB miss and window trap handlers safely.
 	 * We need to do a quick peek here to see if we are on StarFire
@@ -1568,15 +1530,23 @@ void __init paging_init(void)
 		extern void setup_tba(int);
 		setup_tba(this_is_starfire);
 	}
-
-	inherit_locked_prom_mappings(1);
-
 	__flush_tlb_all();

+	/* Everything from this point forward, until we are done with
+	 * inherit_prom_mappings_post(), must complete successfully
+	 * without calling into the firmware.  The firmware page tables
+	 * have not been built, but we are running on the Linux kernel's
+	 * trap table.
+	 */
+
 	/* Setup bootmem... */
 	pages_avail = 0;
 	last_valid_pfn = end_pfn = bootmem_init(&pages_avail);

+	inherit_prom_mappings_post();
+
+	inherit_locked_prom_mappings(1);
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	kernel_physical_mapping_init();
 #endif

+ 1 - 11
arch/um/include/registers.h

@@ -15,16 +15,6 @@ extern void save_registers(int pid, union uml_pt_regs *regs);
 extern void restore_registers(int pid, union uml_pt_regs *regs);
 extern void init_registers(int pid);
 extern void get_safe_registers(unsigned long * regs);
+extern void get_thread_regs(union uml_pt_regs *uml_regs, void *buffer);

 #endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */

+ 0 - 4
arch/um/include/sysdep-x86_64/ptrace.h

@@ -218,10 +218,6 @@ struct syscall_args {
                 case RBP: UPT_RBP(regs) = __upt_val; break; \
                 case ORIG_RAX: UPT_ORIG_RAX(regs) = __upt_val; break; \
                 case CS: UPT_CS(regs) = __upt_val; break; \
-                case DS: UPT_DS(regs) = __upt_val; break; \
-                case ES: UPT_ES(regs) = __upt_val; break; \
-                case FS: UPT_FS(regs) = __upt_val; break; \
-                case GS: UPT_GS(regs) = __upt_val; break; \
                 case EFLAGS: UPT_EFLAGS(regs) = __upt_val; break; \
                 default :  \
                         panic("Bad register in UPT_SET : %d\n", reg);  \

+ 1 - 7
arch/um/kernel/sysrq.c

@@ -62,13 +62,7 @@ void show_stack(struct task_struct *task, unsigned long *esp)

 	if (esp == NULL) {
 		if (task != current && task != NULL) {
-			/* XXX: Isn't this bogus? I.e. isn't this the
-			 * *userspace* stack of this task? If not so, use this
-			 * even when task == current (as in i386).
-			 */
 			esp = (unsigned long *) KSTK_ESP(task);
-			/* Which one? No actual difference - just coding style.*/
-			//esp = (unsigned long *) PT_REGS_IP(&task->thread.regs);
 		} else {
 			esp = (unsigned long *) &esp;
 		}
@@ -84,5 +78,5 @@ void show_stack(struct task_struct *task, unsigned long *esp)
 	}

 	printk("Call Trace: \n");
-	show_trace(current, esp);
+	show_trace(task, esp);
 }

+ 9 - 10
arch/um/os-Linux/sys-i386/registers.c

@@ -5,6 +5,7 @@

 #include <errno.h>
 #include <string.h>
+#include <setjmp.h>
 #include "sysdep/ptrace_user.h"
 #include "sysdep/ptrace.h"
 #include "uml-config.h"
@@ -126,13 +127,11 @@ void get_safe_registers(unsigned long *regs)
 	memcpy(regs, exec_regs, HOST_FRAME_SIZE * sizeof(unsigned long));
 }

-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
+void get_thread_regs(union uml_pt_regs *uml_regs, void *buffer)
+{
+	struct __jmp_buf_tag *jmpbuf = buffer;
+
+	UPT_SET(uml_regs, EIP, jmpbuf->__jmpbuf[JB_PC]);
+	UPT_SET(uml_regs, UESP, jmpbuf->__jmpbuf[JB_SP]);
+	UPT_SET(uml_regs, EBP, jmpbuf->__jmpbuf[JB_BP]);
+}

+ 9 - 10
arch/um/os-Linux/sys-x86_64/registers.c

@@ -5,6 +5,7 @@

 #include <errno.h>
 #include <string.h>
+#include <setjmp.h>
 #include "ptrace_user.h"
 #include "uml-config.h"
 #include "skas_ptregs.h"
@@ -74,13 +75,11 @@ void get_safe_registers(unsigned long *regs)
 	memcpy(regs, exec_regs, HOST_FRAME_SIZE * sizeof(unsigned long));
 }

-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
+void get_thread_regs(union uml_pt_regs *uml_regs, void *buffer)
+{
+	struct __jmp_buf_tag *jmpbuf = buffer;
+
+	UPT_SET(uml_regs, RIP, jmpbuf->__jmpbuf[JB_PC]);
+	UPT_SET(uml_regs, RSP, jmpbuf->__jmpbuf[JB_RSP]);
+	UPT_SET(uml_regs, RBP, jmpbuf->__jmpbuf[JB_RBP]);
+}

+ 1 - 12
arch/um/sys-i386/sysrq.c

@@ -88,9 +88,7 @@ void show_trace(struct task_struct* task, unsigned long * stack)
 		task = current;

 	if (task != current) {
-		//ebp = (unsigned long) KSTK_EBP(task);
-		/* Which one? No actual difference - just coding style.*/
-		ebp = (unsigned long) PT_REGS_EBP(&task->thread.regs);
+		ebp = (unsigned long) KSTK_EBP(task);
 	} else {
 		asm ("movl %%ebp, %0" : "=r" (ebp) : );
 	}
@@ -99,15 +97,6 @@ void show_trace(struct task_struct* task, unsigned long * stack)
 		((unsigned long)stack & (~(THREAD_SIZE - 1)));
 	print_context_stack(context, stack, ebp);

-	/*while (((long) stack & (THREAD_SIZE-1)) != 0) {
-		addr = *stack;
-		if (__kernel_text_address(addr)) {
-			printk("%08lx:	[<%08lx>]", (unsigned long) stack, addr);
-			print_symbol(" %s", addr);
-			printk("\n");
-		}
-		stack++;
-	}*/
 	printk("\n");
 }


+ 1 - 1
arch/um/sys-i386/user-offsets.c

@@ -46,7 +46,7 @@ void foo(void)
 	OFFSET(HOST_SC_FP_ST, _fpstate, _st);
 	OFFSET(HOST_SC_FXSR_ENV, _fpstate, _fxsr_env);

-	DEFINE_LONGS(HOST_FRAME_SIZE, FRAME_SIZE);
+	DEFINE(HOST_FRAME_SIZE, FRAME_SIZE);
 	DEFINE_LONGS(HOST_FP_SIZE, sizeof(struct user_i387_struct));
 	DEFINE_LONGS(HOST_XFP_SIZE, sizeof(struct user_fxsr_struct));


+ 20 - 20
arch/x86_64/kernel/head.S

@@ -270,26 +270,26 @@ ENTRY(level3_kernel_pgt)
 .org 0x4000
 ENTRY(level2_ident_pgt)
 	/* 40MB for bootup. 	*/
-	.quad	0x0000000000000183
-	.quad	0x0000000000200183
-	.quad	0x0000000000400183
-	.quad	0x0000000000600183
-	.quad	0x0000000000800183
-	.quad	0x0000000000A00183
-	.quad	0x0000000000C00183
-	.quad	0x0000000000E00183
-	.quad	0x0000000001000183
-	.quad	0x0000000001200183
-	.quad	0x0000000001400183
-	.quad	0x0000000001600183
-	.quad	0x0000000001800183
-	.quad	0x0000000001A00183
-	.quad	0x0000000001C00183
-	.quad	0x0000000001E00183
-	.quad	0x0000000002000183
-	.quad	0x0000000002200183
-	.quad	0x0000000002400183
-	.quad	0x0000000002600183
+	.quad	0x0000000000000083
+	.quad	0x0000000000200083
+	.quad	0x0000000000400083
+	.quad	0x0000000000600083
+	.quad	0x0000000000800083
+	.quad	0x0000000000A00083
+	.quad	0x0000000000C00083
+	.quad	0x0000000000E00083
+	.quad	0x0000000001000083
+	.quad	0x0000000001200083
+	.quad	0x0000000001400083
+	.quad	0x0000000001600083
+	.quad	0x0000000001800083
+	.quad	0x0000000001A00083
+	.quad	0x0000000001C00083
+	.quad	0x0000000001E00083
+	.quad	0x0000000002000083
+	.quad	0x0000000002200083
+	.quad	0x0000000002400083
+	.quad	0x0000000002600083
 	/* Temporary mappings for the super early allocator in arch/x86_64/mm/init.c */
 	.globl temp_boot_pmds
 temp_boot_pmds:

+ 4 - 6
drivers/atm/fore200e.c

@@ -178,14 +178,12 @@ fore200e_irq_itoa(int irq)
 
 
 
 
 static void*
 static void*
-fore200e_kmalloc(int size, int flags)
+fore200e_kmalloc(int size, unsigned int __nocast flags)
 {
 {
-    void* chunk = kmalloc(size, flags);
+    void *chunk = kzalloc(size, flags);
 
 
-    if (chunk)
-	memset(chunk, 0x00, size);
-    else
-	printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n", size, flags);
+    if (!chunk)
+	printk(FORE200E "kmalloc() failed, requested size = %d, flags = 0x%x\n",			size, flags);
     
     
     return chunk;
     return chunk;
 }
 }

+ 1 - 1
drivers/char/drm/drm_stub.c

@@ -47,7 +47,7 @@ MODULE_PARM_DESC(cards_limit, "Maximum number of graphics cards");
 MODULE_PARM_DESC(debug, "Enable debug output");
 MODULE_PARM_DESC(debug, "Enable debug output");
 
 
 module_param_named(cards_limit, drm_cards_limit, int, 0444);
 module_param_named(cards_limit, drm_cards_limit, int, 0444);
-module_param_named(debug, drm_debug, int, 0666);
+module_param_named(debug, drm_debug, int, 0600);
 
 
 drm_head_t **drm_heads;
 drm_head_t **drm_heads;
 struct drm_sysfs_class *drm_class;
 struct drm_sysfs_class *drm_class;

+ 2 - 1
drivers/connector/connector.c

@@ -69,7 +69,8 @@ int cn_already_initialized = 0;
  * a new message.
  * a new message.
  *
  *
  */
  */
-int cn_netlink_send(struct cn_msg *msg, u32 __group, int gfp_mask)
+int cn_netlink_send(struct cn_msg *msg, u32 __group,
+		    unsigned int __nocast gfp_mask)
 {
 {
 	struct cn_callback_entry *__cbq;
 	struct cn_callback_entry *__cbq;
 	unsigned int size;
 	unsigned int size;

+ 21 - 24
drivers/infiniband/hw/mthca/mthca_main.c

@@ -503,6 +503,25 @@ err_free_aux:
 	return err;
 	return err;
 }
 }
 
 
+static void mthca_free_icms(struct mthca_dev *mdev)
+{
+	u8 status;
+
+	mthca_free_icm_table(mdev, mdev->mcg_table.table);
+	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+		mthca_free_icm_table(mdev, mdev->srq_table.table);
+	mthca_free_icm_table(mdev, mdev->cq_table.table);
+	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
+	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
+	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
+	mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
+	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
+	mthca_unmap_eq_icm(mdev);
+
+	mthca_UNMAP_ICM_AUX(mdev, &status);
+	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+}
+
 static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
 static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
 {
 {
 	struct mthca_dev_lim        dev_lim;
 	struct mthca_dev_lim        dev_lim;
@@ -580,18 +599,7 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
 	return 0;
 	return 0;
 
 
 err_free_icm:
 err_free_icm:
-	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
-		mthca_free_icm_table(mdev, mdev->srq_table.table);
-	mthca_free_icm_table(mdev, mdev->cq_table.table);
-	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
-	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
-	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
-	mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
-	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
-	mthca_unmap_eq_icm(mdev);
-
-	mthca_UNMAP_ICM_AUX(mdev, &status);
-	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+	mthca_free_icms(mdev);
 
 
 err_stop_fw:
 err_stop_fw:
 	mthca_UNMAP_FA(mdev, &status);
 	mthca_UNMAP_FA(mdev, &status);
@@ -611,18 +619,7 @@ static void mthca_close_hca(struct mthca_dev *mdev)
 	mthca_CLOSE_HCA(mdev, 0, &status);
 	mthca_CLOSE_HCA(mdev, 0, &status);
 
 
 	if (mthca_is_memfree(mdev)) {
 	if (mthca_is_memfree(mdev)) {
-		if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
-			mthca_free_icm_table(mdev, mdev->srq_table.table);
-		mthca_free_icm_table(mdev, mdev->cq_table.table);
-		mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
-		mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
-		mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
-		mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
-		mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
-		mthca_unmap_eq_icm(mdev);
-
-		mthca_UNMAP_ICM_AUX(mdev, &status);
-		mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+		mthca_free_icms(mdev);
 
 
 		mthca_UNMAP_FA(mdev, &status);
 		mthca_UNMAP_FA(mdev, &status);
 		mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
 		mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);

+ 2 - 2
drivers/infiniband/ulp/ipoib/ipoib_main.c

@@ -474,7 +474,7 @@ err:
 	spin_unlock(&priv->lock);
 	spin_unlock(&priv->lock);
 }
 }
 
 
-static void path_lookup(struct sk_buff *skb, struct net_device *dev)
+static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
 {
 {
 	struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
 	struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
 
 
@@ -569,7 +569,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 
 	if (skb->dst && skb->dst->neighbour) {
 	if (skb->dst && skb->dst->neighbour) {
 		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
 		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
-			path_lookup(skb, dev);
+			ipoib_path_lookup(skb, dev);
 			goto out;
 			goto out;
 		}
 		}
 
 

+ 0 - 2
drivers/mfd/ucb1x00-core.c

@@ -642,8 +642,6 @@ static void __exit ucb1x00_exit(void)
 module_init(ucb1x00_init);
 module_init(ucb1x00_init);
 module_exit(ucb1x00_exit);
 module_exit(ucb1x00_exit);
 
 
-EXPORT_SYMBOL(ucb1x00_class);
-
 EXPORT_SYMBOL(ucb1x00_io_set_dir);
 EXPORT_SYMBOL(ucb1x00_io_set_dir);
 EXPORT_SYMBOL(ucb1x00_io_write);
 EXPORT_SYMBOL(ucb1x00_io_write);
 EXPORT_SYMBOL(ucb1x00_io_read);
 EXPORT_SYMBOL(ucb1x00_io_read);

+ 0 - 2
drivers/mfd/ucb1x00.h

@@ -106,8 +106,6 @@ struct ucb1x00_irq {
 	void (*fn)(int, void *);
 	void (*fn)(int, void *);
 };
 };
 
 
-extern struct class ucb1x00_class;
-
 struct ucb1x00 {
 struct ucb1x00 {
 	spinlock_t		lock;
 	spinlock_t		lock;
 	struct mcp		*mcp;
 	struct mcp		*mcp;

+ 4 - 4
drivers/net/Kconfig

@@ -1655,7 +1655,7 @@ config LAN_SAA9730
 
 
 config NET_POCKET
 config NET_POCKET
 	bool "Pocket and portable adapters"
 	bool "Pocket and portable adapters"
-	depends on NET_ETHERNET && ISA
+	depends on NET_ETHERNET && PARPORT
 	---help---
 	---help---
 	  Cute little network (Ethernet) devices which attach to the parallel
 	  Cute little network (Ethernet) devices which attach to the parallel
 	  port ("pocket adapters"), commonly used with laptops. If you have
 	  port ("pocket adapters"), commonly used with laptops. If you have
@@ -1679,7 +1679,7 @@ config NET_POCKET
 
 
 config ATP
 config ATP
 	tristate "AT-LAN-TEC/RealTek pocket adapter support"
 	tristate "AT-LAN-TEC/RealTek pocket adapter support"
-	depends on NET_POCKET && ISA && X86
+	depends on NET_POCKET && PARPORT && X86
 	select CRC32
 	select CRC32
 	---help---
 	---help---
 	  This is a network (Ethernet) device which attaches to your parallel
 	  This is a network (Ethernet) device which attaches to your parallel
@@ -1694,7 +1694,7 @@ config ATP
 
 
 config DE600
 config DE600
 	tristate "D-Link DE600 pocket adapter support"
 	tristate "D-Link DE600 pocket adapter support"
-	depends on NET_POCKET && ISA
+	depends on NET_POCKET && PARPORT
 	---help---
 	---help---
 	  This is a network (Ethernet) device which attaches to your parallel
 	  This is a network (Ethernet) device which attaches to your parallel
 	  port. Read <file:Documentation/networking/DLINK.txt> as well as the
 	  port. Read <file:Documentation/networking/DLINK.txt> as well as the
@@ -1709,7 +1709,7 @@ config DE600
 
 
 config DE620
 config DE620
 	tristate "D-Link DE620 pocket adapter support"
 	tristate "D-Link DE620 pocket adapter support"
-	depends on NET_POCKET && ISA
+	depends on NET_POCKET && PARPORT
 	---help---
 	---help---
 	  This is a network (Ethernet) device which attaches to your parallel
 	  This is a network (Ethernet) device which attaches to your parallel
 	  port. Read <file:Documentation/networking/DLINK.txt> as well as the
 	  port. Read <file:Documentation/networking/DLINK.txt> as well as the

+ 72 - 213
drivers/net/bonding/bond_main.c

@@ -487,6 +487,8 @@
  *	  * Added xmit_hash_policy_layer34()
  *	  * Added xmit_hash_policy_layer34()
  *	- Modified by Jay Vosburgh <fubar@us.ibm.com> to also support mode 4.
  *	- Modified by Jay Vosburgh <fubar@us.ibm.com> to also support mode 4.
  *	  Set version to 2.6.3.
  *	  Set version to 2.6.3.
+ * 2005/09/26 - Jay Vosburgh <fubar@us.ibm.com>
+ *	- Removed backwards compatibility for old ifenslaves.  Version 2.6.4.
  */
  */
 
 
 //#define BONDING_DEBUG 1
 //#define BONDING_DEBUG 1
@@ -595,14 +597,7 @@ static int arp_ip_count	= 0;
 static int bond_mode	= BOND_MODE_ROUNDROBIN;
 static int bond_mode	= BOND_MODE_ROUNDROBIN;
 static int xmit_hashtype= BOND_XMIT_POLICY_LAYER2;
 static int xmit_hashtype= BOND_XMIT_POLICY_LAYER2;
 static int lacp_fast	= 0;
 static int lacp_fast	= 0;
-static int app_abi_ver	= 0;
-static int orig_app_abi_ver = -1; /* This is used to save the first ABI version
-				   * we receive from the application. Once set,
-				   * it won't be changed, and the module will
-				   * refuse to enslave/release interfaces if the
-				   * command comes from an application using
-				   * another ABI version.
-				   */
+
 struct bond_parm_tbl {
 struct bond_parm_tbl {
 	char *modename;
 	char *modename;
 	int mode;
 	int mode;
@@ -1294,12 +1289,13 @@ static void bond_mc_list_destroy(struct bonding *bond)
 /*
 /*
  * Copy all the Multicast addresses from src to the bonding device dst
  * Copy all the Multicast addresses from src to the bonding device dst
  */
  */
-static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond, int gpf_flag)
+static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond,
+			     unsigned int __nocast gfp_flag)
 {
 {
 	struct dev_mc_list *dmi, *new_dmi;
 	struct dev_mc_list *dmi, *new_dmi;
 
 
 	for (dmi = mc_list; dmi; dmi = dmi->next) {
 	for (dmi = mc_list; dmi; dmi = dmi->next) {
-		new_dmi = kmalloc(sizeof(struct dev_mc_list), gpf_flag);
+		new_dmi = kmalloc(sizeof(struct dev_mc_list), gfp_flag);
 
 
 		if (!new_dmi) {
 		if (!new_dmi) {
 			/* FIXME: Potential memory leak !!! */
 			/* FIXME: Potential memory leak !!! */
@@ -1702,51 +1698,29 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
 		}
 		}
 	}
 	}
 
 
-	if (app_abi_ver >= 1) {
-		/* The application is using an ABI, which requires the
-		 * slave interface to be closed.
-		 */
-		if ((slave_dev->flags & IFF_UP)) {
-			printk(KERN_ERR DRV_NAME
-			       ": Error: %s is up\n",
-			       slave_dev->name);
-			res = -EPERM;
-			goto err_undo_flags;
-		}
-
-		if (slave_dev->set_mac_address == NULL) {
-			printk(KERN_ERR DRV_NAME
-			       ": Error: The slave device you specified does "
-			       "not support setting the MAC address.\n");
-			printk(KERN_ERR
-			       "Your kernel likely does not support slave "
-			       "devices.\n");
+	/*
+	 * Old ifenslave binaries are no longer supported.  These can
+	 * be identified with moderate accuracy by the state of the slave:
+	 * the current ifenslave will set the interface down prior to
+	 * enslaving it; the old ifenslave will not.
+	 */
+	if ((slave_dev->flags & IFF_UP)) {
+		printk(KERN_ERR DRV_NAME ": %s is up. "
+		       "This may be due to an out of date ifenslave.\n",
+		       slave_dev->name);
+		res = -EPERM;
+		goto err_undo_flags;
+	}
 
 
-			res = -EOPNOTSUPP;
-			goto err_undo_flags;
-		}
-	} else {
-		/* The application is not using an ABI, which requires the
-		 * slave interface to be open.
-		 */
-		if (!(slave_dev->flags & IFF_UP)) {
-			printk(KERN_ERR DRV_NAME
-			       ": Error: %s is not running\n",
-			       slave_dev->name);
-			res = -EINVAL;
-			goto err_undo_flags;
-		}
+	if (slave_dev->set_mac_address == NULL) {
+		printk(KERN_ERR DRV_NAME
+		       ": Error: The slave device you specified does "
+		       "not support setting the MAC address.\n");
+		printk(KERN_ERR
+		       "Your kernel likely does not support slave devices.\n");
 
 
-		if ((bond->params.mode == BOND_MODE_8023AD) ||
-		    (bond->params.mode == BOND_MODE_TLB)    ||
-		    (bond->params.mode == BOND_MODE_ALB)) {
-			printk(KERN_ERR DRV_NAME
-			       ": Error: to use %s mode, you must upgrade "
-			       "ifenslave.\n",
-			       bond_mode_name(bond->params.mode));
-			res = -EOPNOTSUPP;
-			goto err_undo_flags;
-		}
+		res = -EOPNOTSUPP;
+		goto err_undo_flags;
 	}
 	}
 
 
 	new_slave = kmalloc(sizeof(struct slave), GFP_KERNEL);
 	new_slave = kmalloc(sizeof(struct slave), GFP_KERNEL);
@@ -1762,41 +1736,36 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
 	 */
 	 */
 	new_slave->original_flags = slave_dev->flags;
 	new_slave->original_flags = slave_dev->flags;
 
 
-	if (app_abi_ver >= 1) {
-		/* save slave's original ("permanent") mac address for
-		 * modes that needs it, and for restoring it upon release,
-		 * and then set it to the master's address
-		 */
-		memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
+	/*
+	 * Save slave's original ("permanent") mac address for modes
+	 * that need it, and for restoring it upon release, and then
+	 * set it to the master's address
+	 */
+	memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
 
 
-		/* set slave to master's mac address
-		 * The application already set the master's
-		 * mac address to that of the first slave
-		 */
-		memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len);
-		addr.sa_family = slave_dev->type;
-		res = dev_set_mac_address(slave_dev, &addr);
-		if (res) {
-			dprintk("Error %d calling set_mac_address\n", res);
-			goto err_free;
-		}
+	/*
+	 * Set slave to master's mac address.  The application already
+	 * set the master's mac address to that of the first slave
+	 */
+	memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len);
+	addr.sa_family = slave_dev->type;
+	res = dev_set_mac_address(slave_dev, &addr);
+	if (res) {
+		dprintk("Error %d calling set_mac_address\n", res);
+		goto err_free;
+	}
 
 
-		/* open the slave since the application closed it */
-		res = dev_open(slave_dev);
-		if (res) {
-			dprintk("Openning slave %s failed\n", slave_dev->name);
-			goto err_restore_mac;
-		}
+	/* open the slave since the application closed it */
+	res = dev_open(slave_dev);
+	if (res) {
+		dprintk("Openning slave %s failed\n", slave_dev->name);
+		goto err_restore_mac;
 	}
 	}
 
 
 	res = netdev_set_master(slave_dev, bond_dev);
 	res = netdev_set_master(slave_dev, bond_dev);
 	if (res) {
 	if (res) {
 		dprintk("Error %d calling netdev_set_master\n", res);
 		dprintk("Error %d calling netdev_set_master\n", res);
-		if (app_abi_ver < 1) {
-			goto err_free;
-		} else {
-			goto err_close;
-		}
+		goto err_close;
 	}
 	}
 
 
 	new_slave->dev = slave_dev;
 	new_slave->dev = slave_dev;
@@ -1997,39 +1966,6 @@ static int bond_enslave(struct net_device *bond_dev, struct net_device *slave_de
 
 
 	write_unlock_bh(&bond->lock);
 	write_unlock_bh(&bond->lock);
 
 
-	if (app_abi_ver < 1) {
-		/*
-		 * !!! This is to support old versions of ifenslave.
-		 * We can remove this in 2.5 because our ifenslave takes
-		 * care of this for us.
-		 * We check to see if the master has a mac address yet.
-		 * If not, we'll give it the mac address of our slave device.
-		 */
-		int ndx = 0;
-
-		for (ndx = 0; ndx < bond_dev->addr_len; ndx++) {
-			dprintk("Checking ndx=%d of bond_dev->dev_addr\n",
-				ndx);
-			if (bond_dev->dev_addr[ndx] != 0) {
-				dprintk("Found non-zero byte at ndx=%d\n",
-					ndx);
-				break;
-			}
-		}
-
-		if (ndx == bond_dev->addr_len) {
-			/*
-			 * We got all the way through the address and it was
-			 * all 0's.
-			 */
-			dprintk("%s doesn't have a MAC address yet.  \n",
-				bond_dev->name);
-			dprintk("Going to give assign it from %s.\n",
-				slave_dev->name);
-			bond_sethwaddr(bond_dev, slave_dev);
-		}
-	}
-
 	printk(KERN_INFO DRV_NAME
 	printk(KERN_INFO DRV_NAME
 	       ": %s: enslaving %s as a%s interface with a%s link.\n",
 	       ": %s: enslaving %s as a%s interface with a%s link.\n",
 	       bond_dev->name, slave_dev->name,
 	       bond_dev->name, slave_dev->name,
@@ -2227,12 +2163,10 @@ static int bond_release(struct net_device *bond_dev, struct net_device *slave_de
 	/* close slave before restoring its mac address */
 	/* close slave before restoring its mac address */
 	dev_close(slave_dev);
 	dev_close(slave_dev);
 
 
-	if (app_abi_ver >= 1) {
-		/* restore original ("permanent") mac address */
-		memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
-		addr.sa_family = slave_dev->type;
-		dev_set_mac_address(slave_dev, &addr);
-	}
+	/* restore original ("permanent") mac address */
+	memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
+	addr.sa_family = slave_dev->type;
+	dev_set_mac_address(slave_dev, &addr);
 
 
 	/* restore the original state of the
 	/* restore the original state of the
 	 * IFF_NOARP flag that might have been
 	 * IFF_NOARP flag that might have been
@@ -2320,12 +2254,10 @@ static int bond_release_all(struct net_device *bond_dev)
 		/* close slave before restoring its mac address */
 		/* close slave before restoring its mac address */
 		dev_close(slave_dev);
 		dev_close(slave_dev);
 
 
-		if (app_abi_ver >= 1) {
-			/* restore original ("permanent") mac address*/
-			memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
-			addr.sa_family = slave_dev->type;
-			dev_set_mac_address(slave_dev, &addr);
-		}
+		/* restore original ("permanent") mac address*/
+		memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
+		addr.sa_family = slave_dev->type;
+		dev_set_mac_address(slave_dev, &addr);
 
 
 		/* restore the original state of the IFF_NOARP flag that might have
 		/* restore the original state of the IFF_NOARP flag that might have
 		 * been set by bond_set_slave_inactive_flags()
 		 * been set by bond_set_slave_inactive_flags()
@@ -2423,57 +2355,6 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi
 	return res;
 	return res;
 }
 }
 
 
-static int bond_ethtool_ioctl(struct net_device *bond_dev, struct ifreq *ifr)
-{
-	struct ethtool_drvinfo info;
-	void __user *addr = ifr->ifr_data;
-	uint32_t cmd;
-
-	if (get_user(cmd, (uint32_t __user *)addr)) {
-		return -EFAULT;
-	}
-
-	switch (cmd) {
-	case ETHTOOL_GDRVINFO:
-		if (copy_from_user(&info, addr, sizeof(info))) {
-			return -EFAULT;
-		}
-
-		if (strcmp(info.driver, "ifenslave") == 0) {
-			int new_abi_ver;
-			char *endptr;
-
-			new_abi_ver = simple_strtoul(info.fw_version,
-						     &endptr, 0);
-			if (*endptr) {
-				printk(KERN_ERR DRV_NAME
-				       ": Error: got invalid ABI "
-				       "version from application\n");
-
-				return -EINVAL;
-			}
-
-			if (orig_app_abi_ver == -1) {
-				orig_app_abi_ver  = new_abi_ver;
-			}
-
-			app_abi_ver = new_abi_ver;
-		}
-
-		strncpy(info.driver,  DRV_NAME, 32);
-		strncpy(info.version, DRV_VERSION, 32);
-		snprintf(info.fw_version, 32, "%d", BOND_ABI_VERSION);
-
-		if (copy_to_user(addr, &info, sizeof(info))) {
-			return -EFAULT;
-		}
-
-		return 0;
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
 static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
 static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
 {
 {
 	struct bonding *bond = bond_dev->priv;
 	struct bonding *bond = bond_dev->priv;
@@ -3442,16 +3323,11 @@ static void bond_info_show_slave(struct seq_file *seq, const struct slave *slave
 	seq_printf(seq, "Link Failure Count: %d\n",
 	seq_printf(seq, "Link Failure Count: %d\n",
 		   slave->link_failure_count);
 		   slave->link_failure_count);
 
 
-	if (app_abi_ver >= 1) {
-		seq_printf(seq,
-			   "Permanent HW addr: %02x:%02x:%02x:%02x:%02x:%02x\n",
-			   slave->perm_hwaddr[0],
-			   slave->perm_hwaddr[1],
-			   slave->perm_hwaddr[2],
-			   slave->perm_hwaddr[3],
-			   slave->perm_hwaddr[4],
-			   slave->perm_hwaddr[5]);
-	}
+	seq_printf(seq,
+		   "Permanent HW addr: %02x:%02x:%02x:%02x:%02x:%02x\n",
+		   slave->perm_hwaddr[0], slave->perm_hwaddr[1],
+		   slave->perm_hwaddr[2], slave->perm_hwaddr[3],
+		   slave->perm_hwaddr[4], slave->perm_hwaddr[5]);
 
 
 	if (bond->params.mode == BOND_MODE_8023AD) {
 	if (bond->params.mode == BOND_MODE_8023AD) {
 		const struct aggregator *agg
 		const struct aggregator *agg
@@ -4010,15 +3886,12 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
 	struct ifslave k_sinfo;
 	struct ifslave k_sinfo;
 	struct ifslave __user *u_sinfo = NULL;
 	struct ifslave __user *u_sinfo = NULL;
 	struct mii_ioctl_data *mii = NULL;
 	struct mii_ioctl_data *mii = NULL;
-	int prev_abi_ver = orig_app_abi_ver;
 	int res = 0;
 	int res = 0;
 
 
 	dprintk("bond_ioctl: master=%s, cmd=%d\n",
 	dprintk("bond_ioctl: master=%s, cmd=%d\n",
 		bond_dev->name, cmd);
 		bond_dev->name, cmd);
 
 
 	switch (cmd) {
 	switch (cmd) {
-	case SIOCETHTOOL:
-		return bond_ethtool_ioctl(bond_dev, ifr);
 	case SIOCGMIIPHY:
 	case SIOCGMIIPHY:
 		mii = if_mii(ifr);
 		mii = if_mii(ifr);
 		if (!mii) {
 		if (!mii) {
@@ -4090,21 +3963,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
 		return -EPERM;
 		return -EPERM;
 	}
 	}
 
 
-	if (orig_app_abi_ver == -1) {
-		/* no orig_app_abi_ver was provided yet, so we'll use the
-		 * current one from now on, even if it's 0
-		 */
-		orig_app_abi_ver = app_abi_ver;
-
-	} else if (orig_app_abi_ver != app_abi_ver) {
-		printk(KERN_ERR DRV_NAME
-		       ": Error: already using ifenslave ABI version %d; to "
-		       "upgrade ifenslave to version %d, you must first "
-		       "reload bonding.\n",
-		       orig_app_abi_ver, app_abi_ver);
-		return -EINVAL;
-	}
-
 	slave_dev = dev_get_by_name(ifr->ifr_slave);
 	slave_dev = dev_get_by_name(ifr->ifr_slave);
 
 
 	dprintk("slave_dev=%p: \n", slave_dev);
 	dprintk("slave_dev=%p: \n", slave_dev);
@@ -4137,14 +3995,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
 		dev_put(slave_dev);
 		dev_put(slave_dev);
 	}
 	}
 
 
-	if (res < 0) {
-		/* The ioctl failed, so there's no point in changing the
-		 * orig_app_abi_ver. We'll restore it's value just in case
-		 * we've changed it earlier in this function.
-		 */
-		orig_app_abi_ver = prev_abi_ver;
-	}
-
 	return res;
 	return res;
 }
 }
 
 
@@ -4578,9 +4428,18 @@ static inline void bond_set_mode_ops(struct bonding *bond, int mode)
 	}
 	}
 }
 }
 
 
+static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
+				    struct ethtool_drvinfo *drvinfo)
+{
+	strncpy(drvinfo->driver, DRV_NAME, 32);
+	strncpy(drvinfo->version, DRV_VERSION, 32);
+	snprintf(drvinfo->fw_version, 32, "%d", BOND_ABI_VERSION);
+}
+
 static struct ethtool_ops bond_ethtool_ops = {
 static struct ethtool_ops bond_ethtool_ops = {
 	.get_tx_csum		= ethtool_op_get_tx_csum,
 	.get_tx_csum		= ethtool_op_get_tx_csum,
 	.get_sg			= ethtool_op_get_sg,
 	.get_sg			= ethtool_op_get_sg,
+	.get_drvinfo		= bond_ethtool_get_drvinfo,
 };
 };
 
 
 /*
 /*

+ 2 - 2
drivers/net/bonding/bonding.h

@@ -40,8 +40,8 @@
 #include "bond_3ad.h"
 #include "bond_3ad.h"
 #include "bond_alb.h"
 #include "bond_alb.h"
 
 
-#define DRV_VERSION	"2.6.3"
-#define DRV_RELDATE	"June 8, 2005"
+#define DRV_VERSION	"2.6.4"
+#define DRV_RELDATE	"September 26, 2005"
 #define DRV_NAME	"bonding"
 #define DRV_NAME	"bonding"
 #define DRV_DESCRIPTION	"Ethernet Channel Bonding Driver"
 #define DRV_DESCRIPTION	"Ethernet Channel Bonding Driver"
 
 

+ 31 - 0
drivers/net/ibm_emac/ibm_emac_core.c

@@ -1875,6 +1875,9 @@ static int emac_init_device(struct ocp_device *ocpdev, struct ibm_ocp_mal *mal)
 		rc = -ENODEV;
 		rc = -ENODEV;
 		goto bail;
 		goto bail;
 	}
 	}
+	
+	/* Disable any PHY features not supported by the platform */
+	ep->phy_mii.def->features &= ~emacdata->phy_feat_exc;
 
 
 	/* Setup initial PHY config & startup aneg */
 	/* Setup initial PHY config & startup aneg */
 	if (ep->phy_mii.def->ops->init)
 	if (ep->phy_mii.def->ops->init)
@@ -1882,6 +1885,34 @@ static int emac_init_device(struct ocp_device *ocpdev, struct ibm_ocp_mal *mal)
 	netif_carrier_off(ndev);
 	netif_carrier_off(ndev);
 	if (ep->phy_mii.def->features & SUPPORTED_Autoneg)
 	if (ep->phy_mii.def->features & SUPPORTED_Autoneg)
 		ep->want_autoneg = 1;
 		ep->want_autoneg = 1;
+	else {
+		ep->want_autoneg = 0;
+		
+		/* Select highest supported speed/duplex */
+		if (ep->phy_mii.def->features & SUPPORTED_1000baseT_Full) {
+			ep->phy_mii.speed = SPEED_1000;
+			ep->phy_mii.duplex = DUPLEX_FULL;
+		} else if (ep->phy_mii.def->features & 
+			   SUPPORTED_1000baseT_Half) {
+			ep->phy_mii.speed = SPEED_1000;
+			ep->phy_mii.duplex = DUPLEX_HALF;
+		} else if (ep->phy_mii.def->features & 
+			   SUPPORTED_100baseT_Full) {
+			ep->phy_mii.speed = SPEED_100;
+			ep->phy_mii.duplex = DUPLEX_FULL;
+		} else if (ep->phy_mii.def->features & 
+			   SUPPORTED_100baseT_Half) {
+			ep->phy_mii.speed = SPEED_100;
+			ep->phy_mii.duplex = DUPLEX_HALF;
+		} else if (ep->phy_mii.def->features & 
+			   SUPPORTED_10baseT_Full) {
+			ep->phy_mii.speed = SPEED_10;
+			ep->phy_mii.duplex = DUPLEX_FULL;
+		} else {
+			ep->phy_mii.speed = SPEED_10;
+			ep->phy_mii.duplex = DUPLEX_HALF;
+		}
+	}
 	emac_start_link(ep, NULL);
 	emac_start_link(ep, NULL);
 
 
 	/* read the MAC Address */
 	/* read the MAC Address */

+ 1 - 1
drivers/net/ns83820.c

@@ -584,7 +584,7 @@ static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
 	return 0;
 	return 0;
 }
 }
 
 
-static inline int rx_refill(struct net_device *ndev, int gfp)
+static inline int rx_refill(struct net_device *ndev, unsigned int __nocast gfp)
 {
 {
 	struct ns83820 *dev = PRIV(ndev);
 	struct ns83820 *dev = PRIV(ndev);
 	unsigned i;
 	unsigned i;

+ 1 - 1
drivers/net/pcmcia/smc91c92_cs.c

@@ -1832,7 +1832,7 @@ static void fill_multicast_tbl(int count, struct dev_mc_list *addrs,
 {
 {
     struct dev_mc_list	*mc_addr;
     struct dev_mc_list	*mc_addr;
 
 
-    for (mc_addr = addrs;  mc_addr && --count > 0;  mc_addr = mc_addr->next) {
+    for (mc_addr = addrs;  mc_addr && count-- > 0;  mc_addr = mc_addr->next) {
 	u_int position = ether_crc(6, mc_addr->dmi_addr);
 	u_int position = ether_crc(6, mc_addr->dmi_addr);
 #ifndef final_version		/* Verify multicast address. */
 #ifndef final_version		/* Verify multicast address. */
 	if ((mc_addr->dmi_addr[0] & 1) == 0)
 	if ((mc_addr->dmi_addr[0] & 1) == 0)

+ 16 - 8
drivers/net/skge.c

@@ -2837,21 +2837,29 @@ static void skge_netpoll(struct net_device *dev)
 static int skge_set_mac_address(struct net_device *dev, void *p)
 static int skge_set_mac_address(struct net_device *dev, void *p)
 {
 {
 	struct skge_port *skge = netdev_priv(dev);
 	struct skge_port *skge = netdev_priv(dev);
-	struct sockaddr *addr = p;
-	int err = 0;
+	struct skge_hw *hw = skge->hw;
+	unsigned port = skge->port;
+	const struct sockaddr *addr = p;
 
 
 	if (!is_valid_ether_addr(addr->sa_data))
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 		return -EADDRNOTAVAIL;
 
 
-	skge_down(dev);
+	spin_lock_bh(&hw->phy_lock);
 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
-	memcpy_toio(skge->hw->regs + B2_MAC_1 + skge->port*8,
+	memcpy_toio(hw->regs + B2_MAC_1 + port*8,
 		    dev->dev_addr, ETH_ALEN);
 		    dev->dev_addr, ETH_ALEN);
-	memcpy_toio(skge->hw->regs + B2_MAC_2 + skge->port*8,
+	memcpy_toio(hw->regs + B2_MAC_2 + port*8,
 		    dev->dev_addr, ETH_ALEN);
 		    dev->dev_addr, ETH_ALEN);
-	if (dev->flags & IFF_UP)
-		err = skge_up(dev);
-	return err;
+
+	if (hw->chip_id == CHIP_ID_GENESIS)
+		xm_outaddr(hw, port, XM_SA, dev->dev_addr);
+	else {
+		gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
+		gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
+	}
+	spin_unlock_bh(&hw->phy_lock);
+
+	return 0;
 }
 }
 
 
 static const struct {
 static const struct {

+ 24 - 22
drivers/net/starfire.c

@@ -133,14 +133,18 @@
 	- finally added firmware (GPL'ed by Adaptec)
 	- finally added firmware (GPL'ed by Adaptec)
 	- removed compatibility code for 2.2.x
 	- removed compatibility code for 2.2.x
 
 
+	LK1.4.2.1 (Ion Badulescu)
+	- fixed 32/64 bit issues on i386 + CONFIG_HIGHMEM
+	- added 32-bit padding to outgoing skb's, removed previous workaround
+
 TODO:	- fix forced speed/duplexing code (broken a long time ago, when
 TODO:	- fix forced speed/duplexing code (broken a long time ago, when
 	somebody converted the driver to use the generic MII code)
 	somebody converted the driver to use the generic MII code)
 	- fix VLAN support
 	- fix VLAN support
 */
 */
 
 
 #define DRV_NAME	"starfire"
 #define DRV_NAME	"starfire"
-#define DRV_VERSION	"1.03+LK1.4.2"
-#define DRV_RELDATE	"January 19, 2005"
+#define DRV_VERSION	"1.03+LK1.4.2.1"
+#define DRV_RELDATE	"October 3, 2005"
 
 
 #include <linux/config.h>
 #include <linux/config.h>
 #include <linux/version.h>
 #include <linux/version.h>
@@ -165,6 +169,14 @@ TODO:	- fix forced speed/duplexing code (broken a long time ago, when
  * of length 1. If and when this is fixed, the #define below can be removed.
  * of length 1. If and when this is fixed, the #define below can be removed.
  */
  */
 #define HAS_BROKEN_FIRMWARE
 #define HAS_BROKEN_FIRMWARE
+
+/*
+ * If using the broken firmware, data must be padded to the next 32-bit boundary.
+ */
+#ifdef HAS_BROKEN_FIRMWARE
+#define PADDING_MASK 3
+#endif
+
 /*
 /*
  * Define this if using the driver with the zero-copy patch
  * Define this if using the driver with the zero-copy patch
  */
  */
@@ -257,9 +269,10 @@ static int full_duplex[MAX_UNITS] = {0, };
  * This SUCKS.
  * This SUCKS.
  * We need a much better method to determine if dma_addr_t is 64-bit.
  * We need a much better method to determine if dma_addr_t is 64-bit.
  */
  */
-#if (defined(__i386__) && defined(CONFIG_HIGHMEM) && (LINUX_VERSION_CODE > 0x20500 || defined(CONFIG_HIGHMEM64G))) || defined(__x86_64__) || defined (__ia64__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR))
+#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR))
 /* 64-bit dma_addr_t */
 /* 64-bit dma_addr_t */
 #define ADDR_64BITS	/* This chip uses 64 bit addresses. */
 #define ADDR_64BITS	/* This chip uses 64 bit addresses. */
+#define netdrv_addr_t u64
 #define cpu_to_dma(x) cpu_to_le64(x)
 #define cpu_to_dma(x) cpu_to_le64(x)
 #define dma_to_cpu(x) le64_to_cpu(x)
 #define dma_to_cpu(x) le64_to_cpu(x)
 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
@@ -268,6 +281,7 @@ static int full_duplex[MAX_UNITS] = {0, };
 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
 #define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
 #define RX_DESC_ADDR_SIZE RxDescAddr64bit
 #else  /* 32-bit dma_addr_t */
 #else  /* 32-bit dma_addr_t */
+#define netdrv_addr_t u32
 #define cpu_to_dma(x) cpu_to_le32(x)
 #define cpu_to_dma(x) cpu_to_le32(x)
 #define dma_to_cpu(x) le32_to_cpu(x)
 #define dma_to_cpu(x) le32_to_cpu(x)
 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
 #define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
@@ -1333,21 +1347,10 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
 	}
 	}
 
 
 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
 #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
-	{
-		int has_bad_length = 0;
-
-		if (skb_first_frag_len(skb) == 1)
-			has_bad_length = 1;
-		else {
-			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-				if (skb_shinfo(skb)->frags[i].size == 1) {
-					has_bad_length = 1;
-					break;
-				}
-		}
-
-		if (has_bad_length)
-			skb_checksum_help(skb, 0);
+	if (skb->ip_summed == CHECKSUM_HW) {
+		skb = skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK);
+		if (skb == NULL)
+			return NETDEV_TX_OK;
 	}
 	}
 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
 
 
@@ -2127,13 +2130,12 @@ static int __init starfire_init (void)
 #endif
 #endif
 #endif
 #endif
 
 
-#ifndef ADDR_64BITS
 	/* we can do this test only at run-time... sigh */
 	/* we can do this test only at run-time... sigh */
-	if (sizeof(dma_addr_t) == sizeof(u64)) {
-		printk("This driver has not been ported to this 64-bit architecture yet\n");
+	if (sizeof(dma_addr_t) != sizeof(netdrv_addr_t)) {
+		printk("This driver has dma_addr_t issues, please send email to maintainer\n");
 		return -ENODEV;
 		return -ENODEV;
 	}
 	}
-#endif /* not ADDR_64BITS */
+
 	return pci_module_init (&starfire_driver);
 	return pci_module_init (&starfire_driver);
 }
 }
 
 

+ 2 - 1
drivers/net/sungem.h

@@ -1035,7 +1035,8 @@ struct gem {
 			
 			
 #define ALIGNED_RX_SKB_ADDR(addr) \
 #define ALIGNED_RX_SKB_ADDR(addr) \
         ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
         ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
-static __inline__ struct sk_buff *gem_alloc_skb(int size, int gfp_flags)
+static __inline__ struct sk_buff *gem_alloc_skb(int size,
+						unsigned int __nocast gfp_flags)
 {
 {
 	struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);
 	struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);
 
 

+ 0 - 5
drivers/net/tokenring/ibmtr.c

@@ -531,7 +531,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
 			if (!time_after(jiffies, timeout)) continue;
 			DPRINTK( "Hardware timeout during initialization.\n");
 			iounmap(t_mmio);
-			kfree(ti);
 			return -ENODEV;
 		}
 		ti->sram_phys =
@@ -645,7 +644,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
 			DPRINTK("Unknown shared ram paging info %01X\n",
 							ti->shared_ram_paging);
 			iounmap(t_mmio); 
-			kfree(ti);
 			return -ENODEV;
 			break;
 		} /*end switch shared_ram_paging */
@@ -675,7 +673,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
 			"driver limit (%05x), adapter not started.\n",
 			chk_base, ibmtr_mem_base + IBMTR_SHARED_RAM_SIZE);
 			iounmap(t_mmio);
-			kfree(ti);
 			return -ENODEV;
 		} else { /* seems cool, record what we have figured out */
 			ti->sram_base = new_base >> 12;
@@ -690,7 +687,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
 		DPRINTK("Could not grab irq %d.  Halting Token Ring driver.\n",
 					irq);
 		iounmap(t_mmio);
-		kfree(ti);
 		return -ENODEV;
 	}
 	/*?? Now, allocate some of the PIO PORTs for this driver.. */
@@ -699,7 +695,6 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
 		DPRINTK("Could not grab PIO range. Halting driver.\n");
 		free_irq(dev->irq, dev);
 		iounmap(t_mmio);
-		kfree(ti);
 		return -EBUSY;
 	}
 

+ 1 - 1
drivers/net/tulip/21142.c

@@ -172,7 +172,7 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
 			int i;
 			for (i = 0; i < tp->mtable->leafcount; i++)
 				if (tp->mtable->mleaf[i].media == dev->if_port) {
-					int startup = ! ((tp->chip_id == DC21143 && tp->revision == 65));
+					int startup = ! ((tp->chip_id == DC21143 && (tp->revision == 48 || tp->revision == 65)));
 					tp->cur_index = i;
 					tulip_select_media(dev, startup);
 					setup_done = 1;

+ 9 - 5
drivers/net/wireless/orinoco.c

@@ -503,9 +503,14 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
 		return 0;
 	}
 
-	/* Length of the packet body */
-	/* FIXME: what if the skb is smaller than this? */
-	len = max_t(int,skb->len - ETH_HLEN, ETH_ZLEN - ETH_HLEN);
+	/* Check packet length, pad short packets, round up odd length */
+	len = max_t(int, ALIGN(skb->len, 2), ETH_ZLEN);
+	if (skb->len < len) {
+		skb = skb_padto(skb, len);
+		if (skb == NULL)
+			goto fail;
+	}
+	len -= ETH_HLEN;
 
 	eh = (struct ethhdr *)skb->data;
 
@@ -557,8 +562,7 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
 		p = skb->data;
 	}
 
-	/* Round up for odd length packets */
-	err = hermes_bap_pwrite(hw, USER_BAP, p, ALIGN(data_len, 2),
+	err = hermes_bap_pwrite(hw, USER_BAP, p, data_len,
 				txfid, data_off);
 	if (err) {
 		printk(KERN_ERR "%s: Error %d writing packet to BAP\n",

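The orinoco change replaces the old length clamp with an explicit pad: the frame length is rounded up to an even value and to at least ETH_ZLEN (60 bytes), the skb is padded with skb_padto() if it was shorter, and only then is the 14-byte Ethernet header subtracted to get the length handed to the firmware. A small sketch of just that calculation, with two worked values:

#include <stdio.h>

#define ETH_HLEN 14                 /* Ethernet header length            */
#define ETH_ZLEN 60                 /* minimum Ethernet frame (no FCS)   */
#define ALIGN2(x) (((x) + 1) & ~1)  /* round up to even, like ALIGN(x,2) */

/* Mirror of the length fixup in the patched orinoco_xmit(). */
static int tx_data_len(int skb_len)
{
	int len = ALIGN2(skb_len);

	if (len < ETH_ZLEN)
		len = ETH_ZLEN;
	return len - ETH_HLEN;
}

int main(void)
{
	printf("skb->len 47 -> data len %d\n", tx_data_len(47));  /* 46: padded to 60 */
	printf("skb->len 99 -> data len %d\n", tx_data_len(99));  /* 86: rounded to 100 */
	return 0;
}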
+ 2 - 0
drivers/s390/net/qeth.h

@@ -686,6 +686,7 @@ struct qeth_seqno {
 	__u32 pdu_hdr;
 	__u32 pdu_hdr_ack;
 	__u16 ipa;
+	__u32 pkt_seqno;
 };
 
 struct qeth_reply {
@@ -848,6 +849,7 @@ qeth_realloc_headroom(struct qeth_card *card, struct sk_buff **skb, int size)
                                   "on interface %s", QETH_CARD_IFNAME(card));
                         return -ENOMEM;
                 }
+		kfree_skb(*skb);
                 *skb = new_skb;
 	}
 	return 0;

+ 17 - 20
drivers/s390/net/qeth_main.c

@@ -511,7 +511,7 @@ static int
 __qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode)
 {
 	struct qeth_card *card = (struct qeth_card *) cgdev->dev.driver_data;
-	int rc = 0;
+	int rc = 0, rc2 = 0, rc3 = 0;
 	enum qeth_card_states recover_flag;
 
 	QETH_DBF_TEXT(setup, 3, "setoffl");
@@ -523,11 +523,13 @@ __qeth_set_offline(struct ccwgroup_device *cgdev, int recovery_mode)
 			   CARD_BUS_ID(card));
 		return -ERESTARTSYS;
 	}
-	if ((rc = ccw_device_set_offline(CARD_DDEV(card))) ||
-	    (rc = ccw_device_set_offline(CARD_WDEV(card))) ||
-	    (rc = ccw_device_set_offline(CARD_RDEV(card)))) {
+	rc  = ccw_device_set_offline(CARD_DDEV(card));
+	rc2 = ccw_device_set_offline(CARD_WDEV(card));
+	rc3 = ccw_device_set_offline(CARD_RDEV(card));
+	if (!rc)
+		rc = (rc2) ? rc2 : rc3;
+	if (rc)
 		QETH_DBF_TEXT_(setup, 2, "1err%d", rc);
-	}
 	if (recover_flag == CARD_STATE_UP)
 		card->state = CARD_STATE_RECOVER;
 	qeth_notify_processes();
@@ -1046,6 +1048,7 @@ qeth_setup_card(struct qeth_card *card)
 	spin_lock_init(&card->vlanlock);
 	card->vlangrp = NULL;
 #endif
+	spin_lock_init(&card->lock);
 	spin_lock_init(&card->ip_lock);
 	spin_lock_init(&card->thread_mask_lock);
 	card->thread_start_mask = 0;
@@ -1626,16 +1629,6 @@ qeth_cmd_timeout(unsigned long data)
 	spin_unlock_irqrestore(&reply->card->lock, flags);
 }
 
-static void
-qeth_reset_ip_addresses(struct qeth_card *card)
-{
-	QETH_DBF_TEXT(trace, 2, "rstipadd");
-
-	qeth_clear_ip_list(card, 0, 1);
-	/* this function will also schedule the SET_IP_THREAD */
-	qeth_set_multicast_list(card->dev);
-}
-
 static struct qeth_ipa_cmd *
 qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
 {
@@ -1664,9 +1657,8 @@ qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
 					   "IP address reset.\n",
 					   QETH_CARD_IFNAME(card),
 					   card->info.chpid);
-				card->lan_online = 1;
 				netif_carrier_on(card->dev);
-				qeth_reset_ip_addresses(card);
+				qeth_schedule_recovery(card);
 				return NULL;
 			case IPA_CMD_REGISTER_LOCAL_ADDR:
 				QETH_DBF_TEXT(trace,3, "irla");
@@ -2387,6 +2379,7 @@ qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
 		skb_pull(skb, VLAN_HLEN);
 	}
 #endif
+	*((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
 	return vlan_id;
 }
 
@@ -3014,7 +3007,7 @@ qeth_alloc_buffer_pool(struct qeth_card *card)
 			return -ENOMEM;
 		}
 		for(j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j){
-			ptr = (void *) __get_free_page(GFP_KERNEL);
+			ptr = (void *) __get_free_page(GFP_KERNEL|GFP_DMA);
 			if (!ptr) {
 				while (j > 0)
 					free_page((unsigned long)
@@ -3058,7 +3051,8 @@ qeth_alloc_qdio_buffers(struct qeth_card *card)
 	if (card->qdio.state == QETH_QDIO_ALLOCATED)
 		return 0;
 
-	card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), GFP_KERNEL);
+	card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), 
+				  GFP_KERNEL|GFP_DMA);
 	if (!card->qdio.in_q)
 		return - ENOMEM;
 	QETH_DBF_TEXT(setup, 2, "inq");
@@ -3083,7 +3077,7 @@ qeth_alloc_qdio_buffers(struct qeth_card *card)
 	}
 	for (i = 0; i < card->qdio.no_out_queues; ++i){
 		card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q),
-					       GFP_KERNEL);
+					       GFP_KERNEL|GFP_DMA);
 		if (!card->qdio.out_qs[i]){
 			while (i > 0)
 				kfree(card->qdio.out_qs[--i]);
@@ -6470,6 +6464,9 @@ qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply,
 	if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
 		card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
 		card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
+		/* Disable IPV6 support hard coded for Hipersockets */
+		if(card->info.type == QETH_CARD_TYPE_IQD)
+			card->options.ipa4.supported_funcs &= ~IPA_IPV6;
 	} else {
 #ifdef CONFIG_QETH_IPV6
 		card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;

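Two themes in the qeth changes above: QDIO control structures are now allocated with GFP_DMA so they land in memory the hardware can address, and __qeth_set_offline() no longer short-circuits when the first channel fails to go offline — all three channels are taken down and the first non-zero return code is reported. The error-code handling can be modelled as below (a sketch; combine_rcs is an invented name, the driver does this inline):

#include <assert.h>

/* Each channel's set_offline result is captured separately so a failure
 * on one channel no longer skips the calls for the others; the first
 * non-zero code is the one that gets logged. */
static int combine_rcs(int rc, int rc2, int rc3)
{
	if (!rc)
		rc = rc2 ? rc2 : rc3;
	return rc;
}

int main(void)
{
	assert(combine_rcs(0, 0, 0) == 0);
	assert(combine_rcs(-5, 0, -7) == -5);   /* first failure wins          */
	assert(combine_rcs(0, 0, -7) == -7);    /* later failures still surface */
	return 0;
}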
+ 8 - 0
drivers/scsi/Kconfig

@@ -521,6 +521,14 @@ config SCSI_SATA_SIL
 
 	  If unsure, say N.
 
+config SCSI_SATA_SIL24
+	tristate "Silicon Image 3124/3132 SATA support"
+	depends on SCSI_SATA && PCI && EXPERIMENTAL
+	help
+	  This option enables support for Silicon Image 3124/3132 Serial ATA.
+
+	  If unsure, say N.
+
 config SCSI_SATA_SIS
 	tristate "SiS 964/180 SATA support"
 	depends on SCSI_SATA && PCI && EXPERIMENTAL

+ 1 - 0
drivers/scsi/Makefile

@@ -130,6 +130,7 @@ obj-$(CONFIG_SCSI_ATA_PIIX)	+= libata.o ata_piix.o
 obj-$(CONFIG_SCSI_SATA_PROMISE)	+= libata.o sata_promise.o
 obj-$(CONFIG_SCSI_SATA_QSTOR)	+= libata.o sata_qstor.o
 obj-$(CONFIG_SCSI_SATA_SIL)	+= libata.o sata_sil.o
+obj-$(CONFIG_SCSI_SATA_SIL24)	+= libata.o sata_sil24.o
 obj-$(CONFIG_SCSI_SATA_VIA)	+= libata.o sata_via.o
 obj-$(CONFIG_SCSI_SATA_VITESSE)	+= libata.o sata_vsc.o
 obj-$(CONFIG_SCSI_SATA_SIS)	+= libata.o sata_sis.o

+ 25 - 6
drivers/scsi/ahci.c

@@ -680,17 +680,36 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
 
         for (i = 0; i < host_set->n_ports; i++) {
 		struct ata_port *ap;
-		u32 tmp;
 
-		VPRINTK("port %u\n", i);
+		if (!(irq_stat & (1 << i)))
+			continue;
+
 		ap = host_set->ports[i];
-		tmp = irq_stat & (1 << i);
-		if (tmp && ap) {
+		if (ap) {
 			struct ata_queued_cmd *qc;
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (ahci_host_intr(ap, qc))
-				irq_ack |= (1 << i);
+			if (!ahci_host_intr(ap, qc))
+				if (ata_ratelimit()) {
+					struct pci_dev *pdev =
+					  to_pci_dev(ap->host_set->dev);
+					printk(KERN_WARNING
+					  "ahci(%s): unhandled interrupt on port %u\n",
+					  pci_name(pdev), i);
+				}
+
+			VPRINTK("port %u\n", i);
+		} else {
+			VPRINTK("port %u (no irq)\n", i);
+			if (ata_ratelimit()) {
+				struct pci_dev *pdev =
+				  to_pci_dev(ap->host_set->dev);
+				printk(KERN_WARNING
+				  "ahci(%s): interrupt on disabled port %u\n",
+				  pci_name(pdev), i);
+			}
 		}
+
+		irq_ack |= (1 << i);
 	}
 
 	if (irq_ack) {

+ 259 - 174
drivers/scsi/libata-core.c

@@ -48,6 +48,7 @@
 #include <linux/completion.h>
 #include <linux/completion.h>
 #include <linux/suspend.h>
 #include <linux/suspend.h>
 #include <linux/workqueue.h>
 #include <linux/workqueue.h>
+#include <linux/jiffies.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi.h>
 #include "scsi.h"
 #include "scsi.h"
 #include "scsi_priv.h"
 #include "scsi_priv.h"
@@ -62,6 +63,7 @@
 static unsigned int ata_busy_sleep (struct ata_port *ap,
 static unsigned int ata_busy_sleep (struct ata_port *ap,
 				    unsigned long tmout_pat,
 				    unsigned long tmout_pat,
 			    	    unsigned long tmout);
 			    	    unsigned long tmout);
+static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
 static void ata_set_mode(struct ata_port *ap);
 static void ata_set_mode(struct ata_port *ap);
 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
 static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift);
 static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift);
@@ -69,7 +71,6 @@ static int fgb(u32 bitmap);
 static int ata_choose_xfer_mode(struct ata_port *ap,
 static int ata_choose_xfer_mode(struct ata_port *ap,
 				u8 *xfer_mode_out,
 				u8 *xfer_mode_out,
 				unsigned int *xfer_shift_out);
 				unsigned int *xfer_shift_out);
-static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
 static void __ata_qc_complete(struct ata_queued_cmd *qc);
 static void __ata_qc_complete(struct ata_queued_cmd *qc);
 
 
 static unsigned int ata_unique_id = 1;
 static unsigned int ata_unique_id = 1;
@@ -1131,7 +1132,7 @@ static inline void ata_dump_id(struct ata_device *dev)
 static void ata_dev_identify(struct ata_port *ap, unsigned int device)
 static void ata_dev_identify(struct ata_port *ap, unsigned int device)
 {
 {
 	struct ata_device *dev = &ap->device[device];
 	struct ata_device *dev = &ap->device[device];
-	unsigned int i;
+	unsigned int major_version;
 	u16 tmp;
 	u16 tmp;
 	unsigned long xfer_modes;
 	unsigned long xfer_modes;
 	u8 status;
 	u8 status;
@@ -1229,9 +1230,9 @@ retry:
 	 * common ATA, ATAPI feature tests
 	 * common ATA, ATAPI feature tests
 	 */
 	 */
 
 
-	/* we require LBA and DMA support (bits 8 & 9 of word 49) */
-	if (!ata_id_has_dma(dev->id) || !ata_id_has_lba(dev->id)) {
-		printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id);
+	/* we require DMA support (bits 8 of word 49) */
+	if (!ata_id_has_dma(dev->id)) {
+		printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
 		goto err_out_nosup;
 		goto err_out_nosup;
 	}
 	}
 
 
@@ -1251,32 +1252,69 @@ retry:
 		if (!ata_id_is_ata(dev->id))	/* sanity check */
 		if (!ata_id_is_ata(dev->id))	/* sanity check */
 			goto err_out_nosup;
 			goto err_out_nosup;
 
 
+		/* get major version */
 		tmp = dev->id[ATA_ID_MAJOR_VER];
 		tmp = dev->id[ATA_ID_MAJOR_VER];
-		for (i = 14; i >= 1; i--)
-			if (tmp & (1 << i))
+		for (major_version = 14; major_version >= 1; major_version--)
+			if (tmp & (1 << major_version))
 				break;
 				break;
 
 
-		/* we require at least ATA-3 */
-		if (i < 3) {
-			printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id);
-			goto err_out_nosup;
-		}
+		/*
+		 * The exact sequence expected by certain pre-ATA4 drives is:
+		 * SRST RESET
+		 * IDENTIFY
+		 * INITIALIZE DEVICE PARAMETERS
+		 * anything else..
+		 * Some drives were very specific about that exact sequence.
+		 */
+		if (major_version < 4 || (!ata_id_has_lba(dev->id)))
+			ata_dev_init_params(ap, dev);
+
+		if (ata_id_has_lba(dev->id)) {
+			dev->flags |= ATA_DFLAG_LBA;
+
+			if (ata_id_has_lba48(dev->id)) {
+				dev->flags |= ATA_DFLAG_LBA48;
+				dev->n_sectors = ata_id_u64(dev->id, 100);
+			} else {
+				dev->n_sectors = ata_id_u32(dev->id, 60);
+			}
+
+			/* print device info to dmesg */
+			printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n",
+			       ap->id, device,
+			       major_version,
+			       ata_mode_string(xfer_modes),
+			       (unsigned long long)dev->n_sectors,
+			       dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA");
+		} else { 
+			/* CHS */
+
+			/* Default translation */
+			dev->cylinders	= dev->id[1];
+			dev->heads	= dev->id[3];
+			dev->sectors	= dev->id[6];
+			dev->n_sectors	= dev->cylinders * dev->heads * dev->sectors;
+
+			if (ata_id_current_chs_valid(dev->id)) {
+				/* Current CHS translation is valid. */
+				dev->cylinders = dev->id[54];
+				dev->heads     = dev->id[55];
+				dev->sectors   = dev->id[56];
+				
+				dev->n_sectors = ata_id_u32(dev->id, 57);
+			}
+
+			/* print device info to dmesg */
+			printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n",
+			       ap->id, device,
+			       major_version,
+			       ata_mode_string(xfer_modes),
+			       (unsigned long long)dev->n_sectors,
+			       (int)dev->cylinders, (int)dev->heads, (int)dev->sectors);
 
 
-		if (ata_id_has_lba48(dev->id)) {
-			dev->flags |= ATA_DFLAG_LBA48;
-			dev->n_sectors = ata_id_u64(dev->id, 100);
-		} else {
-			dev->n_sectors = ata_id_u32(dev->id, 60);
 		}
 		}
 
 
 		ap->host->max_cmd_len = 16;
 		ap->host->max_cmd_len = 16;
-
-		/* print device info to dmesg */
-		printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors:%s\n",
-		       ap->id, device,
-		       ata_mode_string(xfer_modes),
-		       (unsigned long long)dev->n_sectors,
-		       dev->flags & ATA_DFLAG_LBA48 ? " lba48" : "");
 	}
 	}
 
 
 	/* ATAPI-specific feature tests */
 	/* ATAPI-specific feature tests */
@@ -2143,6 +2181,54 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
 	DPRINTK("EXIT\n");
 	DPRINTK("EXIT\n");
 }
 }
 
 
+/**
+ *	ata_dev_init_params - Issue INIT DEV PARAMS command
+ *	@ap: Port associated with device @dev
+ *	@dev: Device to which command will be sent
+ *
+ *	LOCKING:
+ */
+
+static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
+{
+	DECLARE_COMPLETION(wait);
+	struct ata_queued_cmd *qc;
+	int rc;
+	unsigned long flags;
+	u16 sectors = dev->id[6];
+	u16 heads   = dev->id[3];
+
+	/* Number of sectors per track 1-255. Number of heads 1-16 */
+	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
+		return;
+
+	/* set up init dev params taskfile */
+	DPRINTK("init dev params \n");
+
+	qc = ata_qc_new_init(ap, dev);
+	BUG_ON(qc == NULL);
+
+	qc->tf.command = ATA_CMD_INIT_DEV_PARAMS;
+	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	qc->tf.protocol = ATA_PROT_NODATA;
+	qc->tf.nsect = sectors;
+	qc->tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
+
+	qc->waiting = &wait;
+	qc->complete_fn = ata_qc_complete_noop;
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	rc = ata_qc_issue(qc);
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	if (rc)
+		ata_port_disable(ap);
+	else
+		wait_for_completion(&wait);
+
+	DPRINTK("EXIT\n");
+}
+
 /**
 /**
  *	ata_sg_clean - Unmap DMA memory associated with command
  *	ata_sg_clean - Unmap DMA memory associated with command
  *	@qc: Command containing DMA memory to be released
  *	@qc: Command containing DMA memory to be released
@@ -2507,20 +2593,20 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
 static unsigned long ata_pio_poll(struct ata_port *ap)
 static unsigned long ata_pio_poll(struct ata_port *ap)
 {
 {
 	u8 status;
 	u8 status;
-	unsigned int poll_state = PIO_ST_UNKNOWN;
-	unsigned int reg_state = PIO_ST_UNKNOWN;
-	const unsigned int tmout_state = PIO_ST_TMOUT;
-
-	switch (ap->pio_task_state) {
-	case PIO_ST:
-	case PIO_ST_POLL:
-		poll_state = PIO_ST_POLL;
-		reg_state = PIO_ST;
+	unsigned int poll_state = HSM_ST_UNKNOWN;
+	unsigned int reg_state = HSM_ST_UNKNOWN;
+	const unsigned int tmout_state = HSM_ST_TMOUT;
+
+	switch (ap->hsm_task_state) {
+	case HSM_ST:
+	case HSM_ST_POLL:
+		poll_state = HSM_ST_POLL;
+		reg_state = HSM_ST;
 		break;
 		break;
-	case PIO_ST_LAST:
-	case PIO_ST_LAST_POLL:
-		poll_state = PIO_ST_LAST_POLL;
-		reg_state = PIO_ST_LAST;
+	case HSM_ST_LAST:
+	case HSM_ST_LAST_POLL:
+		poll_state = HSM_ST_LAST_POLL;
+		reg_state = HSM_ST_LAST;
 		break;
 		break;
 	default:
 	default:
 		BUG();
 		BUG();
@@ -2530,14 +2616,14 @@ static unsigned long ata_pio_poll(struct ata_port *ap)
 	status = ata_chk_status(ap);
 	status = ata_chk_status(ap);
 	if (status & ATA_BUSY) {
 	if (status & ATA_BUSY) {
 		if (time_after(jiffies, ap->pio_task_timeout)) {
 		if (time_after(jiffies, ap->pio_task_timeout)) {
-			ap->pio_task_state = tmout_state;
+			ap->hsm_task_state = tmout_state;
 			return 0;
 			return 0;
 		}
 		}
-		ap->pio_task_state = poll_state;
+		ap->hsm_task_state = poll_state;
 		return ATA_SHORT_PAUSE;
 		return ATA_SHORT_PAUSE;
 	}
 	}
 
 
-	ap->pio_task_state = reg_state;
+	ap->hsm_task_state = reg_state;
 	return 0;
 	return 0;
 }
 }
 
 
@@ -2562,14 +2648,14 @@ static int ata_pio_complete (struct ata_port *ap)
 	 * we enter, BSY will be cleared in a chk-status or two.  If not,
 	 * we enter, BSY will be cleared in a chk-status or two.  If not,
 	 * the drive is probably seeking or something.  Snooze for a couple
 	 * the drive is probably seeking or something.  Snooze for a couple
 	 * msecs, then chk-status again.  If still busy, fall back to
 	 * msecs, then chk-status again.  If still busy, fall back to
-	 * PIO_ST_POLL state.
+	 * HSM_ST_POLL state.
 	 */
 	 */
 	drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
 	drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
 	if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
 	if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
 		msleep(2);
 		msleep(2);
 		drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
 		drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
 		if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
 		if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
-			ap->pio_task_state = PIO_ST_LAST_POLL;
+			ap->hsm_task_state = HSM_ST_LAST_POLL;
 			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
 			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
 			return 0;
 			return 0;
 		}
 		}
@@ -2577,14 +2663,14 @@ static int ata_pio_complete (struct ata_port *ap)
 
 
 	drv_stat = ata_wait_idle(ap);
 	drv_stat = ata_wait_idle(ap);
 	if (!ata_ok(drv_stat)) {
 	if (!ata_ok(drv_stat)) {
-		ap->pio_task_state = PIO_ST_ERR;
+		ap->hsm_task_state = HSM_ST_ERR;
 		return 0;
 		return 0;
 	}
 	}
 
 
 	qc = ata_qc_from_tag(ap, ap->active_tag);
 	qc = ata_qc_from_tag(ap, ap->active_tag);
 	assert(qc != NULL);
 	assert(qc != NULL);
 
 
-	ap->pio_task_state = PIO_ST_IDLE;
+	ap->hsm_task_state = HSM_ST_IDLE;
 
 
 	ata_poll_qc_complete(qc, drv_stat);
 	ata_poll_qc_complete(qc, drv_stat);
 
 
@@ -2744,7 +2830,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 	unsigned char *buf;
 	unsigned char *buf;
 
 
 	if (qc->cursect == (qc->nsect - 1))
 	if (qc->cursect == (qc->nsect - 1))
-		ap->pio_task_state = PIO_ST_LAST;
+		ap->hsm_task_state = HSM_ST_LAST;
 
 
 	page = sg[qc->cursg].page;
 	page = sg[qc->cursg].page;
 	offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
 	offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
@@ -2794,7 +2880,7 @@ static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
 	unsigned int offset, count;
 	unsigned int offset, count;
 
 
 	if (qc->curbytes + bytes >= qc->nbytes)
 	if (qc->curbytes + bytes >= qc->nbytes)
-		ap->pio_task_state = PIO_ST_LAST;
+		ap->hsm_task_state = HSM_ST_LAST;
 
 
 next_sg:
 next_sg:
 	if (unlikely(qc->cursg >= qc->n_elem)) {
 	if (unlikely(qc->cursg >= qc->n_elem)) {
@@ -2816,7 +2902,7 @@ next_sg:
 		for (i = 0; i < words; i++)
 		for (i = 0; i < words; i++)
 			ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
 			ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
 
 
-		ap->pio_task_state = PIO_ST_LAST;
+		ap->hsm_task_state = HSM_ST_LAST;
 		return;
 		return;
 	}
 	}
 
 
@@ -2897,7 +2983,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
 err_out:
 err_out:
 	printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
 	printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
 	      ap->id, dev->devno);
 	      ap->id, dev->devno);
-	ap->pio_task_state = PIO_ST_ERR;
+	ap->hsm_task_state = HSM_ST_ERR;
 }
 }
 
 
 /**
 /**
@@ -2919,14 +3005,14 @@ static void ata_pio_block(struct ata_port *ap)
 	 * a chk-status or two.  If not, the drive is probably seeking
 	 * a chk-status or two.  If not, the drive is probably seeking
 	 * or something.  Snooze for a couple msecs, then
 	 * or something.  Snooze for a couple msecs, then
 	 * chk-status again.  If still busy, fall back to
 	 * chk-status again.  If still busy, fall back to
-	 * PIO_ST_POLL state.
+	 * HSM_ST_POLL state.
 	 */
 	 */
 	status = ata_busy_wait(ap, ATA_BUSY, 5);
 	status = ata_busy_wait(ap, ATA_BUSY, 5);
 	if (status & ATA_BUSY) {
 	if (status & ATA_BUSY) {
 		msleep(2);
 		msleep(2);
 		status = ata_busy_wait(ap, ATA_BUSY, 10);
 		status = ata_busy_wait(ap, ATA_BUSY, 10);
 		if (status & ATA_BUSY) {
 		if (status & ATA_BUSY) {
-			ap->pio_task_state = PIO_ST_POLL;
+			ap->hsm_task_state = HSM_ST_POLL;
 			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
 			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
 			return;
 			return;
 		}
 		}
@@ -2938,7 +3024,7 @@ static void ata_pio_block(struct ata_port *ap)
 	if (is_atapi_taskfile(&qc->tf)) {
 	if (is_atapi_taskfile(&qc->tf)) {
 		/* no more data to transfer or unsupported ATAPI command */
 		/* no more data to transfer or unsupported ATAPI command */
 		if ((status & ATA_DRQ) == 0) {
 		if ((status & ATA_DRQ) == 0) {
-			ap->pio_task_state = PIO_ST_LAST;
+			ap->hsm_task_state = HSM_ST_LAST;
 			return;
 			return;
 		}
 		}
 
 
@@ -2946,7 +3032,7 @@ static void ata_pio_block(struct ata_port *ap)
 	} else {
 	} else {
 		/* handle BSY=0, DRQ=0 as error */
 		/* handle BSY=0, DRQ=0 as error */
 		if ((status & ATA_DRQ) == 0) {
 		if ((status & ATA_DRQ) == 0) {
-			ap->pio_task_state = PIO_ST_ERR;
+			ap->hsm_task_state = HSM_ST_ERR;
 			return;
 			return;
 		}
 		}
 
 
@@ -2966,7 +3052,7 @@ static void ata_pio_error(struct ata_port *ap)
 	printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",
 	printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",
 	       ap->id, drv_stat);
 	       ap->id, drv_stat);
 
 
-	ap->pio_task_state = PIO_ST_IDLE;
+	ap->hsm_task_state = HSM_ST_IDLE;
 
 
 	ata_poll_qc_complete(qc, drv_stat | ATA_ERR);
 	ata_poll_qc_complete(qc, drv_stat | ATA_ERR);
 }
 }
@@ -2981,25 +3067,25 @@ fsm_start:
 	timeout = 0;
 	timeout = 0;
 	qc_completed = 0;
 	qc_completed = 0;
 
 
-	switch (ap->pio_task_state) {
-	case PIO_ST_IDLE:
+	switch (ap->hsm_task_state) {
+	case HSM_ST_IDLE:
 		return;
 		return;
 
 
-	case PIO_ST:
+	case HSM_ST:
 		ata_pio_block(ap);
 		ata_pio_block(ap);
 		break;
 		break;
 
 
-	case PIO_ST_LAST:
+	case HSM_ST_LAST:
 		qc_completed = ata_pio_complete(ap);
 		qc_completed = ata_pio_complete(ap);
 		break;
 		break;
 
 
-	case PIO_ST_POLL:
-	case PIO_ST_LAST_POLL:
+	case HSM_ST_POLL:
+	case HSM_ST_LAST_POLL:
 		timeout = ata_pio_poll(ap);
 		timeout = ata_pio_poll(ap);
 		break;
 		break;
 
 
-	case PIO_ST_TMOUT:
-	case PIO_ST_ERR:
+	case HSM_ST_TMOUT:
+	case HSM_ST_ERR:
 		ata_pio_error(ap);
 		ata_pio_error(ap);
 		return;
 		return;
 	}
 	}
@@ -3010,52 +3096,6 @@ fsm_start:
 		goto fsm_start;
 		goto fsm_start;
 }
 }
 
 
-static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
-				struct scsi_cmnd *cmd)
-{
-	DECLARE_COMPLETION(wait);
-	struct ata_queued_cmd *qc;
-	unsigned long flags;
-	int rc;
-
-	DPRINTK("ATAPI request sense\n");
-
-	qc = ata_qc_new_init(ap, dev);
-	BUG_ON(qc == NULL);
-
-	/* FIXME: is this needed? */
-	memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
-
-	ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
-	qc->dma_dir = DMA_FROM_DEVICE;
-
-	memset(&qc->cdb, 0, ap->cdb_len);
-	qc->cdb[0] = REQUEST_SENSE;
-	qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
-
-	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
-	qc->tf.command = ATA_CMD_PACKET;
-
-	qc->tf.protocol = ATA_PROT_ATAPI;
-	qc->tf.lbam = (8 * 1024) & 0xff;
-	qc->tf.lbah = (8 * 1024) >> 8;
-	qc->nbytes = SCSI_SENSE_BUFFERSIZE;
-
-	qc->waiting = &wait;
-	qc->complete_fn = ata_qc_complete_noop;
-
-	spin_lock_irqsave(&ap->host_set->lock, flags);
-	rc = ata_qc_issue(qc);
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
-
-	if (rc)
-		ata_port_disable(ap);
-	else
-		wait_for_completion(&wait);
-
-	DPRINTK("EXIT\n");
-}
-
 /**
 /**
  *	ata_qc_timeout - Handle timeout of queued command
  *	ata_qc_timeout - Handle timeout of queued command
  *	@qc: Command that timed out
  *	@qc: Command that timed out
@@ -3173,14 +3213,14 @@ void ata_eng_timeout(struct ata_port *ap)
 	DPRINTK("ENTER\n");
 	DPRINTK("ENTER\n");
 
 
 	qc = ata_qc_from_tag(ap, ap->active_tag);
 	qc = ata_qc_from_tag(ap, ap->active_tag);
-	if (!qc) {
+	if (qc)
+		ata_qc_timeout(qc);
+	else {
 		printk(KERN_ERR "ata%u: BUG: timeout without command\n",
 		printk(KERN_ERR "ata%u: BUG: timeout without command\n",
 		       ap->id);
 		       ap->id);
 		goto out;
 		goto out;
 	}
 	}
 
 
-	ata_qc_timeout(qc);
-
 out:
 out:
 	DPRINTK("EXIT\n");
 	DPRINTK("EXIT\n");
 }
 }
@@ -3238,14 +3278,18 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
 
 
 		ata_tf_init(ap, &qc->tf, dev->devno);
 		ata_tf_init(ap, &qc->tf, dev->devno);
 
 
-		if (dev->flags & ATA_DFLAG_LBA48)
-			qc->tf.flags |= ATA_TFLAG_LBA48;
+		if (dev->flags & ATA_DFLAG_LBA) {
+			qc->tf.flags |= ATA_TFLAG_LBA;
+
+			if (dev->flags & ATA_DFLAG_LBA48)
+				qc->tf.flags |= ATA_TFLAG_LBA48;
+		}
 	}
 	}
 
 
 	return qc;
 	return qc;
 }
 }
 
 
-static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
+int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
 {
 {
 	return 0;
 	return 0;
 }
 }
@@ -3442,7 +3486,7 @@ int ata_qc_issue_prot(struct ata_queued_cmd *qc)
 	case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
 	case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
 		ata_qc_set_polling(qc);
 		ata_qc_set_polling(qc);
 		ata_tf_to_host_nolock(ap, &qc->tf);
 		ata_tf_to_host_nolock(ap, &qc->tf);
-		ap->pio_task_state = PIO_ST;
+		ap->hsm_task_state = HSM_ST;
 		queue_work(ata_wq, &ap->pio_task);
 		queue_work(ata_wq, &ap->pio_task);
 		break;
 		break;
 
 
@@ -3668,7 +3712,7 @@ u8 ata_bmdma_status(struct ata_port *ap)
 		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
 		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
 		host_stat = readb(mmio + ATA_DMA_STATUS);
 		host_stat = readb(mmio + ATA_DMA_STATUS);
 	} else
 	} else
-	host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+		host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
 	return host_stat;
 	return host_stat;
 }
 }
 
 
@@ -3888,7 +3932,7 @@ static void atapi_packet_task(void *_data)
 		ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
 		ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
 
 
 		/* PIO commands are handled by polling */
 		/* PIO commands are handled by polling */
-		ap->pio_task_state = PIO_ST;
+		ap->hsm_task_state = HSM_ST;
 		queue_work(ata_wq, &ap->pio_task);
 		queue_work(ata_wq, &ap->pio_task);
 	}
 	}
 
 
@@ -4202,7 +4246,7 @@ int ata_device_add(struct ata_probe_ent *ent)
 	for (i = 0; i < count; i++) {
 	for (i = 0; i < count; i++) {
 		struct ata_port *ap = host_set->ports[i];
 		struct ata_port *ap = host_set->ports[i];
 
 
-		scsi_scan_host(ap->host);
+		ata_scsi_scan_host(ap);
 	}
 	}
 
 
 	dev_set_drvdata(dev, host_set);
 	dev_set_drvdata(dev, host_set);
@@ -4362,85 +4406,87 @@ void ata_pci_host_stop (struct ata_host_set *host_set)
  *	ata_pci_init_native_mode - Initialize native-mode driver
  *	ata_pci_init_native_mode - Initialize native-mode driver
  *	@pdev:  pci device to be initialized
  *	@pdev:  pci device to be initialized
  *	@port:  array[2] of pointers to port info structures.
  *	@port:  array[2] of pointers to port info structures.
+ *	@ports: bitmap of ports present
  *
  *
  *	Utility function which allocates and initializes an
  *	Utility function which allocates and initializes an
  *	ata_probe_ent structure for a standard dual-port
  *	ata_probe_ent structure for a standard dual-port
  *	PIO-based IDE controller.  The returned ata_probe_ent
  *	PIO-based IDE controller.  The returned ata_probe_ent
  *	structure can be passed to ata_device_add().  The returned
  *	structure can be passed to ata_device_add().  The returned
  *	ata_probe_ent structure should then be freed with kfree().
  *	ata_probe_ent structure should then be freed with kfree().
+ *
+ *	The caller need only pass the address of the primary port, the
+ *	secondary will be deduced automatically. If the device has non
+ *	standard secondary port mappings this function can be called twice,
+ *	once for each interface.
  */
  */
 
 
 struct ata_probe_ent *
 struct ata_probe_ent *
-ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port)
+ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
 {
 {
 	struct ata_probe_ent *probe_ent =
 	struct ata_probe_ent *probe_ent =
 		ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
 		ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
+	int p = 0;
+
 	if (!probe_ent)
 	if (!probe_ent)
 		return NULL;
 		return NULL;
 
 
-	probe_ent->n_ports = 2;
 	probe_ent->irq = pdev->irq;
 	probe_ent->irq = pdev->irq;
 	probe_ent->irq_flags = SA_SHIRQ;
 	probe_ent->irq_flags = SA_SHIRQ;
 
 
-	probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0);
-	probe_ent->port[0].altstatus_addr =
-	probe_ent->port[0].ctl_addr =
-		pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
-	probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
-
-	probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2);
-	probe_ent->port[1].altstatus_addr =
-	probe_ent->port[1].ctl_addr =
-		pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
-	probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;
+	if (ports & ATA_PORT_PRIMARY) {
+		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
+		probe_ent->port[p].altstatus_addr =
+		probe_ent->port[p].ctl_addr =
+			pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
+		probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
+		ata_std_ports(&probe_ent->port[p]);
+		p++;
+	}
 
 
-	ata_std_ports(&probe_ent->port[0]);
-	ata_std_ports(&probe_ent->port[1]);
+	if (ports & ATA_PORT_SECONDARY) {
+		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
+		probe_ent->port[p].altstatus_addr =
+		probe_ent->port[p].ctl_addr =
+			pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
+		probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
+		ata_std_ports(&probe_ent->port[p]);
+		p++;
+	}
 
 
+	probe_ent->n_ports = p;
 	return probe_ent;
 	return probe_ent;
 }
 }
 
 
-static struct ata_probe_ent *
-ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
-    struct ata_probe_ent **ppe2)
+static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info **port, int port_num)
 {
 {
-	struct ata_probe_ent *probe_ent, *probe_ent2;
+	struct ata_probe_ent *probe_ent;
 
 
 	probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
 	probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
 	if (!probe_ent)
 	if (!probe_ent)
 		return NULL;
 		return NULL;
-	probe_ent2 = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[1]);
-	if (!probe_ent2) {
-		kfree(probe_ent);
-		return NULL;
-	}
 
 
-	probe_ent->n_ports = 1;
-	probe_ent->irq = 14;
-
-	probe_ent->hard_port_no = 0;
+	
 	probe_ent->legacy_mode = 1;
 	probe_ent->legacy_mode = 1;
-
-	probe_ent2->n_ports = 1;
-	probe_ent2->irq = 15;
-
-	probe_ent2->hard_port_no = 1;
-	probe_ent2->legacy_mode = 1;
-
-	probe_ent->port[0].cmd_addr = 0x1f0;
-	probe_ent->port[0].altstatus_addr =
-	probe_ent->port[0].ctl_addr = 0x3f6;
-	probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
-
-	probe_ent2->port[0].cmd_addr = 0x170;
-	probe_ent2->port[0].altstatus_addr =
-	probe_ent2->port[0].ctl_addr = 0x376;
-	probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8;
-
+	probe_ent->n_ports = 1;
+	probe_ent->hard_port_no = port_num;
+
+	switch(port_num)
+	{
+		case 0:
+			probe_ent->irq = 14;
+			probe_ent->port[0].cmd_addr = 0x1f0;
+			probe_ent->port[0].altstatus_addr =
+			probe_ent->port[0].ctl_addr = 0x3f6;
+			break;
+		case 1:
+			probe_ent->irq = 15;
+			probe_ent->port[0].cmd_addr = 0x170;
+			probe_ent->port[0].altstatus_addr =
+			probe_ent->port[0].ctl_addr = 0x376;
+			break;
+	}
+	probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8 * port_num;
 	ata_std_ports(&probe_ent->port[0]);
 	ata_std_ports(&probe_ent->port[0]);
-	ata_std_ports(&probe_ent2->port[0]);
-
-	*ppe2 = probe_ent2;
 	return probe_ent;
 	return probe_ent;
 }
 }
 
 
@@ -4469,7 +4515,7 @@ ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
 int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
 int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
 		      unsigned int n_ports)
 		      unsigned int n_ports)
 {
 {
-	struct ata_probe_ent *probe_ent, *probe_ent2 = NULL;
+	struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
 	struct ata_port_info *port[2];
 	struct ata_port_info *port[2];
 	u8 tmp8, mask;
 	u8 tmp8, mask;
 	unsigned int legacy_mode = 0;
 	unsigned int legacy_mode = 0;
@@ -4486,7 +4532,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
 
 
 	if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
 	if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
 	    && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
 	    && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
-		/* TODO: support transitioning to native mode? */
+		/* TODO: What if one channel is in native mode ... */
 		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
 		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
 		mask = (1 << 2) | (1 << 0);
 		mask = (1 << 2) | (1 << 0);
 		if ((tmp8 & mask) != mask)
 		if ((tmp8 & mask) != mask)
@@ -4494,11 +4540,20 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
 	}
 	}
 
 
 	/* FIXME... */
 	/* FIXME... */
-	if ((!legacy_mode) && (n_ports > 1)) {
-		printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n");
-		return -EINVAL;
+	if ((!legacy_mode) && (n_ports > 2)) {
+		printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
+		n_ports = 2;
+		/* For now */
 	}
 	}
 
 
+	/* FIXME: Really for ATA it isn't safe because the device may be
+	   multi-purpose and we want to leave it alone if it was already
+	   enabled. Secondly for shared use as Arjan says we want refcounting
+	   
+	   Checking dev->is_enabled is insufficient as this is not set at
+	   boot for the primary video which is BIOS enabled
+         */
+         
 	rc = pci_enable_device(pdev);
 	rc = pci_enable_device(pdev);
 	if (rc)
 	if (rc)
 		return rc;
 		return rc;
@@ -4509,6 +4564,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
 		goto err_out;
 		goto err_out;
 	}
 	}
 
 
+	/* FIXME: Should use platform specific mappers for legacy port ranges */
 	if (legacy_mode) {
 	if (legacy_mode) {
 		if (!request_region(0x1f0, 8, "libata")) {
 		if (!request_region(0x1f0, 8, "libata")) {
 			struct resource *conflict, res;
 			struct resource *conflict, res;
@@ -4553,10 +4609,17 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
 		goto err_out_regions;
 		goto err_out_regions;
 
 
 	if (legacy_mode) {
 	if (legacy_mode) {
-		probe_ent = ata_pci_init_legacy_mode(pdev, port, &probe_ent2);
-	} else
-		probe_ent = ata_pci_init_native_mode(pdev, port);
-	if (!probe_ent) {
+		if (legacy_mode & (1 << 0))
+			probe_ent = ata_pci_init_legacy_port(pdev, port, 0);
+		if (legacy_mode & (1 << 1))
+			probe_ent2 = ata_pci_init_legacy_port(pdev, port, 1);
+	} else {
+		if (n_ports == 2)
+			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
+		else
+			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
+	}
+	if (!probe_ent && !probe_ent2) {
 		rc = -ENOMEM;
 		rc = -ENOMEM;
 		goto err_out_regions;
 		goto err_out_regions;
 	}
 	}
@@ -4668,6 +4731,27 @@ static void __exit ata_exit(void)
 module_init(ata_init);
 module_init(ata_init);
 module_exit(ata_exit);
 module_exit(ata_exit);
 
 
+static unsigned long ratelimit_time;
+static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
+
+int ata_ratelimit(void)
+{
+	int rc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ata_ratelimit_lock, flags);
+
+	if (time_after(jiffies, ratelimit_time)) {
+		rc = 1;
+		ratelimit_time = jiffies + (HZ/5);
+	} else
+		rc = 0;
+
+	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
+
+	return rc;
+}
+
 /*
 /*
  * libata is essentially a library of internal helper functions for
  * libata is essentially a library of internal helper functions for
  * low-level ATA host controller drivers.  As such, the API/ABI is
  * low-level ATA host controller drivers.  As such, the API/ABI is
@@ -4709,6 +4793,7 @@ EXPORT_SYMBOL_GPL(sata_phy_reset);
 EXPORT_SYMBOL_GPL(__sata_phy_reset);
 EXPORT_SYMBOL_GPL(__sata_phy_reset);
 EXPORT_SYMBOL_GPL(ata_bus_reset);
 EXPORT_SYMBOL_GPL(ata_bus_reset);
 EXPORT_SYMBOL_GPL(ata_port_disable);
 EXPORT_SYMBOL_GPL(ata_port_disable);
+EXPORT_SYMBOL_GPL(ata_ratelimit);
 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
 EXPORT_SYMBOL_GPL(ata_scsi_error);
 EXPORT_SYMBOL_GPL(ata_scsi_error);
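The new ata_ratelimit() above permits roughly one message per HZ/5 jiffies (200 ms) and is what keeps the ahci interrupt warnings earlier in this series from flooding the log. A userspace analogue of the same pattern, assuming CLOCK_MONOTONIC and ignoring the spinlock the kernel version takes (so not thread-safe):

#include <stdio.h>
#include <time.h>

/* Allow at most one event per 200 ms window, mirroring the jiffies +
 * time_after() logic of ata_ratelimit(). */
static int ratelimit_ok(void)
{
	static struct timespec next;          /* zero-initialized: first call passes */
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	if (now.tv_sec > next.tv_sec ||
	    (now.tv_sec == next.tv_sec && now.tv_nsec >= next.tv_nsec)) {
		next = now;
		next.tv_nsec += 200 * 1000 * 1000;          /* HZ/5 == 200 ms */
		if (next.tv_nsec >= 1000000000L) {
			next.tv_sec++;
			next.tv_nsec -= 1000000000L;
		}
		return 1;
	}
	return 0;
}

int main(void)
{
	int i, printed = 0;

	for (i = 0; i < 1000000; i++)
		if (ratelimit_ok())
			printed++;
	printf("allowed %d messages from a tight loop\n", printed);
	return 0;
}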

+ 502 - 187
drivers/scsi/libata-scsi.c

@@ -49,6 +49,14 @@ static struct ata_device *
 ata_scsi_find_dev(struct ata_port *ap, struct scsi_device *scsidev);
 ata_scsi_find_dev(struct ata_port *ap, struct scsi_device *scsidev);
 
 
 
 
+static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
+				   void (*done)(struct scsi_cmnd *))
+{
+	ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
+	/* "Invalid field in cbd" */
+	done(cmd);
+}
+
 /**
 /**
  *	ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
  *	ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
  *	@sdev: SCSI device for which BIOS geometry is to be determined
  *	@sdev: SCSI device for which BIOS geometry is to be determined
@@ -182,7 +190,6 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
 {
 {
 	struct scsi_cmnd *cmd = qc->scsicmd;
 	struct scsi_cmnd *cmd = qc->scsicmd;
 	u8 err = 0;
 	u8 err = 0;
-	unsigned char *sb = cmd->sense_buffer;
 	/* Based on the 3ware driver translation table */
 	/* Based on the 3ware driver translation table */
 	static unsigned char sense_table[][4] = {
 	static unsigned char sense_table[][4] = {
 		/* BBD|ECC|ID|MAR */
 		/* BBD|ECC|ID|MAR */
@@ -225,8 +232,6 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
 	};
 	};
 	int i = 0;
 	int i = 0;
 
 
-	cmd->result = SAM_STAT_CHECK_CONDITION;
-
 	/*
 	/*
 	 *	Is this an error we can process/parse
 	 *	Is this an error we can process/parse
 	 */
 	 */
@@ -281,11 +286,9 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
 		/* Look for best matches first */
 		/* Look for best matches first */
 		if((sense_table[i][0] & err) == sense_table[i][0])
 		if((sense_table[i][0] & err) == sense_table[i][0])
 		{
 		{
-			sb[0] = 0x70;
-			sb[2] = sense_table[i][1];
-			sb[7] = 0x0a;
-			sb[12] = sense_table[i][2];
-			sb[13] = sense_table[i][3];
+			ata_scsi_set_sense(cmd, sense_table[i][1] /* sk */,
+					   sense_table[i][2] /* asc */,
+					   sense_table[i][3] /* ascq */ );
 			return;
 			return;
 		}
 		}
 		i++;
 		i++;
@@ -300,11 +303,9 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
 	{
 	{
 		if(stat_table[i][0] & drv_stat)
 		if(stat_table[i][0] & drv_stat)
 		{
 		{
-			sb[0] = 0x70;
-			sb[2] = stat_table[i][1];
-			sb[7] = 0x0a;
-			sb[12] = stat_table[i][2];
-			sb[13] = stat_table[i][3];
+			ata_scsi_set_sense(cmd, sense_table[i][1] /* sk */,
+					   sense_table[i][2] /* asc */,
+					   sense_table[i][3] /* ascq */ );
 			return;
 			return;
 		}
 		}
 		i++;
 		i++;
@@ -313,15 +314,12 @@ void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat)
 	printk(KERN_ERR "ata%u: called with no error (%02X)!\n", qc->ap->id, drv_stat);
 	printk(KERN_ERR "ata%u: called with no error (%02X)!\n", qc->ap->id, drv_stat);
 	/* additional-sense-code[-qualifier] */
 	/* additional-sense-code[-qualifier] */
 
 
-	sb[0] = 0x70;
-	sb[2] = MEDIUM_ERROR;
-	sb[7] = 0x0A;
 	if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
 	if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
-		sb[12] = 0x11; /* "unrecovered read error" */
-		sb[13] = 0x04;
+		ata_scsi_set_sense(cmd, MEDIUM_ERROR, 0x11, 0x4);
+		/* "unrecovered read error" */
 	} else {
 	} else {
-		sb[12] = 0x0C; /* "write error -             */
-		sb[13] = 0x02; /*  auto-reallocation failed" */
+		ata_scsi_set_sense(cmd, MEDIUM_ERROR, 0xc, 0x2);
+		/* "write error - auto-reallocation failed" */
 	}
 	}
 }
 }
 
 
@@ -440,15 +438,26 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
 		;	/* ignore IMMED bit, violates sat-r05 */
 		;	/* ignore IMMED bit, violates sat-r05 */
 	}
 	}
 	if (scsicmd[4] & 0x2)
 	if (scsicmd[4] & 0x2)
-		return 1;	/* LOEJ bit set not supported */
+		goto invalid_fld;       /* LOEJ bit set not supported */
 	if (((scsicmd[4] >> 4) & 0xf) != 0)
 	if (((scsicmd[4] >> 4) & 0xf) != 0)
-		return 1;	/* power conditions not supported */
+		goto invalid_fld;       /* power conditions not supported */
 	if (scsicmd[4] & 0x1) {
 	if (scsicmd[4] & 0x1) {
 		tf->nsect = 1;	/* 1 sector, lba=0 */
 		tf->nsect = 1;	/* 1 sector, lba=0 */
-		tf->lbah = 0x0;
-		tf->lbam = 0x0;
-		tf->lbal = 0x0;
-		tf->device |= ATA_LBA;
+
+		if (qc->dev->flags & ATA_DFLAG_LBA) {
+			qc->tf.flags |= ATA_TFLAG_LBA;
+
+			tf->lbah = 0x0;
+			tf->lbam = 0x0;
+			tf->lbal = 0x0;
+			tf->device |= ATA_LBA;
+		} else {
+			/* CHS */
+			tf->lbal = 0x1; /* sect */
+			tf->lbam = 0x0; /* cyl low */
+			tf->lbah = 0x0; /* cyl high */
+		}
+
 		tf->command = ATA_CMD_VERIFY;	/* READ VERIFY */
 		tf->command = ATA_CMD_VERIFY;	/* READ VERIFY */
 	} else {
 	} else {
 		tf->nsect = 0;	/* time period value (0 implies now) */
 		tf->nsect = 0;	/* time period value (0 implies now) */
@@ -463,6 +472,11 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc,
 	 */
 	 */
 
 
 	return 0;
 	return 0;
+
+invalid_fld:
+	ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
+	/* "Invalid field in cbd" */
+	return 1;
 }
 }
 
 
 
 
@@ -497,6 +511,99 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
 	return 0;
 	return 0;
 }
 }
 
 
+/**
+ *	scsi_6_lba_len - Get LBA and transfer length
+ *	@scsicmd: SCSI command to translate
+ *
+ *	Calculate LBA and transfer length for 6-byte commands.
+ *
+ *	RETURNS:
+ *	@plba: the LBA
+ *	@plen: the transfer length
+ */
+
+static void scsi_6_lba_len(u8 *scsicmd, u64 *plba, u32 *plen)
+{
+	u64 lba = 0;
+	u32 len = 0;
+
+	VPRINTK("six-byte command\n");
+
+	lba |= ((u64)scsicmd[2]) << 8;
+	lba |= ((u64)scsicmd[3]);
+
+	len |= ((u32)scsicmd[4]);
+
+	*plba = lba;
+	*plen = len;
+}
+
+/**
+ *	scsi_10_lba_len - Get LBA and transfer length
+ *	@scsicmd: SCSI command to translate
+ *
+ *	Calculate LBA and transfer length for 10-byte commands.
+ *
+ *	RETURNS:
+ *	@plba: the LBA
+ *	@plen: the transfer length
+ */
+
+static void scsi_10_lba_len(u8 *scsicmd, u64 *plba, u32 *plen)
+{
+	u64 lba = 0;
+	u32 len = 0;
+
+	VPRINTK("ten-byte command\n");
+
+	lba |= ((u64)scsicmd[2]) << 24;
+	lba |= ((u64)scsicmd[3]) << 16;
+	lba |= ((u64)scsicmd[4]) << 8;
+	lba |= ((u64)scsicmd[5]);
+
+	len |= ((u32)scsicmd[7]) << 8;
+	len |= ((u32)scsicmd[8]);
+
+	*plba = lba;
+	*plen = len;
+}
+
+/**
+ *	scsi_16_lba_len - Get LBA and transfer length
+ *	@scsicmd: SCSI command to translate
+ *
+ *	Calculate LBA and transfer length for 16-byte commands.
+ *
+ *	RETURNS:
+ *	@plba: the LBA
+ *	@plen: the transfer length
+ */
+
+static void scsi_16_lba_len(u8 *scsicmd, u64 *plba, u32 *plen)
+{
+	u64 lba = 0;
+	u32 len = 0;
+
+	VPRINTK("sixteen-byte command\n");
+
+	lba |= ((u64)scsicmd[2]) << 56;
+	lba |= ((u64)scsicmd[3]) << 48;
+	lba |= ((u64)scsicmd[4]) << 40;
+	lba |= ((u64)scsicmd[5]) << 32;
+	lba |= ((u64)scsicmd[6]) << 24;
+	lba |= ((u64)scsicmd[7]) << 16;
+	lba |= ((u64)scsicmd[8]) << 8;
+	lba |= ((u64)scsicmd[9]);
+
+	len |= ((u32)scsicmd[10]) << 24;
+	len |= ((u32)scsicmd[11]) << 16;
+	len |= ((u32)scsicmd[12]) << 8;
+	len |= ((u32)scsicmd[13]);
+
+	*plba = lba;
+	*plen = len;
+}
+
 /**
 /**
  *	ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
  *	ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
  *	@qc: Storage for translated ATA taskfile
  *	@qc: Storage for translated ATA taskfile
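The three scsi_*_lba_len() helpers added above decode the big-endian LBA and transfer-length fields of 6-, 10- and 16-byte CDBs. For the 10-byte case, bytes 2-5 hold the LBA and bytes 7-8 the length. A standalone check of that decoding (a mirror of scsi_10_lba_len for illustration, not the kernel code itself):

#include <assert.h>
#include <stdint.h>

/* Decode LBA (bytes 2-5, big-endian) and length (bytes 7-8) from a
 * 10-byte SCSI CDB, as the new scsi_10_lba_len() helper does. */
static void cdb10_lba_len(const uint8_t *cdb, uint64_t *lba, uint32_t *len)
{
	*lba = ((uint64_t)cdb[2] << 24) | ((uint64_t)cdb[3] << 16) |
	       ((uint64_t)cdb[4] << 8)  |  (uint64_t)cdb[5];
	*len = ((uint32_t)cdb[7] << 8)  |  (uint32_t)cdb[8];
}

int main(void)
{
	/* READ(10) for 8 sectors starting at LBA 0x00123456 */
	const uint8_t cdb[10] = { 0x28, 0, 0x00, 0x12, 0x34, 0x56, 0, 0x00, 0x08, 0 };
	uint64_t lba;
	uint32_t len;

	cdb10_lba_len(cdb, &lba, &len);
	assert(lba == 0x123456 && len == 8);
	return 0;
}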
@@ -514,79 +621,102 @@ static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
 static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
 static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
 {
 {
 	struct ata_taskfile *tf = &qc->tf;
 	struct ata_taskfile *tf = &qc->tf;
+	struct ata_device *dev = qc->dev;
+	unsigned int lba   = tf->flags & ATA_TFLAG_LBA;
 	unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48;
 	unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48;
 	u64 dev_sectors = qc->dev->n_sectors;
 	u64 dev_sectors = qc->dev->n_sectors;
-	u64 sect = 0;
-	u32 n_sect = 0;
+	u64 block;
+	u32 n_block;
 
 
 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 	tf->protocol = ATA_PROT_NODATA;
 	tf->protocol = ATA_PROT_NODATA;
-	tf->device |= ATA_LBA;
-
-	if (scsicmd[0] == VERIFY) {
-		sect |= ((u64)scsicmd[2]) << 24;
-		sect |= ((u64)scsicmd[3]) << 16;
-		sect |= ((u64)scsicmd[4]) << 8;
-		sect |= ((u64)scsicmd[5]);
-
-		n_sect |= ((u32)scsicmd[7]) << 8;
-		n_sect |= ((u32)scsicmd[8]);
-	}
-
-	else if (scsicmd[0] == VERIFY_16) {
-		sect |= ((u64)scsicmd[2]) << 56;
-		sect |= ((u64)scsicmd[3]) << 48;
-		sect |= ((u64)scsicmd[4]) << 40;
-		sect |= ((u64)scsicmd[5]) << 32;
-		sect |= ((u64)scsicmd[6]) << 24;
-		sect |= ((u64)scsicmd[7]) << 16;
-		sect |= ((u64)scsicmd[8]) << 8;
-		sect |= ((u64)scsicmd[9]);
-
-		n_sect |= ((u32)scsicmd[10]) << 24;
-		n_sect |= ((u32)scsicmd[11]) << 16;
-		n_sect |= ((u32)scsicmd[12]) << 8;
-		n_sect |= ((u32)scsicmd[13]);
-	}
 
 
+	if (scsicmd[0] == VERIFY)
+		scsi_10_lba_len(scsicmd, &block, &n_block);
+	else if (scsicmd[0] == VERIFY_16)
+		scsi_16_lba_len(scsicmd, &block, &n_block);
 	else
 	else
-		return 1;
-
-	if (!n_sect)
-		return 1;
-	if (sect >= dev_sectors)
-		return 1;
-	if ((sect + n_sect) > dev_sectors)
-		return 1;
+		goto invalid_fld;
+
+	if (!n_block)
+		goto nothing_to_do;
+	if (block >= dev_sectors)
+		goto out_of_range;
+	if ((block + n_block) > dev_sectors)
+		goto out_of_range;
 	if (lba48) {
 	if (lba48) {
-		if (n_sect > (64 * 1024))
-			return 1;
+		if (n_block > (64 * 1024))
+			goto invalid_fld;
 	} else {
 	} else {
-		if (n_sect > 256)
-			return 1;
+		if (n_block > 256)
+			goto invalid_fld;
 	}
 	}
 
 
-	if (lba48) {
-		tf->command = ATA_CMD_VERIFY_EXT;
+	if (lba) {
+		if (lba48) {
+			tf->command = ATA_CMD_VERIFY_EXT;
+
+			tf->hob_nsect = (n_block >> 8) & 0xff;
+
+			tf->hob_lbah = (block >> 40) & 0xff;
+			tf->hob_lbam = (block >> 32) & 0xff;
+			tf->hob_lbal = (block >> 24) & 0xff;
+		} else {
+			tf->command = ATA_CMD_VERIFY;
+
+			tf->device |= (block >> 24) & 0xf;
+		}
 
 
-		tf->hob_nsect = (n_sect >> 8) & 0xff;
+		tf->nsect = n_block & 0xff;
 
 
-		tf->hob_lbah = (sect >> 40) & 0xff;
-		tf->hob_lbam = (sect >> 32) & 0xff;
-		tf->hob_lbal = (sect >> 24) & 0xff;
+		tf->lbah = (block >> 16) & 0xff;
+		tf->lbam = (block >> 8) & 0xff;
+		tf->lbal = block & 0xff;
+
+		tf->device |= ATA_LBA;
 	} else {
 	} else {
+		/* CHS */
+		u32 sect, head, cyl, track;
+
+		/* Convert LBA to CHS */
+		track = (u32)block / dev->sectors;
+		cyl   = track / dev->heads;
+		head  = track % dev->heads;
+		sect  = (u32)block % dev->sectors + 1;
+
+		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
+			(u32)block, track, cyl, head, sect);
+		
+		/* Check whether the converted CHS can fit. 
+		   Cylinder: 0-65535 
+		   Head: 0-15
+		   Sector: 1-255*/
+		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect)) 
+			goto out_of_range;
+		
 		tf->command = ATA_CMD_VERIFY;
 		tf->command = ATA_CMD_VERIFY;
-
-		tf->device |= (sect >> 24) & 0xf;
+		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
+		tf->lbal = sect;
+		tf->lbam = cyl;
+		tf->lbah = cyl >> 8;
+		tf->device |= head;
 	}
 	}
 
 
-	tf->nsect = n_sect & 0xff;
+	return 0;
 
 
-	tf->lbah = (sect >> 16) & 0xff;
-	tf->lbam = (sect >> 8) & 0xff;
-	tf->lbal = sect & 0xff;
+invalid_fld:
+	ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
+	/* "Invalid field in cbd" */
+	return 1;
 
 
-	return 0;
+out_of_range:
+	ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
+	/* "Logical Block Address out of range" */
+	return 1;
+
+nothing_to_do:
+	qc->scsicmd->result = SAM_STAT_GOOD;
+	return 1;
 }
 }
 
 
 /**
 /**
@@ -612,11 +742,14 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
 static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
 static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
 {
 {
 	struct ata_taskfile *tf = &qc->tf;
 	struct ata_taskfile *tf = &qc->tf;
+	struct ata_device *dev = qc->dev;
+	unsigned int lba   = tf->flags & ATA_TFLAG_LBA;
 	unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48;
 	unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48;
+	u64 block;
+	u32 n_block;
 
 
 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 	tf->protocol = qc->dev->xfer_protocol;
 	tf->protocol = qc->dev->xfer_protocol;
-	tf->device |= ATA_LBA;
 
 
 	if (scsicmd[0] == READ_10 || scsicmd[0] == READ_6 ||
 	if (scsicmd[0] == READ_10 || scsicmd[0] == READ_6 ||
 	    scsicmd[0] == READ_16) {
 	    scsicmd[0] == READ_16) {
@@ -626,89 +759,115 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd)
 		tf->flags |= ATA_TFLAG_WRITE;
 		tf->flags |= ATA_TFLAG_WRITE;
 	}
 	}
 
 
-	if (scsicmd[0] == READ_10 || scsicmd[0] == WRITE_10) {
-		if (lba48) {
-			tf->hob_nsect = scsicmd[7];
-			tf->hob_lbal = scsicmd[2];
-
-			qc->nsect = ((unsigned int)scsicmd[7] << 8) |
-					scsicmd[8];
-		} else {
-			/* if we don't support LBA48 addressing, the request
-			 * -may- be too large. */
-			if ((scsicmd[2] & 0xf0) || scsicmd[7])
-				return 1;
-
-			/* stores LBA27:24 in lower 4 bits of device reg */
-			tf->device |= scsicmd[2];
+	/* Calculate the SCSI LBA and transfer length. */
+	switch (scsicmd[0]) {
+	case READ_10:
+	case WRITE_10:
+		scsi_10_lba_len(scsicmd, &block, &n_block);
+		break;
+	case READ_6:
+	case WRITE_6:
+		scsi_6_lba_len(scsicmd, &block, &n_block);
 
 
-			qc->nsect = scsicmd[8];
-		}
+		/* for 6-byte r/w commands, transfer length 0
+		 * means 256 blocks of data, not 0 block.
+		 */
+		if (!n_block)
+			n_block = 256;
+		break;
+	case READ_16:
+	case WRITE_16:
+		scsi_16_lba_len(scsicmd, &block, &n_block);
+		break;
+	default:
+		DPRINTK("no-byte command\n");
+		goto invalid_fld;
+	}
 
 
-		tf->nsect = scsicmd[8];
-		tf->lbal = scsicmd[5];
-		tf->lbam = scsicmd[4];
-		tf->lbah = scsicmd[3];
+	/* Check and compose ATA command */
+	if (!n_block)
+		/* For 10-byte and 16-byte SCSI R/W commands, transfer
+		 * length 0 means transfer 0 block of data.
+		 * However, for ATA R/W commands, sector count 0 means
+		 * 256 or 65536 sectors, not 0 sectors as in SCSI.
+		 */
+		goto nothing_to_do;
 
 
-		VPRINTK("ten-byte command\n");
-		if (qc->nsect == 0) /* we don't support length==0 cmds */
-			return 1;
-		return 0;
-	}
+	if (lba) {
+		if (lba48) {
+			/* The request -may- be too large for LBA48. */
+			if ((block >> 48) || (n_block > 65536))
+				goto out_of_range;
 
 
-	if (scsicmd[0] == READ_6 || scsicmd[0] == WRITE_6) {
-		qc->nsect = tf->nsect = scsicmd[4];
-		if (!qc->nsect) {
-			qc->nsect = 256;
-			if (lba48)
-				tf->hob_nsect = 1;
-		}
+			tf->hob_nsect = (n_block >> 8) & 0xff;
 
 
-		tf->lbal = scsicmd[3];
-		tf->lbam = scsicmd[2];
-		tf->lbah = scsicmd[1] & 0x1f; /* mask out reserved bits */
+			tf->hob_lbah = (block >> 40) & 0xff;
+			tf->hob_lbam = (block >> 32) & 0xff;
+			tf->hob_lbal = (block >> 24) & 0xff;
+		} else { 
+			/* LBA28 */
 
 
-		VPRINTK("six-byte command\n");
-		return 0;
-	}
+			/* The request -may- be too large for LBA28. */
+			if ((block >> 28) || (n_block > 256))
+				goto out_of_range;
 
 
-	if (scsicmd[0] == READ_16 || scsicmd[0] == WRITE_16) {
-		/* rule out impossible LBAs and sector counts */
-		if (scsicmd[2] || scsicmd[3] || scsicmd[10] || scsicmd[11])
-			return 1;
+			tf->device |= (block >> 24) & 0xf;
+		}
 
 
-		if (lba48) {
-			tf->hob_nsect = scsicmd[12];
-			tf->hob_lbal = scsicmd[6];
-			tf->hob_lbam = scsicmd[5];
-			tf->hob_lbah = scsicmd[4];
+		qc->nsect = n_block;
+		tf->nsect = n_block & 0xff;
 
 
-			qc->nsect = ((unsigned int)scsicmd[12] << 8) |
-					scsicmd[13];
-		} else {
-			/* once again, filter out impossible non-zero values */
-			if (scsicmd[4] || scsicmd[5] || scsicmd[12] ||
-			    (scsicmd[6] & 0xf0))
-				return 1;
+		tf->lbah = (block >> 16) & 0xff;
+		tf->lbam = (block >> 8) & 0xff;
+		tf->lbal = block & 0xff;
 
 
-			/* stores LBA27:24 in lower 4 bits of device reg */
-			tf->device |= scsicmd[6];
+		tf->device |= ATA_LBA;
+	} else { 
+		/* CHS */
+		u32 sect, head, cyl, track;
+
+		/* The request -may- be too large for CHS addressing. */
+		if ((block >> 28) || (n_block > 256))
+			goto out_of_range;
+
+		/* Convert LBA to CHS */
+		track = (u32)block / dev->sectors;
+		cyl   = track / dev->heads;
+		head  = track % dev->heads;
+		sect  = (u32)block % dev->sectors + 1;
+
+		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
+			(u32)block, track, cyl, head, sect);
+
+		/* Check whether the converted CHS can fit. 
+		   Cylinder: 0-65535 
+		   Head: 0-15
+		   Sector: 1-255 */
+		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
+			goto out_of_range;
+
+		qc->nsect = n_block;
+		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
+		tf->lbal = sect;
+		tf->lbam = cyl;
+		tf->lbah = cyl >> 8;
+		tf->device |= head;
+	}
 
 
-			qc->nsect = scsicmd[13];
-		}
+	return 0;
 
 
-		tf->nsect = scsicmd[13];
-		tf->lbal = scsicmd[9];
-		tf->lbam = scsicmd[8];
-		tf->lbah = scsicmd[7];
+invalid_fld:
+	ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
+	/* "Invalid field in CDB" */
+	return 1;
 
 
-		VPRINTK("sixteen-byte command\n");
-		if (qc->nsect == 0) /* we don't support length==0 cmds */
-			return 1;
-		return 0;
-	}
+out_of_range:
+	ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x21, 0x0);
+	/* "Logical Block Address out of range" */
+	return 1;
 
 
-	DPRINTK("no-byte command\n");
+nothing_to_do:
+	qc->scsicmd->result = SAM_STAT_GOOD;
 	return 1;
 	return 1;
 }
 }
 
 
@@ -741,6 +900,12 @@ static int ata_scsi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
  *	This function sets up an ata_queued_cmd structure for the
  *	This function sets up an ata_queued_cmd structure for the
  *	SCSI command, and sends that ata_queued_cmd to the hardware.
  *	SCSI command, and sends that ata_queued_cmd to the hardware.
  *
  *
+ *	The xlat_func argument (actor) returns 0 if ready to execute an
+ *	ATA command, else 1 to finish translation. If 1 is returned
+ *	then cmd->result (and possibly cmd->sense_buffer) are assumed
+ *	to be set reflecting an error condition or clean (early)
+ *	termination.
+ *
  *	LOCKING:
  *	LOCKING:
  *	spin_lock_irqsave(host_set lock)
  *	spin_lock_irqsave(host_set lock)
  */
  */
@@ -757,7 +922,7 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
 
 
 	qc = ata_scsi_qc_new(ap, dev, cmd, done);
 	qc = ata_scsi_qc_new(ap, dev, cmd, done);
 	if (!qc)
 	if (!qc)
-		return;
+		goto err_mem;
 
 
 	/* data is present; dma-map it */
 	/* data is present; dma-map it */
 	if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
 	if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
@@ -765,7 +930,7 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
 		if (unlikely(cmd->request_bufflen < 1)) {
 		if (unlikely(cmd->request_bufflen < 1)) {
 			printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n",
 			printk(KERN_WARNING "ata%u(%u): WARNING: zero len r/w req\n",
 			       ap->id, dev->devno);
 			       ap->id, dev->devno);
-			goto err_out;
+			goto err_did;
 		}
 		}
 
 
 		if (cmd->use_sg)
 		if (cmd->use_sg)
@@ -780,19 +945,28 @@ static void ata_scsi_translate(struct ata_port *ap, struct ata_device *dev,
 	qc->complete_fn = ata_scsi_qc_complete;
 	qc->complete_fn = ata_scsi_qc_complete;
 
 
 	if (xlat_func(qc, scsicmd))
 	if (xlat_func(qc, scsicmd))
-		goto err_out;
+		goto early_finish;
 
 
 	/* select device, send command to hardware */
 	/* select device, send command to hardware */
 	if (ata_qc_issue(qc))
 	if (ata_qc_issue(qc))
-		goto err_out;
+		goto err_did;
 
 
 	VPRINTK("EXIT\n");
 	VPRINTK("EXIT\n");
 	return;
 	return;
 
 
-err_out:
+early_finish:
+        ata_qc_free(qc);
+	done(cmd);
+	DPRINTK("EXIT - early finish (good or error)\n");
+	return;
+
+err_did:
 	ata_qc_free(qc);
 	ata_qc_free(qc);
-	ata_bad_cdb(cmd, done);
-	DPRINTK("EXIT - badcmd\n");
+err_mem:
+	cmd->result = (DID_ERROR << 16);
+	done(cmd);
+	DPRINTK("EXIT - internal\n");
+	return;
 }
 }
 
 
 /**
 /**
@@ -859,7 +1033,8 @@ static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
  *	Mapping the response buffer, calling the command's handler,
  *	Mapping the response buffer, calling the command's handler,
  *	and handling the handler's return value.  This return value
  *	and handling the handler's return value.  This return value
  *	indicates whether the handler wishes the SCSI command to be
  *	indicates whether the handler wishes the SCSI command to be
- *	completed successfully, or not.
+ *	completed successfully (0), or not (in which case cmd->result
+ *	and sense buffer are assumed to be set).
  *
  *
  *	LOCKING:
  *	LOCKING:
  *	spin_lock_irqsave(host_set lock)
  *	spin_lock_irqsave(host_set lock)
@@ -878,12 +1053,9 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
 	rc = actor(args, rbuf, buflen);
 	rc = actor(args, rbuf, buflen);
 	ata_scsi_rbuf_put(cmd, rbuf);
 	ata_scsi_rbuf_put(cmd, rbuf);
 
 
-	if (rc)
-		ata_bad_cdb(cmd, args->done);
-	else {
+	if (rc == 0)
 		cmd->result = SAM_STAT_GOOD;
 		cmd->result = SAM_STAT_GOOD;
-		args->done(cmd);
-	}
+	args->done(cmd);
 }
 }
 
 
 /**
 /**
@@ -1189,8 +1361,16 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
 	 * in the same manner)
 	 * in the same manner)
 	 */
 	 */
 	page_control = scsicmd[2] >> 6;
 	page_control = scsicmd[2] >> 6;
-	if ((page_control != 0) && (page_control != 3))
-		return 1;
+	switch (page_control) {
+	case 0: /* current */
+		break;  /* supported */
+	case 3: /* saved */
+		goto saving_not_supp;
+	case 1: /* changeable */
+	case 2: /* defaults */
+	default:
+		goto invalid_fld;
+	}
 
 
 	if (six_byte)
 	if (six_byte)
 		output_len = 4;
 		output_len = 4;
@@ -1221,7 +1401,7 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
 		break;
 		break;
 
 
 	default:		/* invalid page code */
 	default:		/* invalid page code */
-		return 1;
+		goto invalid_fld;
 	}
 	}
 
 
 	if (six_byte) {
 	if (six_byte) {
@@ -1234,6 +1414,16 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
 	}
 	}
 
 
 	return 0;
 	return 0;
+
+invalid_fld:
+	ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0);
+	/* "Invalid field in CDB" */
+	return 1;
+
+saving_not_supp:
+	ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
+	 /* "Saving parameters not supported" */
+	return 1;
 }
 }
 
 
 /**
 /**
@@ -1256,10 +1446,20 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
 
 
 	VPRINTK("ENTER\n");
 	VPRINTK("ENTER\n");
 
 
-	if (ata_id_has_lba48(args->id))
-		n_sectors = ata_id_u64(args->id, 100);
-	else
-		n_sectors = ata_id_u32(args->id, 60);
+	if (ata_id_has_lba(args->id)) {
+		if (ata_id_has_lba48(args->id))
+			n_sectors = ata_id_u64(args->id, 100);
+		else
+			n_sectors = ata_id_u32(args->id, 60);
+	} else {
+		/* CHS default translation */
+		n_sectors = args->id[1] * args->id[3] * args->id[6];
+
+		if (ata_id_current_chs_valid(args->id))
+			/* CHS current translation */
+			n_sectors = ata_id_u32(args->id, 57);
+	}
+
 	n_sectors--;		/* ATA TotalUserSectors - 1 */
 	n_sectors--;		/* ATA TotalUserSectors - 1 */
 
 
 	if (args->cmd->cmnd[0] == READ_CAPACITY) {
 	if (args->cmd->cmnd[0] == READ_CAPACITY) {
@@ -1322,6 +1522,34 @@ unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
 	return 0;
 	return 0;
 }
 }
 
 
+/**
+ *	ata_scsi_set_sense - Set SCSI sense data and status
+ *	@cmd: SCSI request to be handled
+ *	@sk: SCSI-defined sense key
+ *	@asc: SCSI-defined additional sense code
+ *	@ascq: SCSI-defined additional sense code qualifier
+ *
+ *	Helper function that builds a valid fixed format, current
+ *	response code and the given sense key (sk), additional sense
+ *	code (asc) and additional sense code qualifier (ascq) with
+ *	a SCSI command status of %SAM_STAT_CHECK_CONDITION and
+ *	DRIVER_SENSE set in the upper bits of scsi_cmnd::result.
+ *
+ *	LOCKING:
+ *	Not required
+ */
+
+void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+{
+	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+
+	cmd->sense_buffer[0] = 0x70;	/* fixed format, current */
+	cmd->sense_buffer[2] = sk;
+	cmd->sense_buffer[7] = 18 - 8;	/* additional sense length */
+	cmd->sense_buffer[12] = asc;
+	cmd->sense_buffer[13] = ascq;
+}
+
 /**
 /**
  *	ata_scsi_badcmd - End a SCSI request with an error
  *	ata_scsi_badcmd - End a SCSI request with an error
  *	@cmd: SCSI request to be handled
  *	@cmd: SCSI request to be handled
@@ -1340,30 +1568,84 @@ unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
 void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq)
 void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq)
 {
 {
 	DPRINTK("ENTER\n");
 	DPRINTK("ENTER\n");
-	cmd->result = SAM_STAT_CHECK_CONDITION;
-
-	cmd->sense_buffer[0] = 0x70;
-	cmd->sense_buffer[2] = ILLEGAL_REQUEST;
-	cmd->sense_buffer[7] = 14 - 8;	/* addnl. sense len. FIXME: correct? */
-	cmd->sense_buffer[12] = asc;
-	cmd->sense_buffer[13] = ascq;
+	ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, asc, ascq);
 
 
 	done(cmd);
 	done(cmd);
 }
 }
 
 
+void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
+			 struct scsi_cmnd *cmd)
+{
+	DECLARE_COMPLETION(wait);
+	struct ata_queued_cmd *qc;
+	unsigned long flags;
+	int rc;
+
+	DPRINTK("ATAPI request sense\n");
+
+	qc = ata_qc_new_init(ap, dev);
+	BUG_ON(qc == NULL);
+
+	/* FIXME: is this needed? */
+	memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
+
+	ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
+	qc->dma_dir = DMA_FROM_DEVICE;
+
+	memset(&qc->cdb, 0, ap->cdb_len);
+	qc->cdb[0] = REQUEST_SENSE;
+	qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
+
+	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	qc->tf.command = ATA_CMD_PACKET;
+
+	qc->tf.protocol = ATA_PROT_ATAPI;
+	qc->tf.lbam = (8 * 1024) & 0xff;
+	qc->tf.lbah = (8 * 1024) >> 8;
+	qc->nbytes = SCSI_SENSE_BUFFERSIZE;
+
+	qc->waiting = &wait;
+	qc->complete_fn = ata_qc_complete_noop;
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	rc = ata_qc_issue(qc);
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	if (rc)
+		ata_port_disable(ap);
+	else
+		wait_for_completion(&wait);
+
+	DPRINTK("EXIT\n");
+}
+
 static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
 static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
 {
 {
 	struct scsi_cmnd *cmd = qc->scsicmd;
 	struct scsi_cmnd *cmd = qc->scsicmd;
 
 
-	if (unlikely(drv_stat & (ATA_ERR | ATA_BUSY | ATA_DRQ))) {
+	VPRINTK("ENTER, drv_stat == 0x%x\n", drv_stat);
+
+	if (unlikely(drv_stat & (ATA_BUSY | ATA_DRQ)))
+		ata_to_sense_error(qc, drv_stat);
+
+	else if (unlikely(drv_stat & ATA_ERR)) {
 		DPRINTK("request check condition\n");
 		DPRINTK("request check condition\n");
 
 
+		/* FIXME: command completion with check condition
+		 * but no sense causes the error handler to run,
+		 * which then issues REQUEST SENSE, fills in the sense 
+		 * buffer, and completes the command (for the second
+		 * time).  We need to issue REQUEST SENSE some other
+		 * way, to avoid completing the command twice.
+		 */
 		cmd->result = SAM_STAT_CHECK_CONDITION;
 		cmd->result = SAM_STAT_CHECK_CONDITION;
 
 
 		qc->scsidone(cmd);
 		qc->scsidone(cmd);
 
 
 		return 1;
 		return 1;
-	} else {
+	}
+
+	else {
 		u8 *scsicmd = cmd->cmnd;
 		u8 *scsicmd = cmd->cmnd;
 
 
 		if (scsicmd[0] == INQUIRY) {
 		if (scsicmd[0] == INQUIRY) {
@@ -1371,15 +1653,30 @@ static int atapi_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
 			unsigned int buflen;
 			unsigned int buflen;
 
 
 			buflen = ata_scsi_rbuf_get(cmd, &buf);
 			buflen = ata_scsi_rbuf_get(cmd, &buf);
-			buf[2] = 0x5;
-			buf[3] = (buf[3] & 0xf0) | 2;
+
+	/* ATAPI devices typically report zero for their SCSI version,
+	 * and sometimes deviate from the spec WRT response data
+	 * format.  If SCSI version is reported as zero like normal,
+	 * then we make the following fixups:  1) Fake MMC-5 version,
+	 * to indicate to the Linux scsi midlayer this is a modern
+	 * device.  2) Ensure response data format / ATAPI information
+	 * are always correct.
+	 */
+	/* FIXME: do we ever override EVPD pages and the like, with
+	 * this code?
+	 */
+			if (buf[2] == 0) {
+				buf[2] = 0x5;
+				buf[3] = 0x32;
+			}
+
 			ata_scsi_rbuf_put(cmd, buf);
 			ata_scsi_rbuf_put(cmd, buf);
 		}
 		}
+
 		cmd->result = SAM_STAT_GOOD;
 		cmd->result = SAM_STAT_GOOD;
 	}
 	}
 
 
 	qc->scsidone(cmd);
 	qc->scsidone(cmd);
-
 	return 0;
 	return 0;
 }
 }
 /**
 /**
@@ -1640,7 +1937,7 @@ void ata_scsi_simulate(u16 *id,
 
 
 		case INQUIRY:
 		case INQUIRY:
 			if (scsicmd[1] & 2)	           /* is CmdDt set?  */
 			if (scsicmd[1] & 2)	           /* is CmdDt set?  */
-				ata_bad_cdb(cmd, done);
+				ata_scsi_invalid_field(cmd, done);
 			else if ((scsicmd[1] & 1) == 0)    /* is EVPD clear? */
 			else if ((scsicmd[1] & 1) == 0)    /* is EVPD clear? */
 				ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
 				ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
 			else if (scsicmd[2] == 0x00)
 			else if (scsicmd[2] == 0x00)
@@ -1650,7 +1947,7 @@ void ata_scsi_simulate(u16 *id,
 			else if (scsicmd[2] == 0x83)
 			else if (scsicmd[2] == 0x83)
 				ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
 				ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
 			else
 			else
-				ata_bad_cdb(cmd, done);
+				ata_scsi_invalid_field(cmd, done);
 			break;
 			break;
 
 
 		case MODE_SENSE:
 		case MODE_SENSE:
@@ -1660,7 +1957,7 @@ void ata_scsi_simulate(u16 *id,
 
 
 		case MODE_SELECT:	/* unconditionally return */
 		case MODE_SELECT:	/* unconditionally return */
 		case MODE_SELECT_10:	/* bad-field-in-cdb */
 		case MODE_SELECT_10:	/* bad-field-in-cdb */
-			ata_bad_cdb(cmd, done);
+			ata_scsi_invalid_field(cmd, done);
 			break;
 			break;
 
 
 		case READ_CAPACITY:
 		case READ_CAPACITY:
@@ -1671,7 +1968,7 @@ void ata_scsi_simulate(u16 *id,
 			if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
 			if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
 				ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
 				ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
 			else
 			else
-				ata_bad_cdb(cmd, done);
+				ata_scsi_invalid_field(cmd, done);
 			break;
 			break;
 
 
 		case REPORT_LUNS:
 		case REPORT_LUNS:
@@ -1683,8 +1980,26 @@ void ata_scsi_simulate(u16 *id,
 
 
 		/* all other commands */
 		/* all other commands */
 		default:
 		default:
-			ata_bad_scsiop(cmd, done);
+			ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
+			/* "Invalid command operation code" */
+			done(cmd);
 			break;
 			break;
 	}
 	}
 }
 }
 
 
+void ata_scsi_scan_host(struct ata_port *ap)
+{
+	struct ata_device *dev;
+	unsigned int i;
+
+	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+		return;
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		dev = &ap->device[i];
+
+		if (ata_dev_present(dev))
+			scsi_scan_target(&ap->host->shost_gendev, 0, i, 0, 0);
+	}
+}
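The rewritten ata_scsi_rw_xlat() above now works in two stages: scsi_6_lba_len(), scsi_10_lba_len() and scsi_16_lba_len() decode the CDB into a (block, n_block) pair, and the second half composes an LBA48, LBA28 or CHS taskfile from that pair, jumping to the invalid_fld/out_of_range/nothing_to_do labels when the request cannot be represented. As a rough sketch of what the 10-byte decode step amounts to — assuming only the big-endian READ(10)/WRITE(10) CDB layout, not the exact kernel helper body:

    #include <stdint.h>

    /* Hypothetical standalone equivalent of the 10-byte CDB decode:
     * bytes 2-5 carry a big-endian 32-bit LBA, bytes 7-8 a 16-bit length. */
    static void scsi_10_lba_len(const uint8_t *scsicmd, uint64_t *plba, uint32_t *plen)
    {
    	*plba = ((uint64_t)scsicmd[2] << 24) | ((uint64_t)scsicmd[3] << 16) |
    		((uint64_t)scsicmd[4] << 8)  |  (uint64_t)scsicmd[5];
    	*plen = ((uint32_t)scsicmd[7] << 8) | scsicmd[8];
    }

The CHS branch then re-derives track, cylinder, head and sector from that LBA using the drive geometry, exactly as shown in the hunk, and rejects anything that does not fit in 16 cylinder, 4 head and 8 sector bits.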
+

+ 6 - 10
drivers/scsi/libata.h

@@ -39,6 +39,7 @@ struct ata_scsi_args {
 
 
 /* libata-core.c */
 /* libata-core.c */
 extern int atapi_enabled;
 extern int atapi_enabled;
+extern int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
 extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
 extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
 				      struct ata_device *dev);
 				      struct ata_device *dev);
 extern void ata_qc_free(struct ata_queued_cmd *qc);
 extern void ata_qc_free(struct ata_queued_cmd *qc);
@@ -51,6 +52,9 @@ extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
 
 
 
 
 /* libata-scsi.c */
 /* libata-scsi.c */
+extern void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
+			 struct scsi_cmnd *cmd);
+extern void ata_scsi_scan_host(struct ata_port *ap);
 extern void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat);
 extern void ata_to_sense_error(struct ata_queued_cmd *qc, u8 drv_stat);
 extern int ata_scsi_error(struct Scsi_Host *host);
 extern int ata_scsi_error(struct Scsi_Host *host);
 extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
 extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
@@ -76,18 +80,10 @@ extern unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
 extern void ata_scsi_badcmd(struct scsi_cmnd *cmd,
 extern void ata_scsi_badcmd(struct scsi_cmnd *cmd,
 			    void (*done)(struct scsi_cmnd *),
 			    void (*done)(struct scsi_cmnd *),
 			    u8 asc, u8 ascq);
 			    u8 asc, u8 ascq);
+extern void ata_scsi_set_sense(struct scsi_cmnd *cmd,
+			       u8 sk, u8 asc, u8 ascq);
 extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
 extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
                         unsigned int (*actor) (struct ata_scsi_args *args,
                         unsigned int (*actor) (struct ata_scsi_args *args,
                                            u8 *rbuf, unsigned int buflen));
                                            u8 *rbuf, unsigned int buflen));
 
 
-static inline void ata_bad_scsiop(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
-{
-	ata_scsi_badcmd(cmd, done, 0x20, 0x00);
-}
-
-static inline void ata_bad_cdb(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
-{
-	ata_scsi_badcmd(cmd, done, 0x24, 0x00);
-}
-
 #endif /* __LIBATA_H__ */
 #endif /* __LIBATA_H__ */
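With ata_bad_scsiop() and ata_bad_cdb() gone, handlers report errors by filling in sense data themselves through the newly exported ata_scsi_set_sense(). A typical rejection path now looks like this — a minimal sketch reusing only names that already appear in the hunks above:

    /* Reject the CDB with ILLEGAL REQUEST, "Invalid field in CDB"
     * (sense key 0x05, ASC 0x24, ASCQ 0x00) and finish the command early. */
    ata_scsi_set_sense(qc->scsicmd, ILLEGAL_REQUEST, 0x24, 0x0);
    return 1;	/* non-zero tells ata_scsi_translate() not to issue an ATA command */

The helper builds fixed-format sense data: 0x70 in byte 0 (current error), the sense key in byte 2, the additional sense length (18 - 8) in byte 7, ASC/ASCQ in bytes 12 and 13, with cmd->result set to (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION.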

+ 1 - 0
drivers/scsi/megaraid/megaraid_sas.c

@@ -34,6 +34,7 @@
 #include <linux/delay.h>
 #include <linux/delay.h>
 #include <linux/uio.h>
 #include <linux/uio.h>
 #include <asm/uaccess.h>
 #include <asm/uaccess.h>
+#include <linux/fs.h>
 #include <linux/compat.h>
 #include <linux/compat.h>
 
 
 #include <scsi/scsi.h>
 #include <scsi/scsi.h>

File diff too large to display
+ 687 - 106
drivers/scsi/sata_mv.c


+ 8 - 8
drivers/scsi/sata_nv.c

@@ -29,6 +29,8 @@
  *  NV-specific details such as register offsets, SATA phy location,
  *  NV-specific details such as register offsets, SATA phy location,
  *  hotplug info, etc.
  *  hotplug info, etc.
  *
  *
+ *  0.09
+ *     - Fixed bug introduced by 0.08's MCP51 and MCP55 support.
  *
  *
  *  0.08
  *  0.08
  *     - Added support for MCP51 and MCP55.
  *     - Added support for MCP51 and MCP55.
@@ -132,9 +134,7 @@ enum nv_host_type
 	GENERIC,
 	GENERIC,
 	NFORCE2,
 	NFORCE2,
 	NFORCE3,
 	NFORCE3,
-	CK804,
-	MCP51,
-	MCP55
+	CK804
 };
 };
 
 
 static struct pci_device_id nv_pci_tbl[] = {
 static struct pci_device_id nv_pci_tbl[] = {
@@ -153,13 +153,13 @@ static struct pci_device_id nv_pci_tbl[] = {
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0, CK804 },
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA,
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 },
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2,
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP51 },
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
 	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, MCP55 },
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0, GENERIC },
 	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
 	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
 		PCI_ANY_ID, PCI_ANY_ID,
 		PCI_ANY_ID, PCI_ANY_ID,
 		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
 		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
@@ -405,7 +405,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 	rc = -ENOMEM;
 	rc = -ENOMEM;
 
 
 	ppi = &nv_port_info;
 	ppi = &nv_port_info;
-	probe_ent = ata_pci_init_native_mode(pdev, &ppi);
+	probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
 	if (!probe_ent)
 	if (!probe_ent)
 		goto err_out_regions;
 		goto err_out_regions;
 
 

+ 3 - 3
drivers/scsi/sata_promise.c

@@ -438,11 +438,11 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap,
 		break;
 		break;
 
 
         default:
         default:
-                ap->stats.idle_irq++;
-                break;
+		ap->stats.idle_irq++;
+		break;
         }
         }
 
 
-        return handled;
+	return handled;
 }
 }
 
 
 static void pdc_irq_clear(struct ata_port *ap)
 static void pdc_irq_clear(struct ata_port *ap)

+ 875 - 0
drivers/scsi/sata_sil24.c

@@ -0,0 +1,875 @@
+/*
+ * sata_sil24.c - Driver for Silicon Image 3124/3132 SATA-2 controllers
+ *
+ * Copyright 2005  Tejun Heo
+ *
+ * Based on preview driver from Silicon Image.
+ *
+ * NOTE: No NCQ/ATAPI support yet.  The preview driver didn't support
+ * NCQ nor ATAPI, and, unfortunately, I couldn't find out how to make
+ * those work.  Enabling those shouldn't be difficult.  Basic
+ * structure is all there (in libata-dev tree).  If you have any
+ * information about this hardware, please contact me or linux-ide.
+ * Info is needed on...
+ *
+ * - How to issue tagged commands and turn on sactive on issue accordingly.
+ * - Where to put an ATAPI command and how to tell the device to send it.
+ * - How to enable/use 64bit.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <scsi/scsi_host.h>
+#include "scsi.h"
+#include <linux/libata.h>
+#include <asm/io.h>
+
+#define DRV_NAME	"sata_sil24"
+#define DRV_VERSION	"0.22"	/* Silicon Image's preview driver was 0.10 */
+
+/*
+ * Port request block (PRB) 32 bytes
+ */
+struct sil24_prb {
+	u16	ctrl;
+	u16	prot;
+	u32	rx_cnt;
+	u8	fis[6 * 4];
+};
+
+/*
+ * Scatter gather entry (SGE) 16 bytes
+ */
+struct sil24_sge {
+	u64	addr;
+	u32	cnt;
+	u32	flags;
+};
+
+/*
+ * Port multiplier
+ */
+struct sil24_port_multiplier {
+	u32	diag;
+	u32	sactive;
+};
+
+enum {
+	/*
+	 * Global controller registers (128 bytes @ BAR0)
+	 */
+		/* 32 bit regs */
+	HOST_SLOT_STAT		= 0x00, /* 32 bit slot stat * 4 */
+	HOST_CTRL		= 0x40,
+	HOST_IRQ_STAT		= 0x44,
+	HOST_PHY_CFG		= 0x48,
+	HOST_BIST_CTRL		= 0x50,
+	HOST_BIST_PTRN		= 0x54,
+	HOST_BIST_STAT		= 0x58,
+	HOST_MEM_BIST_STAT	= 0x5c,
+	HOST_FLASH_CMD		= 0x70,
+		/* 8 bit regs */
+	HOST_FLASH_DATA		= 0x74,
+	HOST_TRANSITION_DETECT	= 0x75,
+	HOST_GPIO_CTRL		= 0x76,
+	HOST_I2C_ADDR		= 0x78, /* 32 bit */
+	HOST_I2C_DATA		= 0x7c,
+	HOST_I2C_XFER_CNT	= 0x7e,
+	HOST_I2C_CTRL		= 0x7f,
+
+	/* HOST_SLOT_STAT bits */
+	HOST_SSTAT_ATTN		= (1 << 31),
+
+	/*
+	 * Port registers
+	 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
+	 */
+	PORT_REGS_SIZE		= 0x2000,
+	PORT_PRB		= 0x0000, /* (32 bytes PRB + 16 bytes SGEs * 6) * 31 (3968 bytes) */
+
+	PORT_PM			= 0x0f80, /* 8 bytes PM * 16 (128 bytes) */
+		/* 32 bit regs */
+	PORT_CTRL_STAT		= 0x1000, /* write: ctrl-set, read: stat */
+	PORT_CTRL_CLR		= 0x1004, /* write: ctrl-clear */
+	PORT_IRQ_STAT		= 0x1008, /* high: status, low: interrupt */
+	PORT_IRQ_ENABLE_SET	= 0x1010, /* write: enable-set */
+	PORT_IRQ_ENABLE_CLR	= 0x1014, /* write: enable-clear */
+	PORT_ACTIVATE_UPPER_ADDR= 0x101c,
+	PORT_EXEC_FIFO		= 0x1020, /* command execution fifo */
+	PORT_CMD_ERR		= 0x1024, /* command error number */
+	PORT_FIS_CFG		= 0x1028,
+	PORT_FIFO_THRES		= 0x102c,
+		/* 16 bit regs */
+	PORT_DECODE_ERR_CNT	= 0x1040,
+	PORT_DECODE_ERR_THRESH	= 0x1042,
+	PORT_CRC_ERR_CNT	= 0x1044,
+	PORT_CRC_ERR_THRESH	= 0x1046,
+	PORT_HSHK_ERR_CNT	= 0x1048,
+	PORT_HSHK_ERR_THRESH	= 0x104a,
+		/* 32 bit regs */
+	PORT_PHY_CFG		= 0x1050,
+	PORT_SLOT_STAT		= 0x1800,
+	PORT_CMD_ACTIVATE	= 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
+	PORT_EXEC_DIAG		= 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
+	PORT_PSD_DIAG		= 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
+	PORT_SCONTROL		= 0x1f00,
+	PORT_SSTATUS		= 0x1f04,
+	PORT_SERROR		= 0x1f08,
+	PORT_SACTIVE		= 0x1f0c,
+
+	/* PORT_CTRL_STAT bits */
+	PORT_CS_PORT_RST	= (1 << 0), /* port reset */
+	PORT_CS_DEV_RST		= (1 << 1), /* device reset */
+	PORT_CS_INIT		= (1 << 2), /* port initialize */
+	PORT_CS_IRQ_WOC		= (1 << 3), /* interrupt write one to clear */
+	PORT_CS_RESUME		= (1 << 6), /* port resume */
+	PORT_CS_32BIT_ACTV	= (1 << 10), /* 32-bit activation */
+	PORT_CS_PM_EN		= (1 << 13), /* port multiplier enable */
+	PORT_CS_RDY		= (1 << 31), /* port ready to accept commands */
+
+	/* PORT_IRQ_STAT/ENABLE_SET/CLR */
+	/* bits[11:0] are masked */
+	PORT_IRQ_COMPLETE	= (1 << 0), /* command(s) completed */
+	PORT_IRQ_ERROR		= (1 << 1), /* command execution error */
+	PORT_IRQ_PORTRDY_CHG	= (1 << 2), /* port ready change */
+	PORT_IRQ_PWR_CHG	= (1 << 3), /* power management change */
+	PORT_IRQ_PHYRDY_CHG	= (1 << 4), /* PHY ready change */
+	PORT_IRQ_COMWAKE	= (1 << 5), /* COMWAKE received */
+	PORT_IRQ_UNK_FIS	= (1 << 6), /* Unknown FIS received */
+	PORT_IRQ_SDB_FIS	= (1 << 11), /* SDB FIS received */
+
+	/* bits[27:16] are unmasked (raw) */
+	PORT_IRQ_RAW_SHIFT	= 16,
+	PORT_IRQ_MASKED_MASK	= 0x7ff,
+	PORT_IRQ_RAW_MASK	= (0x7ff << PORT_IRQ_RAW_SHIFT),
+
+	/* ENABLE_SET/CLR specific, intr steering - 2 bit field */
+	PORT_IRQ_STEER_SHIFT	= 30,
+	PORT_IRQ_STEER_MASK	= (3 << PORT_IRQ_STEER_SHIFT),
+
+	/* PORT_CMD_ERR constants */
+	PORT_CERR_DEV		= 1, /* Error bit in D2H Register FIS */
+	PORT_CERR_SDB		= 2, /* Error bit in SDB FIS */
+	PORT_CERR_DATA		= 3, /* Error in data FIS not detected by dev */
+	PORT_CERR_SEND		= 4, /* Initial cmd FIS transmission failure */
+	PORT_CERR_INCONSISTENT	= 5, /* Protocol mismatch */
+	PORT_CERR_DIRECTION	= 6, /* Data direction mismatch */
+	PORT_CERR_UNDERRUN	= 7, /* Ran out of SGEs while writing */
+	PORT_CERR_OVERRUN	= 8, /* Ran out of SGEs while reading */
+	PORT_CERR_PKT_PROT	= 11, /* DIR invalid in 1st PIO setup of ATAPI */
+	PORT_CERR_SGT_BOUNDARY	= 16, /* PLD ecode 00 - SGT not on qword boundary */
+	PORT_CERR_SGT_TGTABRT	= 17, /* PLD ecode 01 - target abort */
+	PORT_CERR_SGT_MSTABRT	= 18, /* PLD ecode 10 - master abort */
+	PORT_CERR_SGT_PCIPERR	= 19, /* PLD ecode 11 - PCI parity err while fetching SGT */
+	PORT_CERR_CMD_BOUNDARY	= 24, /* ctrl[15:13] 001 - PRB not on qword boundary */
+	PORT_CERR_CMD_TGTABRT	= 25, /* ctrl[15:13] 010 - target abort */
+	PORT_CERR_CMD_MSTABRT	= 26, /* ctrl[15:13] 100 - master abort */
+	PORT_CERR_CMD_PCIPERR	= 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
+	PORT_CERR_XFR_UNDEF	= 32, /* PSD ecode 00 - undefined */
+	PORT_CERR_XFR_TGTABRT	= 33, /* PSD ecode 01 - target abort */
+	PORT_CERR_XFR_MSGABRT	= 34, /* PSD ecode 10 - master abort */
+	PORT_CERR_XFR_PCIPERR	= 35, /* PSD ecode 11 - PCI parity err during transfer */
+	PORT_CERR_SENDSERVICE	= 36, /* FIS received while sending service */
+
+	/*
+	 * Other constants
+	 */
+	SGE_TRM			= (1 << 31), /* Last SGE in chain */
+	PRB_SOFT_RST		= (1 << 7),  /* Soft reset request (ign BSY?) */
+
+	/* board id */
+	BID_SIL3124		= 0,
+	BID_SIL3132		= 1,
+	BID_SIL3131		= 2,
+
+	IRQ_STAT_4PORTS		= 0xf,
+};
+
+struct sil24_cmd_block {
+	struct sil24_prb prb;
+	struct sil24_sge sge[LIBATA_MAX_PRD];
+};
+
+/*
+ * ap->private_data
+ *
+ * The preview driver always returned 0 for status.  We emulate it
+ * here from the previous interrupt.
+ */
+struct sil24_port_priv {
+	struct sil24_cmd_block *cmd_block;	/* 32 cmd blocks */
+	dma_addr_t cmd_block_dma;		/* DMA base addr for them */
+	struct ata_taskfile tf;			/* Cached taskfile registers */
+};
+
+/* ap->host_set->private_data */
+struct sil24_host_priv {
+	void *host_base;	/* global controller control (128 bytes @BAR0) */
+	void *port_base;	/* port registers (4 * 8192 bytes @BAR2) */
+};
+
+static u8 sil24_check_status(struct ata_port *ap);
+static u8 sil24_check_err(struct ata_port *ap);
+static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
+static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val);
+static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
+static void sil24_phy_reset(struct ata_port *ap);
+static void sil24_qc_prep(struct ata_queued_cmd *qc);
+static int sil24_qc_issue(struct ata_queued_cmd *qc);
+static void sil24_irq_clear(struct ata_port *ap);
+static void sil24_eng_timeout(struct ata_port *ap);
+static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static int sil24_port_start(struct ata_port *ap);
+static void sil24_port_stop(struct ata_port *ap);
+static void sil24_host_stop(struct ata_host_set *host_set);
+static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+
+static struct pci_device_id sil24_pci_tbl[] = {
+	{ 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
+	{ 0x1095, 0x3132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3132 },
+	{ 0x1095, 0x3131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
+	{ 0x1095, 0x3531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3131 },
+	{ } /* terminate list */
+};
+
+static struct pci_driver sil24_pci_driver = {
+	.name			= DRV_NAME,
+	.id_table		= sil24_pci_tbl,
+	.probe			= sil24_init_one,
+	.remove			= ata_pci_remove_one, /* safe? */
+};
+
+static Scsi_Host_Template sil24_sht = {
+	.module			= THIS_MODULE,
+	.name			= DRV_NAME,
+	.ioctl			= ata_scsi_ioctl,
+	.queuecommand		= ata_scsi_queuecmd,
+	.eh_strategy_handler	= ata_scsi_error,
+	.can_queue		= ATA_DEF_QUEUE,
+	.this_id		= ATA_SHT_THIS_ID,
+	.sg_tablesize		= LIBATA_MAX_PRD,
+	.max_sectors		= ATA_MAX_SECTORS,
+	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
+	.emulated		= ATA_SHT_EMULATED,
+	.use_clustering		= ATA_SHT_USE_CLUSTERING,
+	.proc_name		= DRV_NAME,
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+	.slave_configure	= ata_scsi_slave_config,
+	.bios_param		= ata_std_bios_param,
+	.ordered_flush		= 1, /* NCQ not supported yet */
+};
+
+static struct ata_port_operations sil24_ops = {
+	.port_disable		= ata_port_disable,
+
+	.check_status		= sil24_check_status,
+	.check_altstatus	= sil24_check_status,
+	.check_err		= sil24_check_err,
+	.dev_select		= ata_noop_dev_select,
+
+	.tf_read		= sil24_tf_read,
+
+	.phy_reset		= sil24_phy_reset,
+
+	.qc_prep		= sil24_qc_prep,
+	.qc_issue		= sil24_qc_issue,
+
+	.eng_timeout		= sil24_eng_timeout,
+
+	.irq_handler		= sil24_interrupt,
+	.irq_clear		= sil24_irq_clear,
+
+	.scr_read		= sil24_scr_read,
+	.scr_write		= sil24_scr_write,
+
+	.port_start		= sil24_port_start,
+	.port_stop		= sil24_port_stop,
+	.host_stop		= sil24_host_stop,
+};
+
+/*
+ * Use bits 30-31 of host_flags to encode available port numbers.
+ * Current maximum is 4.
+ */
+#define SIL24_NPORTS2FLAG(nports)	((((unsigned)(nports) - 1) & 0x3) << 30)
+#define SIL24_FLAG2NPORTS(flag)		((((flag) >> 30) & 0x3) + 1)
+
+static struct ata_port_info sil24_port_info[] = {
+	/* sil_3124 */
+	{
+		.sht		= &sil24_sht,
+		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
+				  ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(4),
+		.pio_mask	= 0x1f,			/* pio0-4 */
+		.mwdma_mask	= 0x07,			/* mwdma0-2 */
+		.udma_mask	= 0x3f,			/* udma0-5 */
+		.port_ops	= &sil24_ops,
+	},
+	/* sil_3132 */ 
+	{
+		.sht		= &sil24_sht,
+		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
+				  ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(2),
+		.pio_mask	= 0x1f,			/* pio0-4 */
+		.mwdma_mask	= 0x07,			/* mwdma0-2 */
+		.udma_mask	= 0x3f,			/* udma0-5 */
+		.port_ops	= &sil24_ops,
+	},
+	/* sil_3131/sil_3531 */
+	{
+		.sht		= &sil24_sht,
+		.host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
+				  ATA_FLAG_PIO_DMA | SIL24_NPORTS2FLAG(1),
+		.pio_mask	= 0x1f,			/* pio0-4 */
+		.mwdma_mask	= 0x07,			/* mwdma0-2 */
+		.udma_mask	= 0x3f,			/* udma0-5 */
+		.port_ops	= &sil24_ops,
+	},
+};
+
+static inline void sil24_update_tf(struct ata_port *ap)
+{
+	struct sil24_port_priv *pp = ap->private_data;
+	void *port = (void *)ap->ioaddr.cmd_addr;
+	struct sil24_prb *prb = port;
+
+	ata_tf_from_fis(prb->fis, &pp->tf);
+}
+
+static u8 sil24_check_status(struct ata_port *ap)
+{
+	struct sil24_port_priv *pp = ap->private_data;
+	return pp->tf.command;
+}
+
+static u8 sil24_check_err(struct ata_port *ap)
+{
+	struct sil24_port_priv *pp = ap->private_data;
+	return pp->tf.feature;
+}
+
+static int sil24_scr_map[] = {
+	[SCR_CONTROL]	= 0,
+	[SCR_STATUS]	= 1,
+	[SCR_ERROR]	= 2,
+	[SCR_ACTIVE]	= 3,
+};
+
+static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg)
+{
+	void *scr_addr = (void *)ap->ioaddr.scr_addr;
+	if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
+		void *addr = scr_addr + sil24_scr_map[sc_reg] * 4;
+		return readl(addr);
+	}
+	return 0xffffffffU;
+}
+
+static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
+{
+	void *scr_addr = (void *)ap->ioaddr.scr_addr;
+	if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
+		void *addr = scr_addr + sil24_scr_map[sc_reg] * 4;
+		writel(val, addr);
+	}
+}
+
+static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	struct sil24_port_priv *pp = ap->private_data;
+	*tf = pp->tf;
+}
+
+static void sil24_phy_reset(struct ata_port *ap)
+{
+	__sata_phy_reset(ap);
+	/*
+	 * No ATAPI yet.  Just unconditionally indicate ATA device.
+	 * If ATAPI device is attached, it will fail ATA_CMD_ID_ATA
+	 * and libata core will ignore the device.
+	 */
+	if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
+		ap->device[0].class = ATA_DEV_ATA;
+}
+
+static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
+				 struct sil24_cmd_block *cb)
+{
+	struct scatterlist *sg = qc->sg;
+	struct sil24_sge *sge = cb->sge;
+	unsigned i;
+
+	for (i = 0; i < qc->n_elem; i++, sg++, sge++) {
+		sge->addr = cpu_to_le64(sg_dma_address(sg));
+		sge->cnt = cpu_to_le32(sg_dma_len(sg));
+		sge->flags = i < qc->n_elem - 1 ? 0 : cpu_to_le32(SGE_TRM);
+	}
+}
+
+static void sil24_qc_prep(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct sil24_port_priv *pp = ap->private_data;
+	struct sil24_cmd_block *cb = pp->cmd_block + qc->tag;
+	struct sil24_prb *prb = &cb->prb;
+
+	switch (qc->tf.protocol) {
+	case ATA_PROT_PIO:
+	case ATA_PROT_DMA:
+	case ATA_PROT_NODATA:
+		break;
+	default:
+		/* ATAPI isn't supported yet */
+		BUG();
+	}
+
+	ata_tf_to_fis(&qc->tf, prb->fis, 0);
+
+	if (qc->flags & ATA_QCFLAG_DMAMAP)
+		sil24_fill_sg(qc, cb);
+}
+
+static int sil24_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	void *port = (void *)ap->ioaddr.cmd_addr;
+	struct sil24_port_priv *pp = ap->private_data;
+	dma_addr_t paddr = pp->cmd_block_dma + qc->tag * sizeof(*pp->cmd_block);
+
+	writel((u32)paddr, port + PORT_CMD_ACTIVATE);
+	return 0;
+}
+
+static void sil24_irq_clear(struct ata_port *ap)
+{
+	/* unused */
+}
+
+static int __sil24_reset_controller(void *port)
+{
+	int cnt;
+	u32 tmp;
+
+	/* Reset controller state.  Is this correct? */
+	writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
+	readl(port + PORT_CTRL_STAT);	/* sync */
+
+	/* Max ~100ms */
+	for (cnt = 0; cnt < 1000; cnt++) {
+		udelay(100);
+		tmp = readl(port + PORT_CTRL_STAT);
+		if (!(tmp & PORT_CS_DEV_RST))
+			break;
+	}
+
+	if (tmp & PORT_CS_DEV_RST)
+		return -1;
+	return 0;
+}
+
+static void sil24_reset_controller(struct ata_port *ap)
+{
+	printk(KERN_NOTICE DRV_NAME
+	       " ata%u: resetting controller...\n", ap->id);
+	if (__sil24_reset_controller((void *)ap->ioaddr.cmd_addr))
+                printk(KERN_ERR DRV_NAME
+                       " ata%u: failed to reset controller\n", ap->id);
+}
+
+static void sil24_eng_timeout(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	if (!qc) {
+		printk(KERN_ERR "ata%u: BUG: timeout without command\n",
+		       ap->id);
+		return;
+	}
+
+	/*
+	 * hack alert!  We cannot use the supplied completion
+	 * function from inside the ->eh_strategy_handler() thread.
+	 * libata is the only user of ->eh_strategy_handler() in
+	 * any kernel, so the default scsi_done() assumes it is
+	 * not being called from the SCSI EH.
+	 */
+	printk(KERN_ERR "ata%u: command timeout\n", ap->id);
+	qc->scsidone = scsi_finish_command;
+	ata_qc_complete(qc, ATA_ERR);
+
+	sil24_reset_controller(ap);
+}
+
+static void sil24_error_intr(struct ata_port *ap, u32 slot_stat)
+{
+	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
+	struct sil24_port_priv *pp = ap->private_data;
+	void *port = (void *)ap->ioaddr.cmd_addr;
+	u32 irq_stat, cmd_err, sstatus, serror;
+
+	irq_stat = readl(port + PORT_IRQ_STAT);
+	writel(irq_stat, port + PORT_IRQ_STAT);		/* clear irq */
+
+	if (!(irq_stat & PORT_IRQ_ERROR)) {
+		/* ignore non-completion, non-error irqs for now */
+		printk(KERN_WARNING DRV_NAME
+		       " ata%u: non-error exception irq (irq_stat %x)\n",
+		       ap->id, irq_stat);
+		return;
+	}
+
+	cmd_err = readl(port + PORT_CMD_ERR);
+	sstatus = readl(port + PORT_SSTATUS);
+	serror = readl(port + PORT_SERROR);
+	if (serror)
+		writel(serror, port + PORT_SERROR);
+
+	printk(KERN_ERR DRV_NAME " ata%u: error interrupt on port%d\n"
+	       "  stat=0x%x irq=0x%x cmd_err=%d sstatus=0x%x serror=0x%x\n",
+	       ap->id, ap->port_no, slot_stat, irq_stat, cmd_err, sstatus, serror);
+
+	if (cmd_err == PORT_CERR_DEV || cmd_err == PORT_CERR_SDB) {
+		/*
+		 * Device is reporting error, tf registers are valid.
+		 */
+		sil24_update_tf(ap);
+	} else {
+		/*
+		 * Other errors.  libata currently doesn't have any
+		 * mechanism to report these errors.  Just turn on
+		 * ATA_ERR.
+		 */
+		pp->tf.command = ATA_ERR;
+	}
+
+	if (qc)
+		ata_qc_complete(qc, pp->tf.command);
+
+	sil24_reset_controller(ap);
+}
+
+static inline void sil24_host_intr(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);
+	void *port = (void *)ap->ioaddr.cmd_addr;
+	u32 slot_stat;
+
+	slot_stat = readl(port + PORT_SLOT_STAT);
+	if (!(slot_stat & HOST_SSTAT_ATTN)) {
+		struct sil24_port_priv *pp = ap->private_data;
+		/*
+		 * !HOST_SSTAT_ATTN guarantees successful completion,
+		 * so reading back tf registers is unnecessary for
+		 * most commands.  TODO: read tf registers for
+		 * commands which require these values on successful
+		 * completion (EXECUTE DEVICE DIAGNOSTIC, CHECK POWER,
+		 * DEVICE RESET and READ PORT MULTIPLIER (any more?).
+		 */
+		sil24_update_tf(ap);
+
+		if (qc)
+			ata_qc_complete(qc, pp->tf.command);
+	} else
+		sil24_error_intr(ap, slot_stat);
+}
+
+static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+{
+	struct ata_host_set *host_set = dev_instance;
+	struct sil24_host_priv *hpriv = host_set->private_data;
+	unsigned handled = 0;
+	u32 status;
+	int i;
+
+	status = readl(hpriv->host_base + HOST_IRQ_STAT);
+
+	if (status == 0xffffffff) {
+		printk(KERN_ERR DRV_NAME ": IRQ status == 0xffffffff, "
+		       "PCI fault or device removal?\n");
+		goto out;
+	}
+
+	if (!(status & IRQ_STAT_4PORTS))
+		goto out;
+
+	spin_lock(&host_set->lock);
+
+	for (i = 0; i < host_set->n_ports; i++)
+		if (status & (1 << i)) {
+			struct ata_port *ap = host_set->ports[i];
+			if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+				sil24_host_intr(host_set->ports[i]);
+				handled++;
+			} else
+				printk(KERN_ERR DRV_NAME
+				       ": interrupt from disabled port %d\n", i);
+		}
+
+	spin_unlock(&host_set->lock);
+ out:
+	return IRQ_RETVAL(handled);
+}
+
+static int sil24_port_start(struct ata_port *ap)
+{
+	struct device *dev = ap->host_set->dev;
+	struct sil24_port_priv *pp;
+	struct sil24_cmd_block *cb;
+	size_t cb_size = sizeof(*cb);
+	dma_addr_t cb_dma;
+
+	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
+	if (!pp)
+		return -ENOMEM;
+	memset(pp, 0, sizeof(*pp));
+
+	pp->tf.command = ATA_DRDY;
+
+	cb = dma_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
+	if (!cb) {
+		kfree(pp);
+		return -ENOMEM;
+	}
+	memset(cb, 0, cb_size);
+
+	pp->cmd_block = cb;
+	pp->cmd_block_dma = cb_dma;
+
+	ap->private_data = pp;
+
+	return 0;
+}
+
+static void sil24_port_stop(struct ata_port *ap)
+{
+	struct device *dev = ap->host_set->dev;
+	struct sil24_port_priv *pp = ap->private_data;
+	size_t cb_size = sizeof(*pp->cmd_block);
+
+	dma_free_coherent(dev, cb_size, pp->cmd_block, pp->cmd_block_dma);
+	kfree(pp);
+}
+
+static void sil24_host_stop(struct ata_host_set *host_set)
+{
+	struct sil24_host_priv *hpriv = host_set->private_data;
+
+	iounmap(hpriv->host_base);
+	iounmap(hpriv->port_base);
+	kfree(hpriv);
+}
+
+static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int printed_version = 0;
+	unsigned int board_id = (unsigned int)ent->driver_data;
+	struct ata_port_info *pinfo = &sil24_port_info[board_id];
+	struct ata_probe_ent *probe_ent = NULL;
+	struct sil24_host_priv *hpriv = NULL;
+	void *host_base = NULL, *port_base = NULL;
+	int i, rc;
+
+	if (!printed_version++)
+		printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
+
+	rc = pci_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	rc = pci_request_regions(pdev, DRV_NAME);
+	if (rc)
+		goto out_disable;
+
+	rc = -ENOMEM;
+	/* ioremap mmio registers */
+	host_base = ioremap(pci_resource_start(pdev, 0),
+			    pci_resource_len(pdev, 0));
+	if (!host_base)
+		goto out_free;
+	port_base = ioremap(pci_resource_start(pdev, 2),
+			    pci_resource_len(pdev, 2));
+	if (!port_base)
+		goto out_free;
+
+	/* allocate & init probe_ent and hpriv */
+	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
+	if (!probe_ent)
+		goto out_free;
+
+	hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
+	if (!hpriv)
+		goto out_free;
+
+	memset(probe_ent, 0, sizeof(*probe_ent));
+	probe_ent->dev = pci_dev_to_dev(pdev);
+	INIT_LIST_HEAD(&probe_ent->node);
+
+	probe_ent->sht		= pinfo->sht;
+	probe_ent->host_flags	= pinfo->host_flags;
+	probe_ent->pio_mask	= pinfo->pio_mask;
+	probe_ent->udma_mask	= pinfo->udma_mask;
+	probe_ent->port_ops	= pinfo->port_ops;
+	probe_ent->n_ports	= SIL24_FLAG2NPORTS(pinfo->host_flags);
+
+	probe_ent->irq = pdev->irq;
+	probe_ent->irq_flags = SA_SHIRQ;
+	probe_ent->mmio_base = port_base;
+	probe_ent->private_data = hpriv;
+
+	memset(hpriv, 0, sizeof(*hpriv));
+	hpriv->host_base = host_base;
+	hpriv->port_base = port_base;
+
+	/*
+	 * Configure the device
+	 */
+	/*
+	 * FIXME: This device is certainly 64-bit capable.  We just
+	 * don't know how to use it.  After fixing 32bit activation in
+	 * this function, enable 64bit masks here.
+	 */
+	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+	if (rc) {
+		printk(KERN_ERR DRV_NAME "(%s): 32-bit DMA enable failed\n",
+		       pci_name(pdev));
+		goto out_free;
+	}
+	rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+	if (rc) {
+		printk(KERN_ERR DRV_NAME "(%s): 32-bit consistent DMA enable failed\n",
+		       pci_name(pdev));
+		goto out_free;
+	}
+
+	/* GPIO off */
+	writel(0, host_base + HOST_FLASH_CMD);
+
+	/* Mask interrupts during initialization */
+	writel(0, host_base + HOST_CTRL);
+
+	for (i = 0; i < probe_ent->n_ports; i++) {
+		void *port = port_base + i * PORT_REGS_SIZE;
+		unsigned long portu = (unsigned long)port;
+		u32 tmp;
+		int cnt;
+
+		probe_ent->port[i].cmd_addr = portu + PORT_PRB;
+		probe_ent->port[i].scr_addr = portu + PORT_SCONTROL;
+
+		ata_std_ports(&probe_ent->port[i]);
+
+		/* Initial PHY setting */
+		writel(0x20c, port + PORT_PHY_CFG);
+
+		/* Clear port RST */
+		tmp = readl(port + PORT_CTRL_STAT);
+		if (tmp & PORT_CS_PORT_RST) {
+			writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
+			readl(port + PORT_CTRL_STAT);	/* sync */
+			for (cnt = 0; cnt < 10; cnt++) {
+				msleep(10);
+				tmp = readl(port + PORT_CTRL_STAT);
+				if (!(tmp & PORT_CS_PORT_RST))
+					break;
+			}
+			if (tmp & PORT_CS_PORT_RST)
+				printk(KERN_ERR DRV_NAME
+				       "(%s): failed to clear port RST\n",
+				       pci_name(pdev));
+		}
+
+		/* Zero error counters. */
+		writel(0x8000, port + PORT_DECODE_ERR_THRESH);
+		writel(0x8000, port + PORT_CRC_ERR_THRESH);
+		writel(0x8000, port + PORT_HSHK_ERR_THRESH);
+		writel(0x0000, port + PORT_DECODE_ERR_CNT);
+		writel(0x0000, port + PORT_CRC_ERR_CNT);
+		writel(0x0000, port + PORT_HSHK_ERR_CNT);
+
+		/* FIXME: 32bit activation? */
+		writel(0, port + PORT_ACTIVATE_UPPER_ADDR);
+		writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_STAT);
+
+		/* Configure interrupts */
+		writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
+		writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR | PORT_IRQ_SDB_FIS,
+		       port + PORT_IRQ_ENABLE_SET);
+
+		/* Clear interrupts */
+		writel(0x0fff0fff, port + PORT_IRQ_STAT);
+		writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
+
+		/* Clear port multiplier enable and resume bits */
+		writel(PORT_CS_PM_EN | PORT_CS_RESUME, port + PORT_CTRL_CLR);
+
+		/* Reset itself */
+		if (__sil24_reset_controller(port))
+			printk(KERN_ERR DRV_NAME
+			       "(%s): failed to reset controller\n",
+			       pci_name(pdev));
+	}
+
+	/* Turn on interrupts */
+	writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
+
+	pci_set_master(pdev);
+
+	/* FIXME: check ata_device_add return value */
+	ata_device_add(probe_ent);
+
+	kfree(probe_ent);
+	return 0;
+
+ out_free:
+	if (host_base)
+		iounmap(host_base);
+	if (port_base)
+		iounmap(port_base);
+	kfree(probe_ent);
+	kfree(hpriv);
+	pci_release_regions(pdev);
+ out_disable:
+	pci_disable_device(pdev);
+	return rc;
+}
+
+static int __init sil24_init(void)
+{
+	return pci_module_init(&sil24_pci_driver);
+}
+
+static void __exit sil24_exit(void)
+{
+	pci_unregister_driver(&sil24_pci_driver);
+}
+
+MODULE_AUTHOR("Tejun Heo");
+MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, sil24_pci_tbl);
+
+module_init(sil24_init);
+module_exit(sil24_exit);
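sata_sil24 packs the per-chip port count into bits 30-31 of host_flags through SIL24_NPORTS2FLAG()/SIL24_FLAG2NPORTS(). A quick, self-contained sanity check of that round trip (the macros are copied from the driver; the program itself is only illustrative):

    #include <stdio.h>

    #define SIL24_NPORTS2FLAG(nports)	((((unsigned)(nports) - 1) & 0x3) << 30)
    #define SIL24_FLAG2NPORTS(flag)		((((flag) >> 30) & 0x3) + 1)

    int main(void)
    {
    	unsigned n;

    	/* 4 -> 0xc0000000 (SiI 3124), 2 -> 0x40000000 (3132), 1 -> 0 (3131/3531) */
    	for (n = 1; n <= 4; n++)
    		printf("%u ports -> flag 0x%08x -> %u ports\n",
    		       n, SIL24_NPORTS2FLAG(n), SIL24_FLAG2NPORTS(SIL24_NPORTS2FLAG(n)));
    	return 0;
    }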

+ 1 - 1
drivers/scsi/sata_sis.c

@@ -263,7 +263,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_out_regions;
 		goto err_out_regions;
 
 
 	ppi = &sis_port_info;
 	ppi = &sis_port_info;
-	probe_ent = ata_pci_init_native_mode(pdev, &ppi);
+	probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
 	if (!probe_ent) {
 	if (!probe_ent) {
 		rc = -ENOMEM;
 		rc = -ENOMEM;
 		goto err_out_regions;
 		goto err_out_regions;

+ 1 - 1
drivers/scsi/sata_uli.c

@@ -202,7 +202,7 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_out_regions;
 		goto err_out_regions;
 
 
 	ppi = &uli_port_info;
 	ppi = &uli_port_info;
-	probe_ent = ata_pci_init_native_mode(pdev, &ppi);
+	probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
 	if (!probe_ent) {
 	if (!probe_ent) {
 		rc = -ENOMEM;
 		rc = -ENOMEM;
 		goto err_out_regions;
 		goto err_out_regions;

+ 1 - 1
drivers/scsi/sata_via.c

@@ -212,7 +212,7 @@ static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev)
 	struct ata_probe_ent *probe_ent;
 	struct ata_probe_ent *probe_ent;
 	struct ata_port_info *ppi = &svia_port_info;
 	struct ata_port_info *ppi = &svia_port_info;
 
 
-	probe_ent = ata_pci_init_native_mode(pdev, &ppi);
+	probe_ent = ata_pci_init_native_mode(pdev, &ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
 	if (!probe_ent)
 	if (!probe_ent)
 		return NULL;
 		return NULL;
 
 

+ 0 - 4
drivers/serial/sunsu.c

@@ -518,11 +518,7 @@ static void sunsu_change_mouse_baud(struct uart_sunsu_port *up)
 
 
 	quot = up->port.uartclk / (16 * new_baud);
 	quot = up->port.uartclk / (16 * new_baud);
 
 
-	spin_unlock(&up->port.lock);
-
 	sunsu_change_speed(&up->port, up->cflag, 0, quot);
 	sunsu_change_speed(&up->port, up->cflag, 0, quot);
-
-	spin_lock(&up->port.lock);
 }
 }
 
 
 static void receive_kbd_ms_chars(struct uart_sunsu_port *up, struct pt_regs *regs, int is_break)
 static void receive_kbd_ms_chars(struct uart_sunsu_port *up, struct pt_regs *regs, int is_break)

+ 1 - 1
fs/bfs/dir.c

@@ -108,7 +108,7 @@ static int bfs_create(struct inode * dir, struct dentry * dentry, int mode,
 	inode->i_mapping->a_ops = &bfs_aops;
 	inode->i_mapping->a_ops = &bfs_aops;
 	inode->i_mode = mode;
 	inode->i_mode = mode;
 	inode->i_ino = ino;
 	inode->i_ino = ino;
-	BFS_I(inode)->i_dsk_ino = cpu_to_le16(ino);
+	BFS_I(inode)->i_dsk_ino = ino;
 	BFS_I(inode)->i_sblock = 0;
 	BFS_I(inode)->i_sblock = 0;
 	BFS_I(inode)->i_eblock = 0;
 	BFS_I(inode)->i_eblock = 0;
 	insert_inode_hash(inode);
 	insert_inode_hash(inode);

+ 31 - 13
fs/bfs/inode.c

@@ -357,28 +357,46 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
 	}
 	}
 
 
 	info->si_blocks = (le32_to_cpu(bfs_sb->s_end) + 1)>>BFS_BSIZE_BITS; /* for statfs(2) */
 	info->si_blocks = (le32_to_cpu(bfs_sb->s_end) + 1)>>BFS_BSIZE_BITS; /* for statfs(2) */
-	info->si_freeb = (le32_to_cpu(bfs_sb->s_end) + 1 -  cpu_to_le32(bfs_sb->s_start))>>BFS_BSIZE_BITS;
+	info->si_freeb = (le32_to_cpu(bfs_sb->s_end) + 1 -  le32_to_cpu(bfs_sb->s_start))>>BFS_BSIZE_BITS;
 	info->si_freei = 0;
 	info->si_freei = 0;
 	info->si_lf_eblk = 0;
 	info->si_lf_eblk = 0;
 	info->si_lf_sblk = 0;
 	info->si_lf_sblk = 0;
 	info->si_lf_ioff = 0;
 	info->si_lf_ioff = 0;
+	bh = NULL;
 	for (i=BFS_ROOT_INO; i<=info->si_lasti; i++) {
 	for (i=BFS_ROOT_INO; i<=info->si_lasti; i++) {
-		inode = iget(s,i);
-		if (BFS_I(inode)->i_dsk_ino == 0)
+		struct bfs_inode *di;
+		int block = (i - BFS_ROOT_INO)/BFS_INODES_PER_BLOCK + 1;
+		int off = (i - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK;
+		unsigned long sblock, eblock;
+
+		if (!off) {
+			brelse(bh);
+			bh = sb_bread(s, block);
+		}
+
+		if (!bh)
+			continue;
+
+		di = (struct bfs_inode *)bh->b_data + off;
+
+		if (!di->i_ino) {
 			info->si_freei++;
 			info->si_freei++;
-		else {
-			set_bit(i, info->si_imap);
-			info->si_freeb -= inode->i_blocks;
-			if (BFS_I(inode)->i_eblock > info->si_lf_eblk) {
-				info->si_lf_eblk = BFS_I(inode)->i_eblock;
-				info->si_lf_sblk = BFS_I(inode)->i_sblock;
-				info->si_lf_ioff = BFS_INO2OFF(i);
-			}
+			continue;
+		}
+		set_bit(i, info->si_imap);
+		info->si_freeb -= BFS_FILEBLOCKS(di);
+
+		sblock =  le32_to_cpu(di->i_sblock);
+		eblock =  le32_to_cpu(di->i_eblock);
+		if (eblock > info->si_lf_eblk) {
+			info->si_lf_eblk = eblock;
+			info->si_lf_sblk = sblock;
+			info->si_lf_ioff = BFS_INO2OFF(i);
 		}
 		}
-		iput(inode);
 	}
 	}
+	brelse(bh);
 	if (!(s->s_flags & MS_RDONLY)) {
 	if (!(s->s_flags & MS_RDONLY)) {
-		mark_buffer_dirty(bh);
+		mark_buffer_dirty(info->si_sbh);
 		s->s_dirt = 1;
 		s->s_dirt = 1;
 	} 
 	} 
 	dump_imap("read_super", s);
 	dump_imap("read_super", s);
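bfs_fill_super() now walks the raw on-disk inode table with sb_bread() instead of round-tripping every inode through iget(), reading a fresh block whenever the slot offset wraps to zero. The indexing in the loop above maps inode numbers onto blocks starting at block 1; for example — assuming the usual BFS constants BFS_ROOT_INO == 2 and BFS_INODES_PER_BLOCK == 8, which are defined outside this hunk:

    #include <stdio.h>

    /* Assumed BFS constants; not shown in the hunk above. */
    #define BFS_ROOT_INO		2
    #define BFS_INODES_PER_BLOCK	8

    int main(void)
    {
    	int i;

    	for (i = BFS_ROOT_INO; i <= 11; i++) {
    		int block = (i - BFS_ROOT_INO) / BFS_INODES_PER_BLOCK + 1;
    		int off   = (i - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK;
    		printf("ino %2d -> block %d, slot %d\n", i, block, off);
    	}
    	return 0;	/* inodes 2-9 share block 1; inode 10 starts block 2 */
    }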

+ 3 - 3
fs/namei.c

@@ -1551,19 +1551,19 @@ do_link:
 	if (nd->last_type != LAST_NORM)
 	if (nd->last_type != LAST_NORM)
 		goto exit;
 		goto exit;
 	if (nd->last.name[nd->last.len]) {
 	if (nd->last.name[nd->last.len]) {
-		putname(nd->last.name);
+		__putname(nd->last.name);
 		goto exit;
 		goto exit;
 	}
 	}
 	error = -ELOOP;
 	error = -ELOOP;
 	if (count++==32) {
 	if (count++==32) {
-		putname(nd->last.name);
+		__putname(nd->last.name);
 		goto exit;
 		goto exit;
 	}
 	}
 	dir = nd->dentry;
 	dir = nd->dentry;
 	down(&dir->d_inode->i_sem);
 	down(&dir->d_inode->i_sem);
 	path.dentry = __lookup_hash(&nd->last, nd->dentry, nd);
 	path.dentry = __lookup_hash(&nd->last, nd->dentry, nd);
 	path.mnt = nd->mnt;
 	path.mnt = nd->mnt;
-	putname(nd->last.name);
+	__putname(nd->last.name);
 	goto do_last;
 	goto do_last;
 }
 }
 
 

+ 3 - 0
fs/ntfs/ChangeLog

@@ -102,6 +102,9 @@ ToDo/Notes:
 	  inode instead of a vfs inode as parameter.
 	  inode instead of a vfs inode as parameter.
 	- Fix the definition of the CHKD ntfs record magic.  It had an off by
 	- Fix the definition of the CHKD ntfs record magic.  It had an off by
 	  two error causing it to be CHKB instead of CHKD.
 	  two error causing it to be CHKB instead of CHKD.
+	- Fix a stupid bug in __ntfs_bitmap_set_bits_in_run() which caused the
+	  count to become negative and hence we had a wild memset() scribbling
+	  all over the system's ram.
 
 
 2.1.23 - Implement extension of resident files and make writing safe as well as
 2.1.23 - Implement extension of resident files and make writing safe as well as
 	 many bug fixes, cleanups, and enhancements...
 	 many bug fixes, cleanups, and enhancements...

+ 3 - 2
fs/ntfs/bitmap.c

@@ -1,7 +1,7 @@
 /*
 /*
  * bitmap.c - NTFS kernel bitmap handling.  Part of the Linux-NTFS project.
  * bitmap.c - NTFS kernel bitmap handling.  Part of the Linux-NTFS project.
  *
  *
- * Copyright (c) 2004 Anton Altaparmakov
+ * Copyright (c) 2004-2005 Anton Altaparmakov
  *
  *
  * This program/include file is free software; you can redistribute it and/or
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published
  * modify it under the terms of the GNU General Public License as published
@@ -90,7 +90,8 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
 	/* If the first byte is partial, modify the appropriate bits in it. */
 	/* If the first byte is partial, modify the appropriate bits in it. */
 	if (bit) {
 	if (bit) {
 		u8 *byte = kaddr + pos;
 		u8 *byte = kaddr + pos;
-		while ((bit & 7) && cnt--) {
+		while ((bit & 7) && cnt) {
+			cnt--;
 			if (value)
 			if (value)
 				*byte |= 1 << bit++;
 				*byte |= 1 << bit++;
 			else
 			else
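The two-line fix above is the whole story: the old condition "while ((bit & 7) && cnt--)" post-decrements cnt even on the test that terminates the loop, so when the partial-byte loop stops because cnt reached zero, cnt has already been pushed to -1; the ChangeLog entry above describes the resulting wild memset(). A self-contained illustration of the pitfall (not the NTFS code itself):

    #include <stdio.h>

    int main(void)
    {
    	int bit = 5, cnt = 2;

    	/* Old form: the post-decrement fires even on the failing test. */
    	while ((bit & 7) && cnt--)
    		bit++;
    	printf("old form: cnt = %d\n", cnt);	/* prints -1 */

    	bit = 5; cnt = 2;
    	/* Fixed form: decrement only once the body actually runs. */
    	while ((bit & 7) && cnt) {
    		cnt--;
    		bit++;
    	}
    	printf("new form: cnt = %d\n", cnt);	/* prints 0 */
    	return 0;
    }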

+ 1 - 1
fs/ntfs/layout.h

@@ -309,7 +309,7 @@ typedef le16 MFT_RECORD_FLAGS;
  * Note: The _LE versions will return a CPU endian formatted value!
  * Note: The _LE versions will return a CPU endian formatted value!
  */
  */
 #define MFT_REF_MASK_CPU 0x0000ffffffffffffULL
 #define MFT_REF_MASK_CPU 0x0000ffffffffffffULL
-#define MFT_REF_MASK_LE const_cpu_to_le64(0x0000ffffffffffffULL)
+#define MFT_REF_MASK_LE const_cpu_to_le64(MFT_REF_MASK_CPU)
 
 
 typedef u64 MFT_REF;
 typedef u64 MFT_REF;
 typedef le64 leMFT_REF;
 typedef le64 leMFT_REF;

+ 2 - 1
fs/ntfs/mft.c

@@ -58,7 +58,8 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni)
 	 * overflowing the unsigned long, but I don't think we would ever get
 	 * overflowing the unsigned long, but I don't think we would ever get
 	 * here if the volume was that big...
 	 * here if the volume was that big...
 	 */
 	 */
-	index = ni->mft_no << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT;
+	index = (u64)ni->mft_no << vol->mft_record_size_bits >>
+			PAGE_CACHE_SHIFT;
 	ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
 	ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
 
 
 	i_size = i_size_read(mft_vi);
 	i_size = i_size_read(mft_vi);
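The (u64) cast added above matters on 32-bit builds, where ni->mft_no << vol->mft_record_size_bits is otherwise evaluated in a 32-bit unsigned long and the high bits are lost before the right shift by PAGE_CACHE_SHIFT ever happens. A standalone demonstration with made-up values (the mft_no, record-size and page-shift numbers are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t mft_no = 0x500000;			/* hypothetical: ~5 million MFT records */
    	unsigned rec_bits = 10, page_shift = 12;	/* 1 KiB records, 4 KiB pages */

    	uint32_t bad  = (mft_no << rec_bits) >> page_shift;		/* truncated in 32 bits */
    	uint64_t good = ((uint64_t)mft_no << rec_bits) >> page_shift;

    	printf("32-bit index = 0x%x\n", bad);				/* 0x40000  (wrong) */
    	printf("64-bit index = 0x%llx\n", (unsigned long long)good);	/* 0x140000 (right) */
    	return 0;
    }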

+ 1 - 1
fs/ntfs/unistr.c

@@ -1,7 +1,7 @@
 /*
 /*
  * unistr.c - NTFS Unicode string handling. Part of the Linux-NTFS project.
  * unistr.c - NTFS Unicode string handling. Part of the Linux-NTFS project.
  *
  *
- * Copyright (c) 2001-2004 Anton Altaparmakov
+ * Copyright (c) 2001-2005 Anton Altaparmakov
  *
  *
  * This program/include file is free software; you can redistribute it and/or
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published
  * modify it under the terms of the GNU General Public License as published

+ 5 - 3
include/asm-arm/arch-h720x/system.h

@@ -17,9 +17,11 @@
 static void arch_idle(void)
 static void arch_idle(void)
 {
 {
 	CPU_REG (PMU_BASE, PMU_MODE) = PMU_MODE_IDLE;
 	CPU_REG (PMU_BASE, PMU_MODE) = PMU_MODE_IDLE;
-	__asm__ __volatile__(
-	"mov	r0, r0\n\t"
-	"mov	r0, r0");
+	nop();
+	nop();
+	CPU_REG (PMU_BASE, PMU_MODE) = PMU_MODE_RUN;
+	nop();
+	nop();
 }
 }
 
 
 
 

+ 34 - 12
include/asm-arm/arch-imx/imx-regs.h

@@ -76,6 +76,7 @@
 #define GPIO_PIN_MASK 0x1f
 #define GPIO_PIN_MASK 0x1f
 #define GPIO_PORT_MASK (0x3 << 5)
 #define GPIO_PORT_MASK (0x3 << 5)
 
 
+#define GPIO_PORT_SHIFT 5
 #define GPIO_PORTA (0<<5)
 #define GPIO_PORTA (0<<5)
 #define GPIO_PORTB (1<<5)
 #define GPIO_PORTB (1<<5)
 #define GPIO_PORTC (2<<5)
 #define GPIO_PORTC (2<<5)
@@ -88,24 +89,37 @@
 #define GPIO_PF    (0<<9)
 #define GPIO_PF    (0<<9)
 #define GPIO_AF    (1<<9)
 #define GPIO_AF    (1<<9)
 
 
+#define GPIO_OCR_SHIFT 10
 #define GPIO_OCR_MASK (3<<10)
 #define GPIO_OCR_MASK (3<<10)
 #define GPIO_AIN   (0<<10)
 #define GPIO_AIN   (0<<10)
 #define GPIO_BIN   (1<<10)
 #define GPIO_BIN   (1<<10)
 #define GPIO_CIN   (2<<10)
 #define GPIO_CIN   (2<<10)
-#define GPIO_GPIO  (3<<10)
+#define GPIO_DR    (3<<10)
 
 
-#define GPIO_AOUT  (1<<12)
-#define GPIO_BOUT  (1<<13)
+#define GPIO_AOUT_SHIFT 12
+#define GPIO_AOUT_MASK (3<<12)
+#define GPIO_AOUT     (0<<12)
+#define GPIO_AOUT_ISR (1<<12)
+#define GPIO_AOUT_0   (2<<12)
+#define GPIO_AOUT_1   (3<<12)
+
+#define GPIO_BOUT_SHIFT 14
+#define GPIO_BOUT_MASK (3<<14)
+#define GPIO_BOUT      (0<<14)
+#define GPIO_BOUT_ISR  (1<<14)
+#define GPIO_BOUT_0    (2<<14)
+#define GPIO_BOUT_1    (3<<14)
+
+#define GPIO_GIUS      (1<<16)
 
 
 /* assignements for GPIO alternate/primary functions */
 /* assignements for GPIO alternate/primary functions */
 
 
 /* FIXME: This list is not completed. The correct directions are
 /* FIXME: This list is not completed. The correct directions are
  * missing on some (many) pins
  * missing on some (many) pins
  */
  */
-#define PA0_PF_A24           ( GPIO_PORTA | GPIO_PF | 0 )
-#define PA0_AIN_SPI2_CLK     ( GPIO_PORTA | GPIO_OUT | GPIO_AIN | 0 )
+#define PA0_AIN_SPI2_CLK     ( GPIO_GIUS | GPIO_PORTA | GPIO_OUT | 0 )
 #define PA0_AF_ETMTRACESYNC  ( GPIO_PORTA | GPIO_AF | 0 )
 #define PA0_AF_ETMTRACESYNC  ( GPIO_PORTA | GPIO_AF | 0 )
-#define PA1_AOUT_SPI2_RXD    ( GPIO_PORTA | GPIO_IN | GPIO_AOUT | 1 )
+#define PA1_AOUT_SPI2_RXD    ( GPIO_GIUS | GPIO_PORTA | GPIO_IN | 1 )
 #define PA1_PF_TIN           ( GPIO_PORTA | GPIO_PF | 1 )
 #define PA1_PF_TIN           ( GPIO_PORTA | GPIO_PF | 1 )
 #define PA2_PF_PWM0          ( GPIO_PORTA | GPIO_OUT | GPIO_PF | 2 )
 #define PA2_PF_PWM0          ( GPIO_PORTA | GPIO_OUT | GPIO_PF | 2 )
 #define PA3_PF_CSI_MCLK      ( GPIO_PORTA | GPIO_PF | 3 )
 #define PA3_PF_CSI_MCLK      ( GPIO_PORTA | GPIO_PF | 3 )
@@ -123,7 +137,7 @@
 #define PA15_PF_I2C_SDA      ( GPIO_PORTA | GPIO_OUT | GPIO_PF | 15 )
 #define PA15_PF_I2C_SDA      ( GPIO_PORTA | GPIO_OUT | GPIO_PF | 15 )
 #define PA16_PF_I2C_SCL      ( GPIO_PORTA | GPIO_OUT | GPIO_PF | 16 )
 #define PA16_PF_I2C_SCL      ( GPIO_PORTA | GPIO_OUT | GPIO_PF | 16 )
 #define PA17_AF_ETMTRACEPKT4 ( GPIO_PORTA | GPIO_AF | 17 )
 #define PA17_AF_ETMTRACEPKT4 ( GPIO_PORTA | GPIO_AF | 17 )
-#define PA17_AIN_SPI2_SS     ( GPIO_PORTA | GPIO_AIN | 17 )
+#define PA17_AIN_SPI2_SS     ( GPIO_GIUS | GPIO_PORTA | GPIO_OUT | 17 )
 #define PA18_AF_ETMTRACEPKT5 ( GPIO_PORTA | GPIO_AF | 18 )
 #define PA18_AF_ETMTRACEPKT5 ( GPIO_PORTA | GPIO_AF | 18 )
 #define PA19_AF_ETMTRACEPKT6 ( GPIO_PORTA | GPIO_AF | 19 )
 #define PA19_AF_ETMTRACEPKT6 ( GPIO_PORTA | GPIO_AF | 19 )
 #define PA20_AF_ETMTRACEPKT7 ( GPIO_PORTA | GPIO_AF | 20 )
 #define PA20_AF_ETMTRACEPKT7 ( GPIO_PORTA | GPIO_AF | 20 )
@@ -191,19 +205,27 @@
 #define PC15_PF_SPI1_SS      ( GPIO_PORTC | GPIO_PF | 15 )
 #define PC15_PF_SPI1_SS      ( GPIO_PORTC | GPIO_PF | 15 )
 #define PC16_PF_SPI1_MISO    ( GPIO_PORTC | GPIO_PF | 16 )
 #define PC16_PF_SPI1_MISO    ( GPIO_PORTC | GPIO_PF | 16 )
 #define PC17_PF_SPI1_MOSI    ( GPIO_PORTC | GPIO_PF | 17 )
 #define PC17_PF_SPI1_MOSI    ( GPIO_PORTC | GPIO_PF | 17 )
+#define PC24_BIN_UART3_RI    ( GPIO_GIUS | GPIO_PORTC | GPIO_OUT | GPIO_BIN | 24 )
+#define PC25_BIN_UART3_DSR   ( GPIO_GIUS | GPIO_PORTC | GPIO_OUT | GPIO_BIN | 25 )
+#define PC26_AOUT_UART3_DTR  ( GPIO_GIUS | GPIO_PORTC | GPIO_IN | 26 )
+#define PC27_BIN_UART3_DCD   ( GPIO_GIUS | GPIO_PORTC | GPIO_OUT | GPIO_BIN | 27 )
+#define PC28_BIN_UART3_CTS   ( GPIO_GIUS | GPIO_PORTC | GPIO_OUT | GPIO_BIN | 28 )
+#define PC29_AOUT_UART3_RTS  ( GPIO_GIUS | GPIO_PORTC | GPIO_IN | 29 )
+#define PC30_BIN_UART3_TX    ( GPIO_GIUS | GPIO_PORTC | GPIO_BIN | 30 )
+#define PC31_AOUT_UART3_RX   ( GPIO_GIUS | GPIO_PORTC | GPIO_IN | 31)
 #define PD6_PF_LSCLK         ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 6 )
 #define PD6_PF_LSCLK         ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 6 )
 #define PD7_PF_REV           ( GPIO_PORTD | GPIO_PF | 7 )
 #define PD7_PF_REV           ( GPIO_PORTD | GPIO_PF | 7 )
 #define PD7_AF_UART2_DTR     ( GPIO_PORTD | GPIO_IN | GPIO_AF | 7 )
 #define PD7_AF_UART2_DTR     ( GPIO_PORTD | GPIO_IN | GPIO_AF | 7 )
-#define PD7_AIN_SPI2_SCLK    ( GPIO_PORTD | GPIO_AIN | 7 )
+#define PD7_AIN_SPI2_SCLK    ( GPIO_GIUS | GPIO_PORTD | GPIO_AIN | 7 )
 #define PD8_PF_CLS           ( GPIO_PORTD | GPIO_PF | 8 )
 #define PD8_PF_CLS           ( GPIO_PORTD | GPIO_PF | 8 )
 #define PD8_AF_UART2_DCD     ( GPIO_PORTD | GPIO_OUT | GPIO_AF | 8 )
 #define PD8_AF_UART2_DCD     ( GPIO_PORTD | GPIO_OUT | GPIO_AF | 8 )
-#define PD8_AIN_SPI2_SS      ( GPIO_PORTD | GPIO_AIN | 8 )
+#define PD8_AIN_SPI2_SS      ( GPIO_GIUS | GPIO_PORTD | GPIO_AIN | 8 )
 #define PD9_PF_PS            ( GPIO_PORTD | GPIO_PF | 9 )
 #define PD9_PF_PS            ( GPIO_PORTD | GPIO_PF | 9 )
 #define PD9_AF_UART2_RI      ( GPIO_PORTD | GPIO_OUT | GPIO_AF | 9 )
 #define PD9_AF_UART2_RI      ( GPIO_PORTD | GPIO_OUT | GPIO_AF | 9 )
-#define PD9_AOUT_SPI2_RXD    ( GPIO_PORTD | GPIO_IN | GPIO_AOUT | 9 )
+#define PD9_AOUT_SPI2_RXD    ( GPIO_GIUS | GPIO_PORTD | GPIO_IN | 9 )
 #define PD10_PF_SPL_SPR      ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 10 )
 #define PD10_PF_SPL_SPR      ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 10 )
 #define PD10_AF_UART2_DSR    ( GPIO_PORTD | GPIO_OUT | GPIO_AF | 10 )
 #define PD10_AF_UART2_DSR    ( GPIO_PORTD | GPIO_OUT | GPIO_AF | 10 )
-#define PD10_AIN_SPI2_TXD    ( GPIO_PORTD | GPIO_OUT | GPIO_AIN | 10 )
+#define PD10_AIN_SPI2_TXD    ( GPIO_GIUS | GPIO_PORTD | GPIO_OUT | 10 )
 #define PD11_PF_CONTRAST     ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 11 )
 #define PD11_PF_CONTRAST     ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 11 )
 #define PD12_PF_ACD_OE       ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 12 )
 #define PD12_PF_ACD_OE       ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 12 )
 #define PD13_PF_LP_HSYNC     ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 13 )
 #define PD13_PF_LP_HSYNC     ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 13 )
@@ -225,7 +247,7 @@
 #define PD29_PF_LD14         ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 29 )
 #define PD29_PF_LD14         ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 29 )
 #define PD30_PF_LD15         ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 30 )
 #define PD30_PF_LD15         ( GPIO_PORTD | GPIO_OUT | GPIO_PF | 30 )
 #define PD31_PF_TMR2OUT      ( GPIO_PORTD | GPIO_PF | 31 )
 #define PD31_PF_TMR2OUT      ( GPIO_PORTD | GPIO_PF | 31 )
-#define PD31_BIN_SPI2_TXD    ( GPIO_PORTD | GPIO_BIN | 31 )
+#define PD31_BIN_SPI2_TXD    ( GPIO_GIUS | GPIO_PORTD | GPIO_BIN | 31 )
 
 
 /*
 /*
  * PWM controller
  * PWM controller
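
The reworked i.MX pin descriptors pack every routing decision into one word: the pin number in bits 0-4, the port in bits 5-6, the function/direction bits above that, the OCR output-source field at bit 10, the A_OUT/B_OUT input selects at bits 12 and 14, and the new GPIO_GIUS flag at bit 16. The actual decoding lives in arch/arm/mach-imx/generic.c, which this merge also touches; the sketch below only copies a subset of the masks locally to show how one descriptor breaks apart. GPIO_PORTD is assumed to encode as (3<<5), which the excerpt above does not show:

#include <stdio.h>

/* Subset of the imx-regs.h field layout, redefined locally so this
 * sketch is self-contained.  GPIO_PORTD = (3<<5) is an assumption;
 * the excerpt above only lists ports A-C. */
#define GPIO_PIN_MASK	0x1f
#define GPIO_PORT_SHIFT	5
#define GPIO_PORT_MASK	(0x3 << 5)
#define GPIO_PORTD	(3 << 5)
#define GPIO_OCR_SHIFT	10
#define GPIO_OCR_MASK	(3 << 10)
#define GPIO_BIN	(1 << 10)
#define GPIO_GIUS	(1 << 16)

#define PD31_BIN_SPI2_TXD  ( GPIO_GIUS | GPIO_PORTD | GPIO_BIN | 31 )

int main(void)
{
	unsigned int mode = PD31_BIN_SPI2_TXD;

	printf("pin  %u\n",  mode & GPIO_PIN_MASK);			 /* 31 */
	printf("port %u\n", (mode & GPIO_PORT_MASK) >> GPIO_PORT_SHIFT); /* 3 = port D */
	printf("ocr  %u\n", (mode & GPIO_OCR_MASK) >> GPIO_OCR_SHIFT);	 /* 1 = B_IN source */
	printf("gius %s\n", (mode & GPIO_GIUS) ? "yes" : "no");
	return 0;
}
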

+ 1 - 1
include/asm-arm/arch-ixp4xx/platform.h

@@ -93,7 +93,7 @@ extern struct pci_bus *ixp4xx_scan_bus(int nr, struct pci_sys_data *sys);
 
 
 static inline void gpio_line_config(u8 line, u32 direction)
 static inline void gpio_line_config(u8 line, u32 direction)
 {
 {
-	if (direction == IXP4XX_GPIO_OUT)
+	if (direction == IXP4XX_GPIO_IN)
 		*IXP4XX_GPIO_GPOER |= (1 << line);
 		*IXP4XX_GPIO_GPOER |= (1 << line);
 	else
 	else
 		*IXP4XX_GPIO_GPOER &= ~(1 << line);
 		*IXP4XX_GPIO_GPOER &= ~(1 << line);
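
This one-line change inverts the meaning of the direction test: on IXP4xx the GPOER register behaves as an output-disable mask, so a set bit turns the line into an input and a cleared bit lets it drive, which means the old code configured every line opposite to what the caller asked for. A hedged stand-in for the corrected helper (fake_gpoer replaces the memory-mapped IXP4XX_GPIO_GPOER, and the IN/OUT values are placeholders rather than the header's):

#include <stdio.h>
#include <stdint.h>

#define IXP4XX_GPIO_IN	1	/* placeholder values, not the real header's */
#define IXP4XX_GPIO_OUT	0

/* stand-in for the memory-mapped *IXP4XX_GPIO_GPOER register */
static uint32_t fake_gpoer = 0xffffffff;	/* start with every line as an input */

static void gpio_line_config(uint8_t line, uint32_t direction)
{
	if (direction == IXP4XX_GPIO_IN)
		fake_gpoer |= (1u << line);	/* set bit: output disabled -> input */
	else
		fake_gpoer &= ~(1u << line);	/* clear bit: line drives */
}

int main(void)
{
	gpio_line_config(3, IXP4XX_GPIO_OUT);
	printf("GPOER = 0x%08x (bit 3 cleared, line 3 is an output)\n", fake_gpoer);
	return 0;
}
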

+ 12 - 12
include/asm-sparc/btfixup.h

@@ -49,17 +49,17 @@ extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void);
 /* Put bottom 13bits into some register variable */
 /* Put bottom 13bits into some register variable */
 
 
 #define BTFIXUPDEF_SIMM13(__name)							\
 #define BTFIXUPDEF_SIMM13(__name)							\
-	extern unsigned int ___sf_##__name(void) __attribute_const__;		\
+	static inline unsigned int ___sf_##__name(void) __attribute_const__;		\
 	extern unsigned ___ss_##__name[2];						\
 	extern unsigned ___ss_##__name[2];						\
-	extern __inline__ unsigned int ___sf_##__name(void) {				\
+	static inline unsigned int ___sf_##__name(void) {				\
 		unsigned int ret;							\
 		unsigned int ret;							\
 		__asm__ ("or %%g0, ___s_" #__name ", %0" : "=r"(ret));			\
 		__asm__ ("or %%g0, ___s_" #__name ", %0" : "=r"(ret));			\
 		return ret;								\
 		return ret;								\
 	}
 	}
 #define BTFIXUPDEF_SIMM13_INIT(__name,__val)						\
 #define BTFIXUPDEF_SIMM13_INIT(__name,__val)						\
-	extern unsigned int ___sf_##__name(void) __attribute_const__;		\
+	static inline unsigned int ___sf_##__name(void) __attribute_const__;		\
 	extern unsigned ___ss_##__name[2];						\
 	extern unsigned ___ss_##__name[2];						\
-	extern __inline__ unsigned int ___sf_##__name(void) {				\
+	static inline unsigned int ___sf_##__name(void) {				\
 		unsigned int ret;							\
 		unsigned int ret;							\
 		__asm__ ("or %%g0, ___s_" #__name "__btset_" #__val ", %0" : "=r"(ret));\
 		__asm__ ("or %%g0, ___s_" #__name "__btset_" #__val ", %0" : "=r"(ret));\
 		return ret;								\
 		return ret;								\
@@ -71,17 +71,17 @@ extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void);
  */
  */
 
 
 #define BTFIXUPDEF_HALF(__name)								\
 #define BTFIXUPDEF_HALF(__name)								\
-	extern unsigned int ___af_##__name(void) __attribute_const__;		\
+	static inline unsigned int ___af_##__name(void) __attribute_const__;		\
 	extern unsigned ___as_##__name[2];						\
 	extern unsigned ___as_##__name[2];						\
-	extern __inline__ unsigned int ___af_##__name(void) {				\
+	static inline unsigned int ___af_##__name(void) {				\
 		unsigned int ret;							\
 		unsigned int ret;							\
 		__asm__ ("or %%g0, ___a_" #__name ", %0" : "=r"(ret));			\
 		__asm__ ("or %%g0, ___a_" #__name ", %0" : "=r"(ret));			\
 		return ret;								\
 		return ret;								\
 	}
 	}
 #define BTFIXUPDEF_HALF_INIT(__name,__val)						\
 #define BTFIXUPDEF_HALF_INIT(__name,__val)						\
-	extern unsigned int ___af_##__name(void) __attribute_const__;		\
+	static inline unsigned int ___af_##__name(void) __attribute_const__;		\
 	extern unsigned ___as_##__name[2];						\
 	extern unsigned ___as_##__name[2];						\
-	extern __inline__ unsigned int ___af_##__name(void) {				\
+	static inline unsigned int ___af_##__name(void) {				\
 		unsigned int ret;							\
 		unsigned int ret;							\
 		__asm__ ("or %%g0, ___a_" #__name "__btset_" #__val ", %0" : "=r"(ret));\
 		__asm__ ("or %%g0, ___a_" #__name "__btset_" #__val ", %0" : "=r"(ret));\
 		return ret;								\
 		return ret;								\
@@ -90,17 +90,17 @@ extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void);
 /* Put upper 22 bits into some register variable */
 /* Put upper 22 bits into some register variable */
 
 
 #define BTFIXUPDEF_SETHI(__name)							\
 #define BTFIXUPDEF_SETHI(__name)							\
-	extern unsigned int ___hf_##__name(void) __attribute_const__;		\
+	static inline unsigned int ___hf_##__name(void) __attribute_const__;		\
 	extern unsigned ___hs_##__name[2];						\
 	extern unsigned ___hs_##__name[2];						\
-	extern __inline__ unsigned int ___hf_##__name(void) {				\
+	static inline unsigned int ___hf_##__name(void) {				\
 		unsigned int ret;							\
 		unsigned int ret;							\
 		__asm__ ("sethi %%hi(___h_" #__name "), %0" : "=r"(ret));		\
 		__asm__ ("sethi %%hi(___h_" #__name "), %0" : "=r"(ret));		\
 		return ret;								\
 		return ret;								\
 	}
 	}
 #define BTFIXUPDEF_SETHI_INIT(__name,__val)						\
 #define BTFIXUPDEF_SETHI_INIT(__name,__val)						\
-	extern unsigned int ___hf_##__name(void) __attribute_const__;		\
+	static inline unsigned int ___hf_##__name(void) __attribute_const__;		\
 	extern unsigned ___hs_##__name[2];						\
 	extern unsigned ___hs_##__name[2];						\
-	extern __inline__ unsigned int ___hf_##__name(void) {				\
+	static inline unsigned int ___hf_##__name(void) {				\
 		unsigned int ret;							\
 		unsigned int ret;							\
 		__asm__ ("sethi %%hi(___h_" #__name "__btset_" #__val "), %0" : 	\
 		__asm__ ("sethi %%hi(___h_" #__name "__btset_" #__val "), %0" : 	\
 			 "=r"(ret));							\
 			 "=r"(ret));							\

+ 9 - 9
include/asm-sparc/cache.h

@@ -27,7 +27,7 @@
  */
  */
 
 
 /* First, cache-tag access. */
 /* First, cache-tag access. */
-extern __inline__ unsigned int get_icache_tag(int setnum, int tagnum)
+static inline unsigned int get_icache_tag(int setnum, int tagnum)
 {
 {
 	unsigned int vaddr, retval;
 	unsigned int vaddr, retval;
 
 
@@ -38,7 +38,7 @@ extern __inline__ unsigned int get_icache_tag(int setnum, int tagnum)
 	return retval;
 	return retval;
 }
 }
 
 
-extern __inline__ void put_icache_tag(int setnum, int tagnum, unsigned int entry)
+static inline void put_icache_tag(int setnum, int tagnum, unsigned int entry)
 {
 {
 	unsigned int vaddr;
 	unsigned int vaddr;
 
 
@@ -51,7 +51,7 @@ extern __inline__ void put_icache_tag(int setnum, int tagnum, unsigned int entry
 /* Second cache-data access.  The data is returned two-32bit quantities
 /* Second cache-data access.  The data is returned two-32bit quantities
  * at a time.
  * at a time.
  */
  */
-extern __inline__ void get_icache_data(int setnum, int tagnum, int subblock,
+static inline void get_icache_data(int setnum, int tagnum, int subblock,
 				       unsigned int *data)
 				       unsigned int *data)
 {
 {
 	unsigned int value1, value2, vaddr;
 	unsigned int value1, value2, vaddr;
@@ -67,7 +67,7 @@ extern __inline__ void get_icache_data(int setnum, int tagnum, int subblock,
 	data[0] = value1; data[1] = value2;
 	data[0] = value1; data[1] = value2;
 }
 }
 
 
-extern __inline__ void put_icache_data(int setnum, int tagnum, int subblock,
+static inline void put_icache_data(int setnum, int tagnum, int subblock,
 				       unsigned int *data)
 				       unsigned int *data)
 {
 {
 	unsigned int value1, value2, vaddr;
 	unsigned int value1, value2, vaddr;
@@ -92,35 +92,35 @@ extern __inline__ void put_icache_data(int setnum, int tagnum, int subblock,
  */
  */
 
 
 /* Flushes which clear out both the on-chip and external caches */
 /* Flushes which clear out both the on-chip and external caches */
-extern __inline__ void flush_ei_page(unsigned int addr)
+static inline void flush_ei_page(unsigned int addr)
 {
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 			     "r" (addr), "i" (ASI_M_FLUSH_PAGE) :
 			     "r" (addr), "i" (ASI_M_FLUSH_PAGE) :
 			     "memory");
 			     "memory");
 }
 }
 
 
-extern __inline__ void flush_ei_seg(unsigned int addr)
+static inline void flush_ei_seg(unsigned int addr)
 {
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 			     "r" (addr), "i" (ASI_M_FLUSH_SEG) :
 			     "r" (addr), "i" (ASI_M_FLUSH_SEG) :
 			     "memory");
 			     "memory");
 }
 }
 
 
-extern __inline__ void flush_ei_region(unsigned int addr)
+static inline void flush_ei_region(unsigned int addr)
 {
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 			     "r" (addr), "i" (ASI_M_FLUSH_REGION) :
 			     "r" (addr), "i" (ASI_M_FLUSH_REGION) :
 			     "memory");
 			     "memory");
 }
 }
 
 
-extern __inline__ void flush_ei_ctx(unsigned int addr)
+static inline void flush_ei_ctx(unsigned int addr)
 {
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 			     "r" (addr), "i" (ASI_M_FLUSH_CTX) :
 			     "r" (addr), "i" (ASI_M_FLUSH_CTX) :
 			     "memory");
 			     "memory");
 }
 }
 
 
-extern __inline__ void flush_ei_user(unsigned int addr)
+static inline void flush_ei_user(unsigned int addr)
 {
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 			     "r" (addr), "i" (ASI_M_FLUSH_USER) :
 			     "r" (addr), "i" (ASI_M_FLUSH_USER) :

+ 4 - 4
include/asm-sparc/cypress.h

@@ -48,25 +48,25 @@
 #define CYPRESS_NFAULT    0x00000002
 #define CYPRESS_NFAULT    0x00000002
 #define CYPRESS_MENABLE   0x00000001
 #define CYPRESS_MENABLE   0x00000001
 
 
-extern __inline__ void cypress_flush_page(unsigned long page)
+static inline void cypress_flush_page(unsigned long page)
 {
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 			     "r" (page), "i" (ASI_M_FLUSH_PAGE));
 			     "r" (page), "i" (ASI_M_FLUSH_PAGE));
 }
 }
 
 
-extern __inline__ void cypress_flush_segment(unsigned long addr)
+static inline void cypress_flush_segment(unsigned long addr)
 {
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 			     "r" (addr), "i" (ASI_M_FLUSH_SEG));
 			     "r" (addr), "i" (ASI_M_FLUSH_SEG));
 }
 }
 
 
-extern __inline__ void cypress_flush_region(unsigned long addr)
+static inline void cypress_flush_region(unsigned long addr)
 {
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 			     "r" (addr), "i" (ASI_M_FLUSH_REGION));
 			     "r" (addr), "i" (ASI_M_FLUSH_REGION));
 }
 }
 
 
-extern __inline__ void cypress_flush_context(void)
+static inline void cypress_flush_context(void)
 {
 {
 	__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
 	__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
 			     "i" (ASI_M_FLUSH_CTX));
 			     "i" (ASI_M_FLUSH_CTX));

+ 1 - 1
include/asm-sparc/delay.h

@@ -10,7 +10,7 @@
 #include <linux/config.h>
 #include <linux/config.h>
 #include <asm/cpudata.h>
 #include <asm/cpudata.h>
 
 
-extern __inline__ void __delay(unsigned long loops)
+static inline void __delay(unsigned long loops)
 {
 {
 	__asm__ __volatile__("cmp %0, 0\n\t"
 	__asm__ __volatile__("cmp %0, 0\n\t"
 			     "1: bne 1b\n\t"
 			     "1: bne 1b\n\t"

+ 1 - 1
include/asm-sparc/dma.h

@@ -198,7 +198,7 @@ extern void dvma_init(struct sbus_bus *);
 /* Pause until counter runs out or BIT isn't set in the DMA condition
 /* Pause until counter runs out or BIT isn't set in the DMA condition
  * register.
  * register.
  */
  */
-extern __inline__ void sparc_dma_pause(struct sparc_dma_registers *regs,
+static inline void sparc_dma_pause(struct sparc_dma_registers *regs,
 				       unsigned long bit)
 				       unsigned long bit)
 {
 {
 	int ctr = 50000;   /* Let's find some bugs ;) */
 	int ctr = 50000;   /* Let's find some bugs ;) */

+ 2 - 2
include/asm-sparc/iommu.h

@@ -108,12 +108,12 @@ struct iommu_struct {
 	struct bit_map usemap;
 	struct bit_map usemap;
 };
 };
 
 
-extern __inline__ void iommu_invalidate(struct iommu_regs *regs)
+static inline void iommu_invalidate(struct iommu_regs *regs)
 {
 {
 	regs->tlbflush = 0;
 	regs->tlbflush = 0;
 }
 }
 
 
-extern __inline__ void iommu_invalidate_page(struct iommu_regs *regs, unsigned long ba)
+static inline void iommu_invalidate_page(struct iommu_regs *regs, unsigned long ba)
 {
 {
 	regs->pageflush = (ba & PAGE_MASK);
 	regs->pageflush = (ba & PAGE_MASK);
 }
 }

+ 1 - 1
include/asm-sparc/kdebug.h

@@ -46,7 +46,7 @@ struct kernel_debug {
 extern struct kernel_debug *linux_dbvec;
 extern struct kernel_debug *linux_dbvec;
 
 
 /* Use this macro in C-code to enter the debugger. */
 /* Use this macro in C-code to enter the debugger. */
-extern __inline__ void sp_enter_debugger(void)
+static inline void sp_enter_debugger(void)
 {
 {
 	__asm__ __volatile__("jmpl %0, %%o7\n\t"
 	__asm__ __volatile__("jmpl %0, %%o7\n\t"
 			     "nop\n\t" : :
 			     "nop\n\t" : :

+ 2 - 2
include/asm-sparc/mbus.h

@@ -83,7 +83,7 @@ extern unsigned int hwbug_bitmask;
  */
  */
 #define TBR_ID_SHIFT            20
 #define TBR_ID_SHIFT            20
 
 
-extern __inline__ int get_cpuid(void)
+static inline int get_cpuid(void)
 {
 {
 	register int retval;
 	register int retval;
 	__asm__ __volatile__("rd %%tbr, %0\n\t"
 	__asm__ __volatile__("rd %%tbr, %0\n\t"
@@ -93,7 +93,7 @@ extern __inline__ int get_cpuid(void)
 	return (retval & 3);
 	return (retval & 3);
 }
 }
 
 
-extern __inline__ int get_modid(void)
+static inline int get_modid(void)
 {
 {
 	return (get_cpuid() | 0x8);
 	return (get_cpuid() | 0x8);
 }
 }

+ 1 - 1
include/asm-sparc/msi.h

@@ -19,7 +19,7 @@
 #define MSI_ASYNC_MODE  0x80000000	/* Operate the MSI asynchronously */
 #define MSI_ASYNC_MODE  0x80000000	/* Operate the MSI asynchronously */
 
 
 
 
-extern __inline__ void msi_set_sync(void)
+static inline void msi_set_sync(void)
 {
 {
 	__asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
 	__asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
 			      "andn %%g3, %2, %%g3\n\t"
 			      "andn %%g3, %2, %%g3\n\t"

+ 4 - 4
include/asm-sparc/mxcc.h

@@ -85,7 +85,7 @@
 
 
 #ifndef __ASSEMBLY__
 #ifndef __ASSEMBLY__
 
 
-extern __inline__ void mxcc_set_stream_src(unsigned long *paddr)
+static inline void mxcc_set_stream_src(unsigned long *paddr)
 {
 {
 	unsigned long data0 = paddr[0];
 	unsigned long data0 = paddr[0];
 	unsigned long data1 = paddr[1];
 	unsigned long data1 = paddr[1];
@@ -98,7 +98,7 @@ extern __inline__ void mxcc_set_stream_src(unsigned long *paddr)
 			      "i" (ASI_M_MXCC) : "g2", "g3");
 			      "i" (ASI_M_MXCC) : "g2", "g3");
 }
 }
 
 
-extern __inline__ void mxcc_set_stream_dst(unsigned long *paddr)
+static inline void mxcc_set_stream_dst(unsigned long *paddr)
 {
 {
 	unsigned long data0 = paddr[0];
 	unsigned long data0 = paddr[0];
 	unsigned long data1 = paddr[1];
 	unsigned long data1 = paddr[1];
@@ -111,7 +111,7 @@ extern __inline__ void mxcc_set_stream_dst(unsigned long *paddr)
 			      "i" (ASI_M_MXCC) : "g2", "g3");
 			      "i" (ASI_M_MXCC) : "g2", "g3");
 }
 }
 
 
-extern __inline__ unsigned long mxcc_get_creg(void)
+static inline unsigned long mxcc_get_creg(void)
 {
 {
 	unsigned long mxcc_control;
 	unsigned long mxcc_control;
 
 
@@ -125,7 +125,7 @@ extern __inline__ unsigned long mxcc_get_creg(void)
 	return mxcc_control;
 	return mxcc_control;
 }
 }
 
 
-extern __inline__ void mxcc_set_creg(unsigned long mxcc_control)
+static inline void mxcc_set_creg(unsigned long mxcc_control)
 {
 {
 	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
 	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
 			     "r" (mxcc_control), "r" (MXCC_CREG),
 			     "r" (mxcc_control), "r" (MXCC_CREG),

+ 15 - 15
include/asm-sparc/obio.h

@@ -98,7 +98,7 @@
 
 
 #ifndef __ASSEMBLY__
 #ifndef __ASSEMBLY__
 
 
-extern __inline__ int bw_get_intr_mask(int sbus_level)
+static inline int bw_get_intr_mask(int sbus_level)
 {
 {
 	int mask;
 	int mask;
 	
 	
@@ -109,7 +109,7 @@ extern __inline__ int bw_get_intr_mask(int sbus_level)
 	return mask;
 	return mask;
 }
 }
 
 
-extern __inline__ void bw_clear_intr_mask(int sbus_level, int mask)
+static inline void bw_clear_intr_mask(int sbus_level, int mask)
 {
 {
 	__asm__ __volatile__ ("stha %0, [%1] %2" : :
 	__asm__ __volatile__ ("stha %0, [%1] %2" : :
 			      "r" (mask),
 			      "r" (mask),
@@ -117,7 +117,7 @@ extern __inline__ void bw_clear_intr_mask(int sbus_level, int mask)
 			      "i" (ASI_M_CTL));
 			      "i" (ASI_M_CTL));
 }
 }
 
 
-extern __inline__ unsigned bw_get_prof_limit(int cpu)
+static inline unsigned bw_get_prof_limit(int cpu)
 {
 {
 	unsigned limit;
 	unsigned limit;
 	
 	
@@ -128,7 +128,7 @@ extern __inline__ unsigned bw_get_prof_limit(int cpu)
 	return limit;
 	return limit;
 }
 }
 
 
-extern __inline__ void bw_set_prof_limit(int cpu, unsigned limit)
+static inline void bw_set_prof_limit(int cpu, unsigned limit)
 {
 {
 	__asm__ __volatile__ ("sta %0, [%1] %2" : :
 	__asm__ __volatile__ ("sta %0, [%1] %2" : :
 			      "r" (limit),
 			      "r" (limit),
@@ -136,7 +136,7 @@ extern __inline__ void bw_set_prof_limit(int cpu, unsigned limit)
 			      "i" (ASI_M_CTL));
 			      "i" (ASI_M_CTL));
 }
 }
 
 
-extern __inline__ unsigned bw_get_ctrl(int cpu)
+static inline unsigned bw_get_ctrl(int cpu)
 {
 {
 	unsigned ctrl;
 	unsigned ctrl;
 	
 	
@@ -147,7 +147,7 @@ extern __inline__ unsigned bw_get_ctrl(int cpu)
 	return ctrl;
 	return ctrl;
 }
 }
 
 
-extern __inline__ void bw_set_ctrl(int cpu, unsigned ctrl)
+static inline void bw_set_ctrl(int cpu, unsigned ctrl)
 {
 {
 	__asm__ __volatile__ ("sta %0, [%1] %2" : :
 	__asm__ __volatile__ ("sta %0, [%1] %2" : :
 			      "r" (ctrl),
 			      "r" (ctrl),
@@ -157,7 +157,7 @@ extern __inline__ void bw_set_ctrl(int cpu, unsigned ctrl)
 
 
 extern unsigned char cpu_leds[32];
 extern unsigned char cpu_leds[32];
 
 
-extern __inline__ void show_leds(int cpuid)
+static inline void show_leds(int cpuid)
 {
 {
 	cpuid &= 0x1e;
 	cpuid &= 0x1e;
 	__asm__ __volatile__ ("stba %0, [%1] %2" : :
 	__asm__ __volatile__ ("stba %0, [%1] %2" : :
@@ -166,7 +166,7 @@ extern __inline__ void show_leds(int cpuid)
 			      "i" (ASI_M_CTL));
 			      "i" (ASI_M_CTL));
 }
 }
 
 
-extern __inline__ unsigned cc_get_ipen(void)
+static inline unsigned cc_get_ipen(void)
 {
 {
 	unsigned pending;
 	unsigned pending;
 	
 	
@@ -177,7 +177,7 @@ extern __inline__ unsigned cc_get_ipen(void)
 	return pending;
 	return pending;
 }
 }
 
 
-extern __inline__ void cc_set_iclr(unsigned clear)
+static inline void cc_set_iclr(unsigned clear)
 {
 {
 	__asm__ __volatile__ ("stha %0, [%1] %2" : :
 	__asm__ __volatile__ ("stha %0, [%1] %2" : :
 			      "r" (clear),
 			      "r" (clear),
@@ -185,7 +185,7 @@ extern __inline__ void cc_set_iclr(unsigned clear)
 			      "i" (ASI_M_MXCC));
 			      "i" (ASI_M_MXCC));
 }
 }
 
 
-extern __inline__ unsigned cc_get_imsk(void)
+static inline unsigned cc_get_imsk(void)
 {
 {
 	unsigned mask;
 	unsigned mask;
 	
 	
@@ -196,7 +196,7 @@ extern __inline__ unsigned cc_get_imsk(void)
 	return mask;
 	return mask;
 }
 }
 
 
-extern __inline__ void cc_set_imsk(unsigned mask)
+static inline void cc_set_imsk(unsigned mask)
 {
 {
 	__asm__ __volatile__ ("stha %0, [%1] %2" : :
 	__asm__ __volatile__ ("stha %0, [%1] %2" : :
 			      "r" (mask),
 			      "r" (mask),
@@ -204,7 +204,7 @@ extern __inline__ void cc_set_imsk(unsigned mask)
 			      "i" (ASI_M_MXCC));
 			      "i" (ASI_M_MXCC));
 }
 }
 
 
-extern __inline__ unsigned cc_get_imsk_other(int cpuid)
+static inline unsigned cc_get_imsk_other(int cpuid)
 {
 {
 	unsigned mask;
 	unsigned mask;
 	
 	
@@ -215,7 +215,7 @@ extern __inline__ unsigned cc_get_imsk_other(int cpuid)
 	return mask;
 	return mask;
 }
 }
 
 
-extern __inline__ void cc_set_imsk_other(int cpuid, unsigned mask)
+static inline void cc_set_imsk_other(int cpuid, unsigned mask)
 {
 {
 	__asm__ __volatile__ ("stha %0, [%1] %2" : :
 	__asm__ __volatile__ ("stha %0, [%1] %2" : :
 			      "r" (mask),
 			      "r" (mask),
@@ -223,7 +223,7 @@ extern __inline__ void cc_set_imsk_other(int cpuid, unsigned mask)
 			      "i" (ASI_M_CTL));
 			      "i" (ASI_M_CTL));
 }
 }
 
 
-extern __inline__ void cc_set_igen(unsigned gen)
+static inline void cc_set_igen(unsigned gen)
 {
 {
 	__asm__ __volatile__ ("sta %0, [%1] %2" : :
 	__asm__ __volatile__ ("sta %0, [%1] %2" : :
 			      "r" (gen),
 			      "r" (gen),
@@ -239,7 +239,7 @@ extern __inline__ void cc_set_igen(unsigned gen)
 #define IGEN_MESSAGE(bcast, devid, sid, levels) \
 #define IGEN_MESSAGE(bcast, devid, sid, levels) \
 	(((bcast) << 31) | ((devid) << 23) | ((sid) << 15) | (levels))
 	(((bcast) << 31) | ((devid) << 23) | ((sid) << 15) | (levels))
             
             
-extern __inline__ void sun4d_send_ipi(int cpu, int level)
+static inline void sun4d_send_ipi(int cpu, int level)
 {
 {
 	cc_set_igen(IGEN_MESSAGE(0, cpu << 3, 6 + ((level >> 1) & 7), 1 << (level - 1)));
 	cc_set_igen(IGEN_MESSAGE(0, cpu << 3, 6 + ((level >> 1) & 7), 1 << (level - 1)));
 }
 }

+ 3 - 3
include/asm-sparc/pci.h

@@ -15,12 +15,12 @@
 
 
 #define PCI_IRQ_NONE		0xffffffff
 #define PCI_IRQ_NONE		0xffffffff
 
 
-extern inline void pcibios_set_master(struct pci_dev *dev)
+static inline void pcibios_set_master(struct pci_dev *dev)
 {
 {
 	/* No special bus mastering setup handling */
 	/* No special bus mastering setup handling */
 }
 }
 
 
-extern inline void pcibios_penalize_isa_irq(int irq, int active)
+static inline void pcibios_penalize_isa_irq(int irq, int active)
 {
 {
 	/* We don't do dynamic PCI IRQ allocation */
 	/* We don't do dynamic PCI IRQ allocation */
 }
 }
@@ -137,7 +137,7 @@ extern void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist
  * only drive the low 24-bits during PCI bus mastering, then
  * only drive the low 24-bits during PCI bus mastering, then
  * you would pass 0x00ffffff as the mask to this function.
  * you would pass 0x00ffffff as the mask to this function.
  */
  */
-extern inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
+static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
 {
 {
 	return 1;
 	return 1;
 }
 }

+ 22 - 22
include/asm-sparc/pgtable.h

@@ -154,7 +154,7 @@ BTFIXUPDEF_CALL_CONST(int, pte_present, pte_t)
 BTFIXUPDEF_CALL(void, pte_clear, pte_t *)
 BTFIXUPDEF_CALL(void, pte_clear, pte_t *)
 BTFIXUPDEF_CALL(int, pte_read, pte_t)
 BTFIXUPDEF_CALL(int, pte_read, pte_t)
 
 
-extern __inline__ int pte_none(pte_t pte)
+static inline int pte_none(pte_t pte)
 {
 {
 	return !(pte_val(pte) & ~BTFIXUP_SETHI(none_mask));
 	return !(pte_val(pte) & ~BTFIXUP_SETHI(none_mask));
 }
 }
@@ -167,7 +167,7 @@ BTFIXUPDEF_CALL_CONST(int, pmd_bad, pmd_t)
 BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t)
 BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t)
 BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *)
 BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *)
 
 
-extern __inline__ int pmd_none(pmd_t pmd)
+static inline int pmd_none(pmd_t pmd)
 {
 {
 	return !(pmd_val(pmd) & ~BTFIXUP_SETHI(none_mask));
 	return !(pmd_val(pmd) & ~BTFIXUP_SETHI(none_mask));
 }
 }
@@ -194,20 +194,20 @@ BTFIXUPDEF_HALF(pte_writei)
 BTFIXUPDEF_HALF(pte_dirtyi)
 BTFIXUPDEF_HALF(pte_dirtyi)
 BTFIXUPDEF_HALF(pte_youngi)
 BTFIXUPDEF_HALF(pte_youngi)
 
 
-extern int pte_write(pte_t pte) __attribute_const__;
-extern __inline__ int pte_write(pte_t pte)
+static int pte_write(pte_t pte) __attribute_const__;
+static inline int pte_write(pte_t pte)
 {
 {
 	return pte_val(pte) & BTFIXUP_HALF(pte_writei);
 	return pte_val(pte) & BTFIXUP_HALF(pte_writei);
 }
 }
 
 
-extern int pte_dirty(pte_t pte) __attribute_const__;
-extern __inline__ int pte_dirty(pte_t pte)
+static int pte_dirty(pte_t pte) __attribute_const__;
+static inline int pte_dirty(pte_t pte)
 {
 {
 	return pte_val(pte) & BTFIXUP_HALF(pte_dirtyi);
 	return pte_val(pte) & BTFIXUP_HALF(pte_dirtyi);
 }
 }
 
 
-extern int pte_young(pte_t pte) __attribute_const__;
-extern __inline__ int pte_young(pte_t pte)
+static int pte_young(pte_t pte) __attribute_const__;
+static inline int pte_young(pte_t pte)
 {
 {
 	return pte_val(pte) & BTFIXUP_HALF(pte_youngi);
 	return pte_val(pte) & BTFIXUP_HALF(pte_youngi);
 }
 }
@@ -217,8 +217,8 @@ extern __inline__ int pte_young(pte_t pte)
  */
  */
 BTFIXUPDEF_HALF(pte_filei)
 BTFIXUPDEF_HALF(pte_filei)
 
 
-extern int pte_file(pte_t pte) __attribute_const__;
-extern __inline__ int pte_file(pte_t pte)
+static int pte_file(pte_t pte) __attribute_const__;
+static inline int pte_file(pte_t pte)
 {
 {
 	return pte_val(pte) & BTFIXUP_HALF(pte_filei);
 	return pte_val(pte) & BTFIXUP_HALF(pte_filei);
 }
 }
@@ -229,20 +229,20 @@ BTFIXUPDEF_HALF(pte_wrprotecti)
 BTFIXUPDEF_HALF(pte_mkcleani)
 BTFIXUPDEF_HALF(pte_mkcleani)
 BTFIXUPDEF_HALF(pte_mkoldi)
 BTFIXUPDEF_HALF(pte_mkoldi)
 
 
-extern pte_t pte_wrprotect(pte_t pte) __attribute_const__;
-extern __inline__ pte_t pte_wrprotect(pte_t pte)
+static pte_t pte_wrprotect(pte_t pte) __attribute_const__;
+static inline pte_t pte_wrprotect(pte_t pte)
 {
 {
 	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_wrprotecti));
 	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_wrprotecti));
 }
 }
 
 
-extern pte_t pte_mkclean(pte_t pte) __attribute_const__;
-extern __inline__ pte_t pte_mkclean(pte_t pte)
+static pte_t pte_mkclean(pte_t pte) __attribute_const__;
+static inline pte_t pte_mkclean(pte_t pte)
 {
 {
 	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkcleani));
 	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkcleani));
 }
 }
 
 
-extern pte_t pte_mkold(pte_t pte) __attribute_const__;
-extern __inline__ pte_t pte_mkold(pte_t pte)
+static pte_t pte_mkold(pte_t pte) __attribute_const__;
+static inline pte_t pte_mkold(pte_t pte)
 {
 {
 	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkoldi));
 	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkoldi));
 }
 }
@@ -278,8 +278,8 @@ BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_io, unsigned long, pgprot_t, int)
 
 
 BTFIXUPDEF_INT(pte_modify_mask)
 BTFIXUPDEF_INT(pte_modify_mask)
 
 
-extern pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
-extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot)
+static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 {
 	return __pte((pte_val(pte) & BTFIXUP_INT(pte_modify_mask)) |
 	return __pte((pte_val(pte) & BTFIXUP_INT(pte_modify_mask)) |
 		pgprot_val(newprot));
 		pgprot_val(newprot));
@@ -386,13 +386,13 @@ extern struct ctx_list ctx_used;        /* Head of used contexts list */
 
 
 #define NO_CONTEXT     -1
 #define NO_CONTEXT     -1
 
 
-extern __inline__ void remove_from_ctx_list(struct ctx_list *entry)
+static inline void remove_from_ctx_list(struct ctx_list *entry)
 {
 {
 	entry->next->prev = entry->prev;
 	entry->next->prev = entry->prev;
 	entry->prev->next = entry->next;
 	entry->prev->next = entry->next;
 }
 }
 
 
-extern __inline__ void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
+static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
 {
 {
 	entry->next = head;
 	entry->next = head;
 	(entry->prev = head->prev)->next = entry;
 	(entry->prev = head->prev)->next = entry;
@@ -401,7 +401,7 @@ extern __inline__ void add_to_ctx_list(struct ctx_list *head, struct ctx_list *e
 #define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
 #define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
 #define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
 #define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
 
 
-extern __inline__ unsigned long
+static inline unsigned long
 __get_phys (unsigned long addr)
 __get_phys (unsigned long addr)
 {
 {
 	switch (sparc_cpu_model){
 	switch (sparc_cpu_model){
@@ -416,7 +416,7 @@ __get_phys (unsigned long addr)
 	}
 	}
 }
 }
 
 
-extern __inline__ int
+static inline int
 __get_iospace (unsigned long addr)
 __get_iospace (unsigned long addr)
 {
 {
 	switch (sparc_cpu_model){
 	switch (sparc_cpu_model){

+ 15 - 15
include/asm-sparc/pgtsrmmu.h

@@ -148,7 +148,7 @@ extern void *srmmu_nocache_pool;
 #define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))
 #define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))
 
 
 /* Accessing the MMU control register. */
 /* Accessing the MMU control register. */
-extern __inline__ unsigned int srmmu_get_mmureg(void)
+static inline unsigned int srmmu_get_mmureg(void)
 {
 {
         unsigned int retval;
         unsigned int retval;
 	__asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
 	__asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
@@ -157,14 +157,14 @@ extern __inline__ unsigned int srmmu_get_mmureg(void)
 	return retval;
 	return retval;
 }
 }
 
 
-extern __inline__ void srmmu_set_mmureg(unsigned long regval)
+static inline void srmmu_set_mmureg(unsigned long regval)
 {
 {
 	__asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
 	__asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
 			     "r" (regval), "i" (ASI_M_MMUREGS) : "memory");
 			     "r" (regval), "i" (ASI_M_MMUREGS) : "memory");
 
 
 }
 }
 
 
-extern __inline__ void srmmu_set_ctable_ptr(unsigned long paddr)
+static inline void srmmu_set_ctable_ptr(unsigned long paddr)
 {
 {
 	paddr = ((paddr >> 4) & SRMMU_CTX_PMASK);
 	paddr = ((paddr >> 4) & SRMMU_CTX_PMASK);
 	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
 	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
@@ -173,7 +173,7 @@ extern __inline__ void srmmu_set_ctable_ptr(unsigned long paddr)
 			     "memory");
 			     "memory");
 }
 }
 
 
-extern __inline__ unsigned long srmmu_get_ctable_ptr(void)
+static inline unsigned long srmmu_get_ctable_ptr(void)
 {
 {
 	unsigned int retval;
 	unsigned int retval;
 
 
@@ -184,14 +184,14 @@ extern __inline__ unsigned long srmmu_get_ctable_ptr(void)
 	return (retval & SRMMU_CTX_PMASK) << 4;
 	return (retval & SRMMU_CTX_PMASK) << 4;
 }
 }
 
 
-extern __inline__ void srmmu_set_context(int context)
+static inline void srmmu_set_context(int context)
 {
 {
 	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
 	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
 			     "r" (context), "r" (SRMMU_CTX_REG),
 			     "r" (context), "r" (SRMMU_CTX_REG),
 			     "i" (ASI_M_MMUREGS) : "memory");
 			     "i" (ASI_M_MMUREGS) : "memory");
 }
 }
 
 
-extern __inline__ int srmmu_get_context(void)
+static inline int srmmu_get_context(void)
 {
 {
 	register int retval;
 	register int retval;
 	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
 	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
@@ -201,7 +201,7 @@ extern __inline__ int srmmu_get_context(void)
 	return retval;
 	return retval;
 }
 }
 
 
-extern __inline__ unsigned int srmmu_get_fstatus(void)
+static inline unsigned int srmmu_get_fstatus(void)
 {
 {
 	unsigned int retval;
 	unsigned int retval;
 
 
@@ -211,7 +211,7 @@ extern __inline__ unsigned int srmmu_get_fstatus(void)
 	return retval;
 	return retval;
 }
 }
 
 
-extern __inline__ unsigned int srmmu_get_faddr(void)
+static inline unsigned int srmmu_get_faddr(void)
 {
 {
 	unsigned int retval;
 	unsigned int retval;
 
 
@@ -222,7 +222,7 @@ extern __inline__ unsigned int srmmu_get_faddr(void)
 }
 }
 
 
 /* This is guaranteed on all SRMMU's. */
 /* This is guaranteed on all SRMMU's. */
-extern __inline__ void srmmu_flush_whole_tlb(void)
+static inline void srmmu_flush_whole_tlb(void)
 {
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
 			     "r" (0x400),        /* Flush entire TLB!! */
 			     "r" (0x400),        /* Flush entire TLB!! */
@@ -231,7 +231,7 @@ extern __inline__ void srmmu_flush_whole_tlb(void)
 }
 }
 
 
 /* These flush types are not available on all chips... */
 /* These flush types are not available on all chips... */
-extern __inline__ void srmmu_flush_tlb_ctx(void)
+static inline void srmmu_flush_tlb_ctx(void)
 {
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
 			     "r" (0x300),        /* Flush TLB ctx.. */
 			     "r" (0x300),        /* Flush TLB ctx.. */
@@ -239,7 +239,7 @@ extern __inline__ void srmmu_flush_tlb_ctx(void)
 
 
 }
 }
 
 
-extern __inline__ void srmmu_flush_tlb_region(unsigned long addr)
+static inline void srmmu_flush_tlb_region(unsigned long addr)
 {
 {
 	addr &= SRMMU_PGDIR_MASK;
 	addr &= SRMMU_PGDIR_MASK;
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
@@ -249,7 +249,7 @@ extern __inline__ void srmmu_flush_tlb_region(unsigned long addr)
 }
 }
 
 
 
 
-extern __inline__ void srmmu_flush_tlb_segment(unsigned long addr)
+static inline void srmmu_flush_tlb_segment(unsigned long addr)
 {
 {
 	addr &= SRMMU_REAL_PMD_MASK;
 	addr &= SRMMU_REAL_PMD_MASK;
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
@@ -258,7 +258,7 @@ extern __inline__ void srmmu_flush_tlb_segment(unsigned long addr)
 
 
 }
 }
 
 
-extern __inline__ void srmmu_flush_tlb_page(unsigned long page)
+static inline void srmmu_flush_tlb_page(unsigned long page)
 {
 {
 	page &= PAGE_MASK;
 	page &= PAGE_MASK;
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
@@ -267,7 +267,7 @@ extern __inline__ void srmmu_flush_tlb_page(unsigned long page)
 
 
 }
 }
 
 
-extern __inline__ unsigned long srmmu_hwprobe(unsigned long vaddr)
+static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
 {
 {
 	unsigned long retval;
 	unsigned long retval;
 
 
@@ -279,7 +279,7 @@ extern __inline__ unsigned long srmmu_hwprobe(unsigned long vaddr)
 	return retval;
 	return retval;
 }
 }
 
 
-extern __inline__ int
+static inline int
 srmmu_get_pte (unsigned long addr)
 srmmu_get_pte (unsigned long addr)
 {
 {
 	register unsigned long entry;
 	register unsigned long entry;

+ 1 - 1
include/asm-sparc/processor.h

@@ -79,7 +79,7 @@ struct thread_struct {
 extern unsigned long thread_saved_pc(struct task_struct *t);
 extern unsigned long thread_saved_pc(struct task_struct *t);
 
 
 /* Do necessary setup to start up a newly executed thread. */
 /* Do necessary setup to start up a newly executed thread. */
-extern __inline__ void start_thread(struct pt_regs * regs, unsigned long pc,
+static inline void start_thread(struct pt_regs * regs, unsigned long pc,
 				    unsigned long sp)
 				    unsigned long sp)
 {
 {
 	register unsigned long zero asm("g1");
 	register unsigned long zero asm("g1");

+ 3 - 3
include/asm-sparc/psr.h

@@ -38,7 +38,7 @@
 
 
 #ifndef __ASSEMBLY__
 #ifndef __ASSEMBLY__
 /* Get the %psr register. */
 /* Get the %psr register. */
-extern __inline__ unsigned int get_psr(void)
+static inline unsigned int get_psr(void)
 {
 {
 	unsigned int psr;
 	unsigned int psr;
 	__asm__ __volatile__(
 	__asm__ __volatile__(
@@ -53,7 +53,7 @@ extern __inline__ unsigned int get_psr(void)
 	return psr;
 	return psr;
 }
 }
 
 
-extern __inline__ void put_psr(unsigned int new_psr)
+static inline void put_psr(unsigned int new_psr)
 {
 {
 	__asm__ __volatile__(
 	__asm__ __volatile__(
 		"wr	%0, 0x0, %%psr\n\t"
 		"wr	%0, 0x0, %%psr\n\t"
@@ -72,7 +72,7 @@ extern __inline__ void put_psr(unsigned int new_psr)
 
 
 extern unsigned int fsr_storage;
 extern unsigned int fsr_storage;
 
 
-extern __inline__ unsigned int get_fsr(void)
+static inline unsigned int get_fsr(void)
 {
 {
 	unsigned int fsr = 0;
 	unsigned int fsr = 0;
 
 

+ 5 - 5
include/asm-sparc/sbi.h

@@ -65,7 +65,7 @@ struct sbi_regs {
 
 
 #ifndef __ASSEMBLY__
 #ifndef __ASSEMBLY__
 
 
-extern __inline__ int acquire_sbi(int devid, int mask)
+static inline int acquire_sbi(int devid, int mask)
 {
 {
 	__asm__ __volatile__ ("swapa [%2] %3, %0" :
 	__asm__ __volatile__ ("swapa [%2] %3, %0" :
 			      "=r" (mask) :
 			      "=r" (mask) :
@@ -75,7 +75,7 @@ extern __inline__ int acquire_sbi(int devid, int mask)
 	return mask;
 	return mask;
 }
 }
 
 
-extern __inline__ void release_sbi(int devid, int mask)
+static inline void release_sbi(int devid, int mask)
 {
 {
 	__asm__ __volatile__ ("sta %0, [%1] %2" : :
 	__asm__ __volatile__ ("sta %0, [%1] %2" : :
 			      "r" (mask),
 			      "r" (mask),
@@ -83,7 +83,7 @@ extern __inline__ void release_sbi(int devid, int mask)
 			      "i" (ASI_M_CTL));
 			      "i" (ASI_M_CTL));
 }
 }
 
 
-extern __inline__ void set_sbi_tid(int devid, int targetid)
+static inline void set_sbi_tid(int devid, int targetid)
 {
 {
 	__asm__ __volatile__ ("sta %0, [%1] %2" : :
 	__asm__ __volatile__ ("sta %0, [%1] %2" : :
 			      "r" (targetid),
 			      "r" (targetid),
@@ -91,7 +91,7 @@ extern __inline__ void set_sbi_tid(int devid, int targetid)
 			      "i" (ASI_M_CTL));
 			      "i" (ASI_M_CTL));
 }
 }
 
 
-extern __inline__ int get_sbi_ctl(int devid, int cfgno)
+static inline int get_sbi_ctl(int devid, int cfgno)
 {
 {
 	int cfg;
 	int cfg;
 	
 	
@@ -102,7 +102,7 @@ extern __inline__ int get_sbi_ctl(int devid, int cfgno)
 	return cfg;
 	return cfg;
 }
 }
 
 
-extern __inline__ void set_sbi_ctl(int devid, int cfgno, int cfg)
+static inline void set_sbi_ctl(int devid, int cfgno, int cfg)
 {
 {
 	__asm__ __volatile__ ("sta %0, [%1] %2" : :
 	__asm__ __volatile__ ("sta %0, [%1] %2" : :
 			      "r" (cfg),
 			      "r" (cfg),

+ 3 - 3
include/asm-sparc/sbus.h

@@ -28,12 +28,12 @@
  * numbers + offsets, and vice versa.
  * numbers + offsets, and vice versa.
  */
  */
 
 
-extern __inline__ unsigned long sbus_devaddr(int slotnum, unsigned long offset)
+static inline unsigned long sbus_devaddr(int slotnum, unsigned long offset)
 {
 {
   return (unsigned long) (SUN_SBUS_BVADDR+((slotnum)<<25)+(offset));
   return (unsigned long) (SUN_SBUS_BVADDR+((slotnum)<<25)+(offset));
 }
 }
 
 
-extern __inline__ int sbus_dev_slot(unsigned long dev_addr)
+static inline int sbus_dev_slot(unsigned long dev_addr)
 {
 {
   return (int) (((dev_addr)-SUN_SBUS_BVADDR)>>25);
   return (int) (((dev_addr)-SUN_SBUS_BVADDR)>>25);
 }
 }
@@ -80,7 +80,7 @@ struct sbus_bus {
 
 
 extern struct sbus_bus *sbus_root;
 extern struct sbus_bus *sbus_root;
 
 
-extern __inline__ int
+static inline int
 sbus_is_slave(struct sbus_dev *dev)
 sbus_is_slave(struct sbus_dev *dev)
 {
 {
 	/* XXX Have to write this for sun4c's */
 	/* XXX Have to write this for sun4c's */

+ 13 - 13
include/asm-sparc/smp.h

@@ -60,22 +60,22 @@ BTFIXUPDEF_BLACKBOX(load_current)
 #define smp_cross_call(func,arg1,arg2,arg3,arg4,arg5) BTFIXUP_CALL(smp_cross_call)(func,arg1,arg2,arg3,arg4,arg5)
 #define smp_cross_call(func,arg1,arg2,arg3,arg4,arg5) BTFIXUP_CALL(smp_cross_call)(func,arg1,arg2,arg3,arg4,arg5)
 #define smp_message_pass(target,msg,data,wait) BTFIXUP_CALL(smp_message_pass)(target,msg,data,wait)
 #define smp_message_pass(target,msg,data,wait) BTFIXUP_CALL(smp_message_pass)(target,msg,data,wait)
 
 
-extern __inline__ void xc0(smpfunc_t func) { smp_cross_call(func, 0, 0, 0, 0, 0); }
-extern __inline__ void xc1(smpfunc_t func, unsigned long arg1)
+static inline void xc0(smpfunc_t func) { smp_cross_call(func, 0, 0, 0, 0, 0); }
+static inline void xc1(smpfunc_t func, unsigned long arg1)
 { smp_cross_call(func, arg1, 0, 0, 0, 0); }
 { smp_cross_call(func, arg1, 0, 0, 0, 0); }
-extern __inline__ void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
+static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
 { smp_cross_call(func, arg1, arg2, 0, 0, 0); }
 { smp_cross_call(func, arg1, arg2, 0, 0, 0); }
-extern __inline__ void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
+static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
 			   unsigned long arg3)
 			   unsigned long arg3)
 { smp_cross_call(func, arg1, arg2, arg3, 0, 0); }
 { smp_cross_call(func, arg1, arg2, arg3, 0, 0); }
-extern __inline__ void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
+static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
 			   unsigned long arg3, unsigned long arg4)
 			   unsigned long arg3, unsigned long arg4)
 { smp_cross_call(func, arg1, arg2, arg3, arg4, 0); }
 { smp_cross_call(func, arg1, arg2, arg3, arg4, 0); }
-extern __inline__ void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2,
+static inline void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2,
 			   unsigned long arg3, unsigned long arg4, unsigned long arg5)
 			   unsigned long arg3, unsigned long arg4, unsigned long arg5)
 { smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); }
 { smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); }
 
 
-extern __inline__ int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait)
+static inline int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait)
 {
 {
 	xc1((smpfunc_t)func, (unsigned long)info);
 	xc1((smpfunc_t)func, (unsigned long)info);
 	return 0;
 	return 0;
@@ -84,16 +84,16 @@ extern __inline__ int smp_call_function(void (*func)(void *info), void *info, in
 extern __volatile__ int __cpu_number_map[NR_CPUS];
 extern __volatile__ int __cpu_number_map[NR_CPUS];
 extern __volatile__ int __cpu_logical_map[NR_CPUS];
 extern __volatile__ int __cpu_logical_map[NR_CPUS];
 
 
-extern __inline__ int cpu_logical_map(int cpu)
+static inline int cpu_logical_map(int cpu)
 {
 {
 	return __cpu_logical_map[cpu];
 	return __cpu_logical_map[cpu];
 }
 }
-extern __inline__ int cpu_number_map(int cpu)
+static inline int cpu_number_map(int cpu)
 {
 {
 	return __cpu_number_map[cpu];
 	return __cpu_number_map[cpu];
 }
 }
 
 
-extern __inline__ int hard_smp4m_processor_id(void)
+static inline int hard_smp4m_processor_id(void)
 {
 {
 	int cpuid;
 	int cpuid;
 
 
@@ -104,7 +104,7 @@ extern __inline__ int hard_smp4m_processor_id(void)
 	return cpuid;
 	return cpuid;
 }
 }
 
 
-extern __inline__ int hard_smp4d_processor_id(void)
+static inline int hard_smp4d_processor_id(void)
 {
 {
 	int cpuid;
 	int cpuid;
 
 
@@ -114,7 +114,7 @@ extern __inline__ int hard_smp4d_processor_id(void)
 }
 }
 
 
 #ifndef MODULE
 #ifndef MODULE
-extern __inline__ int hard_smp_processor_id(void)
+static inline int hard_smp_processor_id(void)
 {
 {
 	int cpuid;
 	int cpuid;
 
 
@@ -136,7 +136,7 @@ extern __inline__ int hard_smp_processor_id(void)
 	return cpuid;
 	return cpuid;
 }
 }
 #else
 #else
-extern __inline__ int hard_smp_processor_id(void)
+static inline int hard_smp_processor_id(void)
 {
 {
 	int cpuid;
 	int cpuid;
 	
 	

+ 4 - 4
include/asm-sparc/smpprim.h

@@ -15,7 +15,7 @@
  * atomic.
  * atomic.
  */
  */
 
 
-extern __inline__ __volatile__ char test_and_set(void *addr)
+static inline __volatile__ char test_and_set(void *addr)
 {
 {
 	char state = 0;
 	char state = 0;
 
 
@@ -27,7 +27,7 @@ extern __inline__ __volatile__ char test_and_set(void *addr)
 }
 }
 
 
 /* Initialize a spin-lock. */
 /* Initialize a spin-lock. */
-extern __inline__ __volatile__ smp_initlock(void *spinlock)
+static inline __volatile__ smp_initlock(void *spinlock)
 {
 {
 	/* Unset the lock. */
 	/* Unset the lock. */
 	*((unsigned char *) spinlock) = 0;
 	*((unsigned char *) spinlock) = 0;
@@ -36,7 +36,7 @@ extern __inline__ __volatile__ smp_initlock(void *spinlock)
 }
 }
 
 
 /* This routine spins until it acquires the lock at ADDR. */
 /* This routine spins until it acquires the lock at ADDR. */
-extern __inline__ __volatile__ smp_lock(void *addr)
+static inline __volatile__ smp_lock(void *addr)
 {
 {
 	while(test_and_set(addr) == 0xff)
 	while(test_and_set(addr) == 0xff)
 		;
 		;
@@ -46,7 +46,7 @@ extern __inline__ __volatile__ smp_lock(void *addr)
 }
 }
 
 
 /* This routine releases the lock at ADDR. */
 /* This routine releases the lock at ADDR. */
-extern __inline__ __volatile__ smp_unlock(void *addr)
+static inline __volatile__ smp_unlock(void *addr)
 {
 {
 	*((unsigned char *) addr) = 0;
 	*((unsigned char *) addr) = 0;
 }
 }

+ 5 - 5
include/asm-sparc/spinlock.h

@@ -17,7 +17,7 @@
 #define __raw_spin_unlock_wait(lock) \
 #define __raw_spin_unlock_wait(lock) \
 	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
 
-extern __inline__ void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 {
 	__asm__ __volatile__(
 	__asm__ __volatile__(
 	"\n1:\n\t"
 	"\n1:\n\t"
@@ -37,7 +37,7 @@ extern __inline__ void __raw_spin_lock(raw_spinlock_t *lock)
 	: "g2", "memory", "cc");
 	: "g2", "memory", "cc");
 }
 }
 
 
-extern __inline__ int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
 {
 	unsigned int result;
 	unsigned int result;
 	__asm__ __volatile__("ldstub [%1], %0"
 	__asm__ __volatile__("ldstub [%1], %0"
@@ -47,7 +47,7 @@ extern __inline__ int __raw_spin_trylock(raw_spinlock_t *lock)
 	return (result == 0);
 	return (result == 0);
 }
 }
 
 
-extern __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 {
 	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
 	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
 }
 }
@@ -78,7 +78,7 @@ extern __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
  *
  *
  * Unfortunately this scheme limits us to ~16,000,000 cpus.
  * Unfortunately this scheme limits us to ~16,000,000 cpus.
  */
  */
-extern __inline__ void __read_lock(raw_rwlock_t *rw)
+static inline void __read_lock(raw_rwlock_t *rw)
 {
 {
 	register raw_rwlock_t *lp asm("g1");
 	register raw_rwlock_t *lp asm("g1");
 	lp = rw;
 	lp = rw;
@@ -98,7 +98,7 @@ do {	unsigned long flags; \
 	local_irq_restore(flags); \
 	local_irq_restore(flags); \
 } while(0)
 } while(0)
 
 
-extern __inline__ void __read_unlock(raw_rwlock_t *rw)
+static inline void __read_unlock(raw_rwlock_t *rw)
 {
 {
 	register raw_rwlock_t *lp asm("g1");
 	register raw_rwlock_t *lp asm("g1");
 	lp = rw;
 	lp = rw;

+ 1 - 1
include/asm-sparc/system.h

@@ -204,7 +204,7 @@ static inline unsigned long getipl(void)
 BTFIXUPDEF_CALL(void, ___xchg32, void)
 BTFIXUPDEF_CALL(void, ___xchg32, void)
 #endif
 #endif
 
 
-extern __inline__ unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
+static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
 {
 {
 #ifdef CONFIG_SMP
 #ifdef CONFIG_SMP
 	__asm__ __volatile__("swap [%2], %0"
 	__asm__ __volatile__("swap [%2], %0"

+ 1 - 1
include/asm-sparc/traps.h

@@ -22,7 +22,7 @@ struct tt_entry {
 /* We set this to _start in system setup. */
 /* We set this to _start in system setup. */
 extern struct tt_entry *sparc_ttable;
 extern struct tt_entry *sparc_ttable;
 
 
-extern __inline__ unsigned long get_tbr(void)
+static inline unsigned long get_tbr(void)
 {
 {
 	unsigned long tbr;
 	unsigned long tbr;
 
 

+ 10 - 13
include/asm-um/processor-generic.h

@@ -13,6 +13,7 @@ struct task_struct;
 #include "linux/config.h"
 #include "linux/config.h"
 #include "asm/ptrace.h"
 #include "asm/ptrace.h"
 #include "choose-mode.h"
 #include "choose-mode.h"
+#include "registers.h"
 
 
 struct mm_struct;
 struct mm_struct;
 
 
@@ -136,19 +137,15 @@ extern struct cpuinfo_um cpu_data[];
 #define current_cpu_data boot_cpu_data
 #define current_cpu_data boot_cpu_data
 #endif
 #endif
 
 
-#define KSTK_EIP(tsk) (PT_REGS_IP(&tsk->thread.regs))
-#define KSTK_ESP(tsk) (PT_REGS_SP(&tsk->thread.regs))
-#define get_wchan(p) (0)
 
 
+#ifdef CONFIG_MODE_SKAS
+#define KSTK_REG(tsk, reg) \
+	({ union uml_pt_regs regs; \
+	   get_thread_regs(&regs, tsk->thread.mode.skas.switch_buf); \
+	   UPT_REG(&regs, reg); })
+#else
+#define KSTK_REG(tsk, reg) (0xbadbabe)
 #endif
 #endif
+#define get_wchan(p) (0)
 
 
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
+#endif
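
With the new KSTK_REG(), KSTK_EIP()/KSTK_ESP() no longer read tsk->thread.regs directly but rebuild the register set from the buffer the task saved when it last switched out, via get_thread_regs(), so they reflect where a switched-out task will resume; outside SKAS mode they fall back to the 0xbadbabe poison value. A userspace mimic of the pattern, with stub types and a stub helper in place of the UML ones (nothing below is the real UML API):

#include <stdio.h>

/* Stub stand-ins for union uml_pt_regs, the task struct and
 * get_thread_regs(); none of these are the real UML definitions. */
struct fake_regs { unsigned long eip, esp; };
struct fake_task { struct fake_regs switch_buf; };

static void get_thread_regs_stub(struct fake_regs *regs,
				 const struct fake_regs *switch_buf)
{
	*regs = *switch_buf;	/* the real helper reconstructs regs from the switch buffer */
}

/* same shape as the SKAS KSTK_REG(): a GNU statement expression that
 * fills a local register set and then picks one field out of it */
#define KSTK_REG(tsk, reg)						\
	({ struct fake_regs regs;					\
	   get_thread_regs_stub(&regs, &(tsk)->switch_buf);		\
	   regs.reg; })

#define KSTK_EIP(tsk)	KSTK_REG(tsk, eip)
#define KSTK_ESP(tsk)	KSTK_REG(tsk, esp)

int main(void)
{
	struct fake_task t = {
		.switch_buf = { .eip = 0x08048000, .esp = 0xbfffe000 },
	};

	printf("KSTK_EIP = %#lx, KSTK_ESP = %#lx\n", KSTK_EIP(&t), KSTK_ESP(&t));
	return 0;
}
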

+ 4 - 11
include/asm-um/processor-i386.h

@@ -43,17 +43,10 @@ static inline void rep_nop(void)
 #define ARCH_IS_STACKGROW(address) \
 #define ARCH_IS_STACKGROW(address) \
        (address + 32 >= UPT_SP(&current->thread.regs.regs))
        (address + 32 >= UPT_SP(&current->thread.regs.regs))
 
 
+#define KSTK_EIP(tsk) KSTK_REG(tsk, EIP)
+#define KSTK_ESP(tsk) KSTK_REG(tsk, UESP)
+#define KSTK_EBP(tsk) KSTK_REG(tsk, EBP)
+
 #include "asm/processor-generic.h"
 #include "asm/processor-generic.h"
 
 
 #endif
 #endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */

+ 3 - 11
include/asm-um/processor-x86_64.h

@@ -36,17 +36,9 @@ extern inline void rep_nop(void)
 #define ARCH_IS_STACKGROW(address) \
 #define ARCH_IS_STACKGROW(address) \
         (address + 128 >= UPT_SP(&current->thread.regs.regs))
         (address + 128 >= UPT_SP(&current->thread.regs.regs))
 
 
+#define KSTK_EIP(tsk) KSTK_REG(tsk, RIP)
+#define KSTK_ESP(tsk) KSTK_REG(tsk, RSP)
+
 #include "asm/processor-generic.h"
 #include "asm/processor-generic.h"
 
 
 #endif
 #endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */

Some files were not shown because too many files changed in this diff