
Merge with mainline to remove plat-omap/Kconfig conflict

Conflicts:
	arch/arm/plat-omap/Kconfig
Tony Lindgren, 15 years ago
commit d702d12167
100 changed files with 8403 additions and 1027 deletions
  1. Documentation/ABI/testing/sysfs-block (+14, -0)
  2. Documentation/ABI/testing/sysfs-devices-power (+79, -0)
  3. Documentation/ABI/testing/sysfs-power (+13, -0)
  4. Documentation/DocBook/v4l/io.xml (+2, -1)
  5. Documentation/DocBook/v4l/vidioc-qbuf.xml (+23, -17)
  6. Documentation/DocBook/v4l/vidioc-querybuf.xml (+4, -3)
  7. Documentation/DocBook/v4l/vidioc-reqbufs.xml (+13, -23)
  8. Documentation/RCU/00-INDEX (+8, -2)
  9. Documentation/RCU/RTFP.txt (+57, -4)
  10. Documentation/RCU/checklist.txt (+126, -82)
  11. Documentation/RCU/lockdep.txt (+67, -0)
  12. Documentation/RCU/rcu.txt (+3, -45)
  13. Documentation/RCU/stallwarn.txt (+58, -0)
  14. Documentation/RCU/torture.txt (+12, -0)
  15. Documentation/RCU/whatisRCU.txt (+10, -6)
  16. Documentation/arm/memory.txt (+5, -1)
  17. Documentation/block/queue-sysfs.txt (+5, -5)
  18. Documentation/cachetlb.txt (+27, -3)
  19. Documentation/dontdiff (+0, -1)
  20. Documentation/dvb/get_dvb_firmware (+21, -2)
  21. Documentation/feature-removal-schedule.txt (+11, -15)
  22. Documentation/filesystems/dentry-locking.txt (+2, -1)
  23. Documentation/kernel-parameters.txt (+33, -2)
  24. Documentation/lguest/lguest.c (+0, -1)
  25. Documentation/networking/ip-sysctl.txt (+4, -4)
  26. Documentation/pcmcia/locking.txt (+118, -0)
  27. Documentation/powerpc/dts-bindings/fsl/mpc5121-psc.txt (+70, -0)
  28. Documentation/powerpc/dts-bindings/fsl/spi.txt (+7, -0)
  29. Documentation/powerpc/ptrace.txt (+134, -0)
  30. Documentation/s390/CommonIO (+6, -0)
  31. Documentation/s390/driver-model.txt (+2, -2)
  32. Documentation/scsi/ChangeLog.megaraid_sas (+16, -0)
  33. Documentation/sound/alsa/ALSA-Configuration.txt (+26, -1)
  34. Documentation/sound/alsa/HD-Audio-Models.txt (+5, -0)
  35. Documentation/sound/alsa/HD-Audio.txt (+27, -0)
  36. Documentation/trace/ftrace-design.txt (+2, -3)
  37. Documentation/trace/kprobetrace.txt (+32, -25)
  38. Documentation/video4linux/CARDLIST.cx23885 (+1, -0)
  39. Documentation/video4linux/CARDLIST.saa7134 (+1, -0)
  40. Documentation/video4linux/CARDLIST.tuner (+1, -0)
  41. Documentation/video4linux/README.tlg2300 (+47, -0)
  42. Documentation/video4linux/gspca.txt (+18, -7)
  43. Documentation/video4linux/v4l2-framework.txt (+10, -96)
  44. Documentation/video4linux/videobuf (+360, -0)
  45. Documentation/x86/x86_64/boot-options.txt (+7, -13)
  46. MAINTAINERS (+69, -18)
  47. Makefile (+1, -1)
  48. arch/Kconfig (+9, -17)
  49. arch/alpha/include/asm/pgtable.h (+1, -1)
  50. arch/alpha/kernel/pci.c (+3, -3)
  51. arch/arm/Kconfig (+51, -0)
  52. arch/arm/Makefile (+1, -0)
  53. arch/arm/boot/compressed/Makefile (+1, -5)
  54. arch/arm/boot/compressed/decompress.c (+45, -0)
  55. arch/arm/boot/compressed/misc.c (+11, -98)
  56. arch/arm/boot/compressed/vmlinux.lds.in (+7, -1)
  57. arch/arm/common/clkdev.c (+10, -0)
  58. arch/arm/common/dmabounce.c (+3, -1)
  59. arch/arm/common/vic.c (+132, -133)
  60. arch/arm/configs/at572d940hfek_defconfig (+1640, -0)
  61. arch/arm/configs/omap_4430sdp_defconfig (+3, -0)
  62. arch/arm/include/asm/atomic.h (+228, -0)
  63. arch/arm/include/asm/cacheflush.h (+23, -46)
  64. arch/arm/include/asm/clkdev.h (+3, -0)
  65. arch/arm/include/asm/dma-mapping.h (+60, -19)
  66. arch/arm/include/asm/io.h (+9, -2)
  67. arch/arm/include/asm/mach/time.h (+0, -8)
  68. arch/arm/include/asm/memory.h (+13, -10)
  69. arch/arm/include/asm/mmu.h (+1, -0)
  70. arch/arm/include/asm/mmu_context.h (+15, -0)
  71. arch/arm/include/asm/page.h (+4, -3)
  72. arch/arm/include/asm/perf_event.h (+31, -0)
  73. arch/arm/include/asm/pgtable-nommu.h (+2, -2)
  74. arch/arm/include/asm/pmu.h (+75, -0)
  75. arch/arm/include/asm/setup.h (+0, -12)
  76. arch/arm/include/asm/smp_plat.h (+5, -0)
  77. arch/arm/include/asm/spinlock.h (+23, -13)
  78. arch/arm/include/asm/system.h (+1, -2)
  79. arch/arm/include/asm/thread_info.h (+2, -1)
  80. arch/arm/include/asm/tlbflush.h (+2, -1)
  81. arch/arm/kernel/Makefile (+3, -0)
  82. arch/arm/kernel/asm-offsets.c (+5, -0)
  83. arch/arm/kernel/bios32.c (+5, -3)
  84. arch/arm/kernel/leds.c (+115, -0)
  85. arch/arm/kernel/perf_event.c (+2276, -0)
  86. arch/arm/kernel/pmu.c (+103, -0)
  87. arch/arm/kernel/ptrace.c (+35, -18)
  88. arch/arm/kernel/setup.c (+32, -48)
  89. arch/arm/kernel/time.c (+0, -178)
  90. arch/arm/kernel/traps.c (+25, -10)
  91. arch/arm/kernel/vmlinux.lds.S (+0, -4)
  92. arch/arm/mach-at91/Kconfig (+23, -0)
  93. arch/arm/mach-at91/Makefile (+4, -0)
  94. arch/arm/mach-at91/at572d940hf.c (+377, -0)
  95. arch/arm/mach-at91/at572d940hf_devices.c (+970, -0)
  96. arch/arm/mach-at91/board-at572d940hf_ek.c (+328, -0)
  97. arch/arm/mach-at91/clock.c (+5, -3)
  98. arch/arm/mach-at91/clock.h (+1, -1)
  99. arch/arm/mach-at91/generic.h (+2, -0)
  100. arch/arm/mach-at91/include/mach/at572d940hf.h (+123, -0)

+ 14 - 0
Documentation/ABI/testing/sysfs-block

@@ -128,3 +128,17 @@ Description:
 		preferred request size for workloads where sustained
 		throughput is desired.  If no optimal I/O size is
 		reported this file contains 0.
+
+What:		/sys/block/<disk>/queue/nomerges
+Date:		January 2010
+Contact:
+Description:
+		Standard I/O elevator operations include attempts to
+		merge contiguous I/Os. For known random I/O loads these
+		attempts will always fail and result in extra cycles
+		being spent in the kernel. This allows one to turn off
+		this behavior in one of two ways: When set to 1, complex
+		merge checks are disabled, but the simple one-shot merges
+		with the previous I/O request are enabled. When set to 2,
+		all merge tries are disabled. The default value is 0 -
+		which enables all types of merge tries.
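
Illustrative sketch (not part of the patch): a minimal user-space C program that disables all merge attempts for a hypothetical disk "sda" by writing "2" to the attribute described above.

	#include <stdio.h>

	int main(void)
	{
		/* "sda" is a placeholder; substitute the disk being tuned */
		FILE *f = fopen("/sys/block/sda/queue/nomerges", "w");

		if (!f) {
			perror("nomerges");
			return 1;
		}
		fputs("2\n", f);	/* 0 = all merges, 1 = one-shot merges only, 2 = none */
		return fclose(f) ? 1 : 0;
	}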

+ 79 - 0
Documentation/ABI/testing/sysfs-devices-power

@@ -0,0 +1,79 @@
+What:		/sys/devices/.../power/
+Date:		January 2009
+Contact:	Rafael J. Wysocki <rjw@sisk.pl>
+Description:
+		The /sys/devices/.../power directory contains attributes
+		allowing the user space to check and modify some power
+		management related properties of given device.
+
+What:		/sys/devices/.../power/wakeup
+Date:		January 2009
+Contact:	Rafael J. Wysocki <rjw@sisk.pl>
+Description:
+		The /sys/devices/.../power/wakeup attribute allows the user
+		space to check if the device is enabled to wake up the system
+		from sleep states, such as the memory sleep state (suspend to
+		RAM) and hibernation (suspend to disk), and to enable or disable
+		it to do that as desired.
+
+		Some devices support "wakeup" events, which are hardware signals
+		used to activate the system from a sleep state.  Such devices
+		have one of the following two values for the sysfs power/wakeup
+		file:
+
+		+ "enabled\n" to issue the events;
+		+ "disabled\n" not to do so;
+
+		In that case the user space can change the setting represented
+		by the contents of this file by writing either "enabled", or
+		"disabled" to it.
+
+		For the devices that are not capable of generating system wakeup
+		events this file contains "\n".  In that case the user space
+		cannot modify the contents of this file and the device cannot be
+		enabled to wake up the system.
+
+What:		/sys/devices/.../power/control
+Date:		January 2009
+Contact:	Rafael J. Wysocki <rjw@sisk.pl>
+Description:
+		The /sys/devices/.../power/control attribute allows the user
+		space to control the run-time power management of the device.
+
+		All devices have one of the following two values for the
+		power/control file:
+
+		+ "auto\n" to allow the device to be power managed at run time;
+		+ "on\n" to prevent the device from being power managed;
+
+		The default for all devices is "auto", which means that they may
+		be subject to automatic power management, depending on their
+		drivers.  Changing this attribute to "on" prevents the driver
+		from power managing the device at run time.  Doing that while
+		the device is suspended causes it to be woken up.
+
+What:		/sys/devices/.../power/async
+Date:		January 2009
+Contact:	Rafael J. Wysocki <rjw@sisk.pl>
+Description:
+		The /sys/devices/.../power/async attribute allows the user space to
+		enable or disable the device's suspend and resume callbacks to
+		be executed asynchronously (ie. in separate threads, in parallel
+		with the main suspend/resume thread) during system-wide power
+		transitions (eg. suspend to RAM, hibernation).
+
+		All devices have one of the following two values for the
+		power/async file:
+
+		+ "enabled\n" to permit the asynchronous suspend/resume;
+		+ "disabled\n" to forbid it;
+
+		The value of this attribute may be changed by writing either
+		"enabled", or "disabled" to it.
+
+		It generally is unsafe to permit the asynchronous suspend/resume
+		of a device unless it is certain that all of the PM dependencies
+		of the device are known to the PM core.  However, for some
+		devices this attribute is set to "enabled" by bus type code or
+		device drivers and in that case it should be safe to leave the
+		default value.
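
Illustrative user-space sketch (not part of the patch): read a device's wakeup setting, then forbid run-time power management through the control attribute. The device path is hypothetical; any /sys/devices/.../power directory can be substituted.

	#include <stdio.h>

	#define DEV_POWER "/sys/devices/platform/serial8250/power"	/* hypothetical device */

	int main(void)
	{
		char val[16] = "";
		FILE *f = fopen(DEV_POWER "/wakeup", "r");

		if (f) {
			if (fgets(val, sizeof(val), f))
				printf("wakeup: %s", val);	/* "enabled", "disabled" or "\n" */
			fclose(f);
		}

		f = fopen(DEV_POWER "/control", "w");
		if (!f)
			return 1;
		fputs("on\n", f);	/* prevent run-time power management */
		return fclose(f) ? 1 : 0;
	}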

+ 13 - 0
Documentation/ABI/testing/sysfs-power

@@ -101,3 +101,16 @@ Description:
 
 		CAUTION: Using it will cause your machine's real-time (CMOS)
 		clock to be set to a random invalid time after a resume.
+
+What:		/sys/power/pm_async
+Date:		January 2009
+Contact:	Rafael J. Wysocki <rjw@sisk.pl>
+Description:
+		The /sys/power/pm_async file controls the switch allowing the
+		user space to enable or disable asynchronous suspend and resume
+		of devices.  If enabled, this feature will cause some device
+		drivers' suspend and resume callbacks to be executed in parallel
+		with each other and with the main suspend thread.  It is enabled
+		if this file contains "1", which is the default.  It may be
+		disabled by writing "0" to this file, in which case all devices
+		will be suspended and resumed synchronously.

+ 2 - 1
Documentation/DocBook/v4l/io.xml

@@ -589,7 +589,8 @@ number of a video input as in &v4l2-input; field
 	    <entry></entry>
 	    <entry>A place holder for future extensions and custom
 (driver defined) buffer types
-<constant>V4L2_BUF_TYPE_PRIVATE</constant> and higher.</entry>
+<constant>V4L2_BUF_TYPE_PRIVATE</constant> and higher. Applications
+should set this to 0.</entry>
 	  </row>
 	</tbody>
       </tgroup>

+ 23 - 17
Documentation/DocBook/v4l/vidioc-qbuf.xml

@@ -54,12 +54,10 @@ to enqueue an empty (capturing) or filled (output) buffer in the
 driver's incoming queue. The semantics depend on the selected I/O
 method.</para>
 
-    <para>To enqueue a <link linkend="mmap">memory mapped</link>
-buffer applications set the <structfield>type</structfield> field of a
-&v4l2-buffer; to the same buffer type as previously &v4l2-format;
-<structfield>type</structfield> and &v4l2-requestbuffers;
-<structfield>type</structfield>, the <structfield>memory</structfield>
-field to <constant>V4L2_MEMORY_MMAP</constant> and the
+    <para>To enqueue a buffer applications set the <structfield>type</structfield>
+field of a &v4l2-buffer; to the same buffer type as was previously used
+with &v4l2-format; <structfield>type</structfield> and &v4l2-requestbuffers;
+<structfield>type</structfield>. Applications must also set the
 <structfield>index</structfield> field. Valid index numbers range from
 zero to the number of buffers allocated with &VIDIOC-REQBUFS;
 (&v4l2-requestbuffers; <structfield>count</structfield>) minus one. The
@@ -70,8 +68,19 @@ intended for output (<structfield>type</structfield> is
 <constant>V4L2_BUF_TYPE_VBI_OUTPUT</constant>) applications must also
 initialize the <structfield>bytesused</structfield>,
 <structfield>field</structfield> and
-<structfield>timestamp</structfield> fields. See <xref
-	linkend="buffer" /> for details. When
+<structfield>timestamp</structfield> fields, see <xref
+linkend="buffer" /> for details.
+Applications must also set <structfield>flags</structfield> to 0. If a driver
+supports capturing from specific video inputs and you want to specify a video
+input, then <structfield>flags</structfield> should be set to
+<constant>V4L2_BUF_FLAG_INPUT</constant> and the field
+<structfield>input</structfield> must be initialized to the desired input.
+The <structfield>reserved</structfield> field must be set to 0.
+</para>
+
+    <para>To enqueue a <link linkend="mmap">memory mapped</link>
+buffer applications set the <structfield>memory</structfield>
+field to <constant>V4L2_MEMORY_MMAP</constant>. When
 <constant>VIDIOC_QBUF</constant> is called with a pointer to this
 structure the driver sets the
 <constant>V4L2_BUF_FLAG_MAPPED</constant> and
@@ -81,14 +90,10 @@ structure the driver sets the
 &EINVAL;.</para>
 
     <para>To enqueue a <link linkend="userp">user pointer</link>
-buffer applications set the <structfield>type</structfield> field of a
-&v4l2-buffer; to the same buffer type as previously &v4l2-format;
-<structfield>type</structfield> and &v4l2-requestbuffers;
-<structfield>type</structfield>, the <structfield>memory</structfield>
-field to <constant>V4L2_MEMORY_USERPTR</constant> and the
+buffer applications set the <structfield>memory</structfield>
+field to <constant>V4L2_MEMORY_USERPTR</constant>, the
 <structfield>m.userptr</structfield> field to the address of the
-buffer and <structfield>length</structfield> to its size. When the
-buffer is intended for output additional fields must be set as above.
+buffer and <structfield>length</structfield> to its size.
 When <constant>VIDIOC_QBUF</constant> is called with a pointer to this
 structure the driver sets the <constant>V4L2_BUF_FLAG_QUEUED</constant>
 flag and clears the <constant>V4L2_BUF_FLAG_MAPPED</constant> and
@@ -96,13 +101,14 @@ flag and clears the <constant>V4L2_BUF_FLAG_MAPPED</constant> and
 <structfield>flags</structfield> field, or it returns an error code.
 This ioctl locks the memory pages of the buffer in physical memory,
 they cannot be swapped out to disk. Buffers remain locked until
-dequeued, until the &VIDIOC-STREAMOFF; or &VIDIOC-REQBUFS; ioctl are
+dequeued, until the &VIDIOC-STREAMOFF; or &VIDIOC-REQBUFS; ioctl is
 called, or until the device is closed.</para>
 
     <para>Applications call the <constant>VIDIOC_DQBUF</constant>
 ioctl to dequeue a filled (capturing) or displayed (output) buffer
 from the driver's outgoing queue. They just set the
-<structfield>type</structfield> and <structfield>memory</structfield>
+<structfield>type</structfield>, <structfield>memory</structfield>
+and <structfield>reserved</structfield>
 fields of a &v4l2-buffer; as above, when <constant>VIDIOC_DQBUF</constant>
 is called with a pointer to this structure the driver fills the
 remaining fields or returns an error code.</para>
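
Illustrative sketch (not part of the patch): the queue/dequeue sequence described above for a memory-mapped capture buffer. The descriptor fd and the buffer index are assumed to come from earlier VIDIOC_REQBUFS/VIDIOC_QUERYBUF calls; memset() takes care of zeroing the flags and reserved fields.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	static int queue_then_dequeue(int fd, unsigned int index)
	{
		struct v4l2_buffer buf;

		memset(&buf, 0, sizeof(buf));		/* flags and reserved end up 0 */
		buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index  = index;

		if (ioctl(fd, VIDIOC_QBUF, &buf) < 0)
			return -1;

		/* ... streaming runs, the driver fills the buffer ... */

		memset(&buf, 0, sizeof(buf));
		buf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		buf.memory = V4L2_MEMORY_MMAP;
		return ioctl(fd, VIDIOC_DQBUF, &buf);	/* ready buffer's index comes back in buf.index */
	}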

+ 4 - 3
Documentation/DocBook/v4l/vidioc-querybuf.xml

@@ -54,12 +54,13 @@ buffer at any time after buffers have been allocated with the
 &VIDIOC-REQBUFS; ioctl.</para>
 
     <para>Applications set the <structfield>type</structfield> field
-    of a &v4l2-buffer; to the same buffer type as previously
+    of a &v4l2-buffer; to the same buffer type as was previously used with
 &v4l2-format; <structfield>type</structfield> and &v4l2-requestbuffers;
 <structfield>type</structfield>, and the <structfield>index</structfield>
     field. Valid index numbers range from zero
 to the number of buffers allocated with &VIDIOC-REQBUFS;
     (&v4l2-requestbuffers; <structfield>count</structfield>) minus one.
+The <structfield>reserved</structfield> field should be set to 0.
 After calling <constant>VIDIOC_QUERYBUF</constant> with a pointer to
     this structure drivers return an error code or fill the rest of
 the structure.</para>
@@ -68,8 +69,8 @@ the structure.</para>
 <constant>V4L2_BUF_FLAG_MAPPED</constant>,
 <constant>V4L2_BUF_FLAG_QUEUED</constant> and
 <constant>V4L2_BUF_FLAG_DONE</constant> flags will be valid. The
-<structfield>memory</structfield> field will be set to
-<constant>V4L2_MEMORY_MMAP</constant>, the <structfield>m.offset</structfield>
+<structfield>memory</structfield> field will be set to the current
+I/O method, the <structfield>m.offset</structfield>
 contains the offset of the buffer from the start of the device memory,
 the <structfield>length</structfield> field its size. The driver may
 or may not set the remaining fields and flags, they are meaningless in

+ 13 - 23
Documentation/DocBook/v4l/vidioc-reqbufs.xml

@@ -54,23 +54,23 @@ I/O. Memory mapped buffers are located in device memory and must be
 allocated with this ioctl before they can be mapped into the
 application's address space. User buffers are allocated by
 applications themselves, and this ioctl is merely used to switch the
-driver into user pointer I/O mode.</para>
+driver into user pointer I/O mode and to set up some internal structures.</para>
 
-    <para>To allocate device buffers applications initialize three
-fields of a <structname>v4l2_requestbuffers</structname> structure.
+    <para>To allocate device buffers applications initialize all
+fields of the <structname>v4l2_requestbuffers</structname> structure.
 They set the <structfield>type</structfield> field to the respective
 stream or buffer type, the <structfield>count</structfield> field to
-the desired number of buffers, and <structfield>memory</structfield>
-must be set to <constant>V4L2_MEMORY_MMAP</constant>. When the ioctl
-is called with a pointer to this structure the driver attempts to
-allocate the requested number of buffers and stores the actual number
+the desired number of buffers, <structfield>memory</structfield>
+must be set to the requested I/O method and the reserved array
+must be zeroed. When the ioctl
+is called with a pointer to this structure the driver will attempt to allocate
+the requested number of buffers and it stores the actual number
 allocated in the <structfield>count</structfield> field. It can be
 smaller than the number requested, even zero, when the driver runs out
-of free memory. A larger number is possible when the driver requires
-more buffers to function correctly.<footnote>
-	<para>For example video output requires at least two buffers,
+of free memory. A larger number is also possible when the driver requires
+more buffers to function correctly. For example video output requires at least two buffers,
 one displayed and one filled by the application.</para>
-	</footnote> When memory mapping I/O is not supported the ioctl
+    <para>When the I/O method is not supported the ioctl
 returns an &EINVAL;.</para>
 
     <para>Applications can call <constant>VIDIOC_REQBUFS</constant>
@@ -81,14 +81,6 @@ in progress, an implicit &VIDIOC-STREAMOFF;. <!-- mhs: I see no
 reason why munmap()ping one or even all buffers must imply
 streamoff.--></para>
 
-    <para>To negotiate user pointer I/O, applications initialize only
-the <structfield>type</structfield> field and set
-<structfield>memory</structfield> to
-<constant>V4L2_MEMORY_USERPTR</constant>. When the ioctl is called
-with a pointer to this structure the driver prepares for user pointer
-I/O, when this I/O method is not supported the ioctl returns an
-&EINVAL;.</para>
-
     <table pgwide="1" frame="none" id="v4l2-requestbuffers">
       <title>struct <structname>v4l2_requestbuffers</structname></title>
       <tgroup cols="3">
@@ -97,9 +89,7 @@ I/O, when this I/O method is not supported the ioctl returns an
 	  <row>
 	    <entry>__u32</entry>
 	    <entry><structfield>count</structfield></entry>
-	    <entry>The number of buffers requested or granted. This
-field is only used when <structfield>memory</structfield> is set to
-<constant>V4L2_MEMORY_MMAP</constant>.</entry>
+	    <entry>The number of buffers requested or granted.</entry>
 	  </row>
 	  <row>
 	    <entry>&v4l2-buf-type;</entry>
@@ -120,7 +110,7 @@ as the &v4l2-format; <structfield>type</structfield> field. See <xref
 	    <entry><structfield>reserved</structfield>[2]</entry>
 	    <entry>A place holder for future extensions and custom
 (driver defined) buffer types <constant>V4L2_BUF_TYPE_PRIVATE</constant> and
-higher.</entry>
+higher. This array should be zeroed by applications.</entry>
 	  </row>
 	</tbody>
       </tgroup>
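
Illustrative sketch (not part of the patch): requesting four memory-mapped capture buffers as described above. All fields are initialized, the reserved array is zeroed by memset(), and the count actually granted is returned to the caller.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	static int request_capture_buffers(int fd)
	{
		struct v4l2_requestbuffers req;

		memset(&req, 0, sizeof(req));	/* zeroes the reserved array */
		req.count  = 4;
		req.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		req.memory = V4L2_MEMORY_MMAP;	/* or V4L2_MEMORY_USERPTR */

		if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
			return -1;		/* EINVAL if the I/O method is unsupported */

		return req.count;		/* may be smaller (or larger) than requested */
	}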

+ 8 - 2
Documentation/RCU/00-INDEX

@@ -6,16 +6,22 @@ checklist.txt
 	- Review Checklist for RCU Patches
 listRCU.txt
 	- Using RCU to Protect Read-Mostly Linked Lists
+lockdep.txt
+	- RCU and lockdep checking
 NMI-RCU.txt
 	- Using RCU to Protect Dynamic NMI Handlers
+rcubarrier.txt
+	- RCU and Unloadable Modules
+rculist_nulls.txt
+	- RCU list primitives for use with SLAB_DESTROY_BY_RCU
 rcuref.txt
 	- Reference-count design for elements of lists/arrays protected by RCU
 rcu.txt
 	- RCU Concepts
-rcubarrier.txt
-	- Unloading modules that use RCU callbacks
 RTFP.txt
 	- List of RCU papers (bibliography) going back to 1980.
+stallwarn.txt
+	- RCU CPU stall warnings (CONFIG_RCU_CPU_STALL_DETECTOR)
 torture.txt
 	- RCU Torture Test Operation (CONFIG_RCU_TORTURE_TEST)
 trace.txt

+ 57 - 4
Documentation/RCU/RTFP.txt

@@ -25,10 +25,10 @@ to be referencing the data structure.  However, this mechanism was not
 optimized for modern computer systems, which is not surprising given
 that these overheads were not so expensive in the mid-80s.  Nonetheless,
 passive serialization appears to be the first deferred-destruction
-mechanism to be used in production.  Furthermore, the relevant patent has
-lapsed, so this approach may be used in non-GPL software, if desired.
-(In contrast, use of RCU is permitted only in software licensed under
-GPL.  Sorry!!!)
+mechanism to be used in production.  Furthermore, the relevant patent
+has lapsed, so this approach may be used in non-GPL software, if desired.
+(In contrast, implementation of RCU is permitted only in software licensed
+under either GPL or LGPL.  Sorry!!!)
 
 In 1990, Pugh [Pugh90] noted that explicitly tracking which threads
 were reading a given data structure permitted deferred free to operate
@@ -150,6 +150,18 @@ preemptible RCU [PaulEMcKenney2007PreemptibleRCU], and the three-part
 LWN "What is RCU?" series [PaulEMcKenney2007WhatIsRCUFundamentally,
 PaulEMcKenney2008WhatIsRCUUsage, and PaulEMcKenney2008WhatIsRCUAPI].
 
+2008 saw a journal paper on real-time RCU [DinakarGuniguntala2008IBMSysJ],
+a history of how Linux changed RCU more than RCU changed Linux
+[PaulEMcKenney2008RCUOSR], and a design overview of hierarchical RCU
+[PaulEMcKenney2008HierarchicalRCU].
+
+2009 introduced user-level RCU algorithms [PaulEMcKenney2009MaliciousURCU],
+which Mathieu Desnoyers is now maintaining [MathieuDesnoyers2009URCU]
+[MathieuDesnoyersPhD].  TINY_RCU [PaulEMcKenney2009BloatWatchRCU] made
+its appearance, as did expedited RCU [PaulEMcKenney2009expeditedRCU].
+The problem of resizeable RCU-protected hash tables may now be on a path
+to a solution [JoshTriplett2009RPHash].
+
 Bibtex Entries
 
 @article{Kung80
@@ -730,6 +742,11 @@ Revised:
 "
 }
 
+#
+#	"What is RCU?" LWN series.
+#
+########################################################################
+
 @article{DinakarGuniguntala2008IBMSysJ
 ,author="D. Guniguntala and P. E. McKenney and J. Triplett and J. Walpole"
 ,title="The read-copy-update mechanism for supporting real-time applications on shared-memory multiprocessor systems with {Linux}"
@@ -820,3 +837,39 @@ Revised:
 	Uniprocessor assumptions allow simplified RCU implementation.
 "
 }
+
+@unpublished{PaulEMcKenney2009expeditedRCU
+,Author="Paul E. McKenney"
+,Title="[{PATCH} -tip 0/3] expedited 'big hammer' {RCU} grace periods"
+,month="June"
+,day="25"
+,year="2009"
+,note="Available:
+\url{http://lkml.org/lkml/2009/6/25/306}
+[Viewed August 16, 2009]"
+,annotation="
+	First posting of expedited RCU to be accepted into -tip.
+"
+}
+
+@unpublished{JoshTriplett2009RPHash
+,Author="Josh Triplett"
+,Title="Scalable concurrent hash tables via relativistic programming"
+,month="September"
+,year="2009"
+,note="Linux Plumbers Conference presentation"
+,annotation="
+	RP fun with hash tables.
+"
+}
+
+@phdthesis{MathieuDesnoyersPhD
+, title  = "Low-Impact Operating System Tracing"
+, author = "Mathieu Desnoyers"
+, school = "Ecole Polytechnique de Montr\'{e}al"
+, month  = "December"
+, year   = 2009
+,note="Available:
+\url{http://www.lttng.org/pub/thesis/desnoyers-dissertation-2009-12.pdf}
+[Viewed December 9, 2009]"
+}

+ 126 - 82
Documentation/RCU/checklist.txt

@@ -8,13 +8,12 @@ would cause.  This list is based on experiences reviewing such patches
 over a rather long period of time, but improvements are always welcome!
 
 0.	Is RCU being applied to a read-mostly situation?  If the data
-	structure is updated more than about 10% of the time, then
-	you should strongly consider some other approach, unless
-	detailed performance measurements show that RCU is nonetheless
-	the right tool for the job.  Yes, you might think of RCU
-	as simply cutting overhead off of the readers and imposing it
-	on the writers.  That is exactly why normal uses of RCU will
-	do much more reading than updating.
+	structure is updated more than about 10% of the time, then you
+	should strongly consider some other approach, unless detailed
+	performance measurements show that RCU is nonetheless the right
+	tool for the job.  Yes, RCU does reduce read-side overhead by
+	increasing write-side overhead, which is exactly why normal uses
+	of RCU will do much more reading than updating.
 
 	Another exception is where performance is not an issue, and RCU
 	provides a simpler implementation.  An example of this situation
@@ -35,13 +34,13 @@ over a rather long period of time, but improvements are always welcome!
 
 	If you choose #b, be prepared to describe how you have handled
 	memory barriers on weakly ordered machines (pretty much all of
-	them -- even x86 allows reads to be reordered), and be prepared
-	to explain why this added complexity is worthwhile.  If you
-	choose #c, be prepared to explain how this single task does not
-	become a major bottleneck on big multiprocessor machines (for
-	example, if the task is updating information relating to itself
-	that other tasks can read, there by definition can be no
-	bottleneck).
+	them -- even x86 allows later loads to be reordered to precede
+	earlier stores), and be prepared to explain why this added
+	complexity is worthwhile.  If you choose #c, be prepared to
+	explain how this single task does not become a major bottleneck on
+	big multiprocessor machines (for example, if the task is updating
+	information relating to itself that other tasks can read, there
+	by definition can be no bottleneck).
 
 2.	Do the RCU read-side critical sections make proper use of
 	rcu_read_lock() and friends?  These primitives are needed
@@ -51,8 +50,10 @@ over a rather long period of time, but improvements are always welcome!
 	actuarial risk of your kernel.
 
 	As a rough rule of thumb, any dereference of an RCU-protected
-	pointer must be covered by rcu_read_lock() or rcu_read_lock_bh()
-	or by the appropriate update-side lock.
+	pointer must be covered by rcu_read_lock(), rcu_read_lock_bh(),
+	rcu_read_lock_sched(), or by the appropriate update-side lock.
+	Disabling of preemption can serve as rcu_read_lock_sched(), but
+	is less readable.
 
 3.	Does the update code tolerate concurrent accesses?
 
@@ -62,25 +63,27 @@ over a rather long period of time, but improvements are always welcome!
 	of ways to handle this concurrency, depending on the situation:
 
 	a.	Use the RCU variants of the list and hlist update
-		primitives to add, remove, and replace elements on an
-		RCU-protected list.  Alternatively, use the RCU-protected
-		trees that have been added to the Linux kernel.
+		primitives to add, remove, and replace elements on
+		an RCU-protected list.	Alternatively, use the other
+		RCU-protected data structures that have been added to
+		the Linux kernel.
 
 		This is almost always the best approach.
 
 	b.	Proceed as in (a) above, but also maintain per-element
 		locks (that are acquired by both readers and writers)
 		that guard per-element state.  Of course, fields that
-		the readers refrain from accessing can be guarded by the
-		update-side lock.
+		the readers refrain from accessing can be guarded by
+		some other lock acquired only by updaters, if desired.
 
 		This works quite well, also.
 
 	c.	Make updates appear atomic to readers.  For example,
-		pointer updates to properly aligned fields will appear
-		atomic, as will individual atomic primitives.  Operations
-		performed under a lock and sequences of multiple atomic
-		primitives will -not- appear to be atomic.
+		pointer updates to properly aligned fields will
+		appear atomic, as will individual atomic primitives.
+		Sequences of operations performed under a lock will -not-
+		appear to be atomic to RCU readers, nor will sequences
+		of multiple atomic primitives.
 
 		This can work, but is starting to get a bit tricky.
 
@@ -98,9 +101,9 @@ over a rather long period of time, but improvements are always welcome!
 		a new structure containing updated values.
 
 4.	Weakly ordered CPUs pose special challenges.  Almost all CPUs
-	are weakly ordered -- even i386 CPUs allow reads to be reordered.
-	RCU code must take all of the following measures to prevent
-	memory-corruption problems:
+	are weakly ordered -- even x86 CPUs allow later loads to be
+	reordered to precede earlier stores.  RCU code must take all of
+	the following measures to prevent memory-corruption problems:
 
 	a.	Readers must maintain proper ordering of their memory
 		accesses.  The rcu_dereference() primitive ensures that
@@ -113,14 +116,25 @@ over a rather long period of time, but improvements are always welcome!
 		The rcu_dereference() primitive is also an excellent
 		documentation aid, letting the person reading the code
 		know exactly which pointers are protected by RCU.
-
-		The rcu_dereference() primitive is used by the various
-		"_rcu()" list-traversal primitives, such as the
-		list_for_each_entry_rcu().  Note that it is perfectly
-		legal (if redundant) for update-side code to use
-		rcu_dereference() and the "_rcu()" list-traversal
-		primitives.  This is particularly useful in code
-		that is common to readers and updaters.
+		Please note that compilers can also reorder code, and
+		they are becoming increasingly aggressive about doing
+		just that.  The rcu_dereference() primitive therefore
+		also prevents destructive compiler optimizations.
+
+		The rcu_dereference() primitive is used by the
+		various "_rcu()" list-traversal primitives, such
+		as the list_for_each_entry_rcu().  Note that it is
+		perfectly legal (if redundant) for update-side code to
+		use rcu_dereference() and the "_rcu()" list-traversal
+		primitives.  This is particularly useful in code that
+		is common to readers and updaters.  However, lockdep
+		will complain if you access rcu_dereference() outside
+		of an RCU read-side critical section.  See lockdep.txt
+		to learn what to do about this.
+
+		Of course, neither rcu_dereference() nor the "_rcu()"
+		list-traversal primitives can substitute for a good
+		concurrency design coordinating among multiple updaters.
 
 	b.	If the list macros are being used, the list_add_tail_rcu()
 		and list_add_rcu() primitives must be used in order
@@ -135,11 +149,14 @@ over a rather long period of time, but improvements are always welcome!
 		readers.  Similarly, if the hlist macros are being used,
 		the hlist_del_rcu() primitive is required.
 
-		The list_replace_rcu() primitive may be used to
-		replace an old structure with a new one in an
-		RCU-protected list.
+		The list_replace_rcu() and hlist_replace_rcu() primitives
+		may be used to replace an old structure with a new one
+		in their respective types of RCU-protected lists.
+
+	d.	Rules similar to (4b) and (4c) apply to the "hlist_nulls"
+		type of RCU-protected linked lists.
 
-	d.	Updates must ensure that initialization of a given
+	e.	Updates must ensure that initialization of a given
 		structure happens before pointers to that structure are
 		publicized.  Use the rcu_assign_pointer() primitive
 		when publicizing a pointer to a structure that can
@@ -151,16 +168,31 @@ over a rather long period of time, but improvements are always welcome!
 	it cannot block.
 
 6.	Since synchronize_rcu() can block, it cannot be called from
-	any sort of irq context.  Ditto for synchronize_sched() and
-	synchronize_srcu().
-
-7.	If the updater uses call_rcu(), then the corresponding readers
-	must use rcu_read_lock() and rcu_read_unlock().  If the updater
-	uses call_rcu_bh(), then the corresponding readers must use
-	rcu_read_lock_bh() and rcu_read_unlock_bh().  If the updater
-	uses call_rcu_sched(), then the corresponding readers must
-	disable preemption.  Mixing things up will result in confusion
-	and broken kernels.
+	any sort of irq context.  The same rule applies for
+	synchronize_rcu_bh(), synchronize_sched(), synchronize_srcu(),
+	synchronize_rcu_expedited(), synchronize_rcu_bh_expedited(),
+	synchronize_sched_expedited(), and synchronize_srcu_expedited().
+
+	The expedited forms of these primitives have the same semantics
+	as the non-expedited forms, but expediting is both expensive
+	and unfriendly to real-time workloads.	Use of the expedited
+	primitives should be restricted to rare configuration-change
+	operations that would not normally be undertaken while a real-time
+	workload is running.
+
+7.	If the updater uses call_rcu() or synchronize_rcu(), then the
+	corresponding readers must use rcu_read_lock() and
+	rcu_read_unlock().  If the updater uses call_rcu_bh() or
+	synchronize_rcu_bh(), then the corresponding readers must
+	use rcu_read_lock_bh() and rcu_read_unlock_bh().  If the
+	updater uses call_rcu_sched() or synchronize_sched(), then
+	the corresponding readers must disable preemption, possibly
+	by calling rcu_read_lock_sched() and rcu_read_unlock_sched().
+	If the updater uses synchronize_srcu(), then the corresponding
+	readers must use srcu_read_lock() and srcu_read_unlock(),
+	and with the same srcu_struct.	The rules for the expedited
+	primitives are the same as for their non-expedited counterparts.
+	Mixing things up will result in confusion and broken kernels.
 
 	One exception to this rule: rcu_read_lock() and rcu_read_unlock()
 	may be substituted for rcu_read_lock_bh() and rcu_read_unlock_bh()
@@ -212,6 +244,8 @@ over a rather long period of time, but improvements are always welcome!
 	e.	Periodically invoke synchronize_rcu(), permitting a limited
 		number of updates per grace period.
 
+	The same cautions apply to call_rcu_bh() and call_rcu_sched().
+
 9.	All RCU list-traversal primitives, which include
 	rcu_dereference(), list_for_each_entry_rcu(),
 	list_for_each_continue_rcu(), and list_for_each_safe_rcu(),
@@ -219,7 +253,9 @@ over a rather long period of time, but improvements are always welcome!
 	must be protected by appropriate update-side locks.  RCU
 	read-side critical sections are delimited by rcu_read_lock()
 	and rcu_read_unlock(), or by similar primitives such as
-	rcu_read_lock_bh() and rcu_read_unlock_bh().
+	rcu_read_lock_bh() and rcu_read_unlock_bh(), in which case
+	the matching rcu_dereference() primitive must be used in order
+	to keep lockdep happy, in this case, rcu_dereference_bh().
 
 	The reason that it is permissible to use RCU list-traversal
 	primitives when the update-side lock is held is that doing so
@@ -229,7 +265,8 @@ over a rather long period of time, but improvements are always welcome!
 10.	Conversely, if you are in an RCU read-side critical section,
 	and you don't hold the appropriate update-side lock, you -must-
 	use the "_rcu()" variants of the list macros.  Failing to do so
-	will break Alpha and confuse people reading your code.
+	will break Alpha, cause aggressive compilers to generate bad code,
+	and confuse people trying to read your code.
 
 11.	Note that synchronize_rcu() -only- guarantees to wait until
 	all currently executing rcu_read_lock()-protected RCU read-side
@@ -239,15 +276,21 @@ over a rather long period of time, but improvements are always welcome!
 	rcu_read_lock()-protected read-side critical sections, do -not-
 	use synchronize_rcu().
 
-	If you want to wait for some of these other things, you might
-	instead need to use synchronize_irq() or synchronize_sched().
+	Similarly, disabling preemption is not an acceptable substitute
+	for rcu_read_lock().  Code that attempts to use preemption
+	disabling where it should be using rcu_read_lock() will break
+	in real-time kernel builds.
+
+	If you want to wait for interrupt handlers, NMI handlers, and
+	code under the influence of preempt_disable(), you instead
+	need to use synchronize_irq() or synchronize_sched().
 
 12.	Any lock acquired by an RCU callback must be acquired elsewhere
 	with softirq disabled, e.g., via spin_lock_irqsave(),
 	spin_lock_bh(), etc.  Failing to disable irq on a given
-	acquisition of that lock will result in deadlock as soon as the
-	RCU callback happens to interrupt that acquisition's critical
-	section.
+	acquisition of that lock will result in deadlock as soon as
+	the RCU softirq handler happens to run your RCU callback while
+	interrupting that acquisition's critical section.
 
 13.	RCU callbacks can be and are executed in parallel.  In many cases,
 	the callback code simply wrappers around kfree(), so that this
@@ -265,29 +308,30 @@ over a rather long period of time, but improvements are always welcome!
 	not the case, a self-spawning RCU callback would prevent the
 	victim CPU from ever going offline.)
 
-14.	SRCU (srcu_read_lock(), srcu_read_unlock(), and synchronize_srcu())
-	may only be invoked from process context.  Unlike other forms of
-	RCU, it -is- permissible to block in an SRCU read-side critical
-	section (demarked by srcu_read_lock() and srcu_read_unlock()),
-	hence the "SRCU": "sleepable RCU".  Please note that if you
-	don't need to sleep in read-side critical sections, you should
-	be using RCU rather than SRCU, because RCU is almost always
-	faster and easier to use than is SRCU.
+14.	SRCU (srcu_read_lock(), srcu_read_unlock(), srcu_dereference(),
+	synchronize_srcu(), and synchronize_srcu_expedited()) may only
+	be invoked from process context.  Unlike other forms of RCU, it
+	-is- permissible to block in an SRCU read-side critical section
+	(demarked by srcu_read_lock() and srcu_read_unlock()), hence the
+	"SRCU": "sleepable RCU".  Please note that if you don't need
+	to sleep in read-side critical sections, you should be using
+	RCU rather than SRCU, because RCU is almost always faster and
+	easier to use than is SRCU.
 
 	Also unlike other forms of RCU, explicit initialization
 	and cleanup is required via init_srcu_struct() and
 	cleanup_srcu_struct().	These are passed a "struct srcu_struct"
 	that defines the scope of a given SRCU domain.	Once initialized,
 	the srcu_struct is passed to srcu_read_lock(), srcu_read_unlock()
-	and synchronize_srcu().  A given synchronize_srcu() waits only
-	for SRCU read-side critical sections governed by srcu_read_lock()
-	and srcu_read_unlock() calls that have been passd the same
-	srcu_struct.  This property is what makes sleeping read-side
-	critical sections tolerable -- a given subsystem delays only
-	its own updates, not those of other subsystems using SRCU.
-	Therefore, SRCU is less prone to OOM the system than RCU would
-	be if RCU's read-side critical sections were permitted to
-	sleep.
+	synchronize_srcu(), and synchronize_srcu_expedited().  A given
+	synchronize_srcu() waits only for SRCU read-side critical
+	sections governed by srcu_read_lock() and srcu_read_unlock()
+	calls that have been passed the same srcu_struct.  This property
+	is what makes sleeping read-side critical sections tolerable --
+	a given subsystem delays only its own updates, not those of other
+	subsystems using SRCU.	Therefore, SRCU is less prone to OOM the
+	system than RCU would be if RCU's read-side critical sections
+	were permitted to sleep.
 
 	The ability to sleep in read-side critical sections does not
 	come for free.	First, corresponding srcu_read_lock() and
@@ -311,12 +355,12 @@ over a rather long period of time, but improvements are always welcome!
 	destructive operation, and -only- -then- invoke call_rcu(),
 	synchronize_rcu(), or friends.
 
-	Because these primitives only wait for pre-existing readers,
-	it is the caller's responsibility to guarantee safety to
-	any subsequent readers.
+	Because these primitives only wait for pre-existing readers, it
+	is the caller's responsibility to guarantee that any subsequent
+	readers will execute safely.
 
-16.	The various RCU read-side primitives do -not- contain memory
-	barriers.  The CPU (and in some cases, the compiler) is free
-	to reorder code into and out of RCU read-side critical sections.
-	It is the responsibility of the RCU update-side primitives to
-	deal with this.
+16.	The various RCU read-side primitives do -not- necessarily contain
+	memory barriers.  You should therefore plan for the CPU
+	and the compiler to freely reorder code into and out of RCU
+	read-side critical sections.  It is the responsibility of the
+	RCU update-side primitives to deal with this.
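
Illustrative sketch (not part of the patch), using a hypothetical struct foo and global pointer gp: the basic reader/updater pairing that several of the checklist items above refer to -- rcu_read_lock()/rcu_dereference() on the read side matched with rcu_assign_pointer() plus synchronize_rcu() on the update side.

	#include <linux/errno.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct foo {
		int a;
	};
	static struct foo *gp;			/* hypothetical RCU-protected pointer */
	static DEFINE_SPINLOCK(gp_lock);	/* update-side lock (item 3) */

	int reader(void)
	{
		struct foo *p;
		int a = -1;

		rcu_read_lock();		/* item 2 */
		p = rcu_dereference(gp);	/* item 4a */
		if (p)
			a = p->a;
		rcu_read_unlock();
		return a;
	}

	int updater(int a)
	{
		struct foo *newp = kmalloc(sizeof(*newp), GFP_KERNEL);
		struct foo *oldp;

		if (!newp)
			return -ENOMEM;
		newp->a = a;			/* initialize before publication (item 4e) */
		spin_lock(&gp_lock);
		oldp = gp;
		rcu_assign_pointer(gp, newp);	/* publish */
		spin_unlock(&gp_lock);
		synchronize_rcu();		/* wait for pre-existing readers (items 7 and 15) */
		kfree(oldp);
		return 0;
	}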

+ 67 - 0
Documentation/RCU/lockdep.txt

@@ -0,0 +1,67 @@
+RCU and lockdep checking
+
+All flavors of RCU have lockdep checking available, so that lockdep is
+aware of when each task enters and leaves any flavor of RCU read-side
+critical section.  Each flavor of RCU is tracked separately (but note
+that this is not the case in 2.6.32 and earlier).  This allows lockdep's
+tracking to include RCU state, which can sometimes help when debugging
+deadlocks and the like.
+
+In addition, RCU provides the following primitives that check lockdep's
+state:
+
+	rcu_read_lock_held() for normal RCU.
+	rcu_read_lock_bh_held() for RCU-bh.
+	rcu_read_lock_sched_held() for RCU-sched.
+	srcu_read_lock_held() for SRCU.
+
+These functions are conservative, and will therefore return 1 if they
+aren't certain (for example, if CONFIG_DEBUG_LOCK_ALLOC is not set).
+This prevents things like WARN_ON(!rcu_read_lock_held()) from giving false
+positives when lockdep is disabled.
+
+In addition, a separate kernel config parameter CONFIG_PROVE_RCU enables
+checking of rcu_dereference() primitives:
+
+	rcu_dereference(p):
+		Check for RCU read-side critical section.
+	rcu_dereference_bh(p):
+		Check for RCU-bh read-side critical section.
+	rcu_dereference_sched(p):
+		Check for RCU-sched read-side critical section.
+	srcu_dereference(p, sp):
+		Check for SRCU read-side critical section.
+	rcu_dereference_check(p, c):
+		Use explicit check expression "c".
+	rcu_dereference_raw(p)
+		Don't check.  (Use sparingly, if at all.)
+
+The rcu_dereference_check() check expression can be any boolean
+expression, but would normally include one of the rcu_read_lock_held()
+family of functions and a lockdep expression.  However, any boolean
+expression can be used.  For a moderately ornate example, consider
+the following:
+
+	file = rcu_dereference_check(fdt->fd[fd],
+				     rcu_read_lock_held() ||
+				     lockdep_is_held(&files->file_lock) ||
+				     atomic_read(&files->count) == 1);
+
+This expression picks up the pointer "fdt->fd[fd]" in an RCU-safe manner,
+and, if CONFIG_PROVE_RCU is configured, verifies that this expression
+is used in:
+
+1.	An RCU read-side critical section, or
+2.	with files->file_lock held, or
+3.	on an unshared files_struct.
+
+In case (1), the pointer is picked up in an RCU-safe manner for vanilla
+RCU read-side critical sections, in case (2) the ->file_lock prevents
+any change from taking place, and finally, in case (3) the current task
+is the only task accessing the files_struct, again preventing any change
+from taking place.
+
+There are currently only "universal" versions of the rcu_assign_pointer()
+and RCU list-/tree-traversal primitives, which do not (yet) check for
+being in an RCU read-side critical section.  In the future, separate
+versions of these primitives might be created.
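
Illustrative sketch (not part of the patch): a hypothetical helper that must only be called under rcu_read_lock(), using the lockdep-aware primitives above to catch misuse.

	#include <linux/kernel.h>
	#include <linux/rcupdate.h>

	struct conf;
	static struct conf *global_conf;	/* hypothetical RCU-protected pointer */

	struct conf *get_conf(void)
	{
		/* Conservative check: no false positives when lockdep is disabled. */
		WARN_ON_ONCE(!rcu_read_lock_held());
		return rcu_dereference(global_conf);	/* checked under CONFIG_PROVE_RCU */
	}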

+ 3 - 45
Documentation/RCU/rcu.txt

@@ -75,6 +75,8 @@ o	I hear that RCU is patented?  What is with that?
 	search for the string "Patent" in RTFP.txt to find them.
 	Of these, one was allowed to lapse by the assignee, and the
 	others have been contributed to the Linux kernel under GPL.
+	There are now also LGPL implementations of user-level RCU
+	available (http://lttng.org/?q=node/18).
 
 o	I hear that RCU needs work in order to support realtime kernels?
 
@@ -91,48 +93,4 @@ o	Where can I find more information on RCU?
 
 o	What are all these files in this directory?
 
-
-	NMI-RCU.txt
-
-		Describes how to use RCU to implement dynamic
-		NMI handlers, which can be revectored on the fly,
-		without rebooting.
-
-	RTFP.txt
-
-		List of RCU-related publications and web sites.
-
-	UP.txt
-
-		Discussion of RCU usage in UP kernels.
-
-	arrayRCU.txt
-
-		Describes how to use RCU to protect arrays, with
-		resizeable arrays whose elements reference other
-		data structures being of the most interest.
-
-	checklist.txt
-
-		Lists things to check for when inspecting code that
-		uses RCU.
-
-	listRCU.txt
-
-		Describes how to use RCU to protect linked lists.
-		This is the simplest and most common use of RCU
-		in the Linux kernel.
-
-	rcu.txt
-
-		You are reading it!
-
-	rcuref.txt
-
-		Describes how to combine use of reference counts
-		with RCU.
-
-	whatisRCU.txt
-
-		Overview of how the RCU implementation works.  Along
-		the way, presents a conceptual view of RCU.
+	See 00-INDEX for the list.

+ 58 - 0
Documentation/RCU/stallwarn.txt

@@ -0,0 +1,58 @@
+Using RCU's CPU Stall Detector
+
+The CONFIG_RCU_CPU_STALL_DETECTOR kernel config parameter enables
+RCU's CPU stall detector, which detects conditions that unduly delay
+RCU grace periods.  The stall detector's idea of what constitutes
+"unduly delayed" is controlled by a pair of C preprocessor macros:
+
+RCU_SECONDS_TILL_STALL_CHECK
+
+	This macro defines the period of time that RCU will wait from
+	the beginning of a grace period until it issues an RCU CPU
+	stall warning.	It is normally ten seconds.
+
+RCU_SECONDS_TILL_STALL_RECHECK
+
+	This macro defines the period of time that RCU will wait after
+	issuing a stall warning until it issues another stall warning.
+	It is normally set to thirty seconds.
+
+RCU_STALL_RAT_DELAY
+
+	The CPU stall detector tries to make the offending CPU rat on itself,
+	as this often gives better-quality stack traces.  However, if
+	the offending CPU does not detect its own stall in the number
+	of jiffies specified by RCU_STALL_RAT_DELAY, then other CPUs will
+	complain.  This is normally set to two jiffies.
+
+The following problems can result in an RCU CPU stall warning:
+
+o	A CPU looping in an RCU read-side critical section.
+	
+o	A CPU looping with interrupts disabled.
+
+o	A CPU looping with preemption disabled.
+
+o	For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the kernel
+	without invoking schedule().
+
+o	A bug in the RCU implementation.
+
+o	A hardware failure.  This is quite unlikely, but has occurred
+	at least once in a former life.  A CPU failed in a running system,
+	becoming unresponsive, but not causing an immediate crash.
+	This resulted in a series of RCU CPU stall warnings, eventually
+	leading to the realization that the CPU had failed.
+
+The RCU, RCU-sched, and RCU-bh implementations have CPU stall warning capability.
+SRCU does not do so directly, but its calls to synchronize_sched() will
+result in RCU-sched detecting any CPU stalls that might be occurring.
+
+To diagnose the cause of the stall, inspect the stack traces.  The offending
+function will usually be near the top of the stack.  If you have a series
+of stall warnings from a single extended stall, comparing the stack traces
+can often help determine where the stall is occurring, which will usually
+be in the function nearest the top of the stack that stays the same from
+trace to trace.
+
+RCU bugs can often be debugged with the help of CONFIG_RCU_TRACE.
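
Illustrative sketch (not part of the patch) of the first cause in the list above: a hypothetical wait loop that spins inside an RCU read-side critical section and never schedules, so the grace period cannot end and the stall detector eventually fires.

	#include <asm/processor.h>
	#include <linux/atomic.h>
	#include <linux/rcupdate.h>

	static atomic_t done;			/* hypothetical completion flag */

	/* BROKEN: shown only as a stall-warning trigger -- do not copy. */
	static void stall_prone_wait(void)
	{
		rcu_read_lock();
		while (!atomic_read(&done))	/* never blocks or schedules, */
			cpu_relax();		/* so the grace period cannot end */
		rcu_read_unlock();
	}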

+ 12 - 0
Documentation/RCU/torture.txt

@@ -30,6 +30,18 @@ MODULE PARAMETERS
 
 This module has the following parameters:
 
+fqs_duration	Duration (in microseconds) of artificially induced bursts
+		of force_quiescent_state() invocations.  In RCU
+		implementations having force_quiescent_state(), these
+		bursts help force races between forcing a given grace
+		period and that grace period ending on its own.
+
+fqs_holdoff	Holdoff time (in microseconds) between consecutive calls
+		to force_quiescent_state() within a burst.
+
+fqs_stutter	Wait time (in seconds) between consecutive bursts
+		of calls to force_quiescent_state().
+
 irqreaders	Says to invoke RCU readers from irq level.  This is currently
 		done via timers.  Defaults to "1" for variants of RCU that
 		permit this.  (Or, more accurately, variants of RCU that do

+ 10 - 6
Documentation/RCU/whatisRCU.txt

@@ -323,14 +323,17 @@ used as follows:
 	Defer			Protect
 
 a.	synchronize_rcu()	rcu_read_lock() / rcu_read_unlock()
-	call_rcu()
+	call_rcu()		rcu_dereference()
 
 b.	call_rcu_bh()		rcu_read_lock_bh() / rcu_read_unlock_bh()
+				rcu_dereference_bh()
 
-c.	synchronize_sched()	preempt_disable() / preempt_enable()
+c.	synchronize_sched()	rcu_read_lock_sched() / rcu_read_unlock_sched()
+				preempt_disable() / preempt_enable()
 				local_irq_save() / local_irq_restore()
 				hardirq enter / hardirq exit
 				NMI enter / NMI exit
+				rcu_dereference_sched()
 
 These three mechanisms are used as follows:
 
@@ -780,9 +783,8 @@ Linux-kernel source code, but it helps to have a full list of the
 APIs, since there does not appear to be a way to categorize them
 in docbook.  Here is the list, by category.
 
-RCU pointer/list traversal:
+RCU list traversal:
 
-	rcu_dereference
 	list_for_each_entry_rcu
 	hlist_for_each_entry_rcu
 	hlist_nulls_for_each_entry_rcu
@@ -808,7 +810,7 @@ RCU:	Critical sections	Grace period		Barrier
 
 	rcu_read_lock		synchronize_net		rcu_barrier
 	rcu_read_unlock		synchronize_rcu
-				synchronize_rcu_expedited
+	rcu_dereference		synchronize_rcu_expedited
 				call_rcu
 
 
@@ -816,7 +818,7 @@ bh:	Critical sections	Grace period		Barrier
 
 	rcu_read_lock_bh	call_rcu_bh		rcu_barrier_bh
 	rcu_read_unlock_bh	synchronize_rcu_bh
-				synchronize_rcu_bh_expedited
+	rcu_dereference_bh	synchronize_rcu_bh_expedited
 
 
 sched:	Critical sections	Grace period		Barrier
@@ -825,12 +827,14 @@ sched:	Critical sections	Grace period		Barrier
 	rcu_read_unlock_sched	call_rcu_sched
 	[preempt_disable]	synchronize_sched_expedited
 	[and friends]
+	rcu_dereference_sched
 
 
 SRCU:	Critical sections	Grace period		Barrier
 
 	srcu_read_lock		synchronize_srcu	N/A
 	srcu_read_unlock	synchronize_srcu_expedited
+	srcu_dereference
 
 SRCU:	Initialization/cleanup
 	init_srcu_struct
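
Illustrative fragment (not part of the patch) of the pairing rule the updated tables encode: _bh readers and the new rcu_dereference_bh() go with the _bh update-side primitives (gp_bh is a hypothetical RCU-bh-protected pointer).

	struct foo *p;

	rcu_read_lock_bh();
	p = rcu_dereference_bh(gp_bh);	/* RCU-bh flavored load */
	/* ... read *p ... */
	rcu_read_unlock_bh();

	/* Matching update side: rcu_assign_pointer(gp_bh, newp) followed by
	   synchronize_rcu_bh() or call_rcu_bh(). */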

+ 5 - 1
Documentation/arm/memory.txt

@@ -59,7 +59,11 @@ PAGE_OFFSET	high_memory-1	Kernel direct-mapped RAM region.
 				This maps the platforms RAM, and typically
 				maps all platform RAM in a 1:1 relationship.
 
-TASK_SIZE	PAGE_OFFSET-1	Kernel module space
+PKMAP_BASE	PAGE_OFFSET-1	Permanent kernel mappings
+				One way of mapping HIGHMEM pages into kernel
+				space.
+
+MODULES_VADDR	MODULES_END-1	Kernel module space
 				Kernel modules inserted via insmod are
 				placed here using dynamic mappings.
 

+ 5 - 5
Documentation/block/queue-sysfs.txt

@@ -25,11 +25,11 @@ size allowed by the hardware.
 
 nomerges (RW)
 -------------
-This enables the user to disable the lookup logic involved with IO merging
-requests in the block layer. Merging may still occur through a direct
-1-hit cache, since that comes for (almost) free. The IO scheduler will not
-waste cycles doing tree/hash lookups for merges if nomerges is 1. Defaults
-to 0, enabling all merges.
+This enables the user to disable the lookup logic involved with IO
+merging requests in the block layer. By default (0) all merges are
+enabled. When set to 1 only simple one-hit merges will be tried. When
+set to 2 no merge algorithms will be tried (including one-hit or more
+complex tree/hash lookups).
 
 nr_requests (RW)
 ----------------

+ 27 - 3
Documentation/cachetlb.txt

@@ -88,12 +88,12 @@ changes occur:
 	This is used primarily during fault processing.
 
 5) void update_mmu_cache(struct vm_area_struct *vma,
-			 unsigned long address, pte_t pte)
+			 unsigned long address, pte_t *ptep)
 
 	At the end of every page fault, this routine is invoked to
 	tell the architecture specific code that a translation
-	described by "pte" now exists at virtual address "address"
-	for address space "vma->vm_mm", in the software page tables.
+	now exists at virtual address "address" for address space
+	"vma->vm_mm", in the software page tables.
 
 	A port may use this information in any way it so chooses.
 	For example, it could use this event to pre-load TLB
@@ -377,3 +377,27 @@ maps this page at its virtual address.
 	All the functionality of flush_icache_page can be implemented in
 	flush_dcache_page and update_mmu_cache. In 2.7 the hope is to
 	remove this interface completely.
+
+The final category of APIs is for I/O to deliberately aliased address
+ranges inside the kernel.  Such aliases are set up by use of the
+vmap/vmalloc API.  Since kernel I/O goes via physical pages, the I/O
+subsystem assumes that the user mapping and kernel offset mapping are
+the only aliases.  This isn't true for vmap aliases, so anything in
+the kernel trying to do I/O to vmap areas must manually manage
+coherency.  It must do this by flushing the vmap range before doing
+I/O and invalidating it after the I/O returns.
+
+  void flush_kernel_vmap_range(void *vaddr, int size)
+       flushes the kernel cache for a given virtual address range in
+       the vmap area.  This is to make sure that any data the kernel
+       modified in the vmap range is made visible to the physical
+       page.  The design is to make this area safe to perform I/O on.
+       Note that this API does *not* also flush the offset map alias
+       of the area.
+
+  void invalidate_kernel_vmap_range(void *vaddr, int size) invalidates
+       the cache for a given virtual address range in the vmap area
+       which prevents the processor from making the cache stale by
+       speculatively reading data while the I/O was occurring to the
+       physical pages.  This is only necessary for data reads into the
+       vmap area.
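
Illustrative fragment (not part of the patch) of the bracketing protocol described above for I/O to a vmap/vmalloc alias; the I/O submission and completion wait are elided, and the header location is an assumption.

	#include <linux/highmem.h>	/* assumed home of the two helpers */

	static void do_io_to_vmap_buffer(void *vaddr, int len)
	{
		flush_kernel_vmap_range(vaddr, len);		/* before the I/O starts */

		/* ... submit I/O against the underlying physical pages and wait ... */

		invalidate_kernel_vmap_range(vaddr, len);	/* only needed for reads into the area */
	}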

+ 0 - 1
Documentation/dontdiff

@@ -69,7 +69,6 @@ av_permissions.h
 bbootsect
 bin2c
 binkernel.spec
-binoffset
 bootsect
 bounds.h
 bsetup

+ 21 - 2
Documentation/dvb/get_dvb_firmware

@@ -26,7 +26,7 @@ use IO::Handle;
 		"dec3000s", "vp7041", "dibusb", "nxt2002", "nxt2004",
 		"or51211", "or51132_qam", "or51132_vsb", "bluebird",
 		"opera1", "cx231xx", "cx18", "cx23885", "pvrusb2", "mpc718",
-		"af9015");
+		"af9015", "ngene");
 
 # Check args
 syntax() if (scalar(@ARGV) != 1);
@@ -39,7 +39,7 @@ for ($i=0; $i < scalar(@components); $i++) {
 	die $@ if $@;
 	print STDERR <<EOF;
 Firmware(s) $outfile extracted successfully.
-Now copy it(they) to either /usr/lib/hotplug/firmware or /lib/firmware
+Now copy it(them) to either /usr/lib/hotplug/firmware or /lib/firmware
 (depending on configuration of firmware hotplug).
 EOF
 	exit(0);
@@ -549,6 +549,24 @@ sub af9015 {
 	close INFILE;
 }
 
+sub ngene {
+    my $url = "http://www.digitaldevices.de/download/";
+    my $file1 = "ngene_15.fw";
+    my $hash1 = "d798d5a757121174f0dbc5f2833c0c85";
+    my $file2 = "ngene_17.fw";
+    my $hash2 = "26b687136e127b8ac24b81e0eeafc20b";
+
+    checkstandard();
+
+    wgetfile($file1, $url . $file1);
+    verify($file1, $hash1);
+
+    wgetfile($file2, $url . $file2);
+    verify($file2, $hash2);
+
+    "$file1, $file2";
+}
+
 # ---------------------------------------------------------------
 # Utilities
 
@@ -667,6 +685,7 @@ sub delzero{
 sub syntax() {
     print STDERR "syntax: get_dvb_firmware <component>\n";
     print STDERR "Supported components:\n";
+    @components = sort @components;
     for($i=0; $i < scalar(@components); $i++) {
 	print STDERR "\t" . $components[$i] . "\n";
     }

+ 11 - 15
Documentation/feature-removal-schedule.txt

@@ -6,21 +6,6 @@ be removed from this file.
 
 ---------------------------
 
-What:	USER_SCHED
-When:	2.6.34
-
-Why:	USER_SCHED was implemented as a proof of concept for group scheduling.
-	The effect of USER_SCHED can already be achieved from userspace with
-	the help of libcgroup. The removal of USER_SCHED will also simplify
-	the scheduler code with the removal of one major ifdef. There are also
-	issues USER_SCHED has with USER_NS. A decision was taken not to fix
-	those and instead remove USER_SCHED. Also new group scheduling
-	features will not be implemented for USER_SCHED.
-
-Who:	Dhaval Giani <dhaval@linux.vnet.ibm.com>
-
----------------------------
-
 What:	PRISM54
 When:	2.6.34
 
@@ -64,6 +49,17 @@ Who:	Robin Getz <rgetz@blackfin.uclinux.org> & Matt Mackall <mpm@selenic.com>
 
 ---------------------------
 
+What:	Deprecated snapshot ioctls
+When:	2.6.36
+
+Why:	The ioctls in kernel/power/user.c were marked as deprecated a long
+	time ago. They now warn users about this so that they can update
+	their userspace. After some more time, remove them completely.
+
+Who:	Jiri Slaby <jirislaby@gmail.com>
+
+---------------------------
+
 What:	The ieee80211_regdom module parameter
 When:	March 2010 / desktop catchup
 

+ 2 - 1
Documentation/filesystems/dentry-locking.txt

@@ -62,7 +62,8 @@ changes are :
 2. Insertion of a dentry into the hash table is done using
    hlist_add_head_rcu() which take care of ordering the writes - the
    writes to the dentry must be visible before the dentry is
-   inserted. This works in conjunction with hlist_for_each_rcu() while
+   inserted. This works in conjunction with hlist_for_each_rcu(),
+   which has since been replaced by hlist_for_each_entry_rcu(), while
    walking the hash chain. The only requirement is that all
    initialization to the dentry must be done before
    hlist_add_head_rcu() since we don't have dcache_lock protection
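
As an aside (this is not dcache code), the publish/lookup ordering described
above looks like this for a generic RCU-protected hash chain; struct item is
invented for the example:

    #include <linux/rculist.h>
    #include <linux/rcupdate.h>

    struct item {
    	int key;
    	struct hlist_node node;
    };

    static void publish_item(struct hlist_head *head, struct item *it, int key)
    {
    	it->key = key;				/* all initialization first ... */
    	hlist_add_head_rcu(&it->node, head);	/* ... then make it visible     */
    }

    static int item_exists(struct hlist_head *head, int key)
    {
    	struct item *it;
    	struct hlist_node *pos;
    	int found = 0;

    	rcu_read_lock();
    	hlist_for_each_entry_rcu(it, pos, head, node) {
    		if (it->key == key) {
    			found = 1;
    			break;
    		}
    	}
    	rcu_read_unlock();
    	return found;
    }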

+ 33 - 2
Documentation/kernel-parameters.txt

@@ -199,6 +199,10 @@ and is between 256 and 4096 characters. It is defined in the file
 			acpi_display_output=video
 			See above.
 
+	acpi_early_pdc_eval	[HW,ACPI] Evaluate processor _PDC methods
+				early. Needed on some platforms to properly
+				initialize the EC.
+
 	acpi_irq_balance [HW,ACPI]
 			ACPI will balance active IRQs
 			default in APIC mode
@@ -311,6 +315,11 @@ and is between 256 and 4096 characters. It is defined in the file
 	aic79xx=	[HW,SCSI]
 			See Documentation/scsi/aic79xx.txt.
 
+	alignment=	[KNL,ARM]
+			Allow the default userspace alignment fault handler
+			behaviour to be specified.  Bit 0 enables warnings,
+			bit 1 enables fixups, and bit 2 sends a segfault.
+
 	amd_iommu=	[HW,X86-84]
 			Pass parameters to the AMD IOMMU driver in the system.
 			Possible values are:
@@ -1729,6 +1738,9 @@ and is between 256 and 4096 characters. It is defined in the file
 	nomfgpt		[X86-32] Disable Multi-Function General Purpose
 			Timer usage (for AMD Geode machines).
 
+	nopat		[X86] Disable PAT (page attribute table extension of
+			pagetables) support.
+
 	norandmaps	Don't use address space randomization.  Equivalent to
 			echo 0 > /proc/sys/kernel/randomize_va_space
 
@@ -1939,8 +1951,12 @@ and is between 256 and 4096 characters. It is defined in the file
 				IRQ routing is enabled.
 		noacpi		[X86] Do not use ACPI for IRQ routing
 				or for PCI scanning.
-		use_crs		[X86] Use _CRS for PCI resource
-				allocation.
+		use_crs		[X86] Use PCI host bridge window information
+				from ACPI.  On BIOSes from 2008 or later, this
+				is enabled by default.  If you need to use this,
+				please report a bug.
+		nocrs		[X86] Ignore PCI host bridge windows from ACPI.
+			        If you need to use this, please report a bug.
 		routeirq	Do IRQ routing for all PCI devices.
 				This is normally done in pci_enable_device(),
 				so this option is a temporary workaround
@@ -1989,6 +2005,14 @@ and is between 256 and 4096 characters. It is defined in the file
 		force	Enable ASPM even on devices that claim not to support it.
 			WARNING: Forcing ASPM on may cause system lockups.
 
+	pcie_pme=	[PCIE,PM] Native PCIe PME signaling options:
+		off	Do not use native PCIe PME signaling.
+		force	Use native PCIe PME signaling even if the BIOS refuses
+			to allow the kernel to control the relevant PCIe config
+			registers.
+		nomsi	Do not use MSI for native PCIe PME signaling (this makes
+			all PCIe root ports use INTx for everything).
+
 	pcmv=		[HW,PCMCIA] BadgePAD 4
 
 	pd.		[PARIDE]
@@ -2694,6 +2718,13 @@ and is between 256 and 4096 characters. It is defined in the file
 					medium is write-protected).
 			Example: quirks=0419:aaf5:rl,0421:0433:rc
 
+	userpte=
+			[X86] Flags controlling user PTE allocations.
+
+				nohigh = do not allocate PTE pages in
+					HIGHMEM regardless of setting
+					of CONFIG_HIGHPTE.
+
 	vdso=		[X86,SH]
 			vdso=2: enable compat VDSO (default with COMPAT_VDSO)
 			vdso=1: enable VDSO (default)

+ 0 - 1
Documentation/lguest/lguest.c

@@ -34,7 +34,6 @@
 #include <sys/uio.h>
 #include <termios.h>
 #include <getopt.h>
-#include <zlib.h>
 #include <assert.h>
 #include <sched.h>
 #include <limits.h>

+ 4 - 4
Documentation/networking/ip-sysctl.txt

@@ -1074,10 +1074,10 @@ regen_max_retry - INTEGER
 	Default: 5
 
 max_addresses - INTEGER
-	Number of maximum addresses per interface.  0 disables limitation.
-	It is recommended not set too large value (or 0) because it would
-	be too easy way to crash kernel to allow to create too much of
-	autoconfigured addresses.
+	Maximum number of autoconfigured addresses per interface.  Setting
+	to zero disables the limitation.  It is not recommended to set this
+	value too large (or to zero) because it would be an easy way to
+	crash the kernel by allowing too many addresses to be created.
 	Default: 16
 
 disable_ipv6 - BOOLEAN

+ 118 - 0
Documentation/pcmcia/locking.txt

@@ -0,0 +1,118 @@
+This file explains the locking and exclusion scheme used in the PCCARD
+and PCMCIA subsystems.
+
+
+A) Overview, Locking Hierarchy:
+===============================
+
+pcmcia_socket_list_rwsem	- protects only the list of sockets
+- skt_mutex			- serializes card insert / ejection
+  - ops_mutex			- serializes socket operation
+
+
+B) Exclusion
+============
+
+The following functions and callbacks to struct pcmcia_socket must
+be called with "skt_mutex" held:
+
+	socket_detect_change()
+	send_event()
+	socket_reset()
+	socket_shutdown()
+	socket_setup()
+	socket_remove()
+	socket_insert()
+	socket_early_resume()
+	socket_late_resume()
+	socket_resume()
+	socket_suspend()
+
+	struct pcmcia_callback	*callback
+
+The following functions and callbacks to struct pcmcia_socket must
+be called with "ops_mutex" held:
+
+	socket_reset()
+	socket_setup()
+
+	struct pccard_operations	*ops
+	struct pccard_resource_ops	*resource_ops;
+
+Note that send_event() and struct pcmcia_callback *callback must not be
+called with "ops_mutex" held.
+
+
+C) Protection
+=============
+
+1. Global Data:
+---------------
+struct list_head	pcmcia_socket_list;
+
+protected by pcmcia_socket_list_rwsem;
+
+
+2. Per-Socket Data:
+-------------------
+The resource_ops and their data are protected by ops_mutex.
+
+The "main" struct pcmcia_socket is protected as follows (read-only fields
+or single-use fields not mentioned):
+
+- by pcmcia_socket_list_rwsem:
+	struct list_head	socket_list;
+
+- by thread_lock:
+	unsigned int		thread_events;
+
+- by skt_mutex:
+	u_int			suspended_state;
+	void			(*tune_bridge);
+	struct pcmcia_callback	*callback;
+	int			resume_status;
+
+- by ops_mutex:
+	socket_state_t		socket;
+	u_int			state;
+	u_short			lock_count;
+	pccard_mem_map		cis_mem;
+	void __iomem 		*cis_virt;
+	struct { }		irq;
+	io_window_t		io[];
+	pccard_mem_map		win[];
+	struct list_head	cis_cache;
+	size_t			fake_cis_len;
+	u8			*fake_cis;
+	u_int			irq_mask;
+	void 			(*zoom_video);
+	int 			(*power_hook);
+	u8			resource...;
+	struct list_head	devices_list;
+	u8			device_count;
+	struct 			pcmcia_state;
+
+
+3. Per PCMCIA-device Data:
+--------------------------
+
+The "main" struct pcmcia_devie is protected as follows (read-only fields
+or single-use fields not mentioned):
+
+
+- by pcmcia_socket->ops_mutex:
+	struct list_head	socket_device_list;
+	struct config_t		*function_config;
+	u16			_irq:1;
+	u16			_io:1;
+	u16			_win:4;
+	u16			_locked:1;
+	u16			allow_func_id_match:1;
+	u16			suspended:1;
+	u16			_removed:1;
+
+- by the PCMCIA driver:
+	io_req_t		io;
+	irq_req_t		irq;
+	config_req_t		conf;
+	window_handle_t		win;
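
A minimal illustration (not taken from the file above) of honouring the
section A hierarchy when both mutexes are needed, assuming skt_mutex and
ops_mutex are members of struct pcmcia_socket as listed in this document:

    #include <linux/mutex.h>
    #include <pcmcia/ss.h>		/* struct pcmcia_socket */

    static void example_socket_op(struct pcmcia_socket *skt)
    {
    	mutex_lock(&skt->skt_mutex);	/* card insert/eject serialization */
    	mutex_lock(&skt->ops_mutex);	/* socket operation serialization  */

    	/* ... touch state listed under "by ops_mutex" here ... */

    	mutex_unlock(&skt->ops_mutex);
    	mutex_unlock(&skt->skt_mutex);
    }

Taking ops_mutex before skt_mutex would invert the hierarchy and risk
deadlock.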

+ 70 - 0
Documentation/powerpc/dts-bindings/fsl/mpc5121-psc.txt

@@ -0,0 +1,70 @@
+MPC5121 PSC Device Tree Bindings
+
+PSC in UART mode
+----------------
+
+For PSC in UART mode the needed PSC serial devices
+are specified by fsl,mpc5121-psc-uart nodes in the
+fsl,mpc5121-immr SoC node. Additionally the PSC FIFO
+Controller node fsl,mpc5121-psc-fifo is required there:
+
+fsl,mpc5121-psc-uart nodes
+--------------------------
+
+Required properties :
+ - compatible : Should contain "fsl,mpc5121-psc-uart" and "fsl,mpc5121-psc"
+ - cell-index : Index of the PSC in hardware
+ - reg : Offset and length of the register set for the PSC device
+ - interrupts : <a b> where a is the interrupt number of the
+   PSC FIFO Controller and b is a field that represents an
+   encoding of the sense and level information for the interrupt.
+ - interrupt-parent : the phandle for the interrupt controller that
+   services interrupts for this device.
+
+Recommended properties :
+ - fsl,rx-fifo-size : the size of the RX fifo slice (a multiple of 4)
+ - fsl,tx-fifo-size : the size of the TX fifo slice (a multiple of 4)
+
+
+fsl,mpc5121-psc-fifo node
+-------------------------
+
+Required properties :
+ - compatible : Should be "fsl,mpc5121-psc-fifo"
+ - reg : Offset and length of the register set for the PSC
+         FIFO Controller
+ - interrupts : <a b> where a is the interrupt number of the
+   PSC FIFO Controller and b is a field that represents an
+   encoding of the sense and level information for the interrupt.
+ - interrupt-parent : the phandle for the interrupt controller that
+   services interrupts for this device.
+
+
+Example for a board using PSC0 and PSC1 devices in serial mode:
+
+serial@11000 {
+	compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc";
+	cell-index = <0>;
+	reg = <0x11000 0x100>;
+	interrupts = <40 0x8>;
+	interrupt-parent = < &ipic >;
+	fsl,rx-fifo-size = <16>;
+	fsl,tx-fifo-size = <16>;
+};
+
+serial@11100 {
+	compatible = "fsl,mpc5121-psc-uart", "fsl,mpc5121-psc";
+	cell-index = <1>;
+	reg = <0x11100 0x100>;
+	interrupts = <40 0x8>;
+	interrupt-parent = < &ipic >;
+	fsl,rx-fifo-size = <16>;
+	fsl,tx-fifo-size = <16>;
+};
+
+pscfifo@11f00 {
+	compatible = "fsl,mpc5121-psc-fifo";
+	reg = <0x11f00 0x100>;
+	interrupts = <40 0x8>;
+	interrupt-parent = < &ipic >;
+};

+ 7 - 0
Documentation/powerpc/dts-bindings/fsl/spi.txt

@@ -13,6 +13,11 @@ Required properties:
 - interrupt-parent : the phandle for the interrupt controller that
   services interrupts for this device.
 
+Optional properties:
+- gpios : specifies the gpio pins to be used for chipselects.
+  The gpios will be referred to as reg = <index> in the SPI child nodes.
+  If unspecified, a single SPI device without a chip select can be used.
+
 Example:
 	spi@4c0 {
 		cell-index = <0>;
@@ -21,4 +26,6 @@ Example:
 		interrupts = <82 0>;
 		interrupt-parent = <700>;
 		mode = "cpu";
+		gpios = <&gpio 18 1	// device reg=<0>
+			 &gpio 19 1>;	// device reg=<1>
 	};

+ 134 - 0
Documentation/powerpc/ptrace.txt

@@ -0,0 +1,134 @@
+GDB intends to support the following hardware debug features of BookE
+processors:
+
+4 hardware breakpoints (IAC)
+2 hardware watchpoints (read, write and read-write) (DAC)
+2 value conditions for the hardware watchpoints (DVC)
+
+For that, we need to extend ptrace so that GDB can query and set these
+resources. Since we're extending, we're trying to create an interface
+that's extendable and that covers both BookE and server processors, so
+that GDB doesn't need to special-case each of them. We added the
+following 3 new ptrace requests.
+
+1. PTRACE_PPC_GETHWDEBUGINFO
+
+Query for GDB to discover the hardware debug features. The main info to
+be returned here is the minimum alignment for the hardware watchpoints.
+BookE processors don't have restrictions here, but server processors have
+an 8-byte alignment restriction for hardware watchpoints. We'd like to avoid
+adding special cases to GDB based on what it sees in AUXV.
+
+Since we're at it, we added other useful info that the kernel can return to
+GDB: this query will return the number of hardware breakpoints, hardware
+watchpoints and whether it supports a range of addresses and a condition.
+The query will fill the following structure provided by the requesting process:
+
+struct ppc_debug_info {
+       uint32_t version;
+       uint32_t num_instruction_bps;
+       uint32_t num_data_bps;
+       uint32_t num_condition_regs;
+       uint32_t data_bp_alignment;
+       uint32_t sizeof_condition; /* size of the DVC register */
+       uint64_t features; /* bitmask of the individual flags */
+};
+
+features will have bits indicating whether there is support for:
+
+#define PPC_DEBUG_FEATURE_INSN_BP_RANGE		0x1
+#define PPC_DEBUG_FEATURE_INSN_BP_MASK		0x2
+#define PPC_DEBUG_FEATURE_DATA_BP_RANGE		0x4
+#define PPC_DEBUG_FEATURE_DATA_BP_MASK		0x8
+
+2. PTRACE_SETHWDEBUG
+
+Sets a hardware breakpoint or watchpoint, according to the provided structure:
+
+struct ppc_hw_breakpoint {
+        uint32_t version;
+#define PPC_BREAKPOINT_TRIGGER_EXECUTE  0x1
+#define PPC_BREAKPOINT_TRIGGER_READ     0x2
+#define PPC_BREAKPOINT_TRIGGER_WRITE    0x4
+        uint32_t trigger_type;       /* only some combinations allowed */
+#define PPC_BREAKPOINT_MODE_EXACT               0x0
+#define PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE     0x1
+#define PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE     0x2
+#define PPC_BREAKPOINT_MODE_MASK                0x3
+        uint32_t addr_mode;          /* address match mode */
+
+#define PPC_BREAKPOINT_CONDITION_MODE   0x3
+#define PPC_BREAKPOINT_CONDITION_NONE   0x0
+#define PPC_BREAKPOINT_CONDITION_AND    0x1
+#define PPC_BREAKPOINT_CONDITION_EXACT  0x1	/* different name for the same thing as above */
+#define PPC_BREAKPOINT_CONDITION_OR     0x2
+#define PPC_BREAKPOINT_CONDITION_AND_OR 0x3
+#define PPC_BREAKPOINT_CONDITION_BE_ALL 0x00ff0000	/* byte enable bits */
+#define PPC_BREAKPOINT_CONDITION_BE(n)  (1<<((n)+16))
+        uint32_t condition_mode;     /* break/watchpoint condition flags */
+
+        uint64_t addr;
+        uint64_t addr2;
+        uint64_t condition_value;
+};
+
+A request specifies one event, not necessarily just one register to be set.
+For instance, if the request is for a watchpoint with a condition, both the
+DAC and DVC registers will be set in the same request.
+
+With this, GDB can ask for all kinds of hardware breakpoints and watchpoints
+that BookE processors support. COMEFROM breakpoints available in server
+processors are not contemplated, but that is outside the scope of this work.
+
+ptrace will return an integer (handle) uniquely identifying the breakpoint or
+watchpoint just created. This integer will be used in the PTRACE_DELHWDEBUG
+request to ask for its removal. Return -ENOSPC if the requested breakpoint
+can't be allocated on the registers.
+
+Some examples of using the structure to:
+
+- set a breakpoint in the first breakpoint register
+
+  p.version         = PPC_DEBUG_CURRENT_VERSION;
+  p.trigger_type    = PPC_BREAKPOINT_TRIGGER_EXECUTE;
+  p.addr_mode       = PPC_BREAKPOINT_MODE_EXACT;
+  p.condition_mode  = PPC_BREAKPOINT_CONDITION_NONE;
+  p.addr            = (uint64_t) address;
+  p.addr2           = 0;
+  p.condition_value = 0;
+
+- set a watchpoint which triggers on reads in the second watchpoint register
+
+  p.version         = PPC_DEBUG_CURRENT_VERSION;
+  p.trigger_type    = PPC_BREAKPOINT_TRIGGER_READ;
+  p.addr_mode       = PPC_BREAKPOINT_MODE_EXACT;
+  p.condition_mode  = PPC_BREAKPOINT_CONDITION_NONE;
+  p.addr            = (uint64_t) address;
+  p.addr2           = 0;
+  p.condition_value = 0;
+
+- set a watchpoint which triggers only with a specific value
+
+  p.version         = PPC_DEBUG_CURRENT_VERSION;
+  p.trigger_type    = PPC_BREAKPOINT_TRIGGER_READ;
+  p.addr_mode       = PPC_BREAKPOINT_MODE_EXACT;
+  p.condition_mode  = PPC_BREAKPOINT_CONDITION_AND | PPC_BREAKPOINT_CONDITION_BE_ALL;
+  p.addr            = (uint64_t) address;
+  p.addr2           = 0;
+  p.condition_value = (uint64_t) condition;
+
+- set a ranged hardware breakpoint
+
+  p.version         = PPC_DEBUG_CURRENT_VERSION;
+  p.trigger_type    = PPC_BREAKPOINT_TRIGGER_EXECUTE;
+  p.addr_mode       = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE;
+  p.condition_mode  = PPC_BREAKPOINT_CONDITION_NONE;
+  p.addr            = (uint64_t) begin_range;
+  p.addr2           = (uint64_t) end_range;
+  p.condition_value = 0;
+
+3. PTRACE_DELHWDEBUG
+
+Takes an integer which identifies an existing breakpoint or watchpoint
+(i.e., the value returned from PTRACE_SETHWDEBUG), and deletes the
+corresponding breakpoint or watchpoint.
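
A hypothetical user-space sketch of the three requests above; the request and
structure names are taken from this document, and it is assumed they are made
available through the exported ptrace headers:

    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <stdint.h>

    static int watch_writes_once(pid_t pid, uint64_t address)
    {
    	struct ppc_debug_info info;
    	struct ppc_hw_breakpoint p = {
    		.version        = PPC_DEBUG_CURRENT_VERSION,
    		.trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE,
    		.addr_mode      = PPC_BREAKPOINT_MODE_EXACT,
    		.condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
    		.addr           = address,
    	};
    	long handle;

    	/* 1. Query the hardware debug capabilities. */
    	if (ptrace(PTRACE_PPC_GETHWDEBUGINFO, pid, 0, &info) < 0 ||
    	    info.num_data_bps == 0)
    		return -1;

    	/* 2. Install the watchpoint; the return value is its handle. */
    	handle = ptrace(PTRACE_SETHWDEBUG, pid, 0, &p);
    	if (handle < 0)
    		return -1;

    	/* ... resume the tracee and wait for the watchpoint to hit ... */

    	/* 3. Remove it again, identified by the handle. */
    	return ptrace(PTRACE_DELHWDEBUG, pid, 0, (void *)handle);
    }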

+ 6 - 0
Documentation/s390/CommonIO

@@ -87,6 +87,12 @@ Command line parameters
   compatibility, by the device number in hexadecimal (0xabcd or abcd). Device
   numbers given as 0xabcd will be interpreted as 0.0.abcd.
 
+* /proc/cio_settle
+
+  A write request to this file is blocked until all queued cio actions are
+  handled. This will allow userspace to wait for pending work affecting
+  device availability after changing cio_ignore or the hardware configuration.
+
 * For some of the information present in the /proc filesystem in 2.4 (namely,
   /proc/subchannels and /proc/chpids), see driver-model.txt.
   Information formerly in /proc/irq_count is now in /proc/interrupts.

+ 2 - 2
Documentation/s390/driver-model.txt

@@ -223,8 +223,8 @@ touched by the driver - it should use the ccwgroup device's driver_data for its
 private data.
 
 To implement a ccwgroup driver, please refer to include/asm/ccwgroup.h. Keep in
-mind that most drivers will need to implement both a ccwgroup and a ccw driver
-(unless you have a meta ccw driver, like cu3088 for lcs and ctc).
+mind that most drivers will need to implement both a ccwgroup and a ccw
+driver.
 
 
 2. Channel paths

+ 16 - 0
Documentation/scsi/ChangeLog.megaraid_sas

@@ -1,3 +1,19 @@
+1 Release Date    : Thur.  Oct 29, 2009 09:12:45 PST 2009 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Bo Yang
+
+2 Current Version : 00.00.04.17.1-rc1
+3 Older Version   : 00.00.04.12
+
+1.	Set the pad_0 field in the mfi frame structure to 0 to fix the
+	issue of context values larger than 32 bits.
+
+2.	Add the logical drive list to the driver.  The driver will
+	keep the logical drive list internally after driver load.
+
+3.	Fix the device update issue after the driver receives AEN
+	PD delete/add or LD add/delete events from FW.
+
 1 Release Date    : Tues.  July 28, 2009 10:12:45 PST 2009 -
 			(emaild-id:megaraidlinux@lsi.com)
 			Bo Yang

+ 26 - 1
Documentation/sound/alsa/ALSA-Configuration.txt

@@ -482,6 +482,9 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
 
     reference_rate	- reference sample rate, 44100 or 48000 (default)
     multiple		- multiple to ref. sample rate, 1 or 2 (default)
+    subsystem		- override the PCI SSID for probing; the value
+			  consists of SSVID << 16 | SSDID.  The default is
+			  zero, which means no override.
 
     This module supports multiple cards.
 
@@ -1123,6 +1126,21 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
 
     This module supports multiple cards, autoprobe and ISA PnP.
 
+  Module snd-jazz16
+  -------------------
+
+    Module for Media Vision Jazz16 chipset. The chipset consists of 3 chips:
+    MVD1216 + MVA416 + MVA514.
+
+    port	- port # for SB DSP chip (0x210,0x220,0x230,0x240,0x250,0x260)
+    irq		- IRQ # for SB DSP chip (3,5,7,9,10,15)
+    dma8	- DMA # for SB DSP chip (1,3)
+    dma16	- DMA # for SB DSP chip (5,7)
+    mpu_port	- MPU-401 port # (0x300,0x310,0x320,0x330)
+    mpu_irq	- MPU-401 irq # (2,3,5,7)
+
+    This module supports multiple cards.
+
   Module snd-korg1212
   -------------------
 
@@ -1791,6 +1809,13 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
 
     The power-management is supported.
 
+  Module snd-ua101
+  ----------------
+
+    Module for the Edirol UA-101 audio/MIDI interface.
+
+    This module supports multiple devices, autoprobe and hotplugging.
+
   Module snd-usb-audio
   --------------------
 
@@ -1923,7 +1948,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
   -------------------
 
     Module for sound cards based on the Asus AV100/AV200 chips,
-    i.e., Xonar D1, DX, D2, D2X, HDAV1.3 (Deluxe), Essence ST
+    i.e., Xonar D1, DX, D2, D2X, DS, HDAV1.3 (Deluxe), Essence ST
     (Deluxe) and Essence STX.
 
     This module supports autoprobe and multiple cards.

+ 5 - 0
Documentation/sound/alsa/HD-Audio-Models.txt

@@ -124,6 +124,8 @@ ALC882/883/885/888/889
   asus-a7m	ASUS A7M
   macpro	MacPro support
   mb5		Macbook 5,1
+  macmini3	Macmini 3,1
+  mba21		Macbook Air 2,1
   mbp3		Macbook Pro rev3
   imac24	iMac 24'' with jack detection
   imac91	iMac 9,1
@@ -279,13 +281,16 @@ Conexant 5051
   laptop	Basic Laptop config (default)
   hp		HP Spartan laptop
   hp-dv6736	HP dv6736
+  hp-f700	HP Compaq Presario F700
   lenovo-x200	Lenovo X200 laptop
+  toshiba	Toshiba Satellite M300
 
 Conexant 5066
 =============
   laptop	Basic Laptop config (default)
   dell-laptop	Dell laptops
   olpc-xo-1_5	OLPC XO 1.5
+  ideapad       Lenovo IdeaPad U150
 
 STAC9200
 ========

+ 27 - 0
Documentation/sound/alsa/HD-Audio.txt

@@ -452,6 +452,33 @@ Similarly, the lines after `[verb]` are parsed as `init_verbs`
 sysfs entries, and the lines after `[hint]` are parsed as `hints`
 sysfs entries, respectively.
 
+Another example, overriding the codec vendor id from 0x12345678 to
+0xdeadbeef, is as follows:
+------------------------------------------------------------------------
+  [codec]
+  0x12345678 0xabcd1234 2
+
+  [vendor_id]
+  0xdeadbeef
+------------------------------------------------------------------------
+
+In a similar way, you can override the codec subsystem_id via the
+`[subsystem_id]` line and the revision id via the `[revision_id]` line.
+Also, the codec chip name can be rewritten via the `[chip_name]` line.
+------------------------------------------------------------------------
+  [codec]
+  0x12345678 0xabcd1234 2
+
+  [subsystem_id]
+  0xffff1111
+
+  [revision_id]
+  0x10
+
+  [chip_name]
+  My-own NEWS-0002
+------------------------------------------------------------------------
+
 The hd-audio driver reads the file via request_firmware().  Thus,
 a patch file has to be located on the appropriate firmware path,
 typically, /lib/firmware.  For example, when you pass the option

+ 2 - 3
Documentation/trace/ftrace-design.txt

@@ -238,11 +238,10 @@ HAVE_SYSCALL_TRACEPOINTS
 
 You need very few things to get the syscalls tracing in an arch.
 
+- Support HAVE_ARCH_TRACEHOOK (see arch/Kconfig).
 - Have a NR_syscalls variable in <asm/unistd.h> that provides the number
   of syscalls supported by the arch.
-- Implement arch_syscall_addr() that resolves a syscall address from a
-  syscall number.
-- Support the TIF_SYSCALL_TRACEPOINT thread flags
+- Support the TIF_SYSCALL_TRACEPOINT thread flag.
 - Put the trace_sys_enter() and trace_sys_exit() tracepoints calls from ptrace
   in the ptrace syscalls tracing path.
 - Tag this arch as HAVE_SYSCALL_TRACEPOINTS.

+ 32 - 25
Documentation/trace/kprobetrace.txt

@@ -24,6 +24,7 @@ Synopsis of kprobe_events
 -------------------------
   p[:[GRP/]EVENT] SYMBOL[+offs]|MEMADDR [FETCHARGS]	: Set a probe
   r[:[GRP/]EVENT] SYMBOL[+0] [FETCHARGS]		: Set a return probe
+  -:[GRP/]EVENT						: Clear a probe
 
  GRP		: Group name. If omitted, use "kprobes" for it.
  EVENT		: Event name. If omitted, the event name is generated
@@ -37,15 +38,12 @@ Synopsis of kprobe_events
   @SYM[+|-offs]	: Fetch memory at SYM +|- offs (SYM should be a data symbol)
   $stackN	: Fetch Nth entry of stack (N >= 0)
   $stack	: Fetch stack address.
-  $argN		: Fetch function argument. (N >= 0)(*)
-  $retval	: Fetch return value.(**)
-  +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(***)
+  $retval	: Fetch return value.(*)
+  +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(**)
   NAME=FETCHARG: Set NAME as the argument name of FETCHARG.
 
-  (*) aN may not correct on asmlinkaged functions and at the middle of
-      function body.
-  (**) only for return probe.
-  (***) this is useful for fetching a field of data structures.
+  (*) only for return probe.
+  (**) this is useful for fetching a field of data structures.
 
 
 Per-Probe Event Filtering
@@ -82,13 +80,16 @@ Usage examples
 To add a probe as a new event, write a new definition to kprobe_events
 as below.
 
-  echo p:myprobe do_sys_open dfd=$arg0 filename=$arg1 flags=$arg2 mode=$arg3 > /sys/kernel/debug/tracing/kprobe_events
+  echo 'p:myprobe do_sys_open dfd=%ax filename=%dx flags=%cx mode=+4($stack)' > /sys/kernel/debug/tracing/kprobe_events
 
  This sets a kprobe on the top of do_sys_open() function with recording
-1st to 4th arguments as "myprobe" event. As this example shows, users can
-choose more familiar names for each arguments.
+1st to 4th arguments as "myprobe" event. Note that which register/stack entry
+is assigned to each function argument depends on the arch-specific ABI. If you
+are unsure of the ABI, please try the probe subcommand of perf-tools (you can
+find it under tools/perf/).
+As this example shows, users can choose more familiar names for each argument.
 
-  echo r:myretprobe do_sys_open $retval >> /sys/kernel/debug/tracing/kprobe_events
+  echo 'r:myretprobe do_sys_open $retval' >> /sys/kernel/debug/tracing/kprobe_events
 
  This sets a kretprobe on the return point of do_sys_open() function with
 recording return value as "myretprobe" event.
@@ -97,23 +98,24 @@ recording return value as "myretprobe" event.
 
   cat /sys/kernel/debug/tracing/events/kprobes/myprobe/format
 name: myprobe
-ID: 75
+ID: 780
 format:
-	field:unsigned short common_type;	offset:0;	size:2;
-	field:unsigned char common_flags;	offset:2;	size:1;
-	field:unsigned char common_preempt_count;	offset:3;	size:1;
-	field:int common_pid;	offset:4;	size:4;
-	field:int common_tgid;	offset:8;	size:4;
+        field:unsigned short common_type;       offset:0;       size:2; signed:0;
+        field:unsigned char common_flags;       offset:2;       size:1; signed:0;
+        field:unsigned char common_preempt_count;       offset:3; size:1;signed:0;
+        field:int common_pid;   offset:4;       size:4; signed:1;
+        field:int common_lock_depth;    offset:8;       size:4; signed:1;
 
-	field: unsigned long ip;	offset:16;tsize:8;
-	field: int nargs;	offset:24;tsize:4;
-	field: unsigned long dfd;	offset:32;tsize:8;
-	field: unsigned long filename;	offset:40;tsize:8;
-	field: unsigned long flags;	offset:48;tsize:8;
-	field: unsigned long mode;	offset:56;tsize:8;
+        field:unsigned long __probe_ip; offset:12;      size:4; signed:0;
+        field:int __probe_nargs;        offset:16;      size:4; signed:1;
+        field:unsigned long dfd;        offset:20;      size:4; signed:0;
+        field:unsigned long filename;   offset:24;      size:4; signed:0;
+        field:unsigned long flags;      offset:28;      size:4; signed:0;
+        field:unsigned long mode;       offset:32;      size:4; signed:0;
 
-print fmt: "(%lx) dfd=%lx filename=%lx flags=%lx mode=%lx", REC->ip, REC->dfd, REC->filename, REC->flags, REC->mode
 
+print fmt: "(%lx) dfd=%lx filename=%lx flags=%lx mode=%lx", REC->__probe_ip,
+REC->dfd, REC->filename, REC->flags, REC->mode
 
  You can see that the event has 4 arguments as in the expressions you specified.
 
@@ -121,6 +123,12 @@ print fmt: "(%lx) dfd=%lx filename=%lx flags=%lx mode=%lx", REC->ip, REC->dfd, R
 
  This clears all probe points.
 
+ Or,
+
+  echo -:myprobe >> kprobe_events
+
+ This clears probe points selectively.
+
  Right after definition, each event is disabled by default. For tracing these
 events, you need to enable it.
 
@@ -146,4 +154,3 @@ events, you need to enable it.
 returns from SYMBOL(e.g. "sys_open+0x1b/0x1d <- do_sys_open" means kernel
 returns from do_sys_open to sys_open+0x1b).
 
-

+ 1 - 0
Documentation/video4linux/CARDLIST.cx23885

@@ -26,3 +26,4 @@
  25 -> Compro VideoMate E800                               [1858:e800]
  26 -> Hauppauge WinTV-HVR1290                             [0070:8551]
  27 -> Mygica X8558 PRO DMB-TH                             [14f1:8578]
+ 28 -> LEADTEK WinFast PxTV1200                            [107d:6f22]

+ 1 - 0
Documentation/video4linux/CARDLIST.saa7134

@@ -174,3 +174,4 @@
 173 -> Zolid Hybrid TV Tuner PCI                [1131:2004]
 174 -> Asus Europa Hybrid OEM                   [1043:4847]
 175 -> Leadtek Winfast DTV1000S                 [107d:6655]
+176 -> Beholder BeholdTV 505 RDS                [0000:5051]

+ 1 - 0
Documentation/video4linux/CARDLIST.tuner

@@ -81,3 +81,4 @@ tuner=80 - Philips FQ1216LME MK3 PAL/SECAM w/active loopthrough
 tuner=81 - Partsnic (Daewoo) PTI-5NF05
 tuner=82 - Philips CU1216L
 tuner=83 - NXP TDA18271
+tuner=84 - Sony BTF-Pxn01Z

+ 47 - 0
Documentation/video4linux/README.tlg2300

@@ -0,0 +1,47 @@
+tlg2300 release notes
+=====================
+
+This is a v4l2/dvb device driver for the tlg2300 chip.
+
+
+current status
+==============
+
+video
+	- supports mmap() and read() (no overlay)
+
+audio
+	- The driver will register an ALSA card for the audio input.
+
+vbi
+	- Works for almost all TV norms.
+
+dvb-t
+	- Works for DVB-T.
+
+FM
+	- Works for radio.
+
+---------------------------------------------------------------------------
+TESTED APPLICATIONS:
+
+-VLC 1.0.4: tests the video and DVB. The GUI is friendly to use.
+
+-Mplayer: tests the video.
+
+-Mplayer: tests the FM. Mplayer should be compiled with --enable-radio and
+	 --enable-radio-capture.
+	The command runs like this (the ALSA audio registers as card 1):
+	#mplayer radio://103.7/capture/ -radio adevice=hw=1,0:arate=48000 \
+		-rawaudio rate=48000:channels=2
+
+---------------------------------------------------------------------------
+KNOWN PROBLEMS:
+about preemphasis:
+	You can set the preemphasis for radio by the following command:
+	#v4l2-ctl -d /dev/radio0 --set-ctrl=pre_emphasis_settings=1
+
+	"pre_emphasis_settings=1" means that you select the 50us. If you want
+	to select the 75us, please use "pre_emphasis_settings=2"
+
+

+ 18 - 7
Documentation/video4linux/gspca.txt

@@ -42,6 +42,7 @@ ov519		041e:4064	Creative Live! VISTA VF0420
 ov519		041e:4067	Creative Live! Cam Video IM (VF0350)
 ov519		041e:4068	Creative Live! VISTA VF0470
 spca561		0458:7004	Genius VideoCAM Express V2
+sn9c2028	0458:7005	Genius Smart 300, version 2
 sunplus		0458:7006	Genius Dsc 1.3 Smart
 zc3xx		0458:7007	Genius VideoCam V2
 zc3xx		0458:700c	Genius VideoCam V3
@@ -109,6 +110,7 @@ sunplus		04a5:3003	Benq DC 1300
 sunplus		04a5:3008	Benq DC 1500
 sunplus		04a5:300a	Benq DC 3410
 spca500		04a5:300c	Benq DC 1016
+benq		04a5:3035	Benq DC E300
 finepix		04cb:0104	Fujifilm FinePix 4800
 finepix		04cb:0109	Fujifilm FinePix A202
 finepix		04cb:010b	Fujifilm FinePix A203
@@ -142,6 +144,7 @@ sunplus		04fc:5360	Sunplus Generic
 spca500		04fc:7333	PalmPixDC85
 sunplus		04fc:ffff	Pure DigitalDakota
 spca501		0506:00df	3Com HomeConnect Lite
+sunplus		052b:1507	Megapixel 5 Pretec DC-1007
 sunplus		052b:1513	Megapix V4
 sunplus		052b:1803	MegaImage VI
 tv8532		0545:808b	Veo Stingray
@@ -151,6 +154,7 @@ sunplus		0546:3191	Polaroid Ion 80
 sunplus		0546:3273	Polaroid PDC2030
 ov519		054c:0154	Sonny toy4
 ov519		054c:0155	Sonny toy5
+cpia1		0553:0002	CPIA CPiA (version1) based cameras
 zc3xx		055f:c005	Mustek Wcam300A
 spca500		055f:c200	Mustek Gsmart 300
 sunplus		055f:c211	Kowa Bs888e Microcamera
@@ -188,8 +192,7 @@ spca500		06bd:0404	Agfa CL20
 spca500		06be:0800	Optimedia
 sunplus		06d6:0031	Trust 610 LCD PowerC@m Zoom
 spca506		06e1:a190	ADS Instant VCD
-ov534		06f8:3002	Hercules Blog Webcam
-ov534		06f8:3003	Hercules Dualpix HD Weblog
+ov534_9		06f8:3003	Hercules Dualpix HD Weblog
 sonixj		06f8:3004	Hercules Classic Silver
 sonixj		06f8:3008	Hercules Deluxe Optical Glass
 pac7302		06f8:3009	Hercules Classic Link
@@ -204,6 +207,7 @@ sunplus		0733:2221	Mercury Digital Pro 3.1p
 sunplus		0733:3261	Concord 3045 spca536a
 sunplus		0733:3281	Cyberpix S550V
 spca506		0734:043b	3DeMon USB Capture aka
+cpia1		0813:0001	QX3 camera
 ov519		0813:0002	Dual Mode USB Camera Plus
 spca500		084d:0003	D-Link DSC-350
 spca500		08ca:0103	Aiptek PocketDV
@@ -225,7 +229,8 @@ sunplus		08ca:2050	Medion MD 41437
 sunplus		08ca:2060	Aiptek PocketDV5300
 tv8532		0923:010f	ICM532 cams
 mars		093a:050f	Mars-Semi Pc-Camera
-mr97310a	093a:010f	Sakar Digital no. 77379
+mr97310a	093a:010e	All known CIF cams with this ID
+mr97310a	093a:010f	All known VGA cams with this ID
 pac207		093a:2460	Qtec Webcam 100
 pac207		093a:2461	HP Webcam
 pac207		093a:2463	Philips SPC 220 NC
@@ -302,6 +307,7 @@ sonixj		0c45:613b	Surfer SN-206
 sonixj		0c45:613c	Sonix Pccam168
 sonixj		0c45:6143	Sonix Pccam168
 sonixj		0c45:6148	Digitus DA-70811/ZSMC USB PC Camera ZS211/Microdia
+sonixj		0c45:614a	Frontech E-Ccam (JIL-2225)
 sn9c20x		0c45:6240	PC Camera (SN9C201 + MT9M001)
 sn9c20x		0c45:6242	PC Camera (SN9C201 + MT9M111)
 sn9c20x		0c45:6248	PC Camera (SN9C201 + OV9655)
@@ -324,6 +330,10 @@ sn9c20x		0c45:62b0	PC Camera (SN9C202 + MT9V011/MT9V111/MT9V112)
 sn9c20x		0c45:62b3	PC Camera (SN9C202 + OV9655)
 sn9c20x		0c45:62bb	PC Camera (SN9C202 + OV7660)
 sn9c20x		0c45:62bc	PC Camera (SN9C202 + HV7131R)
+sn9c2028	0c45:8001	Wild Planet Digital Spy Camera
+sn9c2028	0c45:8003	Sakar #11199, #6637x, #67480 keychain cams
+sn9c2028	0c45:8008	Mini-Shotz ms-350
+sn9c2028	0c45:800a	Vivitar Vivicam 3350B
 sunplus		0d64:0303	Sunplus FashionCam DXG
 ov519		0e96:c001	TRUST 380 USB2 SPACEC@M
 etoms		102c:6151	Qcam Sangha CIF
@@ -341,10 +351,11 @@ spca501		1776:501c	Arowana 300K CMOS Camera
 t613		17a1:0128	TASCORP JPEG Webcam, NGS Cyclops
 vc032x		17ef:4802	Lenovo Vc0323+MI1310_SOC
 pac207		2001:f115	D-Link DSB-C120
-sq905c		2770:9050	sq905c
-sq905c		2770:905c	DualCamera
-sq905		2770:9120	Argus Digital Camera DC1512
-sq905c		2770:913d	sq905c
+sq905c		2770:9050	Disney pix micro (CIF)
+sq905c		2770:9052	Disney pix micro 2 (VGA)
+sq905c		2770:905c	All 11 known cameras with this ID
+sq905		2770:9120	All 24 known cameras with this ID
+sq905c		2770:913d	All 4 known cameras with this ID
 spca500		2899:012c	Toptro Industrial
 ov519		8020:ef04	ov519
 spca508		8086:0110	Intel Easy PC Camera

+ 10 - 96
Documentation/video4linux/v4l2-framework.txt

@@ -599,99 +599,13 @@ video_device::minor fields.
 video buffer helper functions
 -----------------------------
 
-The v4l2 core API provides a standard method for dealing with video
-buffers. Those methods allow a driver to implement read(), mmap() and
-overlay() on a consistent way.
-
-There are currently methods for using video buffers on devices that
-supports DMA with scatter/gather method (videobuf-dma-sg), DMA with
-linear access (videobuf-dma-contig), and vmalloced buffers, mostly
-used on USB drivers (videobuf-vmalloc).
-
-Any driver using videobuf should provide operations (callbacks) for
-four handlers:
-
-ops->buf_setup   - calculates the size of the video buffers and avoid they
-		   to waste more than some maximum limit of RAM;
-ops->buf_prepare - fills the video buffer structs and calls
-		   videobuf_iolock() to alloc and prepare mmaped memory;
-ops->buf_queue   - advices the driver that another buffer were
-		   requested (by read() or by QBUF);
-ops->buf_release - frees any buffer that were allocated.
-
-In order to use it, the driver need to have a code (generally called at
-interrupt context) that will properly handle the buffer request lists,
-announcing that a new buffer were filled.
-
-The irq handling code should handle the videobuf task lists, in order
-to advice videobuf that a new frame were filled, in order to honor to a
-request. The code is generally like this one:
-	if (list_empty(&dma_q->active))
-		return;
-
-	buf = list_entry(dma_q->active.next, struct vbuffer, vb.queue);
-
-	if (!waitqueue_active(&buf->vb.done))
-		return;
-
-	/* Some logic to handle the buf may be needed here */
-
-	list_del(&buf->vb.queue);
-	do_gettimeofday(&buf->vb.ts);
-	wake_up(&buf->vb.done);
-
-Those are the videobuffer functions used on drivers, implemented on
-videobuf-core:
-
-- Videobuf init functions
-  videobuf_queue_sg_init()
-      Initializes the videobuf infrastructure. This function should be
-      called before any other videobuf function on drivers that uses DMA
-      Scatter/Gather buffers.
-
-  videobuf_queue_dma_contig_init
-      Initializes the videobuf infrastructure. This function should be
-      called before any other videobuf function on drivers that need DMA
-      contiguous buffers.
-
-  videobuf_queue_vmalloc_init()
-      Initializes the videobuf infrastructure. This function should be
-      called before any other videobuf function on USB (and other drivers)
-      that need a vmalloced type of videobuf.
-
-- videobuf_iolock()
-  Prepares the videobuf memory for the proper method (read, mmap, overlay).
-
-- videobuf_queue_is_busy()
-  Checks if a videobuf is streaming.
-
-- videobuf_queue_cancel()
-  Stops video handling.
-
-- videobuf_mmap_free()
-  frees mmap buffers.
-
-- videobuf_stop()
-  Stops video handling, ends mmap and frees mmap and other buffers.
-
-- V4L2 api functions. Those functions correspond to VIDIOC_foo ioctls:
-   videobuf_reqbufs(), videobuf_querybuf(), videobuf_qbuf(),
-   videobuf_dqbuf(), videobuf_streamon(), videobuf_streamoff().
-
-- V4L1 api function (corresponds to VIDIOCMBUF ioctl):
-   videobuf_cgmbuf()
-      This function is used to provide backward compatibility with V4L1
-      API.
-
-- Some help functions for read()/poll() operations:
-   videobuf_read_stream()
-      For continuous stream read()
-   videobuf_read_one()
-      For snapshot read()
-   videobuf_poll_stream()
-      polling help function
-
-The better way to understand it is to take a look at vivi driver. One
-of the main reasons for vivi is to be a videobuf usage example. the
-vivi_thread_tick() does the task that the IRQ callback would do on PCI
-drivers (or the irq callback on USB).
+The v4l2 core API provides a set of standard methods (called "videobuf")
+for dealing with video buffers. Those methods allow a driver to implement
+read(), mmap() and overlay() in a consistent way.  There are currently
+methods for using video buffers on devices that support DMA with the
+scatter/gather method (videobuf-dma-sg), DMA with linear access
+(videobuf-dma-contig), and vmalloced buffers, mostly used on USB drivers
+(videobuf-vmalloc).
+
+Please see Documentation/video4linux/videobuf for more information on how
+to use the videobuf layer.

+ 360 - 0
Documentation/video4linux/videobuf

@@ -0,0 +1,360 @@
+An introduction to the videobuf layer
+Jonathan Corbet <corbet@lwn.net>
+Current as of 2.6.33
+
+The videobuf layer functions as a sort of glue layer between a V4L2 driver
+and user space.  It handles the allocation and management of buffers for
+the storage of video frames.  There is a set of functions which can be used
+to implement many of the standard POSIX I/O system calls, including read(),
+poll(), and, happily, mmap().  Another set of functions can be used to
+implement the bulk of the V4L2 ioctl() calls related to streaming I/O,
+including buffer allocation, queueing and dequeueing, and streaming
+control.  Using videobuf imposes a few design decisions on the driver
+author, but the payback comes in the form of reduced code in the driver and
+a consistent implementation of the V4L2 user-space API.
+
+Buffer types
+
+Not all video devices use the same kind of buffers.  In fact, there are (at
+least) three common variations:
+
+ - Buffers which are scattered in both the physical and (kernel) virtual
+   address spaces.  (Almost) all user-space buffers are like this, but it
+   makes great sense to allocate kernel-space buffers this way as well when
+   it is possible.  Unfortunately, it is not always possible; working with
+   this kind of buffer normally requires hardware which can do
+   scatter/gather DMA operations.
+
+ - Buffers which are physically scattered, but which are virtually
+   contiguous; buffers allocated with vmalloc(), in other words.  These
+   buffers are just as hard to use for DMA operations, but they can be
+   useful in situations where DMA is not available but virtually-contiguous
+   buffers are convenient.
+
+ - Buffers which are physically contiguous.  Allocation of this kind of
+   buffer can be unreliable on fragmented systems, but simpler DMA
+   controllers cannot deal with anything else.
+
+Videobuf can work with all three types of buffers, but the driver author
+must pick one at the outset and design the driver around that decision.
+
+[It's worth noting that there's a fourth kind of buffer: "overlay" buffers
+which are located within the system's video memory.  The overlay
+functionality is considered to be deprecated for most use, but it still
+shows up occasionally in system-on-chip drivers where the performance
+benefits merit the use of this technique.  Overlay buffers can be handled
+as a form of scattered buffer, but there are very few implementations in
+the kernel and a description of this technique is currently beyond the
+scope of this document.]
+
+Data structures, callbacks, and initialization
+
+Depending on which type of buffers are being used, the driver should
+include one of the following files:
+
+    <media/videobuf-dma-sg.h>		/* Physically scattered */
+    <media/videobuf-vmalloc.h>		/* vmalloc() buffers	*/
+    <media/videobuf-dma-contig.h>	/* Physically contiguous */
+
+The driver's data structure describing a V4L2 device should include a
+struct videobuf_queue instance for the management of the buffer queue,
+along with a list_head for the queue of available buffers.  There will also
+need to be an interrupt-safe spinlock which is used to protect (at least)
+the queue.
+
+The next step is to write four simple callbacks to help videobuf deal with
+the management of buffers:
+
+    struct videobuf_queue_ops {
+	int (*buf_setup)(struct videobuf_queue *q,
+			 unsigned int *count, unsigned int *size);
+	int (*buf_prepare)(struct videobuf_queue *q,
+			   struct videobuf_buffer *vb,
+			   enum v4l2_field field);
+	void (*buf_queue)(struct videobuf_queue *q,
+			  struct videobuf_buffer *vb);
+	void (*buf_release)(struct videobuf_queue *q,
+			    struct videobuf_buffer *vb);
+    };
+
+buf_setup() is called early in the I/O process, when streaming is being
+initiated; its purpose is to tell videobuf about the I/O stream.  The count
+parameter will be a suggested number of buffers to use; the driver should
+check it for rationality and adjust it if need be.  As a practical rule, a
+minimum of two buffers are needed for proper streaming, and there is
+usually a maximum (which cannot exceed 32) which makes sense for each
+device.  The size parameter should be set to the expected (maximum) size
+for each frame of data.
+
+Each buffer (in the form of a struct videobuf_buffer pointer) will be
+passed to buf_prepare(), which should set the buffer's size, width, height,
+and field fields properly.  If the buffer's state field is
+VIDEOBUF_NEEDS_INIT, the driver should pass it to:
+
+    int videobuf_iolock(struct videobuf_queue* q, struct videobuf_buffer *vb,
+			struct v4l2_framebuffer *fbuf);
+
+Among other things, this call will usually allocate memory for the buffer.
+Finally, the buf_prepare() function should set the buffer's state to
+VIDEOBUF_PREPARED.
+
+When a buffer is queued for I/O, it is passed to buf_queue(), which should
+put it onto the driver's list of available buffers and set its state to
+VIDEOBUF_QUEUED.  Note that this function is called with the queue spinlock
+held; if it tries to acquire it as well things will come to a screeching
+halt.  Yes, this is the voice of experience.  Note also that videobuf may
+wait on the first buffer in the queue; placing other buffers in front of it
+could again gum up the works.  So use list_add_tail() to enqueue buffers.
+
+Finally, buf_release() is called when a buffer is no longer intended to be
+used.  The driver should ensure that there is no I/O active on the buffer,
+then pass it to the appropriate free routine(s):
+
+    /* Scatter/gather drivers */
+    int videobuf_dma_unmap(struct videobuf_queue *q,
+			   struct videobuf_dmabuf *dma);
+    int videobuf_dma_free(struct videobuf_dmabuf *dma);
+
+    /* vmalloc drivers */
+    void videobuf_vmalloc_free (struct videobuf_buffer *buf);
+
+    /* Contiguous drivers */
+    void videobuf_dma_contig_free(struct videobuf_queue *q,
+				  struct videobuf_buffer *buf);
+
+One way to ensure that a buffer is no longer under I/O is to pass it to:
+
+    int videobuf_waiton(struct videobuf_buffer *vb, int non_blocking, int intr);
+
+Here, vb is the buffer, non_blocking indicates whether non-blocking I/O
+should be used (it should be zero in the buf_release() case), and intr
+controls whether an interruptible wait is used.
+
+File operations
+
+At this point, much of the work is done; much of the rest is slipping
+videobuf calls into the implementation of the other driver callbacks.  The
+first step is in the open() function, which must initialize the
+videobuf queue.  The function to use depends on the type of buffer used:
+
+    void videobuf_queue_sg_init(struct videobuf_queue *q,
+				struct videobuf_queue_ops *ops,
+				struct device *dev,
+				spinlock_t *irqlock,
+				enum v4l2_buf_type type,
+				enum v4l2_field field,
+				unsigned int msize,
+				void *priv);
+
+    void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
+				struct videobuf_queue_ops *ops,
+				struct device *dev,
+				spinlock_t *irqlock,
+				enum v4l2_buf_type type,
+				enum v4l2_field field,
+				unsigned int msize,
+				void *priv);
+
+    void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
+				       struct videobuf_queue_ops *ops,
+				       struct device *dev,
+				       spinlock_t *irqlock,
+				       enum v4l2_buf_type type,
+				       enum v4l2_field field,
+				       unsigned int msize,
+				       void *priv);
+
+In each case, the parameters are the same: q is the queue structure for the
+device, ops is the set of callbacks as described above, dev is the device
+structure for this video device, irqlock is an interrupt-safe spinlock to
+protect access to the data structures, type is the buffer type used by the
+device (cameras will use V4L2_BUF_TYPE_VIDEO_CAPTURE, for example), field
+describes which field is being captured (often V4L2_FIELD_NONE for
+progressive devices), msize is the size of any containing structure used
+around struct videobuf_buffer, and priv is a private data pointer which
+shows up in the priv_data field of struct videobuf_queue.  Note that these
+are void functions which, evidently, are immune to failure.
+
+V4L2 capture drivers can be written to support either of two APIs: the
+read() system call and the rather more complicated streaming mechanism.  As
+a general rule, it is necessary to support both to ensure that all
+applications have a chance of working with the device.  Videobuf makes it
+easy to do that with the same code.  To implement read(), the driver need
+only make a call to one of:
+
+    ssize_t videobuf_read_one(struct videobuf_queue *q,
+			      char __user *data, size_t count,
+			      loff_t *ppos, int nonblocking);
+
+    ssize_t videobuf_read_stream(struct videobuf_queue *q,
+				 char __user *data, size_t count,
+				 loff_t *ppos, int vbihack, int nonblocking);
+
+Either one of these functions will read frame data into data, returning the
+amount actually read; the difference is that videobuf_read_one() will only
+read a single frame, while videobuf_read_stream() will read multiple frames
+if they are needed to satisfy the count requested by the application.  A
+typical driver read() implementation will start the capture engine, call
+one of the above functions, then stop the engine before returning (though a
+smarter implementation might leave the engine running for a little while in
+anticipation of another read() call happening in the near future).
+
+The poll() function can usually be implemented with a direct call to:
+
+    unsigned int videobuf_poll_stream(struct file *file,
+				      struct videobuf_queue *q,
+				      poll_table *wait);
+
+Note that the actual wait queue eventually used will be the one associated
+with the first available buffer.
+
+When streaming I/O is done to kernel-space buffers, the driver must support
+the mmap() system call to enable user space to access the data.  In many
+V4L2 drivers, the often-complex mmap() implementation simplifies to a
+single call to:
+
+    int videobuf_mmap_mapper(struct videobuf_queue *q,
+			     struct vm_area_struct *vma);
+
+Everything else is handled by the videobuf code.
+
+The release() function requires two separate videobuf calls:
+
+    void videobuf_stop(struct videobuf_queue *q);
+    int videobuf_mmap_free(struct videobuf_queue *q);
+
+The call to videobuf_stop() terminates any I/O in progress - though it is
+still up to the driver to stop the capture engine.  The call to
+videobuf_mmap_free() will ensure that all buffers have been unmapped; if
+so, they will all be passed to the buf_release() callback.  If buffers
+remain mapped, videobuf_mmap_free() returns an error code instead.  The
+purpose is clearly to cause the closing of the file descriptor to fail if
+buffers are still mapped, but every driver in the 2.6.32 kernel cheerfully
+ignores its return value.
+
+ioctl() operations
+
+The V4L2 API includes a very long list of driver callbacks to respond to
+the many ioctl() commands made available to user space.  A number of these
+- those associated with streaming I/O - turn almost directly into videobuf
+calls.  The relevant helper functions are:
+
+    int videobuf_reqbufs(struct videobuf_queue *q,
+			 struct v4l2_requestbuffers *req);
+    int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b);
+    int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b);
+    int videobuf_dqbuf(struct videobuf_queue *q, struct v4l2_buffer *b,
+		       int nonblocking);
+    int videobuf_streamon(struct videobuf_queue *q);
+    int videobuf_streamoff(struct videobuf_queue *q);
+    int videobuf_cgmbuf(struct videobuf_queue *q, struct video_mbuf *mbuf,
+			int count);
+
+So, for example, a VIDIOC_REQBUFS call turns into a call to the driver's
+vidioc_reqbufs() callback which, in turn, usually only needs to locate the
+proper struct videobuf_queue pointer and pass it to videobuf_reqbufs().
+These support functions can replace a great deal of buffer management
+boilerplate in a lot of V4L2 drivers.
+
+The vidioc_streamon() and vidioc_streamoff() functions will be a bit more
+complex, of course, since they will also need to deal with starting and
+stopping the capture engine.  videobuf_cgmbuf(), called from the driver's
+vidiocgmbuf() function, only exists if the V4L1 compatibility module has
+been selected with CONFIG_VIDEO_V4L1_COMPAT, so its use must be surrounded
+with #ifdef directives.
+
+Buffer allocation
+
+Thus far, we have talked about buffers, but have not looked at how they are
+allocated.  The scatter/gather case is the most complex on this front.  For
+allocation, the driver can leave buffer allocation entirely up to the
+videobuf layer; in this case, buffers will be allocated as anonymous
+user-space pages and will be very scattered indeed.  If the application is
+using user-space buffers, no allocation is needed; the videobuf layer will
+take care of calling get_user_pages() and filling in the scatterlist array.
+
+If the driver needs to do its own memory allocation, it should be done in
+the vidioc_reqbufs() function, *after* calling videobuf_reqbufs().  The
+first step is a call to:
+
+    struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf);
+
+The returned videobuf_dmabuf structure (defined in
+<media/videobuf-dma-sg.h>) includes a couple of relevant fields:
+
+    struct scatterlist  *sglist;
+    int                 sglen;
+
+The driver must allocate an appropriately-sized scatterlist array and
+populate it with pointers to the pieces of the allocated buffer; sglen
+should be set to the length of the array.
+
+Drivers using the vmalloc() method need not (and cannot) concern themselves
+with buffer allocation at all; videobuf will handle those details.  The
+same is normally true of contiguous-DMA drivers as well; videobuf will
+allocate the buffers (with dma_alloc_coherent()) when it sees fit.  That
+means that these drivers may be trying to do high-order allocations at any
+time, an operation which is not always guaranteed to work.  Some drivers
+play tricks by allocating DMA space at system boot time; videobuf does not
+currently play well with those drivers.
+
+As of 2.6.31, contiguous-DMA drivers can work with a user-supplied buffer,
+as long as that buffer is physically contiguous.  Normal user-space
+allocations will not meet that criterion, but buffers obtained from other
+kernel drivers, or those contained within huge pages, will work with these
+drivers.
+
+Filling the buffers
+
+The final part of a videobuf implementation has no direct callback - it's
+the portion of the code which actually puts frame data into the buffers,
+usually in response to interrupts from the device.  For all types of
+drivers, this process works approximately as follows:
+
+ - Obtain the next available buffer and make sure that somebody is actually
+   waiting for it.
+
+ - Get a pointer to the memory and put video data there.
+
+ - Mark the buffer as done and wake up the process waiting for it.
+
+Step (1) above is done by looking at the driver-managed list_head structure
+- the one which is filled in the buf_queue() callback.  Because starting
+the engine and enqueueing buffers are done in separate steps, it's possible
+for the engine to be running without any buffers available - in the
+vmalloc() case especially.  So the driver should be prepared for the list
+to be empty.  It is equally possible that nobody is yet interested in the
+buffer; the driver should not remove it from the list or fill it until a
+process is waiting on it.  That test can be done by examining the buffer's
+done field (a wait_queue_head_t structure) with waitqueue_active().
+
+A buffer's state should be set to VIDEOBUF_ACTIVE before being mapped for
+DMA; that ensures that the videobuf layer will not try to do anything with
+it while the device is transferring data.
+
+For scatter/gather drivers, the needed memory pointers will be found in the
+scatterlist structure described above.  Drivers using the vmalloc() method
+can get a memory pointer with:
+
+    void *videobuf_to_vmalloc(struct videobuf_buffer *buf);
+
+For contiguous DMA drivers, the function to use is:
+
+    dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf);
+
+The contiguous DMA API goes out of its way to hide the kernel-space address
+of the DMA buffer from drivers.
+
+The final step is to set the size field of the relevant videobuf_buffer
+structure to the actual size of the captured image, set state to
+VIDEOBUF_DONE, then call wake_up() on the done queue.  At this point, the
+buffer is owned by the videobuf layer and the driver should not touch it
+again.
+
+Developers who are interested in more information can go into the relevant
+header files; there are a few low-level functions declared there which have
+not been talked about here.  Also worthwhile is the vivi driver
+(drivers/media/video/vivi.c), which is maintained as an example of how V4L2
+drivers should be written.  Vivi only uses the vmalloc() API, but it's good
+enough to get started with.  Note also that all of these calls are exported
+GPL-only, so they will not be available to non-GPL kernel modules.
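
To make the interrupt-time steps in "Filling the buffers" concrete, here is an
illustrative sketch for a vmalloc-type driver; struct mydev and struct mybuf
are invented for this example, and only the videobuf calls and vb.* fields come
from the layer described above:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/string.h>
    #include <linux/time.h>
    #include <linux/wait.h>
    #include <media/videobuf-vmalloc.h>

    struct mybuf {
    	struct videobuf_buffer vb;	/* must come first */
    };

    struct mydev {
    	spinlock_t slock;		/* the irqlock passed at queue init */
    	struct list_head active;	/* filled by the buf_queue() callback */
    };

    static void mydev_frame_done(struct mydev *dev, const void *data, size_t len)
    {
    	struct mybuf *buf;
    	unsigned long flags;

    	spin_lock_irqsave(&dev->slock, flags);
    	if (list_empty(&dev->active))
    		goto out;		/* engine running, nothing queued */

    	buf = list_entry(dev->active.next, struct mybuf, vb.queue);
    	if (!waitqueue_active(&buf->vb.done))
    		goto out;		/* nobody is waiting for it yet */

    	list_del(&buf->vb.queue);

    	/* Step 2: put the video data into the buffer's memory. */
    	memcpy(videobuf_to_vmalloc(&buf->vb), data, len);

    	/* Step 3: mark it done and wake up the waiting process. */
    	buf->vb.size = len;
    	buf->vb.state = VIDEOBUF_DONE;
    	do_gettimeofday(&buf->vb.ts);
    	wake_up(&buf->vb.done);
    out:
    	spin_unlock_irqrestore(&dev->slock, flags);
    }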

+ 7 - 13
Documentation/x86/x86_64/boot-options.txt

@@ -166,19 +166,13 @@ NUMA
 
   numa=noacpi   Don't parse the SRAT table for NUMA setup
 
-  numa=fake=CMDLINE
-		If a number, fakes CMDLINE nodes and ignores NUMA setup of the
-		actual machine.  Otherwise, system memory is configured
-		depending on the sizes and coefficients listed.  For example:
-			numa=fake=2*512,1024,4*256,*128
-		gives two 512M nodes, a 1024M node, four 256M nodes, and the
-		rest split into 128M chunks.  If the last character of CMDLINE
-		is a *, the remaining memory is divided up equally among its
-		coefficient:
-			numa=fake=2*512,2*
-		gives two 512M nodes and the rest split into two nodes.
-		Otherwise, the remaining system RAM is allocated to an
-		additional node.
+  numa=fake=<size>[MG]
+		If given as a memory unit, fills all system RAM with nodes of
+		the given size, interleaved over physical nodes.
+
+  numa=fake=<N>
+		If given as an integer, fills all system RAM with N fake nodes
+		interleaved over physical nodes.
 
 ACPI
 

+ 69 - 18
MAINTAINERS

@@ -221,6 +221,7 @@ F:	drivers/net/acenic*
 
 ACER ASPIRE ONE TEMPERATURE AND FAN DRIVER
 M:	Peter Feuerer <peter@piie.net>
+L:	platform-driver-x86@vger.kernel.org
 W:	http://piie.net/?section=acerhdf
 S:	Maintained
 F:	drivers/platform/x86/acerhdf.c
@@ -228,6 +229,7 @@ F:	drivers/platform/x86/acerhdf.c
 ACER WMI LAPTOP EXTRAS
 M:	Carlos Corbacho <carlos@strangeworlds.co.uk>
 L:	aceracpi@googlegroups.com (subscribers-only)
+L:	platform-driver-x86@vger.kernel.org
 W:	http://code.google.com/p/aceracpi
 S:	Maintained
 F:	drivers/platform/x86/acer-wmi.c
@@ -288,7 +290,7 @@ F:	drivers/acpi/video.c
 
 ACPI WMI DRIVER
 M:	Carlos Corbacho <carlos@strangeworlds.co.uk>
-L:	linux-acpi@vger.kernel.org
+L:	platform-driver-x86@vger.kernel.org
 W:	http://www.lesswatts.org/projects/acpi/
 S:	Maintained
 F:	drivers/platform/x86/wmi.c
@@ -616,10 +618,10 @@ M:	Richard Purdie <rpurdie@rpsys.net>
 S:	Maintained
 
 ARM/CORTINA SYSTEMS GEMINI ARM ARCHITECTURE
-M:	Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+M:	Paulius Zaleckas <paulius.zaleckas@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:	git git://gitorious.org/linux-gemini/mainline.git
-S:	Maintained
+S:	Odd Fixes
 F:	arch/arm/mach-gemini/
 
 ARM/EBSA110 MACHINE SUPPORT
@@ -641,9 +643,9 @@ T:	topgit git://git.openezx.org/openezx.git
 F:	arch/arm/mach-pxa/ezx.c
 
 ARM/FARADAY FA526 PORT
-M:	Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+M:	Paulius Zaleckas <paulius.zaleckas@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:	Maintained
+S:	Odd Fixes
 F:	arch/arm/mm/*-fa*
 
 ARM/FOOTBRIDGE ARCHITECTURE
@@ -968,6 +970,7 @@ ASUS ACPI EXTRAS DRIVER
 M:	Corentin Chary <corentincj@iksaif.net>
 M:	Karol Kozimor <sziwan@users.sourceforge.net>
 L:	acpi4asus-user@lists.sourceforge.net
+L:	platform-driver-x86@vger.kernel.org
 W:	http://acpi4asus.sf.net
 S:	Maintained
 F:	drivers/platform/x86/asus_acpi.c
@@ -981,6 +984,7 @@ F:	drivers/hwmon/asb100.c
 ASUS LAPTOP EXTRAS DRIVER
 M:	Corentin Chary <corentincj@iksaif.net>
 L:	acpi4asus-user@lists.sourceforge.net
+L:	platform-driver-x86@vger.kernel.org
 W:	http://acpi4asus.sf.net
 S:	Maintained
 F:	drivers/platform/x86/asus-laptop.c
@@ -1473,6 +1477,7 @@ F:	drivers/scsi/fnic/
 CMPC ACPI DRIVER
 M:	Thadeu Lima de Souza Cascardo <cascardo@holoscopio.com>
 M:	Daniel Oliveira Nascimento <don@syst.com.br>
+L:	platform-driver-x86@vger.kernel.org
 S:	Supported
 F:	drivers/platform/x86/classmate-laptop.c
 
@@ -1516,6 +1521,7 @@ F:	drivers/pci/hotplug/cpcihp_generic.c
 
 COMPAL LAPTOP SUPPORT
 M:	Cezary Jackiewicz <cezary.jackiewicz@gmail.com>
+L:	platform-driver-x86@vger.kernel.org
 S:	Maintained
 F:	drivers/platform/x86/compal-laptop.c
 
@@ -1733,10 +1739,9 @@ F:	include/linux/tfrc.h
 F:	net/dccp/
 
 DECnet NETWORK LAYER
-M:	Christine Caulfield <christine.caulfield@googlemail.com>
 W:	http://linux-decnet.sourceforge.net
 L:	linux-decnet-user@lists.sourceforge.net
-S:	Maintained
+S:	Orphan
 F:	Documentation/networking/decnet.txt
 F:	net/decnet/
 
@@ -1747,6 +1752,7 @@ F:	drivers/net/defxx.*
 
 DELL LAPTOP DRIVER
 M:	Matthew Garrett <mjg59@srcf.ucam.org>
+L:	platform-driver-x86@vger.kernel.org
 S:	Maintained
 F:	drivers/platform/x86/dell-laptop.c
 
@@ -2029,6 +2035,7 @@ F:	drivers/edac/r82600_edac.c
 EEEPC LAPTOP EXTRAS DRIVER
 M:	Corentin Chary <corentincj@iksaif.net>
 L:	acpi4asus-user@lists.sourceforge.net
+L:	platform-driver-x86@vger.kernel.org
 W:	http://acpi4asus.sf.net
 S:	Maintained
 F:	drivers/platform/x86/eeepc-laptop.c
@@ -2142,6 +2149,17 @@ S:	Supported
 F:	Documentation/fault-injection/
 F:	lib/fault-inject.c
 
+FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
+M:	Robert Love <robert.w.love@intel.com>
+L:	devel@open-fcoe.org
+W:	www.Open-FCoE.org
+S:	Supported
+F:	drivers/scsi/libfc/
+F:	drivers/scsi/fcoe/
+F:	include/scsi/fc/
+F:	include/scsi/libfc.h
+F:	include/scsi/libfcoe.h
+
 FILE LOCKING (flock() and fcntl()/lockf())
 M:	Matthew Wilcox <matthew@wil.cx>
 L:	linux-fsdevel@vger.kernel.org
@@ -2296,7 +2314,7 @@ F:	arch/frv/
 
 FUJITSU LAPTOP EXTRAS
 M:	Jonathan Woithe <jwoithe@physics.adelaide.edu.au>
-L:	linux-acpi@vger.kernel.org
+L:	platform-driver-x86@vger.kernel.org
 S:	Maintained
 F:	drivers/platform/x86/fujitsu-laptop.c
 
@@ -2394,6 +2412,18 @@ L:	linuxppc-dev@ozlabs.org
 S:	Odd Fixes
 F:	drivers/char/hvc_*
 
+VIRTIO CONSOLE DRIVER
+M:	Amit Shah <amit.shah@redhat.com>
+L:	virtualization@lists.linux-foundation.org
+S:	Maintained
+F:	drivers/char/virtio_console.c
+
+iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER
+M:	Peter Jones <pjones@redhat.com>
+M:	Konrad Rzeszutek Wilk <konrad@kernel.org>
+S:	Maintained
+F:	drivers/firmware/iscsi_ibft*
+
 GSPCA FINEPIX SUBDRIVER
 M:	Frank Zago <frank@zago.net>
 L:	linux-media@vger.kernel.org
@@ -2562,6 +2592,7 @@ F:	drivers/net/wireless/hostap/
 
 HP COMPAQ TC1100 TABLET WMI EXTRAS DRIVER
 M:	Carlos Corbacho <carlos@strangeworlds.co.uk>
+L:	platform-driver-x86@vger.kernel.org
 S:	Odd Fixes
 F:	drivers/platform/x86/tc1100-wmi.c
 
@@ -2772,7 +2803,7 @@ F:	drivers/video/i810/
 
 INTEL MENLOW THERMAL DRIVER
 M:	Sujith Thomas <sujith.thomas@intel.com>
-L:	linux-acpi@vger.kernel.org
+L:	platform-driver-x86@vger.kernel.org
 W:	http://www.lesswatts.org/projects/acpi/
 S:	Supported
 F:	drivers/platform/x86/intel_menlow.c
@@ -3490,9 +3521,9 @@ S:	Maintained
 F:	drivers/net/wireless/libertas/
 
 MARVELL MV643XX ETHERNET DRIVER
-M:	Lennert Buytenhek <buytenh@marvell.com>
+M:	Lennert Buytenhek <buytenh@wantstofly.org>
 L:	netdev@vger.kernel.org
-S:	Supported
+S:	Maintained
 F:	drivers/net/mv643xx_eth.*
 F:	include/linux/mv643xx.h
 
@@ -3638,6 +3669,7 @@ F:	drivers/char/mxser.*
 
 MSI LAPTOP SUPPORT
 M:	Lennart Poettering <mzxreary@0pointer.de>
+L:	platform-driver-x86@vger.kernel.org
 W:	https://tango.0pointer.de/mailman/listinfo/s270-linux
 W:	http://0pointer.de/lennart/tchibo.html
 S:	Maintained
@@ -3645,6 +3677,7 @@ F:	drivers/platform/x86/msi-laptop.c
 
 MSI WMI SUPPORT
 M:	Anisse Astier <anisse@astier.eu>
+L:	platform-driver-x86@vger.kernel.org
 S:	Supported
 F:	drivers/platform/x86/msi-wmi.c
 
@@ -4097,6 +4130,7 @@ F:	drivers/i2c/busses/i2c-pasemi.c
 
 PANASONIC LAPTOP ACPI EXTRAS DRIVER
 M:	Harald Welte <laforge@gnumonks.org>
+L:	platform-driver-x86@vger.kernel.org
 S:	Maintained
 F:	drivers/platform/x86/panasonic-laptop.c
 
@@ -4510,7 +4544,7 @@ F:	drivers/net/wireless/ray*
 RCUTORTURE MODULE
 M:	Josh Triplett <josh@freedesktop.org>
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
-S:	Maintained
+S:	Supported
 F:	Documentation/RCU/torture.txt
 F:	kernel/rcutorture.c
 
@@ -4535,11 +4569,12 @@ M:	Dipankar Sarma <dipankar@in.ibm.com>
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 W:	http://www.rdrop.com/users/paulmck/rclock/
 S:	Supported
-F:	Documentation/RCU/rcu.txt
-F:	Documentation/RCU/rcuref.txt
-F:	include/linux/rcupdate.h
-F:	include/linux/srcu.h
-F:	kernel/rcupdate.c
+F:	Documentation/RCU/
+F:	include/linux/rcu*
+F:	include/linux/srcu*
+F:	kernel/rcu*
+F:	kernel/srcu*
+X:	kernel/rcutorture.c
 
 REAL TIME CLOCK DRIVER
 M:	Paul Gortmaker <p_gortmaker@yahoo.com>
@@ -4677,6 +4712,13 @@ F:	drivers/media/common/saa7146*
 F:	drivers/media/video/*7146*
 F:	include/media/*7146*
 
+TLG2300 VIDEO4LINUX-2 DRIVER
+M:	Huang Shijie <shijie8@gmail.com>
+M:	Kang Yong <kangyong@telegent.com>
+M:	Zhang Xiaobing <xbzhang@telegent.com>
+S:	Supported
+F:	drivers/media/video/tlg2300
+
 SC1200 WDT DRIVER
 M:	Zwane Mwaikambo <zwane@arm.linux.org.uk>
 S:	Maintained
@@ -5035,7 +5077,7 @@ F:	include/linux/ssb/
 
 SONY VAIO CONTROL DEVICE DRIVER
 M:	Mattia Dongili <malattia@linux.it>
-L:	linux-acpi@vger.kernel.org
+L:	platform-driver-x86@vger.kernel.org
 W:	http://www.linux.it/~malattia/wiki/index.php/Sony_drivers
 S:	Maintained
 F:	Documentation/laptops/sony-laptop.txt
@@ -5241,6 +5283,7 @@ F:	arch/xtensa/
 THINKPAD ACPI EXTRAS DRIVER
 M:	Henrique de Moraes Holschuh <ibm-acpi@hmh.eng.br>
 L:	ibm-acpi-devel@lists.sourceforge.net
+L:	platform-driver-x86@vger.kernel.org
 W:	http://ibm-acpi.sourceforge.net
 W:	http://thinkwiki.org/wiki/Ibm-acpi
 T:	git git://repo.or.cz/linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git
@@ -5294,10 +5337,12 @@ F:	security/tomoyo/
 
 TOPSTAR LAPTOP EXTRAS DRIVER
 M:	Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+L:	platform-driver-x86@vger.kernel.org
 S:	Maintained
 F:	drivers/platform/x86/topstar-laptop.c
 
 TOSHIBA ACPI EXTRAS DRIVER
+L:	platform-driver-x86@vger.kernel.org
 S:	Orphan
 F:	drivers/platform/x86/toshiba_acpi.c
 
@@ -6025,6 +6070,12 @@ S:	Maintained
 F:	Documentation/x86/
 F:	arch/x86/
 
+X86 PLATFORM DRIVERS
+M:	Matthew Garrett <mjg@redhat.com>
+L:	platform-driver-x86@vger.kernel.org
+S:	Maintained
+F:	drivers/platform/x86
+
 XEN HYPERVISOR INTERFACE
 M:	Jeremy Fitzhardinge <jeremy@xensource.com>
 M:	Chris Wright <chrisw@sous-sol.org>

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 33
-EXTRAVERSION = -rc8
+EXTRAVERSION =
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*

+ 9 - 17
arch/Kconfig

@@ -3,11 +3,9 @@
 #
 
 config OPROFILE
-	tristate "OProfile system profiling (EXPERIMENTAL)"
+	tristate "OProfile system profiling"
 	depends on PROFILING
 	depends on HAVE_OPROFILE
-	depends on TRACING_SUPPORT
-	select TRACING
 	select RING_BUFFER
 	select RING_BUFFER_ALLOW_SWAP
 	help
@@ -17,20 +15,6 @@ config OPROFILE
 
 	  If unsure, say N.
 
-config OPROFILE_IBS
-	bool "OProfile AMD IBS support (EXPERIMENTAL)"
-	default n
-	depends on OPROFILE && SMP && X86
-	help
-          Instruction-Based Sampling (IBS) is a new profiling
-          technique that provides rich, precise program performance
-          information. IBS is introduced by AMD Family10h processors
-          (AMD Opteron Quad-Core processor "Barcelona") to overcome
-          the limitations of conventional performance counter
-          sampling.
-
-	  If unsure, say N.
-
 config OPROFILE_EVENT_MULTIPLEX
 	bool "OProfile multiplexing support (EXPERIMENTAL)"
 	default n
@@ -121,6 +105,14 @@ config HAVE_DMA_ATTRS
 config USE_GENERIC_SMP_HELPERS
 	bool
 
+config HAVE_REGS_AND_STACK_ACCESS_API
+	bool
+	help
+	  This symbol should be selected by an architecture if it supports
+	  the API needed to access registers and stack entries from pt_regs,
+	  declared in asm/ptrace.h
+	  For example the kprobes-based event tracer needs this API.
+
 config HAVE_CLK
 	bool
 	help

+ 1 - 1
arch/alpha/include/asm/pgtable.h

@@ -329,7 +329,7 @@ extern pgd_t swapper_pg_dir[1024];
  * tables contain all the necessary information.
  */
 extern inline void update_mmu_cache(struct vm_area_struct * vma,
-	unsigned long address, pte_t pte)
+	unsigned long address, pte_t *ptep)
 {
 }
 

+ 3 - 3
arch/alpha/kernel/pci.c

@@ -126,8 +126,8 @@ DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);
 #define MB			(1024*KB)
 #define GB			(1024*MB)
 
-void
-pcibios_align_resource(void *data, struct resource *res,
+resource_size_t
+pcibios_align_resource(void *data, const struct resource *res,
 		       resource_size_t size, resource_size_t align)
 {
 	struct pci_dev *dev = data;
@@ -184,7 +184,7 @@ pcibios_align_resource(void *data, struct resource *res,
 		}
 	}
 
-	res->start = start;
+	return start;
 }
 #undef KB
 #undef MB

+ 51 - 0
arch/arm/Kconfig

@@ -12,6 +12,7 @@ config ARM
 	select HAVE_IDE
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
+	select GENERIC_ATOMIC64 if (!CPU_32v6K)
 	select HAVE_OPROFILE
 	select HAVE_ARCH_KGDB
 	select HAVE_KPROBES if (!XIP_KERNEL)
@@ -20,6 +21,8 @@ config ARM
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZO
+	select HAVE_PERF_EVENTS
+	select PERF_USE_VMALLOC
 	help
 	  The ARM series is a line of low-power-consumption RISC chip designs
 	  licensed by ARM Ltd and targeted at embedded applications and
@@ -52,6 +55,9 @@ config HAVE_TCM
 	bool
 	select GENERIC_ALLOCATOR
 
+config HAVE_PROC_CPU
+	bool
+
 config NO_IOPORT
 	bool
 
@@ -161,6 +167,11 @@ config ARCH_MTD_XIP
 config GENERIC_HARDIRQS_NO__DO_IRQ
 	def_bool y
 
+config ARM_L1_CACHE_SHIFT_6
+	bool
+	help
+	  Setting ARM L1 cache line size to 64 Bytes.
+
 if OPROFILE
 
 config OPROFILE_ARMV6
@@ -550,10 +561,20 @@ config ARCH_W90X900
 	  <http://www.nuvoton.com/hq/enu/ProductAndSales/ProductLines/
 		ConsumerElectronicsIC/ARMMicrocontroller/ARMMicrocontroller>
 
+config ARCH_NUC93X
+	bool "Nuvoton NUC93X CPU"
+	select CPU_ARM926T
+	select HAVE_CLK
+	select COMMON_CLKDEV
+	help
+	  Support for the Nuvoton (Winbond logic dept.) NUC93X MCU. The NUC93X is a
+	  low-power, high-performance MPEG-4/JPEG multimedia controller chip.
+
 config ARCH_PNX4008
 	bool "Philips Nexperia PNX4008 Mobile"
 	select CPU_ARM926T
 	select HAVE_CLK
+	select COMMON_CLKDEV
 	help
 	  This enables support for Philips PNX4008 mobile platform.
 
@@ -638,6 +659,7 @@ config ARCH_S5PC1XX
 	select GENERIC_GPIO
 	select HAVE_CLK
 	select CPU_V7
+	select ARM_L1_CACHE_SHIFT_6
 	help
 	  Samsung S5PC1XX series based systems
 
@@ -785,6 +807,8 @@ source "arch/arm/plat-nomadik/Kconfig"
 
 source "arch/arm/mach-ns9xxx/Kconfig"
 
+source "arch/arm/mach-nuc93x/Kconfig"
+
 source "arch/arm/plat-omap/Kconfig"
 
 source "arch/arm/mach-omap1/Kconfig"
@@ -867,6 +891,11 @@ config XSCALE_PMU
 	depends on CPU_XSCALE && !XSCALE_PMU_TIMER
 	default y
 
+config CPU_HAS_PMU
+	depends on CPU_V6 || CPU_V7 || XSCALE_PMU
+	default y
+	bool
+
 if !MMU
 source "arch/arm/Kconfig-nommu"
 endif
@@ -921,6 +950,19 @@ config ARM_ERRATA_460075
 	  ACTLR register. Note that setting specific bits in the ACTLR register
 	  may not be available in non-secure mode.
 
+config PL310_ERRATA_588369
+	bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
+	depends on CACHE_L2X0 && ARCH_OMAP4
+	help
+	   The PL310 L2 cache controller implements three types of Clean &
+	   Invalidate maintenance operations: by Physical Address
+	   (offset 0x7F0), by Index/Way (0x7F8) and by Way (0x7FC).
+	   They are architecturally defined to behave as the execution of a
+	   clean operation followed immediately by an invalidate operation,
+	   both performing to the same memory location. This functionality
+	   is not correctly implemented in PL310 as clean lines are not
+	   invalidated as a result of these operations. Note that this errata
+	   uses Texas Instrument's secure monitor api.
 endmenu
 
 source "arch/arm/common/Kconfig"
@@ -1171,6 +1213,14 @@ config HIGHPTE
 	depends on HIGHMEM
 	depends on !OUTER_CACHE
 
+config HW_PERF_EVENTS
+	bool "Enable hardware performance counter support for perf events"
+	depends on PERF_EVENTS && CPU_HAS_PMU && (CPU_V6 || CPU_V7)
+	default y
+	help
+	  Enable hardware performance counter support for perf events. If
+	  disabled, perf events will use software events only.
+
 source "mm/Kconfig"
 
 config LEDS
@@ -1230,6 +1280,7 @@ config ALIGNMENT_TRAP
 	bool
 	depends on CPU_CP15_MMU
 	default y if !ARCH_EBSA110
+	select HAVE_PROC_CPU if PROC_FS
 	help
 	  ARM processors cannot fetch/store information which is not
 	  naturally aligned on the bus, i.e., a 4 byte fetch must start at an

+ 1 - 0
arch/arm/Makefile

@@ -171,6 +171,7 @@ machine-$(CONFIG_ARCH_U300)		:= u300
 machine-$(CONFIG_ARCH_U8500)		:= ux500
 machine-$(CONFIG_ARCH_VERSATILE)	:= versatile
 machine-$(CONFIG_ARCH_W90X900)		:= w90x900
+machine-$(CONFIG_ARCH_NUC93X)		:= nuc93x
 machine-$(CONFIG_FOOTBRIDGE)		:= footbridge
 
 # Platform directory name.  This list is sorted alphanumerically

+ 1 - 5
arch/arm/boot/compressed/Makefile

@@ -5,7 +5,7 @@
 #
 
 HEAD	= head.o
-OBJS	= misc.o
+OBJS	= misc.o decompress.o
 FONTC	= $(srctree)/drivers/video/console/font_acorn_8x8.c
 
 #
@@ -106,10 +106,6 @@ lib1funcs = $(obj)/lib1funcs.o
 $(obj)/lib1funcs.S: $(srctree)/arch/$(SRCARCH)/lib/lib1funcs.S FORCE
 	$(call cmd,shipped)
 
-# Don't allow any static data in misc.o, which
-# would otherwise mess up our GOT table
-CFLAGS_misc.o := -Dstatic=
-
 $(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.$(suffix_y).o \
 	 	$(addprefix $(obj)/, $(OBJS)) $(lib1funcs) FORCE
 	$(call if_changed,ld)

+ 45 - 0
arch/arm/boot/compressed/decompress.c

@@ -0,0 +1,45 @@
+#define _LINUX_STRING_H_
+
+#include <linux/compiler.h>	/* for inline */
+#include <linux/types.h>	/* for size_t */
+#include <linux/stddef.h>	/* for NULL */
+#include <linux/linkage.h>
+#include <asm/string.h>
+
+extern unsigned long free_mem_ptr;
+extern unsigned long free_mem_end_ptr;
+extern void error(char *);
+
+#define STATIC static
+
+#define ARCH_HAS_DECOMP_WDOG
+
+/* Diagnostic functions */
+#ifdef DEBUG
+#  define Assert(cond,msg) {if(!(cond)) error(msg);}
+#  define Trace(x) fprintf x
+#  define Tracev(x) {if (verbose) fprintf x ;}
+#  define Tracevv(x) {if (verbose>1) fprintf x ;}
+#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
+#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
+#else
+#  define Assert(cond,msg)
+#  define Trace(x)
+#  define Tracev(x)
+#  define Tracevv(x)
+#  define Tracec(c,x)
+#  define Tracecv(c,x)
+#endif
+
+#ifdef CONFIG_KERNEL_GZIP
+#include "../../../../lib/decompress_inflate.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZO
+#include "../../../../lib/decompress_unlzo.c"
+#endif
+
+void do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x))
+{
+	decompress(input, len, NULL, NULL, output, NULL, error);
+}

+ 11 - 98
arch/arm/boot/compressed/misc.c

@@ -23,8 +23,8 @@ unsigned int __machine_arch_type;
 #include <linux/compiler.h>	/* for inline */
 #include <linux/types.h>	/* for size_t */
 #include <linux/stddef.h>	/* for NULL */
-#include <asm/string.h>
 #include <linux/linkage.h>
+#include <asm/string.h>
 
 #include <asm/unaligned.h>
 
@@ -117,57 +117,7 @@ static void putstr(const char *ptr)
 
 #endif
 
-#define __ptr_t void *
-
-#define memzero(s,n) __memzero(s,n)
-
-/*
- * Optimised C version of memzero for the ARM.
- */
-void __memzero (__ptr_t s, size_t n)
-{
-	union { void *vp; unsigned long *ulp; unsigned char *ucp; } u;
-	int i;
-
-	u.vp = s;
-
-	for (i = n >> 5; i > 0; i--) {
-		*u.ulp++ = 0;
-		*u.ulp++ = 0;
-		*u.ulp++ = 0;
-		*u.ulp++ = 0;
-		*u.ulp++ = 0;
-		*u.ulp++ = 0;
-		*u.ulp++ = 0;
-		*u.ulp++ = 0;
-	}
-
-	if (n & 1 << 4) {
-		*u.ulp++ = 0;
-		*u.ulp++ = 0;
-		*u.ulp++ = 0;
-		*u.ulp++ = 0;
-	}
-
-	if (n & 1 << 3) {
-		*u.ulp++ = 0;
-		*u.ulp++ = 0;
-	}
-
-	if (n & 1 << 2)
-		*u.ulp++ = 0;
-
-	if (n & 1 << 1) {
-		*u.ucp++ = 0;
-		*u.ucp++ = 0;
-	}
-
-	if (n & 1)
-		*u.ucp++ = 0;
-}
-
-static inline __ptr_t memcpy(__ptr_t __dest, __const __ptr_t __src,
-			    size_t __n)
+void *memcpy(void *__dest, __const void *__src, size_t __n)
 {
 	int i = 0;
 	unsigned char *d = (unsigned char *)__dest, *s = (unsigned char *)__src;
@@ -204,59 +154,20 @@ static inline __ptr_t memcpy(__ptr_t __dest, __const __ptr_t __src,
 /*
  * gzip delarations
  */
-#define STATIC static
-
-/* Diagnostic functions */
-#ifdef DEBUG
-#  define Assert(cond,msg) {if(!(cond)) error(msg);}
-#  define Trace(x) fprintf x
-#  define Tracev(x) {if (verbose) fprintf x ;}
-#  define Tracevv(x) {if (verbose>1) fprintf x ;}
-#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
-#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
-#else
-#  define Assert(cond,msg)
-#  define Trace(x)
-#  define Tracev(x)
-#  define Tracevv(x)
-#  define Tracec(c,x)
-#  define Tracecv(c,x)
-#endif
-
-static void error(char *m);
-
 extern char input_data[];
 extern char input_data_end[];
 
-static unsigned char *output_data;
-static unsigned long output_ptr;
-
-static void error(char *m);
+unsigned char *output_data;
+unsigned long output_ptr;
 
-static void putstr(const char *);
-
-static unsigned long free_mem_ptr;
-static unsigned long free_mem_end_ptr;
-
-#ifdef STANDALONE_DEBUG
-#define NO_INFLATE_MALLOC
-#endif
-
-#define ARCH_HAS_DECOMP_WDOG
-
-#ifdef CONFIG_KERNEL_GZIP
-#include "../../../../lib/decompress_inflate.c"
-#endif
-
-#ifdef CONFIG_KERNEL_LZO
-#include "../../../../lib/decompress_unlzo.c"
-#endif
+unsigned long free_mem_ptr;
+unsigned long free_mem_end_ptr;
 
 #ifndef arch_error
 #define arch_error(x)
 #endif
 
-static void error(char *x)
+void error(char *x)
 {
 	arch_error(x);
 
@@ -272,6 +183,8 @@ asmlinkage void __div0(void)
 	error("Attempting division by 0!");
 }
 
+extern void do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x));
+
 #ifndef STANDALONE_DEBUG
 
 unsigned long
@@ -292,8 +205,8 @@ decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p,
 	output_ptr = get_unaligned_le32(tmp);
 
 	putstr("Uncompressing Linux...");
-	decompress(input_data, input_data_end - input_data,
-			NULL, NULL, output_data, NULL, error);
+	do_decompress(input_data, input_data_end - input_data,
+			output_data, error);
 	putstr(" done, booting the kernel.\n");
 	return output_ptr;
 }

+ 7 - 1
arch/arm/boot/compressed/vmlinux.lds.in

@@ -14,6 +14,13 @@ SECTIONS
   /DISCARD/ : {
     *(.ARM.exidx*)
     *(.ARM.extab*)
+    /*
+     * Discard any r/w data - this produces a link error if we have any,
+     * which is required for PIC decompression.  Local data generates
+     * GOTOFF relocations, which prevents it being relocated independently
+     * of the text/got segments.
+     */
+    *(.data)
   }
 
   . = TEXT_START;
@@ -40,7 +47,6 @@ SECTIONS
   .got			: { *(.got) }
   _got_end = .;
   .got.plt		: { *(.got.plt) }
-  .data			: { *(.data) }
   _edata = .;
 
   . = BSS_START;

+ 10 - 0
arch/arm/common/clkdev.c

@@ -99,6 +99,16 @@ void clkdev_add(struct clk_lookup *cl)
 }
 EXPORT_SYMBOL(clkdev_add);
 
+void __init clkdev_add_table(struct clk_lookup *cl, size_t num)
+{
+	mutex_lock(&clocks_mutex);
+	while (num--) {
+		list_add_tail(&cl->node, &clocks);
+		cl++;
+	}
+	mutex_unlock(&clocks_mutex);
+}
+
 #define MAX_DEV_ID	20
 #define MAX_CON_ID	16
 

+ 3 - 1
arch/arm/common/dmabounce.c

@@ -277,7 +277,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
 		 * We don't need to sync the DMA buffer since
 		 * it was allocated via the coherent allocators.
 		 */
-		dma_cache_maint(ptr, size, dir);
+		__dma_single_cpu_to_dev(ptr, size, dir);
 	}
 
 	return dma_addr;
@@ -315,6 +315,8 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
 			__cpuc_flush_dcache_area(ptr, size);
 		}
 		free_safe_buffer(dev->archdata.dmabounce, buf);
+	} else {
+		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
 	}
 }
 

+ 132 - 133
arch/arm/common/vic.c

@@ -18,6 +18,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
+
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/io.h>
@@ -28,48 +29,6 @@
 #include <asm/mach/irq.h>
 #include <asm/hardware/vic.h>
 
-static void vic_ack_irq(unsigned int irq)
-{
-	void __iomem *base = get_irq_chip_data(irq);
-	irq &= 31;
-	writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
-	/* moreover, clear the soft-triggered, in case it was the reason */
-	writel(1 << irq, base + VIC_INT_SOFT_CLEAR);
-}
-
-static void vic_mask_irq(unsigned int irq)
-{
-	void __iomem *base = get_irq_chip_data(irq);
-	irq &= 31;
-	writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
-}
-
-static void vic_unmask_irq(unsigned int irq)
-{
-	void __iomem *base = get_irq_chip_data(irq);
-	irq &= 31;
-	writel(1 << irq, base + VIC_INT_ENABLE);
-}
-
-/**
- * vic_init2 - common initialisation code
- * @base: Base of the VIC.
- *
- * Common initialisation code for registeration
- * and resume.
-*/
-static void vic_init2(void __iomem *base)
-{
-	int i;
-
-	for (i = 0; i < 16; i++) {
-		void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
-		writel(VIC_VECT_CNTL_ENABLE | i, reg);
-	}
-
-	writel(32, base + VIC_PL190_DEF_VECT_ADDR);
-}
-
 #if defined(CONFIG_PM)
 /**
  * struct vic_device - VIC PM device
@@ -99,13 +58,34 @@ struct vic_device {
 /* we cannot allocate memory when VICs are initially registered */
 static struct vic_device vic_devices[CONFIG_ARM_VIC_NR];
 
+static int vic_id;
+
 static inline struct vic_device *to_vic(struct sys_device *sys)
 {
 	return container_of(sys, struct vic_device, sysdev);
 }
+#endif /* CONFIG_PM */
 
-static int vic_id;
+/**
+ * vic_init2 - common initialisation code
+ * @base: Base of the VIC.
+ *
+ * Common initialisation code for registeration
+ * and resume.
+*/
+static void vic_init2(void __iomem *base)
+{
+	int i;
+
+	for (i = 0; i < 16; i++) {
+		void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
+		writel(VIC_VECT_CNTL_ENABLE | i, reg);
+	}
+
+	writel(32, base + VIC_PL190_DEF_VECT_ADDR);
+}
 
+#if defined(CONFIG_PM)
 static int vic_class_resume(struct sys_device *dev)
 {
 	struct vic_device *vic = to_vic(dev);
@@ -158,31 +138,6 @@ struct sysdev_class vic_class = {
 	.resume		= vic_class_resume,
 };
 
-/**
- * vic_pm_register - Register a VIC for later power management control
- * @base: The base address of the VIC.
- * @irq: The base IRQ for the VIC.
- * @resume_sources: bitmask of interrupts allowed for resume sources.
- *
- * Register the VIC with the system device tree so that it can be notified
- * of suspend and resume requests and ensure that the correct actions are
- * taken to re-instate the settings on resume.
- */
-static void __init vic_pm_register(void __iomem *base, unsigned int irq, u32 resume_sources)
-{
-	struct vic_device *v;
-
-	if (vic_id >= ARRAY_SIZE(vic_devices))
-		printk(KERN_ERR "%s: too few VICs, increase CONFIG_ARM_VIC_NR\n", __func__);
-	else {
-		v = &vic_devices[vic_id];
-		v->base = base;
-		v->resume_sources = resume_sources;
-		v->irq = irq;
-		vic_id++;
-	}
-}
-
 /**
  * vic_pm_init - initicall to register VIC pm
  *
@@ -219,9 +174,60 @@ static int __init vic_pm_init(void)
 
 	return 0;
 }
-
 late_initcall(vic_pm_init);
 
+/**
+ * vic_pm_register - Register a VIC for later power management control
+ * @base: The base address of the VIC.
+ * @irq: The base IRQ for the VIC.
+ * @resume_sources: bitmask of interrupts allowed for resume sources.
+ *
+ * Register the VIC with the system device tree so that it can be notified
+ * of suspend and resume requests and ensure that the correct actions are
+ * taken to re-instate the settings on resume.
+ */
+static void __init vic_pm_register(void __iomem *base, unsigned int irq, u32 resume_sources)
+{
+	struct vic_device *v;
+
+	if (vic_id >= ARRAY_SIZE(vic_devices))
+		printk(KERN_ERR "%s: too few VICs, increase CONFIG_ARM_VIC_NR\n", __func__);
+	else {
+		v = &vic_devices[vic_id];
+		v->base = base;
+		v->resume_sources = resume_sources;
+		v->irq = irq;
+		vic_id++;
+	}
+}
+#else
+static inline void vic_pm_register(void __iomem *base, unsigned int irq, u32 arg1) { }
+#endif /* CONFIG_PM */
+
+static void vic_ack_irq(unsigned int irq)
+{
+	void __iomem *base = get_irq_chip_data(irq);
+	irq &= 31;
+	writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
+	/* moreover, clear the soft-triggered, in case it was the reason */
+	writel(1 << irq, base + VIC_INT_SOFT_CLEAR);
+}
+
+static void vic_mask_irq(unsigned int irq)
+{
+	void __iomem *base = get_irq_chip_data(irq);
+	irq &= 31;
+	writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
+}
+
+static void vic_unmask_irq(unsigned int irq)
+{
+	void __iomem *base = get_irq_chip_data(irq);
+	irq &= 31;
+	writel(1 << irq, base + VIC_INT_ENABLE);
+}
+
+#if defined(CONFIG_PM)
 static struct vic_device *vic_from_irq(unsigned int irq)
 {
         struct vic_device *v = vic_devices;
@@ -255,10 +261,7 @@ static int vic_set_wake(unsigned int irq, unsigned int on)
 
 	return 0;
 }
-
 #else
-static inline void vic_pm_register(void __iomem *base, unsigned int irq, u32 arg1) { }
-
 #define vic_set_wake NULL
 #endif /* CONFIG_PM */
 
@@ -270,9 +273,62 @@ static struct irq_chip vic_chip = {
 	.set_wake = vic_set_wake,
 };
 
-/* The PL190 cell from ARM has been modified by ST, so handle both here */
-static void vik_init_st(void __iomem *base, unsigned int irq_start,
-			 u32 vic_sources);
+/*
+ * The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
+ * The original cell has 32 interrupts, while the modified one has 64,
+ * replocating two blocks 0x00..0x1f in 0x20..0x3f. In that case
+ * the probe function is called twice, with base set to offset 000
+ *  and 020 within the page. We call this "second block".
+ */
+static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
+				u32 vic_sources)
+{
+	unsigned int i;
+	int vic_2nd_block = ((unsigned long)base & ~PAGE_MASK) != 0;
+
+	/* Disable all interrupts initially. */
+
+	writel(0, base + VIC_INT_SELECT);
+	writel(0, base + VIC_INT_ENABLE);
+	writel(~0, base + VIC_INT_ENABLE_CLEAR);
+	writel(0, base + VIC_IRQ_STATUS);
+	writel(0, base + VIC_ITCR);
+	writel(~0, base + VIC_INT_SOFT_CLEAR);
+
+	/*
+	 * Make sure we clear all existing interrupts. The vector registers
+	 * in this cell are after the second block of general registers,
+	 * so we can address them using standard offsets, but only from
+	 * the second base address, which is 0x20 in the page
+	 */
+	if (vic_2nd_block) {
+		writel(0, base + VIC_PL190_VECT_ADDR);
+		for (i = 0; i < 19; i++) {
+			unsigned int value;
+
+			value = readl(base + VIC_PL190_VECT_ADDR);
+			writel(value, base + VIC_PL190_VECT_ADDR);
+		}
+		/* ST has 16 vectors as well, but we don't enable them by now */
+		for (i = 0; i < 16; i++) {
+			void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
+			writel(0, reg);
+		}
+
+		writel(32, base + VIC_PL190_DEF_VECT_ADDR);
+	}
+
+	for (i = 0; i < 32; i++) {
+		if (vic_sources & (1 << i)) {
+			unsigned int irq = irq_start + i;
+
+			set_irq_chip(irq, &vic_chip);
+			set_irq_chip_data(irq, base);
+			set_irq_handler(irq, handle_level_irq);
+			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+		}
+	}
+}
 
 /**
  * vic_init - initialise a vectored interrupt controller
@@ -299,7 +355,7 @@ void __init vic_init(void __iomem *base, unsigned int irq_start,
 
 	switch(vendor) {
 	case AMBA_VENDOR_ST:
-		vik_init_st(base, irq_start, vic_sources);
+		vic_init_st(base, irq_start, vic_sources);
 		return;
 	default:
 		printk(KERN_WARNING "VIC: unknown vendor, continuing anyways\n");
@@ -343,60 +399,3 @@ void __init vic_init(void __iomem *base, unsigned int irq_start,
 
 	vic_pm_register(base, irq_start, resume_sources);
 }
-
-/*
- * The PL190 cell from ARM has been modified by ST to handle 64 interrupts.
- * The original cell has 32 interrupts, while the modified one has 64,
- * replocating two blocks 0x00..0x1f in 0x20..0x3f. In that case
- * the probe function is called twice, with base set to offset 000
- *  and 020 within the page. We call this "second block".
- */
-static void __init vik_init_st(void __iomem *base, unsigned int irq_start,
-				u32 vic_sources)
-{
-	unsigned int i;
-	int vic_2nd_block = ((unsigned long)base & ~PAGE_MASK) != 0;
-
-	/* Disable all interrupts initially. */
-
-	writel(0, base + VIC_INT_SELECT);
-	writel(0, base + VIC_INT_ENABLE);
-	writel(~0, base + VIC_INT_ENABLE_CLEAR);
-	writel(0, base + VIC_IRQ_STATUS);
-	writel(0, base + VIC_ITCR);
-	writel(~0, base + VIC_INT_SOFT_CLEAR);
-
-	/*
-	 * Make sure we clear all existing interrupts. The vector registers
-	 * in this cell are after the second block of general registers,
-	 * so we can address them using standard offsets, but only from
-	 * the second base address, which is 0x20 in the page
-	 */
-	if (vic_2nd_block) {
-		writel(0, base + VIC_PL190_VECT_ADDR);
-		for (i = 0; i < 19; i++) {
-			unsigned int value;
-
-			value = readl(base + VIC_PL190_VECT_ADDR);
-			writel(value, base + VIC_PL190_VECT_ADDR);
-		}
-		/* ST has 16 vectors as well, but we don't enable them by now */
-		for (i = 0; i < 16; i++) {
-			void __iomem *reg = base + VIC_VECT_CNTL0 + (i * 4);
-			writel(0, reg);
-		}
-
-		writel(32, base + VIC_PL190_DEF_VECT_ADDR);
-	}
-
-	for (i = 0; i < 32; i++) {
-		if (vic_sources & (1 << i)) {
-			unsigned int irq = irq_start + i;
-
-			set_irq_chip(irq, &vic_chip);
-			set_irq_chip_data(irq, base);
-			set_irq_handler(irq, handle_level_irq);
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-		}
-	}
-}

+ 1640 - 0
arch/arm/configs/at572d940hfek_defconfig

@@ -0,0 +1,1640 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.28-rc7
+# Fri Dec  5 10:58:47 2008
+#
+CONFIG_ARM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_MMU=y
+# CONFIG_NO_IOPORT is not set
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION="-AT572D940HF"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+# CONFIG_TASK_DELAY_ACCT is not set
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_AUDIT=y
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=17
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+# CONFIG_CGROUP_NS is not set
+# CONFIG_CGROUP_FREEZER is not set
+# CONFIG_CGROUP_DEVICE is not set
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+# CONFIG_USER_SCHED is not set
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUP_CPUACCT=y
+# CONFIG_RESOURCE_COUNTERS is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_RELAY=y
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_COMPAT_BRK=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+CONFIG_MARKERS=y
+CONFIG_OPROFILE=m
+CONFIG_HAVE_OPROFILE=y
+CONFIG_KPROBES=y
+CONFIG_KRETPROBES=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_KMOD=y
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_CLASSIC_RCU=y
+# CONFIG_FREEZER is not set
+
+#
+# System Type
+#
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+CONFIG_ARCH_AT91=y
+# CONFIG_ARCH_CLPS7500 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IMX is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_MSM is not set
+
+#
+# Boot options
+#
+
+#
+# Power management
+#
+
+#
+# Atmel AT91 System-on-Chip
+#
+# CONFIG_ARCH_AT91RM9200 is not set
+# CONFIG_ARCH_AT91SAM9260 is not set
+# CONFIG_ARCH_AT91SAM9261 is not set
+# CONFIG_ARCH_AT91SAM9263 is not set
+# CONFIG_ARCH_AT91SAM9RL is not set
+# CONFIG_ARCH_AT91SAM9G20 is not set
+# CONFIG_ARCH_AT91CAP9 is not set
+# CONFIG_ARCH_AT91X40 is not set
+CONFIG_ARCH_AT572D940HF=y
+CONFIG_AT91_PMC_UNIT=y
+
+#
+# AT572D940HF Board Type
+#
+CONFIG_MACH_AT572D940HFEB=y
+
+#
+# AT91 Board Options
+#
+# CONFIG_MTD_AT91_DATAFLASH_CARD is not set
+# CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16 is not set
+CONFIG_NUM_SERIAL=3
+
+#
+# AT91 Feature Selections
+#
+CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
+CONFIG_AT91_TIMER_HZ=100
+CONFIG_AT91_EARLY_DBGU=y
+# CONFIG_AT91_EARLY_USART0 is not set
+# CONFIG_AT91_EARLY_USART1 is not set
+# CONFIG_AT91_EARLY_USART2 is not set
+# CONFIG_AT91_EARLY_USART3 is not set
+# CONFIG_AT91_EARLY_USART4 is not set
+# CONFIG_AT91_EARLY_USART5 is not set
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM926T=y
+CONFIG_CPU_32v5=y
+CONFIG_CPU_ABRT_EV5TJ=y
+CONFIG_CPU_PABRT_NOIFAR=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+# CONFIG_CPU_CACHE_ROUND_ROBIN is not set
+# CONFIG_OUTER_CACHE is not set
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT=y
+CONFIG_HZ=100
+# CONFIG_AEABI is not set
+CONFIG_ARCH_FLATMEM_HAS_HOLES=y
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+CONFIG_RESOURCES_64BIT=y
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_UNEVICTABLE_LRU=y
+# CONFIG_LEDS is not set
+CONFIG_ALIGNMENT_TRAP=y
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZBOOT_ROM_BSS=0
+CONFIG_CMDLINE="mem=48M console=ttyS0 initrd=0x21100000,3145728 root=/dev/ram0 rw ip=172.16.1.181"
+# CONFIG_XIP_KERNEL is not set
+CONFIG_KEXEC=y
+CONFIG_ATAGS_PROC=y
+
+#
+# CPU Power Management
+#
+# CONFIG_CPU_IDLE is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+CONFIG_FPE_NWFPE_XP=y
+# CONFIG_FPE_FASTFPE is not set
+# CONFIG_VFP is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_ARTHUR is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=m
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_MSG is not set
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_HMAC_NONE is not set
+# CONFIG_SCTP_HMAC_SHA1 is not set
+CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+CONFIG_NET_PKTGEN=m
+CONFIG_NET_TCPPROBE=m
+# CONFIG_HAMRADIO is not set
+CONFIG_CAN=m
+CONFIG_CAN_RAW=m
+CONFIG_CAN_BCM=m
+
+#
+# CAN Device Drivers
+#
+CONFIG_CAN_VCAN=m
+CONFIG_CAN_DEBUG_DEVICES=y
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_PHONET is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+CONFIG_WIRELESS_OLD_REGULATORY=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+# CONFIG_MAC80211 is not set
+CONFIG_IEEE80211=m
+# CONFIG_IEEE80211_DEBUG is not set
+CONFIG_IEEE80211_CRYPT_WEP=m
+# CONFIG_IEEE80211_CRYPT_CCMP is not set
+# CONFIG_IEEE80211_CRYPT_TKIP is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_SYS_HYPERVISOR is not set
+CONFIG_CONNECTOR=m
+CONFIG_MTD=m
+CONFIG_MTD_DEBUG=y
+CONFIG_MTD_DEBUG_VERBOSE=1
+CONFIG_MTD_CONCAT=m
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=m
+CONFIG_MTD_BLKDEVS=m
+CONFIG_MTD_BLOCK=m
+CONFIG_MTD_BLOCK_RO=m
+CONFIG_FTL=m
+CONFIG_NFTL=m
+CONFIG_NFTL_RW=y
+CONFIG_INFTL=m
+CONFIG_RFD_FTL=m
+CONFIG_SSFDC=m
+CONFIG_MTD_OOPS=m
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=m
+CONFIG_MTD_JEDECPROBE=m
+CONFIG_MTD_GEN_PROBE=m
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=m
+CONFIG_MTD_CFI_AMDSTD=m
+CONFIG_MTD_CFI_STAA=m
+CONFIG_MTD_CFI_UTIL=m
+CONFIG_MTD_RAM=m
+CONFIG_MTD_ROM=m
+CONFIG_MTD_ABSENT=m
+
+#
+# Mapping drivers for chip access
+#
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=m
+CONFIG_MTD_PHYSMAP_START=0x8000000
+CONFIG_MTD_PHYSMAP_LEN=0x4000000
+CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_IMPA7 is not set
+CONFIG_MTD_PLATRAM=m
+
+#
+# Self-contained MTD device drivers
+#
+CONFIG_MTD_DATAFLASH=m
+# CONFIG_MTD_DATAFLASH_WRITE_VERIFY is not set
+# CONFIG_MTD_DATAFLASH_OTP is not set
+CONFIG_MTD_M25P80=m
+CONFIG_M25PXX_USE_FAST_READ=y
+CONFIG_MTD_SLRAM=m
+CONFIG_MTD_PHRAM=m
+CONFIG_MTD_MTDRAM=m
+CONFIG_MTDRAM_TOTAL_SIZE=4096
+CONFIG_MTDRAM_ERASE_SIZE=128
+CONFIG_MTD_BLOCK2MTD=m
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=m
+CONFIG_MTD_NAND_VERIFY_WRITE=y
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+# CONFIG_MTD_NAND_GPIO is not set
+CONFIG_MTD_NAND_IDS=m
+CONFIG_MTD_NAND_DISKONCHIP=m
+# CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADVANCED is not set
+CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS=0
+# CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE is not set
+# CONFIG_MTD_NAND_ATMEL is not set
+CONFIG_MTD_NAND_NANDSIM=m
+CONFIG_MTD_NAND_PLATFORM=m
+CONFIG_MTD_ALAUDA=m
+# CONFIG_MTD_ONENAND is not set
+
+#
+# UBI - Unsorted block images
+#
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
+CONFIG_MTD_UBI_GLUEBI=y
+
+#
+# UBI debugging options
+#
+# CONFIG_MTD_UBI_DEBUG is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=65536
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_MISC_DEVICES=y
+CONFIG_ATMEL_TCLIB=y
+CONFIG_ATMEL_TCB_CLKSRC=y
+CONFIG_ATMEL_TCB_CLKSRC_BLOCK=0
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ICS932S401 is not set
+CONFIG_ATMEL_SSC=m
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_C2PORT is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=m
+CONFIG_SCSI_DMA=y
+CONFIG_SCSI_TGT=m
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_SCSI_PROC_FS is not set
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+CONFIG_BLK_DEV_SR=m
+# CONFIG_BLK_DEV_SR_VENDOR is not set
+CONFIG_CHR_DEV_SG=m
+CONFIG_CHR_DEV_SCH=m
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+CONFIG_SCSI_ISCSI_ATTRS=m
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_BONDING=m
+CONFIG_MACVLAN=m
+CONFIG_EQUALIZER=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_FIXED_PHY is not set
+CONFIG_MDIO_BITBANG=m
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=m
+CONFIG_MACB=y
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_SMC911X is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+CONFIG_WLAN_PRE80211=y
+CONFIG_STRIP=m
+CONFIG_WLAN_80211=y
+CONFIG_LIBERTAS=m
+CONFIG_LIBERTAS_USB=m
+CONFIG_LIBERTAS_SDIO=m
+# CONFIG_LIBERTAS_DEBUG is not set
+CONFIG_USB_ZD1201=m
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_IWLWIFI_LEDS is not set
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+
+#
+# USB Network Adapters
+#
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_AX8817X=m
+CONFIG_USB_NET_CDCETHER=m
+CONFIG_USB_NET_DM9601=m
+# CONFIG_USB_NET_SMSC95XX is not set
+CONFIG_USB_NET_GL620A=m
+CONFIG_USB_NET_NET1080=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_USB_NET_RNDIS_HOST=m
+CONFIG_USB_NET_CDC_SUBSET=m
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+CONFIG_USB_BELKIN=y
+CONFIG_USB_ARMLINUX=y
+CONFIG_USB_EPSON2888=y
+CONFIG_USB_KC2190=y
+# CONFIG_USB_NET_ZAURUS is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+CONFIG_INPUT_POLLDEV=m
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=m
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=m
+CONFIG_INPUT_EVBUG=m
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+CONFIG_KEYBOARD_SUNKBD=m
+CONFIG_KEYBOARD_LKKBD=m
+CONFIG_KEYBOARD_XTKBD=m
+CONFIG_KEYBOARD_NEWTON=m
+CONFIG_KEYBOARD_STOWAWAY=m
+CONFIG_KEYBOARD_GPIO=m
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=m
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_LIFEBOOK=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+CONFIG_MOUSE_SERIAL=m
+CONFIG_MOUSE_APPLETOUCH=m
+# CONFIG_MOUSE_BCM5974 is not set
+CONFIG_MOUSE_VSXXXAA=m
+CONFIG_MOUSE_GPIO=m
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_ATI_REMOTE is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+CONFIG_INPUT_UINPUT=m
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=m
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=m
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_DEVKMEM=y
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_N_HDLC=m
+# CONFIG_RISCOM8 is not set
+CONFIG_SPECIALIX=m
+CONFIG_RIO=m
+# CONFIG_RIO_OLDPCI is not set
+CONFIG_STALDRV=y
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_ATMEL=y
+CONFIG_SERIAL_ATMEL_CONSOLE=y
+CONFIG_SERIAL_ATMEL_PDC=y
+# CONFIG_SERIAL_ATMEL_TTYAT is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+CONFIG_IPMI_HANDLER=m
+# CONFIG_IPMI_PANIC_EVENT is not set
+CONFIG_IPMI_DEVICE_INTERFACE=m
+CONFIG_IPMI_SI=m
+CONFIG_IPMI_WATCHDOG=m
+CONFIG_IPMI_POWEROFF=m
+CONFIG_HW_RANDOM=y
+CONFIG_NVRAM=m
+CONFIG_R3964=m
+CONFIG_RAW_DRIVER=m
+CONFIG_MAX_RAW_DEVS=256
+CONFIG_TCG_TPM=m
+CONFIG_TCG_NSC=m
+CONFIG_TCG_ATMEL=m
+CONFIG_I2C=m
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_HELPER_AUTO=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+CONFIG_DS1682=m
+# CONFIG_AT24 is not set
+CONFIG_SENSORS_EEPROM=m
+CONFIG_SENSORS_PCF8574=m
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+CONFIG_SENSORS_PCF8591=m
+CONFIG_SENSORS_MAX6875=m
+CONFIG_SENSORS_TSL2550=m
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_ATMEL=y
+CONFIG_SPI_BITBANG=m
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_AT25=m
+CONFIG_SPI_SPIDEV=m
+# CONFIG_SPI_TLE62X0 is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
+
+#
+# Multimedia devices
+#
+
+#
+# Multimedia core support
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_SOUND=m
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SND=m
+CONFIG_SND_TIMER=m
+CONFIG_SND_PCM=m
+CONFIG_SND_HWDEP=m
+CONFIG_SND_RAWMIDI=m
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+# CONFIG_SND_PCM_OSS_PLUGINS is not set
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_SUPPORT_OLD_API=y
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_DRIVERS=y
+CONFIG_SND_DUMMY=m
+CONFIG_SND_VIRMIDI=m
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+CONFIG_SND_ARM=y
+CONFIG_SND_SPI=y
+# CONFIG_SND_AT73C213 is not set
+CONFIG_SND_USB=y
+CONFIG_SND_USB_AUDIO=m
+CONFIG_SND_USB_CAIAQ=m
+CONFIG_SND_USB_CAIAQ_INPUT=y
+# CONFIG_SND_SOC is not set
+# CONFIG_SOUND_PRIME is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=m
+# CONFIG_HID_DEBUG is not set
+CONFIG_HIDRAW=y
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=m
+# CONFIG_HID_PID is not set
+CONFIG_USB_HIDDEV=y
+
+#
+# USB HID Boot Protocol drivers
+#
+CONFIG_USB_KBD=m
+CONFIG_USB_MOUSE=m
+
+#
+# Special HID drivers
+#
+CONFIG_HID_COMPAT=y
+CONFIG_HID_A4TECH=m
+CONFIG_HID_APPLE=m
+CONFIG_HID_BELKIN=m
+CONFIG_HID_BRIGHT=m
+CONFIG_HID_CHERRY=m
+CONFIG_HID_CHICONY=m
+CONFIG_HID_CYPRESS=m
+CONFIG_HID_DELL=m
+CONFIG_HID_EZKEY=m
+CONFIG_HID_GYRATION=m
+CONFIG_HID_LOGITECH=m
+# CONFIG_LOGITECH_FF is not set
+# CONFIG_LOGIRUMBLEPAD2_FF is not set
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_MONTEREY=m
+CONFIG_HID_PANTHERLORD=m
+# CONFIG_PANTHERLORD_FF is not set
+CONFIG_HID_PETALYNX=m
+CONFIG_HID_SAMSUNG=m
+CONFIG_HID_SONY=m
+CONFIG_HID_SUNPLUS=m
+# CONFIG_THRUSTMASTER_FF is not set
+# CONFIG_ZEROPLUS_FF is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
+CONFIG_USB_DYNAMIC_MINORS=y
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+CONFIG_USB_MON=y
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
+# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
+#
+
+#
+# see USB_STORAGE Help for more information
+#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+CONFIG_USB_STORAGE_DATAFAB=y
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_ISD200=y
+CONFIG_USB_STORAGE_DPCM=y
+CONFIG_USB_STORAGE_USBAT=y
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+CONFIG_USB_STORAGE_JUMPSHOT=y
+CONFIG_USB_STORAGE_ALAUDA=y
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+CONFIG_USB_STORAGE_KARMA=y
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+CONFIG_USB_LIBUSUAL=y
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=m
+CONFIG_USB_EZUSB=y
+CONFIG_USB_SERIAL_GENERIC=y
+# CONFIG_USB_SERIAL_AIRCABLE is not set
+# CONFIG_USB_SERIAL_ARK3116 is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_CH341 is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+# CONFIG_USB_SERIAL_CP2101 is not set
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+# CONFIG_USB_SERIAL_FTDI_SIO is not set
+# CONFIG_USB_SERIAL_FUNSOFT is not set
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_IUU is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+# CONFIG_USB_SERIAL_MCT_U232 is not set
+# CONFIG_USB_SERIAL_MOS7720 is not set
+# CONFIG_USB_SERIAL_MOS7840 is not set
+# CONFIG_USB_SERIAL_MOTOROLA is not set
+# CONFIG_USB_SERIAL_NAVMAN is not set
+CONFIG_USB_SERIAL_PL2303=m
+# CONFIG_USB_SERIAL_OTI6858 is not set
+CONFIG_USB_SERIAL_SPCP8X5=m
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+CONFIG_USB_SERIAL_DEBUG=m
+
+#
+# USB Miscellaneous drivers
+#
+CONFIG_USB_EMI62=m
+CONFIG_USB_EMI26=m
+CONFIG_USB_ADUTUX=m
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGET is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+CONFIG_USB_TEST=m
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
+CONFIG_USB_GADGET=m
+CONFIG_USB_GADGET_DEBUG_FILES=y
+CONFIG_USB_GADGET_DEBUG_FS=y
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+CONFIG_USB_GADGET_AT91=y
+CONFIG_USB_AT91=m
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+# CONFIG_USB_GADGET_S3C2410 is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+# CONFIG_USB_GADGET_DUALSPEED is not set
+CONFIG_USB_ZERO=m
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+CONFIG_USB_G_SERIAL=m
+CONFIG_USB_MIDI_GADGET=m
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+CONFIG_SDIO_UART=m
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_SDHCI is not set
+CONFIG_MMC_AT91=y
+CONFIG_MMC_SPI=m
+# CONFIG_MEMSTICK is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=m
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=m
+# CONFIG_LEDS_PCA955X is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+CONFIG_RTC_INTF_DEV_UIE_EMUL=y
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+CONFIG_RTC_DRV_DS1307=m
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+CONFIG_RTC_DRV_DS1305=y
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_RTC_DRV_AT91SAM9 is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_UIO is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+CONFIG_JBD_DEBUG=y
+CONFIG_FS_MBCACHE=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_CHECK=y
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_FILE_LOCKING=y
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+CONFIG_FUSE_FS=m
+CONFIG_GENERIC_ACL=y
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+CONFIG_NTFS_FS=m
+# CONFIG_NTFS_DEBUG is not set
+CONFIG_NTFS_RW=y
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=m
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_JFFS2_CMODE_NONE is not set
+# CONFIG_JFFS2_CMODE_PRIORITY is not set
+# CONFIG_JFFS2_CMODE_SIZE is not set
+CONFIG_JFFS2_CMODE_FAVOURLZO=y
+# CONFIG_UBIFS_FS is not set
+CONFIG_CRAMFS=m
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V2_ACL=y
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
+CONFIG_NFS_ACL_SUPPORT=m
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=m
+CONFIG_SUNRPC_GSS=m
+# CONFIG_SUNRPC_REGISTER_V4 is not set
+CONFIG_RPCSEC_GSS_KRB5=m
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS is not set
+CONFIG_CIFS_WEAK_PW_HASH=y
+# CONFIG_CIFS_XATTR is not set
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_EXPERIMENTAL is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+CONFIG_MAC_PARTITION=y
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_LDM_PARTITION=y
+CONFIG_LDM_DEBUG=y
+CONFIG_SGI_PARTITION=y
+# CONFIG_ULTRIX_PARTITION is not set
+CONFIG_SUN_PARTITION=y
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="cp437"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+CONFIG_NLS_CODEPAGE_850=m
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=m
+CONFIG_DLM=m
+# CONFIG_DLM_DEBUG is not set
+
+#
+# Kernel hacking
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+
+#
+# Tracers
+#
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_DEBUG_USER is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+CONFIG_SECURITYFS=y
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_GF128MUL=m
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=m
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=m
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=m
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=m
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_CRC_CCITT=m
+CONFIG_CRC16=m
+# CONFIG_CRC_T10DIF is not set
+CONFIG_CRC_ITU_T=m
+CONFIG_CRC32=y
+CONFIG_CRC7=m
+CONFIG_LIBCRC32C=m
+CONFIG_AUDIT_GENERIC=y
+CONFIG_ZLIB_INFLATE=m
+CONFIG_ZLIB_DEFLATE=m
+CONFIG_LZO_COMPRESS=m
+CONFIG_LZO_DECOMPRESS=m
+CONFIG_REED_SOLOMON=m
+CONFIG_REED_SOLOMON_DEC16=y
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y

+ 3 - 0
arch/arm/configs/omap_4430sdp_defconfig

@@ -242,10 +242,13 @@ CONFIG_CPU_CP15_MMU=y
 # CONFIG_CPU_DCACHE_DISABLE is not set
 # CONFIG_CPU_BPREDICT_DISABLE is not set
 CONFIG_HAS_TLS_REG=y
+CONFIG_OUTER_CACHE=y
+CONFIG_CACHE_L2X0=y
 CONFIG_ARM_L1_CACHE_SHIFT=5
 # CONFIG_ARM_ERRATA_430973 is not set
 # CONFIG_ARM_ERRATA_458693 is not set
 # CONFIG_ARM_ERRATA_460075 is not set
+CONFIG_PL310_ERRATA_588369=y
 CONFIG_ARM_GIC=y
 
 #

+ 228 - 0
arch/arm/include/asm/atomic.h

@@ -235,6 +235,234 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define smp_mb__before_atomic_inc()	smp_mb()
 #define smp_mb__after_atomic_inc()	smp_mb()
 
+#ifndef CONFIG_GENERIC_ATOMIC64
+typedef struct {
+	u64 __aligned(8) counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i) { (i) }
+
+static inline u64 atomic64_read(atomic64_t *v)
+{
+	u64 result;
+
+	__asm__ __volatile__("@ atomic64_read\n"
+"	ldrexd	%0, %H0, [%1]"
+	: "=&r" (result)
+	: "r" (&v->counter)
+	);
+
+	return result;
+}
+
+static inline void atomic64_set(atomic64_t *v, u64 i)
+{
+	u64 tmp;
+
+	__asm__ __volatile__("@ atomic64_set\n"
+"1:	ldrexd	%0, %H0, [%1]\n"
+"	strexd	%0, %2, %H2, [%1]\n"
+"	teq	%0, #0\n"
+"	bne	1b"
+	: "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline void atomic64_add(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	__asm__ __volatile__("@ atomic64_add\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	adds	%0, %0, %3\n"
+"	adc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_add_return\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	adds	%0, %0, %3\n"
+"	adc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline void atomic64_sub(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	__asm__ __volatile__("@ atomic64_sub\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	subs	%0, %0, %3\n"
+"	sbc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_sub_return\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	subs	%0, %0, %3\n"
+"	sbc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+{
+	u64 oldval;
+	unsigned long res;
+
+	smp_mb();
+
+	do {
+		__asm__ __volatile__("@ atomic64_cmpxchg\n"
+		"ldrexd		%1, %H1, [%2]\n"
+		"mov		%0, #0\n"
+		"teq		%1, %3\n"
+		"teqeq		%H1, %H3\n"
+		"strexdeq	%0, %4, %H4, [%2]"
+		: "=&r" (res), "=&r" (oldval)
+		: "r" (&ptr->counter), "r" (old), "r" (new)
+		: "cc");
+	} while (res);
+
+	smp_mb();
+
+	return oldval;
+}
+
+static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_xchg\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	strexd	%1, %3, %H3, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&ptr->counter), "r" (new)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	subs	%0, %0, #1\n"
+"	sbc	%H0, %H0, #0\n"
+"	teq	%H0, #0\n"
+"	bmi	2f\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+{
+	u64 val;
+	unsigned long tmp;
+	int ret = 1;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_add_unless\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	teq	%0, %4\n"
+"	teqeq	%H0, %H4\n"
+"	moveq	%1, #0\n"
+"	beq	2f\n"
+"	adds	%0, %0, %5\n"
+"	adc	%H0, %H0, %H5\n"
+"	strexd	%2, %0, %H0, [%3]\n"
+"	teq	%2, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (val), "=&r" (ret), "=&r" (tmp)
+	: "r" (&v->counter), "r" (u), "r" (a)
+	: "cc");
+
+	if (ret)
+		smp_mb();
+
+	return ret;
+}
+
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
+#define atomic64_inc(v)			atomic64_add(1LL, (v))
+#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
+#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
+#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec(v)			atomic64_sub(1LL, (v))
+#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
+#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
+
+#else /* !CONFIG_GENERIC_ATOMIC64 */
+#include <asm-generic/atomic64.h>
+#endif
 #include <asm-generic/atomic-long.h>
 #endif
 #endif

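The atomic64_* helpers added above implement the generic 64-bit atomic API using ldrexd/strexd. A minimal usage sketch, not part of this commit; the counter and function names are illustrative.

#include <linux/types.h>
#include <asm/atomic.h>

/* Illustrative 64-bit byte counter backed by the new helpers. */
static atomic64_t bytes_done = ATOMIC64_INIT(0);

static void account_bytes(u64 len)
{
	/* atomic64_add() retries the ldrexd/strexd loop until the store succeeds. */
	atomic64_add(len, &bytes_done);
}

static u64 snapshot_and_reset(void)
{
	/* atomic64_xchg() atomically installs 0 and returns the previous value. */
	return atomic64_xchg(&bytes_done, 0);
}
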
+ 23 - 46
arch/arm/include/asm/cacheflush.h

@@ -42,7 +42,8 @@
 #endif
 
 #if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
-    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
+    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
+    defined(CONFIG_CPU_ARM1026)
 # define MULTI_CACHE 1
 #endif
 
@@ -196,21 +197,6 @@
  *	DMA Cache Coherency
  *	===================
  *
- *	dma_inv_range(start, end)
- *
- *		Invalidate (discard) the specified virtual address range.
- *		May not write back any entries.  If 'start' or 'end'
- *		are not cache line aligned, those lines must be written
- *		back.
- *		- start  - virtual start address
- *		- end    - virtual end address
- *
- *	dma_clean_range(start, end)
- *
- *		Clean (write back) the specified virtual address range.
- *		- start  - virtual start address
- *		- end    - virtual end address
- *
  *	dma_flush_range(start, end)
  *
  *		Clean and invalidate the specified virtual address range.
@@ -227,8 +213,9 @@ struct cpu_cache_fns {
 	void (*coherent_user_range)(unsigned long, unsigned long);
 	void (*flush_kern_dcache_area)(void *, size_t);
 
-	void (*dma_inv_range)(const void *, const void *);
-	void (*dma_clean_range)(const void *, const void *);
+	void (*dma_map_area)(const void *, size_t, int);
+	void (*dma_unmap_area)(const void *, size_t, int);
+
 	void (*dma_flush_range)(const void *, const void *);
 };
 
@@ -258,8 +245,8 @@ extern struct cpu_cache_fns cpu_cache;
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_inv_range			cpu_cache.dma_inv_range
-#define dmac_clean_range		cpu_cache.dma_clean_range
+#define dmac_map_area			cpu_cache.dma_map_area
+#define dmac_unmap_area		cpu_cache.dma_unmap_area
 #define dmac_flush_range		cpu_cache.dma_flush_range
 
 #else
@@ -284,12 +271,12 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
-#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
+#define dmac_map_area			__glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area		__glue(_CACHE,_dma_unmap_area)
 #define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
 
-extern void dmac_inv_range(const void *, const void *);
-extern void dmac_clean_range(const void *, const void *);
+extern void dmac_map_area(const void *, size_t, int);
+extern void dmac_unmap_area(const void *, size_t, int);
 extern void dmac_flush_range(const void *, const void *);
 
 #endif
@@ -330,12 +317,8 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
  * processes address space.  Really, we want to allow our "user
  * space" model to handle this.
  */
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do {							\
-		memcpy(dst, src, len);				\
-		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
-	} while (0)
-
+extern void copy_to_user_page(struct vm_area_struct *, struct page *,
+	unsigned long, void *, const void *, unsigned long);
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	do {							\
 		memcpy(dst, src, len);				\
@@ -369,17 +352,6 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
 	}
 }
 
-static inline void
-vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-			 unsigned long uaddr, void *kaddr,
-			 unsigned long len, int write)
-{
-	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-		unsigned long addr = (unsigned long)kaddr;
-		__cpuc_coherent_kern_range(addr, addr + len);
-	}
-}
-
 #ifndef CONFIG_CPU_CACHE_VIPT
 #define flush_cache_mm(mm) \
 		vivt_flush_cache_mm(mm)
@@ -387,15 +359,10 @@ vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		vivt_flush_cache_range(vma,start,end)
 #define flush_cache_page(vma,addr,pfn) \
 		vivt_flush_cache_page(vma,addr,pfn)
-#define flush_ptrace_access(vma,page,ua,ka,len,write) \
-		vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
 #else
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
-extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-				unsigned long uaddr, void *kaddr,
-				unsigned long len, int write);
 #endif
 
 #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
@@ -446,6 +413,16 @@ static inline void __flush_icache_all(void)
 	    : "r" (0));
 #endif
 }
+static inline void flush_kernel_vmap_range(void *addr, int size)
+{
+	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
+	  __cpuc_flush_dcache_area(addr, (size_t)size);
+}
+static inline void invalidate_kernel_vmap_range(void *addr, int size)
+{
+	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
+	  __cpuc_flush_dcache_area(addr, (size_t)size);
+}
 
 #define ARCH_HAS_FLUSH_ANON_PAGE
 static inline void flush_anon_page(struct vm_area_struct *vma,

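flush_kernel_vmap_range() and invalidate_kernel_vmap_range(), added above, handle D-cache aliases when I/O is done on a buffer that is also mapped with vmap() (see the Documentation/cachetlb.txt update in this merge). A hedged sketch of the calling pattern; the function names below are illustrative.

#include <asm/cacheflush.h>

/* Before the device reads a buffer the kernel wrote through a vmap()
 * alias: write the dirty lines back from that alias. */
static void vmap_buf_to_device(void *vmap_addr, int size)
{
	flush_kernel_vmap_range(vmap_addr, size);
}

/* After the device has written into the buffer: discard stale lines so
 * the CPU sees the new data through the alias. */
static void vmap_buf_from_device(void *vmap_addr, int size)
{
	invalidate_kernel_vmap_range(vmap_addr, size);
}
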
+ 3 - 0
arch/arm/include/asm/clkdev.h

@@ -27,4 +27,7 @@ struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
 void clkdev_add(struct clk_lookup *cl);
 void clkdev_drop(struct clk_lookup *cl);
 
+void clkdev_add_table(struct clk_lookup *, size_t);
+int clk_add_alias(const char *, const char *, char *, struct device *);
+
 #endif

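clkdev_add_table() registers a whole table of clock lookups in one call instead of looping over clkdev_add(). A minimal sketch, assuming the platform's clock code provides the struct clk instances; all names below are illustrative.

#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/clkdev.h>

extern struct clk uart_clk;	/* defined by the platform clock code (illustrative) */

static struct clk_lookup board_clk_lookups[] = {
	{ .dev_id = "dev:uart0", .clk = &uart_clk },
	{ .con_id = "uart",      .clk = &uart_clk },
};

static void __init board_clocks_init(void)
{
	clkdev_add_table(board_clk_lookups, ARRAY_SIZE(board_clk_lookups));
}
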
+ 60 - 19
arch/arm/include/asm/dma-mapping.h

@@ -57,18 +57,58 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 #endif
 
 /*
- * DMA-consistent mapping functions.  These allocate/free a region of
- * uncached, unwrite-buffered mapped memory space for use with DMA
- * devices.  This is the "generic" version.  The PCI specific version
- * is in pci.h
+ * The DMA API is built upon the notion of "buffer ownership".  A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device.  These helper functions
+ * represent the transitions between these two ownership states.
  *
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches.  We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+ * Private support functions: these are not part of the API and are
+ * liable to change.  Drivers must not use these.
  */
-extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
-extern void dma_cache_maint_page(struct page *page, unsigned long offset,
-				 size_t size, int rw);
+static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	extern void ___dma_single_cpu_to_dev(const void *, size_t,
+		enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_single_cpu_to_dev(kaddr, size, dir);
+}
+
+static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
+	enum dma_data_direction dir)
+{
+	extern void ___dma_single_dev_to_cpu(const void *, size_t,
+		enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_single_dev_to_cpu(kaddr, size, dir);
+}
+
+static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_page_cpu_to_dev(page, off, size, dir);
+}
+
+static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+
+	if (!arch_is_coherent())
+		___dma_page_dev_to_cpu(page, off, size, dir);
+}
 
 /*
  * Return whether the given device DMA address mask can be supported
@@ -304,8 +344,7 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
-	if (!arch_is_coherent())
-		dma_cache_maint(cpu_addr, size, dir);
+	__dma_single_cpu_to_dev(cpu_addr, size, dir);
 
 	return virt_to_dma(dev, cpu_addr);
 }
@@ -329,8 +368,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
-	if (!arch_is_coherent())
-		dma_cache_maint_page(page, offset, size, dir);
+	__dma_page_cpu_to_dev(page, offset, size, dir);
 
 	return page_to_dma(dev, page) + offset;
 }
@@ -352,7 +390,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	/* nothing to do */
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
 }
 
 /**
@@ -372,7 +410,8 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	/* nothing to do */
+	__dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK,
+		size, dir);
 }
 #endif /* CONFIG_DMABOUNCE */
 
@@ -400,7 +439,10 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 {
 	BUG_ON(!valid_dma_direction(dir));
 
-	dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
+	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
+		return;
+
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -412,8 +454,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
 		return;
 
-	if (!arch_is_coherent())
-		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
+	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
 }
 
 static inline void dma_sync_single_for_cpu(struct device *dev,

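The ownership comment above describes what the streaming DMA calls now do on ARM: mapping hands the buffer to the device (cleaning caches first), unmapping hands it back to the CPU (invalidating where needed). A minimal sketch, not this commit's code; dev, buf and len come from the caller.

#include <linux/errno.h>
#include <linux/dma-mapping.h>

static int stream_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* CPU -> device ownership transition. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the hardware with 'handle' and wait for completion ... */

	/* Device -> CPU ownership transition. */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
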
+ 9 - 2
arch/arm/include/asm/io.h

@@ -69,9 +69,16 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
 /*
  * __arm_ioremap takes CPU physical address.
  * __arm_ioremap_pfn takes a Page Frame Number and an offset into that page
+ * The _caller variety takes a __builtin_return_address(0) value for
+ * /proc/vmalloc to use - and should only be used in non-inline functions.
  */
-extern void __iomem * __arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem * __arm_ioremap(unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
+	size_t, unsigned int, void *);
+extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int,
+	void *);
+
+extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
 extern void __iounmap(volatile void __iomem *addr);
 
 /*

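The new _caller variants record an explicit return address so /proc/vmallocinfo can attribute the mapping to the code that requested it. A hedged sketch of a non-inline wrapper, as the comment above requires; the wrapper name is illustrative.

#include <asm/io.h>

/* Deliberately not inline: __builtin_return_address(0) then points at
 * the driver that called this wrapper. */
void __iomem *board_ioremap(unsigned long phys, size_t size)
{
	return __arm_ioremap_caller(phys, size, MT_DEVICE,
				    __builtin_return_address(0));
}
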
+ 0 - 8
arch/arm/include/asm/mach/time.h

@@ -46,12 +46,4 @@ struct sys_timer {
 extern struct sys_timer *system_timer;
 extern void timer_tick(void);
 
-/*
- * Kernel time keeping support.
- */
-struct timespec;
-extern int (*set_rtc)(void);
-extern void save_time_delta(struct timespec *delta, struct timespec *rtc);
-extern void restore_time_delta(struct timespec *delta, struct timespec *rtc);
-
 #endif

+ 13 - 10
arch/arm/include/asm/memory.h

@@ -76,6 +76,17 @@
  */
 #define IOREMAP_MAX_ORDER	24
 
+/*
+ * Size of DMA-consistent memory region.  Must be multiple of 2M,
+ * between 2MB and 14MB inclusive.
+ */
+#ifndef CONSISTENT_DMA_SIZE
+#define CONSISTENT_DMA_SIZE 	SZ_2M
+#endif
+
+#define CONSISTENT_END		(0xffe00000UL)
+#define CONSISTENT_BASE		(CONSISTENT_END - CONSISTENT_DMA_SIZE)
+
 #else /* CONFIG_MMU */
 
 /*
@@ -93,11 +104,11 @@
 #endif
 
 #ifndef PHYS_OFFSET
-#define PHYS_OFFSET 		(CONFIG_DRAM_BASE)
+#define PHYS_OFFSET 		UL(CONFIG_DRAM_BASE)
 #endif
 
 #ifndef END_MEM
-#define END_MEM     		(CONFIG_DRAM_BASE + CONFIG_DRAM_SIZE)
+#define END_MEM     		(UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
 #endif
 
 #ifndef PAGE_OFFSET
@@ -112,14 +123,6 @@
 
 #endif /* !CONFIG_MMU */
 
-/*
- * Size of DMA-consistent memory region.  Must be multiple of 2M,
- * between 2MB and 14MB inclusive.
- */
-#ifndef CONSISTENT_DMA_SIZE
-#define CONSISTENT_DMA_SIZE SZ_2M
-#endif
-
 /*
  * Physical vs virtual RAM address space conversion.  These are
  * private definitions which should NOT be used outside memory.h

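With this move, CONSISTENT_DMA_SIZE only applies to MMU builds and defaults to 2MB. A platform that needs a larger DMA-consistent region overrides it before this fallback takes effect, typically from its mach/memory.h; SZ_8M below is only an example within the documented 2MB to 14MB range.

/* Hypothetical fragment of a platform's mach/memory.h */
#define CONSISTENT_DMA_SIZE	SZ_8M
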
+ 1 - 0
arch/arm/include/asm/mmu.h

@@ -6,6 +6,7 @@
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
 	unsigned int id;
+	spinlock_t id_lock;
 #endif
 	unsigned int kvm_seq;
 } mm_context_t;

+ 15 - 0
arch/arm/include/asm/mmu_context.h

@@ -43,12 +43,23 @@ void __check_kvm_seq(struct mm_struct *mm);
 #define ASID_FIRST_VERSION	(1 << ASID_BITS)
 
 extern unsigned int cpu_last_asid;
+#ifdef CONFIG_SMP
+DECLARE_PER_CPU(struct mm_struct *, current_mm);
+#endif
 
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void __new_context(struct mm_struct *mm);
 
 static inline void check_context(struct mm_struct *mm)
 {
+	/*
+	 * This code is executed with interrupts enabled. Therefore,
+	 * mm->context.id cannot be updated to the latest ASID version
+	 * on a different CPU (and condition below not triggered)
+	 * without first getting an IPI to reset the context. The
+	 * alternative is to take a read_lock on mm->context.id_lock
+	 * (after changing its type to rwlock_t).
+	 */
 	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
 		__new_context(mm);
 
@@ -108,6 +119,10 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		__flush_icache_all();
 #endif
 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
+#ifdef CONFIG_SMP
+		struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
+		*crt_mm = next;
+#endif
 		check_context(next);
 		cpu_switch_mm(next->pgd, next);
 		if (cache_is_vivt())

+ 4 - 3
arch/arm/include/asm/page.h

@@ -117,11 +117,12 @@
 #endif
 
 struct page;
+struct vm_area_struct;
 
 struct cpu_user_fns {
 	void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
 	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
-			unsigned long vaddr);
+			unsigned long vaddr, struct vm_area_struct *vma);
 };
 
 #ifdef MULTI_USER
@@ -137,7 +138,7 @@ extern struct cpu_user_fns cpu_user;
 
 extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
 extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
-			unsigned long vaddr);
+			unsigned long vaddr, struct vm_area_struct *vma);
 #endif
 
 #define clear_user_highpage(page,vaddr)		\
@@ -145,7 +146,7 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
 
 #define __HAVE_ARCH_COPY_USER_HIGHPAGE
 #define copy_user_highpage(to,from,vaddr,vma)	\
-	__cpu_copy_user_highpage(to, from, vaddr)
+	__cpu_copy_user_highpage(to, from, vaddr, vma)
 
 #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, const void *from);

+ 31 - 0
arch/arm/include/asm/perf_event.h

@@ -0,0 +1,31 @@
+/*
+ *  linux/arch/arm/include/asm/perf_event.h
+ *
+ *  Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ARM_PERF_EVENT_H__
+#define __ARM_PERF_EVENT_H__
+
+/*
+ * NOP: on *most* (read: all supported) ARM platforms, the performance
+ * counter interrupts are regular interrupts and not an NMI. This
+ * means that when we receive the interrupt we can call
+ * perf_event_do_pending() that handles all of the work with
+ * interrupts enabled.
+ */
+static inline void
+set_perf_event_pending(void)
+{
+}
+
+/* ARM performance counters start from 1 (in the cp15 accesses) so use the
+ * same indexes here for consistency. */
+#define PERF_EVENT_INDEX_OFFSET 1
+
+#endif /* __ARM_PERF_EVENT_H__ */

+ 2 - 2
arch/arm/include/asm/pgtable-nommu.h

@@ -86,8 +86,8 @@ extern unsigned int kobjsize(const void *objp);
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.
  */
-#define	VMALLOC_START	0
-#define	VMALLOC_END	0xffffffff
+#define	VMALLOC_START	0UL
+#define	VMALLOC_END	0xffffffffUL
 
 #define FIRST_USER_ADDRESS      (0)
 

+ 75 - 0
arch/arm/include/asm/pmu.h

@@ -0,0 +1,75 @@
+/*
+ *  linux/arch/arm/include/asm/pmu.h
+ *
+ *  Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ARM_PMU_H__
+#define __ARM_PMU_H__
+
+#ifdef CONFIG_CPU_HAS_PMU
+
+struct pmu_irqs {
+	const int   *irqs;
+	int	    num_irqs;
+};
+
+/**
+ * reserve_pmu() - reserve the hardware performance counters
+ *
+ * Reserve the hardware performance counters in the system for exclusive use.
+ * The 'struct pmu_irqs' for the system is returned on success, ERR_PTR()
+ * encoded error on failure.
+ */
+extern const struct pmu_irqs *
+reserve_pmu(void);
+
+/**
+ * release_pmu() - Relinquish control of the performance counters
+ *
+ * Release the performance counters and allow someone else to use them.
+ * Callers must have disabled the counters and released IRQs before calling
+ * this. The 'struct pmu_irqs' returned from reserve_pmu() must be passed as
+ * a cookie.
+ */
+extern int
+release_pmu(const struct pmu_irqs *irqs);
+
+/**
+ * init_pmu() - Initialise the PMU.
+ *
+ * Initialise the system ready for PMU enabling. This should typically set the
+ * IRQ affinity and nothing else. The users (oprofile/perf events etc) will do
+ * the actual hardware initialisation.
+ */
+extern int
+init_pmu(void);
+
+#else /* CONFIG_CPU_HAS_PMU */
+
+static inline const struct pmu_irqs *
+reserve_pmu(void)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline int
+release_pmu(const struct pmu_irqs *irqs)
+{
+	return -ENODEV;
+}
+
+static inline int
+init_pmu(void)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_CPU_HAS_PMU */
+
+#endif /* __ARM_PMU_H__ */

+ 0 - 12
arch/arm/include/asm/setup.h

@@ -223,18 +223,6 @@ extern struct meminfo meminfo;
 #define bank_phys_end(bank)	((bank)->start + (bank)->size)
 #define bank_phys_size(bank)	(bank)->size
 
-/*
- * Early command line parameters.
- */
-struct early_params {
-	const char *arg;
-	void (*fn)(char **p);
-};
-
-#define __early_param(name,fn)					\
-static struct early_params __early_##fn __used			\
-__attribute__((__section__(".early_param.init"))) = { name, fn }
-
 #endif  /*  __KERNEL__  */
 
 #endif

+ 5 - 0
arch/arm/include/asm/smp_plat.h

@@ -13,4 +13,9 @@ static inline int tlb_ops_need_broadcast(void)
 	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
 }
 
+static inline int cache_ops_need_broadcast(void)
+{
+	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1;
+}
+
 #endif

+ 23 - 13
arch/arm/include/asm/spinlock.h

@@ -5,6 +5,22 @@
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
+static inline void dsb_sev(void)
+{
+#if __LINUX_ARM_ARCH__ >= 7
+	__asm__ __volatile__ (
+		"dsb\n"
+		"sev"
+	);
+#elif defined(CONFIG_CPU_32v6K)
+	__asm__ __volatile__ (
+		"mcr p15, 0, %0, c7, c10, 4\n"
+		"sev"
+		: : "r" (0)
+	);
+#endif
+}
+
 /*
  * ARMv6 Spin-locking.
  *
@@ -69,13 +85,11 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 
 	__asm__ __volatile__(
 "	str	%1, [%0]\n"
-#ifdef CONFIG_CPU_32v6K
-"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
-"	sev"
-#endif
 	:
 	: "r" (&lock->lock), "r" (0)
 	: "cc");
+
+	dsb_sev();
 }
 
 /*
@@ -132,13 +146,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 
 	__asm__ __volatile__(
 	"str	%1, [%0]\n"
-#ifdef CONFIG_CPU_32v6K
-"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
-"	sev\n"
-#endif
 	:
 	: "r" (&rw->lock), "r" (0)
 	: "cc");
+
+	dsb_sev();
 }
 
 /* write_can_lock - would write_trylock() succeed? */
@@ -188,14 +200,12 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 "	strex	%1, %0, [%2]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-#ifdef CONFIG_CPU_32v6K
-"\n	cmp	%0, #0\n"
-"	mcreq   p15, 0, %0, c7, c10, 4\n"
-"	seveq"
-#endif
 	: "=&r" (tmp), "=&r" (tmp2)
 	: "r" (&rw->lock)
 	: "cc");
+
+	if (tmp == 0)
+		dsb_sev();
 }
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)

+ 1 - 2
arch/arm/include/asm/system.h

@@ -73,8 +73,7 @@ extern unsigned int mem_fclk_21285;
 
 struct pt_regs;
 
-void die(const char *msg, struct pt_regs *regs, int err)
-		__attribute__((noreturn));
+void die(const char *msg, struct pt_regs *regs, int err);
 
 struct siginfo;
 void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,

+ 2 - 1
arch/arm/include/asm/thread_info.h

@@ -115,7 +115,8 @@ extern void iwmmxt_task_restore(struct thread_info *, void *);
 extern void iwmmxt_task_release(struct thread_info *);
 extern void iwmmxt_task_switch(struct thread_info *);
 
-extern void vfp_sync_state(struct thread_info *thread);
+extern void vfp_sync_hwstate(struct thread_info *);
+extern void vfp_flush_hwstate(struct thread_info *);
 
 #endif
 

+ 2 - 1
arch/arm/include/asm/tlbflush.h

@@ -529,7 +529,8 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
  * cache entries for the kernels virtual memory range are written
  * back to the page.
  */
-extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
+extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+	pte_t *ptep);
 
 #endif
 

+ 3 - 0
arch/arm/kernel/Makefile

@@ -17,6 +17,7 @@ obj-y		:= compat.o elf.o entry-armv.o entry-common.o irq.o \
 		   process.o ptrace.o return_address.o setup.o signal.o \
 		   sys_arm.o stacktrace.o time.o traps.o
 
+obj-$(CONFIG_LEDS)		+= leds.o
 obj-$(CONFIG_OC_ETM)		+= etm.o
 
 obj-$(CONFIG_ISA_DMA_API)	+= dma.o
@@ -46,6 +47,8 @@ obj-$(CONFIG_CPU_XSCALE)	+= xscale-cp0.o
 obj-$(CONFIG_CPU_XSC3)		+= xscale-cp0.o
 obj-$(CONFIG_CPU_MOHAWK)	+= xscale-cp0.o
 obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
+obj-$(CONFIG_CPU_HAS_PMU)	+= pmu.o
+obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
 AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
 
 ifneq ($(CONFIG_ARCH_EBSA110),y)

+ 5 - 0
arch/arm/kernel/asm-offsets.c

@@ -12,6 +12,7 @@
  */
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/dma-mapping.h>
 #include <asm/mach/arch.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
@@ -112,5 +113,9 @@ int main(void)
 #ifdef MULTI_PABORT
   DEFINE(PROCESSOR_PABT_FUNC,	offsetof(struct processor, _prefetch_abort));
 #endif
+  BLANK();
+  DEFINE(DMA_BIDIRECTIONAL,	DMA_BIDIRECTIONAL);
+  DEFINE(DMA_TO_DEVICE,		DMA_TO_DEVICE);
+  DEFINE(DMA_FROM_DEVICE,	DMA_FROM_DEVICE);
   return 0; 
 }

+ 5 - 3
arch/arm/kernel/bios32.c

@@ -616,15 +616,17 @@ char * __init pcibios_setup(char *str)
  * but we want to try to avoid allocating at 0x2900-0x2bff
  * which might be mirrored at 0x0100-0x03ff..
  */
-void pcibios_align_resource(void *data, struct resource *res,
-			    resource_size_t size, resource_size_t align)
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+				resource_size_t size, resource_size_t align)
 {
 	resource_size_t start = res->start;
 
 	if (res->flags & IORESOURCE_IO && start & 0x300)
 		start = (start + 0x3ff) & ~0x3ff;
 
-	res->start = (start + align - 1) & ~(align - 1);
+	start = (start + align - 1) & ~(align - 1);
+
+	return start;
 }
 
 /**

+ 115 - 0
arch/arm/kernel/leds.c

@@ -0,0 +1,115 @@
+/*
+ * LED support code, ripped out of arch/arm/kernel/time.c
+ *
+ *  Copyright (C) 1994-2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sysdev.h>
+
+#include <asm/leds.h>
+
+static void dummy_leds_event(led_event_t evt)
+{
+}
+
+void (*leds_event)(led_event_t) = dummy_leds_event;
+
+struct leds_evt_name {
+	const char	name[8];
+	int		on;
+	int		off;
+};
+
+static const struct leds_evt_name evt_names[] = {
+	{ "amber", led_amber_on, led_amber_off },
+	{ "blue",  led_blue_on,  led_blue_off  },
+	{ "green", led_green_on, led_green_off },
+	{ "red",   led_red_on,   led_red_off   },
+};
+
+static ssize_t leds_store(struct sys_device *dev,
+			struct sysdev_attribute *attr,
+			const char *buf, size_t size)
+{
+	int ret = -EINVAL, len = strcspn(buf, " ");
+
+	if (len > 0 && buf[len] == '\0')
+		len--;
+
+	if (strncmp(buf, "claim", len) == 0) {
+		leds_event(led_claim);
+		ret = size;
+	} else if (strncmp(buf, "release", len) == 0) {
+		leds_event(led_release);
+		ret = size;
+	} else {
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(evt_names); i++) {
+			if (strlen(evt_names[i].name) != len ||
+			    strncmp(buf, evt_names[i].name, len) != 0)
+				continue;
+			if (strncmp(buf+len, " on", 3) == 0) {
+				leds_event(evt_names[i].on);
+				ret = size;
+			} else if (strncmp(buf+len, " off", 4) == 0) {
+				leds_event(evt_names[i].off);
+				ret = size;
+			}
+			break;
+		}
+	}
+	return ret;
+}
+
+static SYSDEV_ATTR(event, 0200, NULL, leds_store);
+
+static int leds_suspend(struct sys_device *dev, pm_message_t state)
+{
+	leds_event(led_stop);
+	return 0;
+}
+
+static int leds_resume(struct sys_device *dev)
+{
+	leds_event(led_start);
+	return 0;
+}
+
+static int leds_shutdown(struct sys_device *dev)
+{
+	leds_event(led_halted);
+	return 0;
+}
+
+static struct sysdev_class leds_sysclass = {
+	.name		= "leds",
+	.shutdown	= leds_shutdown,
+	.suspend	= leds_suspend,
+	.resume		= leds_resume,
+};
+
+static struct sys_device leds_device = {
+	.id		= 0,
+	.cls		= &leds_sysclass,
+};
+
+static int __init leds_init(void)
+{
+	int ret;
+	ret = sysdev_class_register(&leds_sysclass);
+	if (ret == 0)
+		ret = sysdev_register(&leds_device);
+	if (ret == 0)
+		ret = sysdev_create_file(&leds_device, &attr_event);
+	return ret;
+}
+
+device_initcall(leds_init);
+
+EXPORT_SYMBOL(leds_event);

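leds_store() above accepts "claim", "release" and "<colour> on"/"<colour> off". A userspace sketch, not part of this commit, that drives the new attribute; the sysfs path follows from the "leds" sysdev class and device id 0 registered above.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/system/leds/leds0/event", "w");

	if (!f)
		return 1;
	fputs("green on", f);	/* also: "claim", "release", "red off", ... */
	return fclose(f) ? 1 : 0;
}
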
+ 2276 - 0
arch/arm/kernel/perf_event.c

@@ -0,0 +1,2276 @@
+#undef DEBUG
+
+/*
+ * ARM performance counter support.
+ *
+ * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
+ *
+ * ARMv7 support: Jean Pihet <jpihet@mvista.com>
+ * 2010 (c) MontaVista Software, LLC.
+ *
+ * This code is based on the sparc64 perf event code, which is in turn based
+ * on the x86 code. Callchain code is based on the ARM OProfile backtrace
+ * code.
+ */
+#define pr_fmt(fmt) "hw perfevents: " fmt
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+#include <asm/cputype.h>
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/pmu.h>
+#include <asm/stacktrace.h>
+
+static const struct pmu_irqs *pmu_irqs;
+
+/*
+ * Hardware lock to serialize accesses to PMU registers. Needed for the
+ * read/modify/write sequences.
+ */
+DEFINE_SPINLOCK(pmu_lock);
+
+/*
+ * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
+ * another platform that supports more, we need to increase this to be the
+ * largest of all platforms.
+ *
+ * ARMv7 supports up to 32 events:
+ *  cycle counter CCNT + 31 events counters CNT0..30.
+ *  Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
+ */
+#define ARMPMU_MAX_HWEVENTS		33
+
+/* The events for a given CPU. */
+struct cpu_hw_events {
+	/*
+	 * The events that are active on the CPU for the given index. Index 0
+	 * is reserved.
+	 */
+	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];
+
+	/*
+	 * A 1 bit for an index indicates that the counter is being used for
+	 * an event. A 0 means that the counter can be used.
+	 */
+	unsigned long		used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
+
+	/*
+	 * A 1 bit for an index indicates that the counter is actively being
+	 * used.
+	 */
+	unsigned long		active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
+};
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
+
+struct arm_pmu {
+	char		*name;
+	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
+	void		(*enable)(struct hw_perf_event *evt, int idx);
+	void		(*disable)(struct hw_perf_event *evt, int idx);
+	int		(*event_map)(int evt);
+	u64		(*raw_event)(u64);
+	int		(*get_event_idx)(struct cpu_hw_events *cpuc,
+					 struct hw_perf_event *hwc);
+	u32		(*read_counter)(int idx);
+	void		(*write_counter)(int idx, u32 val);
+	void		(*start)(void);
+	void		(*stop)(void);
+	int		num_events;
+	u64		max_period;
+};
+
+/* Set at runtime when we know what CPU type we are. */
+static const struct arm_pmu *armpmu;
+
+#define HW_OP_UNSUPPORTED		0xFFFF
+
+#define C(_x) \
+	PERF_COUNT_HW_CACHE_##_x
+
+#define CACHE_OP_UNSUPPORTED		0xFFFF
+
+static unsigned armpmu_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+				     [PERF_COUNT_HW_CACHE_OP_MAX]
+				     [PERF_COUNT_HW_CACHE_RESULT_MAX];
+
+static int
+armpmu_map_cache_event(u64 config)
+{
+	unsigned int cache_type, cache_op, cache_result, ret;
+
+	cache_type = (config >>  0) & 0xff;
+	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+		return -EINVAL;
+
+	cache_op = (config >>  8) & 0xff;
+	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+		return -EINVAL;
+
+	cache_result = (config >> 16) & 0xff;
+	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+		return -EINVAL;
+
+	ret = (int)armpmu_perf_cache_map[cache_type][cache_op][cache_result];
+
+	if (ret == CACHE_OP_UNSUPPORTED)
+		return -ENOENT;
+
+	return ret;
+}
+
+static int
+armpmu_event_set_period(struct perf_event *event,
+			struct hw_perf_event *hwc,
+			int idx)
+{
+	s64 left = atomic64_read(&hwc->period_left);
+	s64 period = hwc->sample_period;
+	int ret = 0;
+
+	if (unlikely(left <= -period)) {
+		left = period;
+		atomic64_set(&hwc->period_left, left);
+		hwc->last_period = period;
+		ret = 1;
+	}
+
+	if (unlikely(left <= 0)) {
+		left += period;
+		atomic64_set(&hwc->period_left, left);
+		hwc->last_period = period;
+		ret = 1;
+	}
+
+	if (left > (s64)armpmu->max_period)
+		left = armpmu->max_period;
+
+	atomic64_set(&hwc->prev_count, (u64)-left);
+
+	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);
+
+	perf_event_update_userpage(event);
+
+	return ret;
+}
+
+static u64
+armpmu_event_update(struct perf_event *event,
+		    struct hw_perf_event *hwc,
+		    int idx)
+{
+	int shift = 64 - 32;
+	s64 prev_raw_count, new_raw_count;
+	s64 delta;
+
+again:
+	prev_raw_count = atomic64_read(&hwc->prev_count);
+	new_raw_count = armpmu->read_counter(idx);
+
+	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
+			     new_raw_count) != prev_raw_count)
+		goto again;
+
+	delta = (new_raw_count << shift) - (prev_raw_count << shift);
+	delta >>= shift;
+
+	atomic64_add(delta, &event->count);
+	atomic64_sub(delta, &hwc->period_left);
+
+	return new_raw_count;
+}
+
+static void
+armpmu_disable(struct perf_event *event)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	WARN_ON(idx < 0);
+
+	clear_bit(idx, cpuc->active_mask);
+	armpmu->disable(hwc, idx);
+
+	barrier();
+
+	armpmu_event_update(event, hwc, idx);
+	cpuc->events[idx] = NULL;
+	clear_bit(idx, cpuc->used_mask);
+
+	perf_event_update_userpage(event);
+}
+
+static void
+armpmu_read(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	/* Don't read disabled counters! */
+	if (hwc->idx < 0)
+		return;
+
+	armpmu_event_update(event, hwc, hwc->idx);
+}
+
+static void
+armpmu_unthrottle(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	/*
+	 * Set the period again. Some counters can't be stopped, so when we
+	 * were throttled we simply disabled the IRQ source and the counter
+	 * may have been left counting. If we don't do this step then we may
+	 * get an interrupt too soon or *way* too late if the overflow has
+	 * happened since disabling.
+	 */
+	armpmu_event_set_period(event, hwc, hwc->idx);
+	armpmu->enable(hwc, hwc->idx);
+}
+
+static int
+armpmu_enable(struct perf_event *event)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx;
+	int err = 0;
+
+	/* If we don't have a space for the counter then finish early. */
+	idx = armpmu->get_event_idx(cpuc, hwc);
+	if (idx < 0) {
+		err = idx;
+		goto out;
+	}
+
+	/*
+	 * If there is an event in the counter we are going to use then make
+	 * sure it is disabled.
+	 */
+	event->hw.idx = idx;
+	armpmu->disable(hwc, idx);
+	cpuc->events[idx] = event;
+	set_bit(idx, cpuc->active_mask);
+
+	/* Set the period for the event. */
+	armpmu_event_set_period(event, hwc, idx);
+
+	/* Enable the event. */
+	armpmu->enable(hwc, idx);
+
+	/* Propagate our changes to the userspace mapping. */
+	perf_event_update_userpage(event);
+
+out:
+	return err;
+}
+
+static struct pmu pmu = {
+	.enable	    = armpmu_enable,
+	.disable    = armpmu_disable,
+	.unthrottle = armpmu_unthrottle,
+	.read	    = armpmu_read,
+};
+
+static int
+validate_event(struct cpu_hw_events *cpuc,
+	       struct perf_event *event)
+{
+	struct hw_perf_event fake_event = event->hw;
+
+	if (event->pmu && event->pmu != &pmu)
+		return 0;
+
+	return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
+}
+
+static int
+validate_group(struct perf_event *event)
+{
+	struct perf_event *sibling, *leader = event->group_leader;
+	struct cpu_hw_events fake_pmu;
+
+	memset(&fake_pmu, 0, sizeof(fake_pmu));
+
+	if (!validate_event(&fake_pmu, leader))
+		return -ENOSPC;
+
+	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+		if (!validate_event(&fake_pmu, sibling))
+			return -ENOSPC;
+	}
+
+	if (!validate_event(&fake_pmu, event))
+		return -ENOSPC;
+
+	return 0;
+}
+
+static int
+armpmu_reserve_hardware(void)
+{
+	int i;
+	int err;
+
+	pmu_irqs = reserve_pmu();
+	if (IS_ERR(pmu_irqs)) {
+		pr_warning("unable to reserve pmu\n");
+		return PTR_ERR(pmu_irqs);
+	}
+
+	init_pmu();
+
+	if (pmu_irqs->num_irqs < 1) {
+		pr_err("no irqs for PMUs defined\n");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < pmu_irqs->num_irqs; ++i) {
+		err = request_irq(pmu_irqs->irqs[i], armpmu->handle_irq,
+				  IRQF_DISABLED, "armpmu", NULL);
+		if (err) {
+			pr_warning("unable to request IRQ%d for ARM "
+				   "perf counters\n", pmu_irqs->irqs[i]);
+			break;
+		}
+	}
+
+	if (err) {
+		for (i = i - 1; i >= 0; --i)
+			free_irq(pmu_irqs->irqs[i], NULL);
+		release_pmu(pmu_irqs);
+		pmu_irqs = NULL;
+	}
+
+	return err;
+}
+
+static void
+armpmu_release_hardware(void)
+{
+	int i;
+
+	for (i = pmu_irqs->num_irqs - 1; i >= 0; --i)
+		free_irq(pmu_irqs->irqs[i], NULL);
+	armpmu->stop();
+
+	release_pmu(pmu_irqs);
+	pmu_irqs = NULL;
+}
+
+static atomic_t active_events = ATOMIC_INIT(0);
+static DEFINE_MUTEX(pmu_reserve_mutex);
+
+static void
+hw_perf_event_destroy(struct perf_event *event)
+{
+	if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
+		armpmu_release_hardware();
+		mutex_unlock(&pmu_reserve_mutex);
+	}
+}
+
+static int
+__hw_perf_event_init(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	int mapping, err;
+
+	/* Decode the generic type into an ARM event identifier. */
+	if (PERF_TYPE_HARDWARE == event->attr.type) {
+		mapping = armpmu->event_map(event->attr.config);
+	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
+		mapping = armpmu_map_cache_event(event->attr.config);
+	} else if (PERF_TYPE_RAW == event->attr.type) {
+		mapping = armpmu->raw_event(event->attr.config);
+	} else {
+		pr_debug("event type %x not supported\n", event->attr.type);
+		return -EOPNOTSUPP;
+	}
+
+	if (mapping < 0) {
+		pr_debug("event %x:%llx not supported\n", event->attr.type,
+			 event->attr.config);
+		return mapping;
+	}
+
+	/*
+	 * Check whether we need to exclude the counter from certain modes.
+	 * The ARM performance counters are on all of the time so if someone
+	 * has asked us for some excludes then we have to fail.
+	 */
+	if (event->attr.exclude_kernel || event->attr.exclude_user ||
+	    event->attr.exclude_hv || event->attr.exclude_idle) {
+		pr_debug("ARM performance counters do not support "
+			 "mode exclusion\n");
+		return -EPERM;
+	}
+
+	/*
+	 * We don't assign an index until we actually place the event onto
+	 * hardware. Use -1 to signify that we haven't decided where to put it
+	 * yet. For SMP systems, each core has its own PMU so we can't do any
+	 * clever allocation or constraints checking at this point.
+	 */
+	hwc->idx = -1;
+
+	/*
+	 * Store the event encoding into the config_base field. config and
+	 * event_base are unused as the only 2 things we need to know are
+	 * the event mapping and the counter to use. The counter to use is
+	 * also the index and the config_base is the event type.
+	 */
+	hwc->config_base	    = (unsigned long)mapping;
+	hwc->config		    = 0;
+	hwc->event_base		    = 0;
+
+	if (!hwc->sample_period) {
+		hwc->sample_period  = armpmu->max_period;
+		hwc->last_period    = hwc->sample_period;
+		atomic64_set(&hwc->period_left, hwc->sample_period);
+	}
+
+	err = 0;
+	if (event->group_leader != event) {
+		err = validate_group(event);
+		if (err)
+			return -EINVAL;
+	}
+
+	return err;
+}
+
+const struct pmu *
+hw_perf_event_init(struct perf_event *event)
+{
+	int err = 0;
+
+	if (!armpmu)
+		return ERR_PTR(-ENODEV);
+
+	event->destroy = hw_perf_event_destroy;
+
+	if (!atomic_inc_not_zero(&active_events)) {
+		if (atomic_read(&active_events) > perf_max_events) {
+			atomic_dec(&active_events);
+			return ERR_PTR(-ENOSPC);
+		}
+
+		mutex_lock(&pmu_reserve_mutex);
+		if (atomic_read(&active_events) == 0) {
+			err = armpmu_reserve_hardware();
+		}
+
+		if (!err)
+			atomic_inc(&active_events);
+		mutex_unlock(&pmu_reserve_mutex);
+	}
+
+	if (err)
+		return ERR_PTR(err);
+
+	err = __hw_perf_event_init(event);
+	if (err)
+		hw_perf_event_destroy(event);
+
+	return err ? ERR_PTR(err) : &pmu;
+}
+
+void
+hw_perf_enable(void)
+{
+	/* Enable all of the perf events on hardware. */
+	int idx;
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+	if (!armpmu)
+		return;
+
+	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+		struct perf_event *event = cpuc->events[idx];
+
+		if (!event)
+			continue;
+
+		armpmu->enable(&event->hw, idx);
+	}
+
+	armpmu->start();
+}
+
+void
+hw_perf_disable(void)
+{
+	if (armpmu)
+		armpmu->stop();
+}
+
+/*
+ * ARMv6 Performance counter handling code.
+ *
+ * ARMv6 has 2 configurable performance counters and a single cycle counter.
+ * They all share a single reset bit but can be written to zero so we can use
+ * that for a reset.
+ *
+ * The counters can't be individually enabled or disabled so when we remove
+ * one event and replace it with another we could get spurious counts from the
+ * wrong event. However, we can take advantage of the fact that the
+ * performance counters can export events to the event bus, and the event bus
+ * itself can be monitored. This requires that we *don't* export the events to
+ * the event bus. The procedure for disabling a configurable counter is:
+ *	- change the counter to count the ETMEXTOUT[0] signal (0x20). This
+ *	  effectively stops the counter from counting.
+ *	- disable the counter's interrupt generation (each counter has its
+ *	  own interrupt enable bit).
+ * Once stopped, the counter value can be written as 0 to reset.
+ *
+ * To enable a counter:
+ *	- enable the counter's interrupt generation.
+ *	- set the new event type.
+ *
+ * Note: the dedicated cycle counter only counts cycles and can't be
+ * enabled/disabled independently of the others. When we want to disable the
+ * cycle counter, we have to just disable the interrupt reporting and start
+ * ignoring that counter. When re-enabling, we have to reset the value and
+ * enable the interrupt.
+ */
+
+enum armv6_perf_types {
+	ARMV6_PERFCTR_ICACHE_MISS	    = 0x0,
+	ARMV6_PERFCTR_IBUF_STALL	    = 0x1,
+	ARMV6_PERFCTR_DDEP_STALL	    = 0x2,
+	ARMV6_PERFCTR_ITLB_MISS		    = 0x3,
+	ARMV6_PERFCTR_DTLB_MISS		    = 0x4,
+	ARMV6_PERFCTR_BR_EXEC		    = 0x5,
+	ARMV6_PERFCTR_BR_MISPREDICT	    = 0x6,
+	ARMV6_PERFCTR_INSTR_EXEC	    = 0x7,
+	ARMV6_PERFCTR_DCACHE_HIT	    = 0x9,
+	ARMV6_PERFCTR_DCACHE_ACCESS	    = 0xA,
+	ARMV6_PERFCTR_DCACHE_MISS	    = 0xB,
+	ARMV6_PERFCTR_DCACHE_WBACK	    = 0xC,
+	ARMV6_PERFCTR_SW_PC_CHANGE	    = 0xD,
+	ARMV6_PERFCTR_MAIN_TLB_MISS	    = 0xF,
+	ARMV6_PERFCTR_EXPL_D_ACCESS	    = 0x10,
+	ARMV6_PERFCTR_LSU_FULL_STALL	    = 0x11,
+	ARMV6_PERFCTR_WBUF_DRAINED	    = 0x12,
+	ARMV6_PERFCTR_CPU_CYCLES	    = 0xFF,
+	ARMV6_PERFCTR_NOP		    = 0x20,
+};
+
+enum armv6_counters {
+	ARMV6_CYCLE_COUNTER = 1,
+	ARMV6_COUNTER0,
+	ARMV6_COUNTER1,
+};
+
+/*
+ * The hardware events that we support. We do support cache operations but
+ * we have Harvard caches and no way to combine instruction and data
+ * accesses/misses in hardware.
+ */
+static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV6_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV6_PERFCTR_INSTR_EXEC,
+	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
+	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV6_PERFCTR_BR_MISPREDICT,
+	[PERF_COUNT_HW_BUS_CYCLES]	    = HW_OP_UNSUPPORTED,
+};
+
+static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					  [PERF_COUNT_HW_CACHE_OP_MAX]
+					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(L1D)] = {
+		/*
+		 * The performance counters don't differentiate between read
+		 * and write accesses/misses so this isn't strictly correct,
+		 * but it's the best we can do. Writes and reads get
+		 * combined.
+		 */
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV6_PERFCTR_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DCACHE_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV6_PERFCTR_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DCACHE_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ICACHE_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ICACHE_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(DTLB)] = {
+		/*
+		 * The ARM performance counters can count micro DTLB misses,
+		 * micro ITLB misses and main TLB misses. There isn't an event
+		 * for TLB misses, so use the micro misses here and if users
+		 * want the main TLB misses they can use a raw counter.
+		 */
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DTLB_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DTLB_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ITLB_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ITLB_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(BPU)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+};
+
+enum armv6mpcore_perf_types {
+	ARMV6MPCORE_PERFCTR_ICACHE_MISS	    = 0x0,
+	ARMV6MPCORE_PERFCTR_IBUF_STALL	    = 0x1,
+	ARMV6MPCORE_PERFCTR_DDEP_STALL	    = 0x2,
+	ARMV6MPCORE_PERFCTR_ITLB_MISS	    = 0x3,
+	ARMV6MPCORE_PERFCTR_DTLB_MISS	    = 0x4,
+	ARMV6MPCORE_PERFCTR_BR_EXEC	    = 0x5,
+	ARMV6MPCORE_PERFCTR_BR_NOTPREDICT   = 0x6,
+	ARMV6MPCORE_PERFCTR_BR_MISPREDICT   = 0x7,
+	ARMV6MPCORE_PERFCTR_INSTR_EXEC	    = 0x8,
+	ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
+	ARMV6MPCORE_PERFCTR_DCACHE_RDMISS   = 0xB,
+	ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
+	ARMV6MPCORE_PERFCTR_DCACHE_WRMISS   = 0xD,
+	ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
+	ARMV6MPCORE_PERFCTR_SW_PC_CHANGE    = 0xF,
+	ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS   = 0x10,
+	ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
+	ARMV6MPCORE_PERFCTR_LSU_FULL_STALL  = 0x12,
+	ARMV6MPCORE_PERFCTR_WBUF_DRAINED    = 0x13,
+	ARMV6MPCORE_PERFCTR_CPU_CYCLES	    = 0xFF,
+};
+
+/*
+ * The hardware events that we support. We do support cache operations but
+ * we have Harvard caches and no way to combine instruction and data
+ * accesses/misses in hardware.
+ */
+static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
+	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
+	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
+	[PERF_COUNT_HW_BUS_CYCLES]	    = HW_OP_UNSUPPORTED,
+};
+
+static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					[PERF_COUNT_HW_CACHE_OP_MAX]
+					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(L1D)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]  =
+				ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
+			[C(RESULT_MISS)]    =
+				ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]  =
+				ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
+			[C(RESULT_MISS)]    =
+				ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(DTLB)] = {
+		/*
+		 * The ARM performance counters can count micro DTLB misses,
+		 * micro ITLB misses and main TLB misses. There isn't an event
+		 * for TLB misses, so use the micro misses here and if users
+		 * want the main TLB misses they can use a raw counter.
+		 */
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_DTLB_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_DTLB_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ITLB_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ITLB_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(BPU)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
+		},
+	},
+};
+
+static inline unsigned long
+armv6_pmcr_read(void)
+{
+	u32 val;
+	asm volatile("mrc   p15, 0, %0, c15, c12, 0" : "=r"(val));
+	return val;
+}
+
+static inline void
+armv6_pmcr_write(unsigned long val)
+{
+	asm volatile("mcr   p15, 0, %0, c15, c12, 0" : : "r"(val));
+}
+
+#define ARMV6_PMCR_ENABLE		(1 << 0)
+#define ARMV6_PMCR_CTR01_RESET		(1 << 1)
+#define ARMV6_PMCR_CCOUNT_RESET		(1 << 2)
+#define ARMV6_PMCR_CCOUNT_DIV		(1 << 3)
+#define ARMV6_PMCR_COUNT0_IEN		(1 << 4)
+#define ARMV6_PMCR_COUNT1_IEN		(1 << 5)
+#define ARMV6_PMCR_CCOUNT_IEN		(1 << 6)
+#define ARMV6_PMCR_COUNT0_OVERFLOW	(1 << 8)
+#define ARMV6_PMCR_COUNT1_OVERFLOW	(1 << 9)
+#define ARMV6_PMCR_CCOUNT_OVERFLOW	(1 << 10)
+#define ARMV6_PMCR_EVT_COUNT0_SHIFT	20
+#define ARMV6_PMCR_EVT_COUNT0_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
+#define ARMV6_PMCR_EVT_COUNT1_SHIFT	12
+#define ARMV6_PMCR_EVT_COUNT1_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)
+
+#define ARMV6_PMCR_OVERFLOWED_MASK \
+	(ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
+	 ARMV6_PMCR_CCOUNT_OVERFLOW)
+
+static inline int
+armv6_pmcr_has_overflowed(unsigned long pmcr)
+{
+	return (pmcr & ARMV6_PMCR_OVERFLOWED_MASK);
+}
+
+static inline int
+armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
+				  enum armv6_counters counter)
+{
+	int ret = 0;
+
+	if (ARMV6_CYCLE_COUNTER == counter)
+		ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
+	else if (ARMV6_COUNTER0 == counter)
+		ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
+	else if (ARMV6_COUNTER1 == counter)
+		ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
+	else
+		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+
+	return ret;
+}
+
+static inline u32
+armv6pmu_read_counter(int counter)
+{
+	unsigned long value = 0;
+
+	if (ARMV6_CYCLE_COUNTER == counter)
+		asm volatile("mrc   p15, 0, %0, c15, c12, 1" : "=r"(value));
+	else if (ARMV6_COUNTER0 == counter)
+		asm volatile("mrc   p15, 0, %0, c15, c12, 2" : "=r"(value));
+	else if (ARMV6_COUNTER1 == counter)
+		asm volatile("mrc   p15, 0, %0, c15, c12, 3" : "=r"(value));
+	else
+		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+
+	return value;
+}
+
+static inline void
+armv6pmu_write_counter(int counter,
+		       u32 value)
+{
+	if (ARMV6_CYCLE_COUNTER == counter)
+		asm volatile("mcr   p15, 0, %0, c15, c12, 1" : : "r"(value));
+	else if (ARMV6_COUNTER0 == counter)
+		asm volatile("mcr   p15, 0, %0, c15, c12, 2" : : "r"(value));
+	else if (ARMV6_COUNTER1 == counter)
+		asm volatile("mcr   p15, 0, %0, c15, c12, 3" : : "r"(value));
+	else
+		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+}
+
+void
+armv6pmu_enable_event(struct hw_perf_event *hwc,
+		      int idx)
+{
+	unsigned long val, mask, evt, flags;
+
+	if (ARMV6_CYCLE_COUNTER == idx) {
+		mask	= 0;
+		evt	= ARMV6_PMCR_CCOUNT_IEN;
+	} else if (ARMV6_COUNTER0 == idx) {
+		mask	= ARMV6_PMCR_EVT_COUNT0_MASK;
+		evt	= (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
+			  ARMV6_PMCR_COUNT0_IEN;
+	} else if (ARMV6_COUNTER1 == idx) {
+		mask	= ARMV6_PMCR_EVT_COUNT1_MASK;
+		evt	= (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
+			  ARMV6_PMCR_COUNT1_IEN;
+	} else {
+		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+		return;
+	}
+
+	/*
+	 * Mask out the current event and set the counter to count the event
+	 * that we're interested in.
+	 */
+	spin_lock_irqsave(&pmu_lock, flags);
+	val = armv6_pmcr_read();
+	val &= ~mask;
+	val |= evt;
+	armv6_pmcr_write(val);
+	spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static irqreturn_t
+armv6pmu_handle_irq(int irq_num,
+		    void *dev)
+{
+	unsigned long pmcr = armv6_pmcr_read();
+	struct perf_sample_data data;
+	struct cpu_hw_events *cpuc;
+	struct pt_regs *regs;
+	int idx;
+
+	if (!armv6_pmcr_has_overflowed(pmcr))
+		return IRQ_NONE;
+
+	regs = get_irq_regs();
+
+	/*
+	 * The interrupts are cleared by writing the overflow flags back to
+	 * the control register. All of the other bits don't have any effect
+	 * if they are rewritten, so write the whole value back.
+	 */
+	armv6_pmcr_write(pmcr);
+
+	data.addr = 0;
+
+	cpuc = &__get_cpu_var(cpu_hw_events);
+	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+		struct perf_event *event = cpuc->events[idx];
+		struct hw_perf_event *hwc;
+
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+
+		/*
+		 * We have a single interrupt for all counters. Check that
+		 * each counter has overflowed before we process it.
+		 */
+		if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
+			continue;
+
+		hwc = &event->hw;
+		armpmu_event_update(event, hwc, idx);
+		data.period = event->hw.last_period;
+		if (!armpmu_event_set_period(event, hwc, idx))
+			continue;
+
+		if (perf_event_overflow(event, 0, &data, regs))
+			armpmu->disable(hwc, idx);
+	}
+
+	/*
+	 * Handle the pending perf events.
+	 *
+	 * Note: this call *must* be run with interrupts enabled. For
+	 * platforms that can have the PMU interrupts raised as a PMI, this
+	 * will not work.
+	 */
+	perf_event_do_pending();
+
+	return IRQ_HANDLED;
+}
+
+static void
+armv6pmu_start(void)
+{
+	unsigned long flags, val;
+
+	spin_lock_irqsave(&pmu_lock, flags);
+	val = armv6_pmcr_read();
+	val |= ARMV6_PMCR_ENABLE;
+	armv6_pmcr_write(val);
+	spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+void
+armv6pmu_stop(void)
+{
+	unsigned long flags, val;
+
+	spin_lock_irqsave(&pmu_lock, flags);
+	val = armv6_pmcr_read();
+	val &= ~ARMV6_PMCR_ENABLE;
+	armv6_pmcr_write(val);
+	spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static inline int
+armv6pmu_event_map(int config)
+{
+	int mapping = armv6_perf_map[config];
+	if (HW_OP_UNSUPPORTED == mapping)
+		mapping = -EOPNOTSUPP;
+	return mapping;
+}
+
+static inline int
+armv6mpcore_pmu_event_map(int config)
+{
+	int mapping = armv6mpcore_perf_map[config];
+	if (HW_OP_UNSUPPORTED == mapping)
+		mapping = -EOPNOTSUPP;
+	return mapping;
+}
+
+static u64
+armv6pmu_raw_event(u64 config)
+{
+	return config & 0xff;
+}
+
+static int
+armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
+		       struct hw_perf_event *event)
+{
+	/* Always place a cycle-count event into the cycle counter. */
+	if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
+		if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
+			return -EAGAIN;
+
+		return ARMV6_CYCLE_COUNTER;
+	} else {
+		/*
+		 * For anything other than a cycle counter, try and use
+		 * counter0 and counter1.
+		 */
+		if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask)) {
+			return ARMV6_COUNTER1;
+		}
+
+		if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask)) {
+			return ARMV6_COUNTER0;
+		}
+
+		/* The counters are all in use. */
+		return -EAGAIN;
+	}
+}
+
+static void
+armv6pmu_disable_event(struct hw_perf_event *hwc,
+		       int idx)
+{
+	unsigned long val, mask, evt, flags;
+
+	if (ARMV6_CYCLE_COUNTER == idx) {
+		mask	= ARMV6_PMCR_CCOUNT_IEN;
+		evt	= 0;
+	} else if (ARMV6_COUNTER0 == idx) {
+		mask	= ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
+		evt	= ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
+	} else if (ARMV6_COUNTER1 == idx) {
+		mask	= ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
+		evt	= ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
+	} else {
+		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+		return;
+	}
+
+	/*
+	 * Mask out the current event and set the counter to count the number
+	 * of ETM bus signal assertion cycles. The external reporting should
+	 * be disabled and so this should never increment.
+	 */
+	spin_lock_irqsave(&pmu_lock, flags);
+	val = armv6_pmcr_read();
+	val &= ~mask;
+	val |= evt;
+	armv6_pmcr_write(val);
+	spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
+			      int idx)
+{
+	unsigned long val, mask, flags, evt = 0;
+
+	if (ARMV6_CYCLE_COUNTER == idx) {
+		mask	= ARMV6_PMCR_CCOUNT_IEN;
+	} else if (ARMV6_COUNTER0 == idx) {
+		mask	= ARMV6_PMCR_COUNT0_IEN;
+	} else if (ARMV6_COUNTER1 == idx) {
+		mask	= ARMV6_PMCR_COUNT1_IEN;
+	} else {
+		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+		return;
+	}
+
+	/*
+	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
+	 * simply disable the interrupt reporting.
+	 */
+	spin_lock_irqsave(&pmu_lock, flags);
+	val = armv6_pmcr_read();
+	val &= ~mask;
+	val |= evt;
+	armv6_pmcr_write(val);
+	spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static const struct arm_pmu armv6pmu = {
+	.name			= "v6",
+	.handle_irq		= armv6pmu_handle_irq,
+	.enable			= armv6pmu_enable_event,
+	.disable		= armv6pmu_disable_event,
+	.event_map		= armv6pmu_event_map,
+	.raw_event		= armv6pmu_raw_event,
+	.read_counter		= armv6pmu_read_counter,
+	.write_counter		= armv6pmu_write_counter,
+	.get_event_idx		= armv6pmu_get_event_idx,
+	.start			= armv6pmu_start,
+	.stop			= armv6pmu_stop,
+	.num_events		= 3,
+	.max_period		= (1LLU << 32) - 1,
+};
+
+/*
+ * ARMv6mpcore is almost identical to single core ARMv6 with the exception
+ * that some of the events have different enumerations and that there is no
+ * *hack* to stop the programmable counters. To stop the counters we simply
+ * disable the interrupt reporting and update the event. When unthrottling we
+ * reset the period and enable the interrupt reporting.
+ */
+static const struct arm_pmu armv6mpcore_pmu = {
+	.name			= "v6mpcore",
+	.handle_irq		= armv6pmu_handle_irq,
+	.enable			= armv6pmu_enable_event,
+	.disable		= armv6mpcore_pmu_disable_event,
+	.event_map		= armv6mpcore_pmu_event_map,
+	.raw_event		= armv6pmu_raw_event,
+	.read_counter		= armv6pmu_read_counter,
+	.write_counter		= armv6pmu_write_counter,
+	.get_event_idx		= armv6pmu_get_event_idx,
+	.start			= armv6pmu_start,
+	.stop			= armv6pmu_stop,
+	.num_events		= 3,
+	.max_period		= (1LLU << 32) - 1,
+};
+
+/*
+ * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
+ *
+ * Copied from ARMv6 code, with the low level code inspired
+ *  by the ARMv7 Oprofile code.
+ *
+ * Cortex-A8 has up to 4 configurable performance counters and
+ *  a single cycle counter.
+ * Cortex-A9 has up to 31 configurable performance counters and
+ *  a single cycle counter.
+ *
+ * All counters can be enabled/disabled and IRQ masked separately. The cycle
+ *  counter and all 4 performance counters together can be reset separately.
+ */
+
+#define ARMV7_PMU_CORTEX_A8_NAME		"ARMv7 Cortex-A8"
+
+#define ARMV7_PMU_CORTEX_A9_NAME		"ARMv7 Cortex-A9"
+
+/* Common ARMv7 event types */
+enum armv7_perf_types {
+	ARMV7_PERFCTR_PMNC_SW_INCR		= 0x00,
+	ARMV7_PERFCTR_IFETCH_MISS		= 0x01,
+	ARMV7_PERFCTR_ITLB_MISS			= 0x02,
+	ARMV7_PERFCTR_DCACHE_REFILL		= 0x03,
+	ARMV7_PERFCTR_DCACHE_ACCESS		= 0x04,
+	ARMV7_PERFCTR_DTLB_REFILL		= 0x05,
+	ARMV7_PERFCTR_DREAD			= 0x06,
+	ARMV7_PERFCTR_DWRITE			= 0x07,
+
+	ARMV7_PERFCTR_EXC_TAKEN			= 0x09,
+	ARMV7_PERFCTR_EXC_EXECUTED		= 0x0A,
+	ARMV7_PERFCTR_CID_WRITE			= 0x0B,
+	/* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
+	 * It counts:
+	 *  - all branch instructions,
+	 *  - instructions that explicitly write the PC,
+	 *  - exception generating instructions.
+	 */
+	ARMV7_PERFCTR_PC_WRITE			= 0x0C,
+	ARMV7_PERFCTR_PC_IMM_BRANCH		= 0x0D,
+	ARMV7_PERFCTR_UNALIGNED_ACCESS		= 0x0F,
+	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED	= 0x10,
+	ARMV7_PERFCTR_CLOCK_CYCLES		= 0x11,
+
+	ARMV7_PERFCTR_PC_BRANCH_MIS_USED	= 0x12,
+
+	ARMV7_PERFCTR_CPU_CYCLES		= 0xFF
+};
+
+/* ARMv7 Cortex-A8 specific event types */
+enum armv7_a8_perf_types {
+	ARMV7_PERFCTR_INSTR_EXECUTED		= 0x08,
+
+	ARMV7_PERFCTR_PC_PROC_RETURN		= 0x0E,
+
+	ARMV7_PERFCTR_WRITE_BUFFER_FULL		= 0x40,
+	ARMV7_PERFCTR_L2_STORE_MERGED		= 0x41,
+	ARMV7_PERFCTR_L2_STORE_BUFF		= 0x42,
+	ARMV7_PERFCTR_L2_ACCESS			= 0x43,
+	ARMV7_PERFCTR_L2_CACH_MISS		= 0x44,
+	ARMV7_PERFCTR_AXI_READ_CYCLES		= 0x45,
+	ARMV7_PERFCTR_AXI_WRITE_CYCLES		= 0x46,
+	ARMV7_PERFCTR_MEMORY_REPLAY		= 0x47,
+	ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY	= 0x48,
+	ARMV7_PERFCTR_L1_DATA_MISS		= 0x49,
+	ARMV7_PERFCTR_L1_INST_MISS		= 0x4A,
+	ARMV7_PERFCTR_L1_DATA_COLORING		= 0x4B,
+	ARMV7_PERFCTR_L1_NEON_DATA		= 0x4C,
+	ARMV7_PERFCTR_L1_NEON_CACH_DATA		= 0x4D,
+	ARMV7_PERFCTR_L2_NEON			= 0x4E,
+	ARMV7_PERFCTR_L2_NEON_HIT		= 0x4F,
+	ARMV7_PERFCTR_L1_INST			= 0x50,
+	ARMV7_PERFCTR_PC_RETURN_MIS_PRED	= 0x51,
+	ARMV7_PERFCTR_PC_BRANCH_FAILED		= 0x52,
+	ARMV7_PERFCTR_PC_BRANCH_TAKEN		= 0x53,
+	ARMV7_PERFCTR_PC_BRANCH_EXECUTED	= 0x54,
+	ARMV7_PERFCTR_OP_EXECUTED		= 0x55,
+	ARMV7_PERFCTR_CYCLES_INST_STALL		= 0x56,
+	ARMV7_PERFCTR_CYCLES_INST		= 0x57,
+	ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL	= 0x58,
+	ARMV7_PERFCTR_CYCLES_NEON_INST_STALL	= 0x59,
+	ARMV7_PERFCTR_NEON_CYCLES		= 0x5A,
+
+	ARMV7_PERFCTR_PMU0_EVENTS		= 0x70,
+	ARMV7_PERFCTR_PMU1_EVENTS		= 0x71,
+	ARMV7_PERFCTR_PMU_EVENTS		= 0x72,
+};
+
+/* ARMv7 Cortex-A9 specific event types */
+enum armv7_a9_perf_types {
+	ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC	= 0x40,
+	ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC	= 0x41,
+	ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC	= 0x42,
+
+	ARMV7_PERFCTR_COHERENT_LINE_MISS	= 0x50,
+	ARMV7_PERFCTR_COHERENT_LINE_HIT		= 0x51,
+
+	ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES	= 0x60,
+	ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES	= 0x61,
+	ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES	= 0x62,
+	ARMV7_PERFCTR_STREX_EXECUTED_PASSED	= 0x63,
+	ARMV7_PERFCTR_STREX_EXECUTED_FAILED	= 0x64,
+	ARMV7_PERFCTR_DATA_EVICTION		= 0x65,
+	ARMV7_PERFCTR_ISSUE_STAGE_NO_INST	= 0x66,
+	ARMV7_PERFCTR_ISSUE_STAGE_EMPTY		= 0x67,
+	ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE	= 0x68,
+
+	ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS	= 0x6E,
+
+	ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST	= 0x70,
+	ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST	= 0x71,
+	ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST	= 0x72,
+	ARMV7_PERFCTR_FP_EXECUTED_INST		= 0x73,
+	ARMV7_PERFCTR_NEON_EXECUTED_INST	= 0x74,
+
+	ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES	= 0x80,
+	ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES	= 0x81,
+	ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES	= 0x82,
+	ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES	= 0x83,
+	ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES	= 0x84,
+	ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES 	= 0x85,
+	ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES	= 0x86,
+
+	ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES	= 0x8A,
+	ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES	= 0x8B,
+
+	ARMV7_PERFCTR_ISB_INST			= 0x90,
+	ARMV7_PERFCTR_DSB_INST			= 0x91,
+	ARMV7_PERFCTR_DMB_INST			= 0x92,
+	ARMV7_PERFCTR_EXT_INTERRUPTS		= 0x93,
+
+	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED	= 0xA0,
+	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED	= 0xA1,
+	ARMV7_PERFCTR_PLE_FIFO_FLUSH		= 0xA2,
+	ARMV7_PERFCTR_PLE_RQST_COMPLETED	= 0xA3,
+	ARMV7_PERFCTR_PLE_FIFO_OVERFLOW		= 0xA4,
+	ARMV7_PERFCTR_PLE_RQST_PROG		= 0xA5
+};
+
+/*
+ * Cortex-A8 HW events mapping
+ *
+ * The hardware events that we support. We do support cache operations but
+ * we have Harvard caches and no way to combine instruction and data
+ * accesses/misses in hardware.
+ */
+static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
+	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
+};
+
+static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					  [PERF_COUNT_HW_CACHE_OP_MAX]
+					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(L1D)] = {
+		/*
+		 * The performance counters don't differentiate between read
+		 * and write accesses/misses so this isn't strictly correct,
+		 * but it's the best we can do. Writes and reads get
+		 * combined.
+		 */
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_INST,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_INST_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_INST,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_INST_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACH_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACH_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(DTLB)] = {
+		/*
+		 * Only ITLB misses and DTLB refills are supported.
+		 * If users want the DTLB refill misses, a raw counter
+		 * must be used.
+		 */
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(BPU)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
+			[C(RESULT_MISS)]
+					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
+			[C(RESULT_MISS)]
+					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+};
+
+/*
+ * Cortex-A9 HW events mapping
+ */
+static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]	    =
+					ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
+	[PERF_COUNT_HW_CACHE_REFERENCES]    = ARMV7_PERFCTR_COHERENT_LINE_HIT,
+	[PERF_COUNT_HW_CACHE_MISSES]	    = ARMV7_PERFCTR_COHERENT_LINE_MISS,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
+};
+
+static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					  [PERF_COUNT_HW_CACHE_OP_MAX]
+					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	[C(L1D)] = {
+		/*
+		 * The performance counters don't differentiate between read
+		 * and write accesses/misses so this isn't strictly correct,
+		 * but it's the best we can do. Writes and reads get
+		 * combined.
+		 */
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(LL)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(DTLB)] = {
+		/*
+		 * Only ITLB misses and DTLB refills are supported.
+		 * If users want the DTLB refill misses, a raw counter
+		 * must be used.
+		 */
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+	[C(BPU)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
+			[C(RESULT_MISS)]
+					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
+			[C(RESULT_MISS)]
+					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+		},
+		[C(OP_PREFETCH)] = {
+			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
+			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
+		},
+	},
+};
+
+/*
+ * Perf Events counters
+ */
+enum armv7_counters {
+	ARMV7_CYCLE_COUNTER 		= 1,	/* Cycle counter */
+	ARMV7_COUNTER0			= 2,	/* First event counter */
+};
+
+/*
+ * The cycle counter is ARMV7_CYCLE_COUNTER.
+ * The first event counter is ARMV7_COUNTER0.
+ * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
+ */
+#define	ARMV7_COUNTER_LAST	(ARMV7_COUNTER0 + armpmu->num_events - 1)
+
+/*
+ * ARMv7 low level PMNC access
+ */
+
+/*
+ * Per-CPU PMNC: config reg
+ */
+#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
+#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
+#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
+#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
+#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
+#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug*/
+#define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
+#define	ARMV7_PMNC_N_MASK	0x1f
+#define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
+
+/*
+ * Available counters
+ */
+#define ARMV7_CNT0 		0	/* First event counter */
+#define ARMV7_CCNT 		31	/* Cycle counter */
+
+/* Perf Event to low level counters mapping */
+#define ARMV7_EVENT_CNT_TO_CNTx	(ARMV7_COUNTER0 - ARMV7_CNT0)
+
+/*
+ * CNTENS: counters enable reg
+ */
+#define ARMV7_CNTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
+#define ARMV7_CNTENS_C		(1 << ARMV7_CCNT)
+
+/*
+ * CNTENC: counters disable reg
+ */
+#define ARMV7_CNTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
+#define ARMV7_CNTENC_C		(1 << ARMV7_CCNT)
+
+/*
+ * INTENS: counters overflow interrupt enable reg
+ */
+#define ARMV7_INTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
+#define ARMV7_INTENS_C		(1 << ARMV7_CCNT)
+
+/*
+ * INTENC: counters overflow interrupt disable reg
+ */
+#define ARMV7_INTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
+#define ARMV7_INTENC_C		(1 << ARMV7_CCNT)
+
+/*
+ * EVTSEL: Event selection reg
+ */
+#define	ARMV7_EVTSEL_MASK	0x7f		/* Mask for writable bits */
+
+/*
+ * SELECT: Counter selection reg
+ */
+#define	ARMV7_SELECT_MASK	0x1f		/* Mask for writable bits */
+
+/*
+ * FLAG: counters overflow flag status reg
+ */
+#define ARMV7_FLAG_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
+#define ARMV7_FLAG_C		(1 << ARMV7_CCNT)
+#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
+#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
+
+static inline unsigned long armv7_pmnc_read(void)
+{
+	u32 val;
+	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
+	return val;
+}
+
+static inline void armv7_pmnc_write(unsigned long val)
+{
+	val &= ARMV7_PMNC_MASK;
+	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
+}
+
+static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
+{
+	return pmnc & ARMV7_OVERFLOWED_MASK;
+}
+
+static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
+					enum armv7_counters counter)
+{
+	int ret;
+
+	if (counter == ARMV7_CYCLE_COUNTER)
+		ret = pmnc & ARMV7_FLAG_C;
+	else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
+		ret = pmnc & ARMV7_FLAG_P(counter);
+	else
+		pr_err("CPU%u checking wrong counter %d overflow status\n",
+			smp_processor_id(), counter);
+
+	return ret;
+}
+
+static inline int armv7_pmnc_select_counter(unsigned int idx)
+{
+	u32 val;
+
+	if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
+		pr_err("CPU%u selecting wrong PMNC counter"
+			" %d\n", smp_processor_id(), idx);
+		return -1;
+	}
+
+	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
+	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
+
+	return idx;
+}
+
+static inline u32 armv7pmu_read_counter(int idx)
+{
+	unsigned long value = 0;
+
+	if (idx == ARMV7_CYCLE_COUNTER)
+		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
+	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
+		if (armv7_pmnc_select_counter(idx) == idx)
+			asm volatile("mrc p15, 0, %0, c9, c13, 2"
+				     : "=r" (value));
+	} else
+		pr_err("CPU%u reading wrong counter %d\n",
+			smp_processor_id(), idx);
+
+	return value;
+}
+
+static inline void armv7pmu_write_counter(int idx, u32 value)
+{
+	if (idx == ARMV7_CYCLE_COUNTER)
+		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
+	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
+		if (armv7_pmnc_select_counter(idx) == idx)
+			asm volatile("mcr p15, 0, %0, c9, c13, 2"
+				     : : "r" (value));
+	} else
+		pr_err("CPU%u writing wrong counter %d\n",
+			smp_processor_id(), idx);
+}
+
+static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
+{
+	if (armv7_pmnc_select_counter(idx) == idx) {
+		val &= ARMV7_EVTSEL_MASK;
+		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
+	}
+}
+
+static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
+{
+	u32 val;
+
+	if ((idx != ARMV7_CYCLE_COUNTER) &&
+	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
+		pr_err("CPU%u enabling wrong PMNC counter"
+			" %d\n", smp_processor_id(), idx);
+		return -1;
+	}
+
+	if (idx == ARMV7_CYCLE_COUNTER)
+		val = ARMV7_CNTENS_C;
+	else
+		val = ARMV7_CNTENS_P(idx);
+
+	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
+
+	return idx;
+}
+
+static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
+{
+	u32 val;
+
+
+	if ((idx != ARMV7_CYCLE_COUNTER) &&
+	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
+		pr_err("CPU%u disabling wrong PMNC counter"
+			" %d\n", smp_processor_id(), idx);
+		return -1;
+	}
+
+	if (idx == ARMV7_CYCLE_COUNTER)
+		val = ARMV7_CNTENC_C;
+	else
+		val = ARMV7_CNTENC_P(idx);
+
+	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
+
+	return idx;
+}
+
+static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
+{
+	u32 val;
+
+	if ((idx != ARMV7_CYCLE_COUNTER) &&
+	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
+		pr_err("CPU%u enabling wrong PMNC counter"
+			" interrupt enable %d\n", smp_processor_id(), idx);
+		return -1;
+	}
+
+	if (idx == ARMV7_CYCLE_COUNTER)
+		val = ARMV7_INTENS_C;
+	else
+		val = ARMV7_INTENS_P(idx);
+
+	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
+
+	return idx;
+}
+
+static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
+{
+	u32 val;
+
+	if ((idx != ARMV7_CYCLE_COUNTER) &&
+	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
+		pr_err("CPU%u disabling wrong PMNC counter"
+			" interrupt enable %d\n", smp_processor_id(), idx);
+		return -1;
+	}
+
+	if (idx == ARMV7_CYCLE_COUNTER)
+		val = ARMV7_INTENC_C;
+	else
+		val = ARMV7_INTENC_P(idx);
+
+	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
+
+	return idx;
+}
+
+static inline u32 armv7_pmnc_getreset_flags(void)
+{
+	u32 val;
+
+	/* Read */
+	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
+
+	/* Write to clear flags */
+	val &= ARMV7_FLAG_MASK;
+	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
+
+	return val;
+}
+
+#ifdef DEBUG
+static void armv7_pmnc_dump_regs(void)
+{
+	u32 val;
+	unsigned int cnt;
+
+	printk(KERN_INFO "PMNC registers dump:\n");
+
+	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
+	printk(KERN_INFO "PMNC  =0x%08x\n", val);
+
+	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
+	printk(KERN_INFO "CNTENS=0x%08x\n", val);
+
+	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
+	printk(KERN_INFO "INTENS=0x%08x\n", val);
+
+	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
+	printk(KERN_INFO "FLAGS =0x%08x\n", val);
+
+	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
+	printk(KERN_INFO "SELECT=0x%08x\n", val);
+
+	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
+	printk(KERN_INFO "CCNT  =0x%08x\n", val);
+
+	for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
+		armv7_pmnc_select_counter(cnt);
+		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
+		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
+			cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
+		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
+		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
+			cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
+	}
+}
+#endif
+
+void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
+{
+	unsigned long flags;
+
+	/*
+	 * Enable counter and interrupt, and set the counter to count
+	 * the event that we're interested in.
+	 */
+	spin_lock_irqsave(&pmu_lock, flags);
+
+	/*
+	 * Disable counter
+	 */
+	armv7_pmnc_disable_counter(idx);
+
+	/*
+	 * Set event (if destined for PMNx counters)
+	 * We don't need to set the event if it's a cycle count
+	 */
+	if (idx != ARMV7_CYCLE_COUNTER)
+		armv7_pmnc_write_evtsel(idx, hwc->config_base);
+
+	/*
+	 * Enable interrupt for this counter
+	 */
+	armv7_pmnc_enable_intens(idx);
+
+	/*
+	 * Enable counter
+	 */
+	armv7_pmnc_enable_counter(idx);
+
+	spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
+{
+	unsigned long flags;
+
+	/*
+	 * Disable counter and interrupt
+	 */
+	spin_lock_irqsave(&pmu_lock, flags);
+
+	/*
+	 * Disable counter
+	 */
+	armv7_pmnc_disable_counter(idx);
+
+	/*
+	 * Disable interrupt for this counter
+	 */
+	armv7_pmnc_disable_intens(idx);
+
+	spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
+{
+	unsigned long pmnc;
+	struct perf_sample_data data;
+	struct cpu_hw_events *cpuc;
+	struct pt_regs *regs;
+	int idx;
+
+	/*
+	 * Get and reset the IRQ flags
+	 */
+	pmnc = armv7_pmnc_getreset_flags();
+
+	/*
+	 * Did an overflow occur?
+	 */
+	if (!armv7_pmnc_has_overflowed(pmnc))
+		return IRQ_NONE;
+
+	/*
+	 * Handle the counter(s) overflow(s)
+	 */
+	regs = get_irq_regs();
+
+	data.addr = 0;
+
+	cpuc = &__get_cpu_var(cpu_hw_events);
+	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+		struct perf_event *event = cpuc->events[idx];
+		struct hw_perf_event *hwc;
+
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+
+		/*
+		 * We have a single interrupt for all counters. Check that
+		 * each counter has overflowed before we process it.
+		 */
+		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
+			continue;
+
+		hwc = &event->hw;
+		armpmu_event_update(event, hwc, idx);
+		data.period = event->hw.last_period;
+		if (!armpmu_event_set_period(event, hwc, idx))
+			continue;
+
+		if (perf_event_overflow(event, 0, &data, regs))
+			armpmu->disable(hwc, idx);
+	}
+
+	/*
+	 * Handle the pending perf events.
+	 *
+	 * Note: this call *must* be run with interrupts enabled. For
+	 * platforms that can have the PMU interrupts raised as a PMI, this
+	 * will not work.
+	 */
+	perf_event_do_pending();
+
+	return IRQ_HANDLED;
+}
+
+static void armv7pmu_start(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&pmu_lock, flags);
+	/* Enable all counters */
+	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
+	spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void armv7pmu_stop(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&pmu_lock, flags);
+	/* Disable all counters */
+	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
+	spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static inline int armv7_a8_pmu_event_map(int config)
+{
+	int mapping = armv7_a8_perf_map[config];
+	if (HW_OP_UNSUPPORTED == mapping)
+		mapping = -EOPNOTSUPP;
+	return mapping;
+}
+
+static inline int armv7_a9_pmu_event_map(int config)
+{
+	int mapping = armv7_a9_perf_map[config];
+	if (HW_OP_UNSUPPORTED == mapping)
+		mapping = -EOPNOTSUPP;
+	return mapping;
+}
+
+static u64 armv7pmu_raw_event(u64 config)
+{
+	return config & 0xff;
+}
+
+static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
+				  struct hw_perf_event *event)
+{
+	int idx;
+
+	/* Always place a cycle-count event into the cycle counter. */
+	if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
+		if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
+			return -EAGAIN;
+
+		return ARMV7_CYCLE_COUNTER;
+	} else {
+		/*
+		 * For anything other than a cycle counter, try and use
+		 * the events counters
+		 */
+		for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
+			if (!test_and_set_bit(idx, cpuc->used_mask))
+				return idx;
+		}
+
+		/* The counters are all in use. */
+		return -EAGAIN;
+	}
+}
+
+static struct arm_pmu armv7pmu = {
+	.handle_irq		= armv7pmu_handle_irq,
+	.enable			= armv7pmu_enable_event,
+	.disable		= armv7pmu_disable_event,
+	.raw_event		= armv7pmu_raw_event,
+	.read_counter		= armv7pmu_read_counter,
+	.write_counter		= armv7pmu_write_counter,
+	.get_event_idx		= armv7pmu_get_event_idx,
+	.start			= armv7pmu_start,
+	.stop			= armv7pmu_stop,
+	.max_period		= (1LLU << 32) - 1,
+};
+
+static u32 __init armv7_reset_read_pmnc(void)
+{
+	u32 nb_cnt;
+
+	/* Initialize & Reset PMNC: C and P bits */
+	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
+
+	/* Read the number of CNTx counters supported from PMNC */
+	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
+
+	/* Add the CPU cycles counter and return */
+	return nb_cnt + 1;
+}
+
+static int __init
+init_hw_perf_events(void)
+{
+	unsigned long cpuid = read_cpuid_id();
+	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
+	unsigned long part_number = (cpuid & 0xFFF0);
+
+	/* We only support ARM CPUs implemented by ARM at the moment. */
+	if (0x41 == implementor) {
+		switch (part_number) {
+		case 0xB360:	/* ARM1136 */
+		case 0xB560:	/* ARM1156 */
+		case 0xB760:	/* ARM1176 */
+			armpmu = &armv6pmu;
+			memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
+					sizeof(armv6_perf_cache_map));
+			perf_max_events	= armv6pmu.num_events;
+			break;
+		case 0xB020:	/* ARM11mpcore */
+			armpmu = &armv6mpcore_pmu;
+			memcpy(armpmu_perf_cache_map,
+			       armv6mpcore_perf_cache_map,
+			       sizeof(armv6mpcore_perf_cache_map));
+			perf_max_events = armv6mpcore_pmu.num_events;
+			break;
+		case 0xC080:	/* Cortex-A8 */
+			armv7pmu.name = ARMV7_PMU_CORTEX_A8_NAME;
+			memcpy(armpmu_perf_cache_map, armv7_a8_perf_cache_map,
+				sizeof(armv7_a8_perf_cache_map));
+			armv7pmu.event_map = armv7_a8_pmu_event_map;
+			armpmu = &armv7pmu;
+
+			/* Reset PMNC and read the number of CNTx counters
+			   supported */
+			armv7pmu.num_events = armv7_reset_read_pmnc();
+			perf_max_events = armv7pmu.num_events;
+			break;
+		case 0xC090:	/* Cortex-A9 */
+			armv7pmu.name = ARMV7_PMU_CORTEX_A9_NAME;
+			memcpy(armpmu_perf_cache_map, armv7_a9_perf_cache_map,
+				sizeof(armv7_a9_perf_cache_map));
+			armv7pmu.event_map = armv7_a9_pmu_event_map;
+			armpmu = &armv7pmu;
+
+			/* Reset PMNC and read the number of CNTx counters
+			   supported */
+			armv7pmu.num_events = armv7_reset_read_pmnc();
+			perf_max_events = armv7pmu.num_events;
+			break;
+		default:
+			pr_info("no hardware support available\n");
+			perf_max_events = -1;
+		}
+	}
+
+	if (armpmu)
+		pr_info("enabled with %s PMU driver, %d counters available\n",
+			armpmu->name, armpmu->num_events);
+
+	return 0;
+}
+arch_initcall(init_hw_perf_events);
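As a worked illustration of the CPUID decode above (assuming, purely for the example, a Cortex-A8 r3p2 part whose MIDR reads 0x413FC082): (cpuid & 0xFF000000) >> 24 gives 0x41, i.e. ARM Ltd., and cpuid & 0xFFF0 gives 0xC080, so the switch selects the Cortex-A8 case and installs armv7pmu.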
+
+/*
+ * Callchain handling code.
+ */
+static inline void
+callchain_store(struct perf_callchain_entry *entry,
+		u64 ip)
+{
+	if (entry->nr < PERF_MAX_STACK_DEPTH)
+		entry->ip[entry->nr++] = ip;
+}
+
+/*
+ * The registers we're interested in are at the end of the variable
+ * length saved register structure. The fp points at the end of this
+ * structure so the address of this struct is:
+ * (struct frame_tail *)(xxx->fp)-1
+ *
+ * This code has been adapted from the ARM OProfile support.
+ */
+struct frame_tail {
+	struct frame_tail   *fp;
+	unsigned long	    sp;
+	unsigned long	    lr;
+} __attribute__((packed));
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static struct frame_tail *
+user_backtrace(struct frame_tail *tail,
+	       struct perf_callchain_entry *entry)
+{
+	struct frame_tail buftail;
+
+	/* Also check accessibility of one struct frame_tail beyond */
+	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+		return NULL;
+	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
+		return NULL;
+
+	callchain_store(entry, buftail.lr);
+
+	/*
+	 * Frame pointers should strictly progress back up the stack
+	 * (towards higher addresses).
+	 */
+	if (tail >= buftail.fp)
+		return NULL;
+
+	return buftail.fp - 1;
+}
+
+static void
+perf_callchain_user(struct pt_regs *regs,
+		    struct perf_callchain_entry *entry)
+{
+	struct frame_tail *tail;
+
+	callchain_store(entry, PERF_CONTEXT_USER);
+
+	if (!user_mode(regs))
+		regs = task_pt_regs(current);
+
+	tail = (struct frame_tail *)regs->ARM_fp - 1;
+
+	while (tail && !((unsigned long)tail & 0x3))
+		tail = user_backtrace(tail, entry);
+}
+
+/*
+ * Gets called by walk_stackframe() for every stackframe. This will be called
+ * whilst unwinding the stackframe and is like a subroutine return so we use
+ * the PC.
+ */
+static int
+callchain_trace(struct stackframe *fr,
+		void *data)
+{
+	struct perf_callchain_entry *entry = data;
+	callchain_store(entry, fr->pc);
+	return 0;
+}
+
+static void
+perf_callchain_kernel(struct pt_regs *regs,
+		      struct perf_callchain_entry *entry)
+{
+	struct stackframe fr;
+
+	callchain_store(entry, PERF_CONTEXT_KERNEL);
+	fr.fp = regs->ARM_fp;
+	fr.sp = regs->ARM_sp;
+	fr.lr = regs->ARM_lr;
+	fr.pc = regs->ARM_pc;
+	walk_stackframe(&fr, callchain_trace, entry);
+}
+
+static void
+perf_do_callchain(struct pt_regs *regs,
+		  struct perf_callchain_entry *entry)
+{
+	int is_user;
+
+	if (!regs)
+		return;
+
+	is_user = user_mode(regs);
+
+	if (!current || !current->pid)
+		return;
+
+	if (is_user && current->state != TASK_RUNNING)
+		return;
+
+	if (!is_user)
+		perf_callchain_kernel(regs, entry);
+
+	if (current->mm)
+		perf_callchain_user(regs, entry);
+}
+
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
+
+struct perf_callchain_entry *
+perf_callchain(struct pt_regs *regs)
+{
+	struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
+
+	entry->nr = 0;
+	perf_do_callchain(regs, entry);
+	return entry;
+}
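The driver above slots into the generic perf_events core, so it is exercised through the usual perf_event_open(2) path rather than any ARM-specific interface. A minimal user-space sketch (an illustration only, assuming the generic syscall wrapper and the PERF_COUNT_HW_CPU_CYCLES event that the maps above translate to the hardware cycle counter):

/* Illustrative user-space sketch; not part of this patch. */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type	= PERF_TYPE_HARDWARE;
	attr.size	= sizeof(attr);
	attr.config	= PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled	= 1;
	/* Note: no exclude_* bits; __hw_perf_event_init() above rejects them. */

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* current task, any CPU */
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}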

+ 103 - 0
arch/arm/kernel/pmu.c

@@ -0,0 +1,103 @@
+/*
+ *  linux/arch/arm/kernel/pmu.c
+ *
+ *  Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <asm/pmu.h>
+
+/*
+ * Define the IRQs for the system. We could use something like a platform
+ * device but that seems fairly heavyweight for this. Also, the performance
+ * counters can't be removed or hotplugged.
+ *
+ * Ordering is important: init_pmu() will use the ordering to set the affinity
+ * to the corresponding core. e.g. the first interrupt will go to cpu 0, the
+ * second goes to cpu 1 etc.
+ */
+static const int irqs[] = {
+#if defined(CONFIG_ARCH_OMAP2)
+	3,
+#elif defined(CONFIG_ARCH_BCMRING)
+	IRQ_PMUIRQ,
+#elif defined(CONFIG_MACH_REALVIEW_EB)
+	IRQ_EB11MP_PMU_CPU0,
+	IRQ_EB11MP_PMU_CPU1,
+	IRQ_EB11MP_PMU_CPU2,
+	IRQ_EB11MP_PMU_CPU3,
+#elif defined(CONFIG_ARCH_OMAP3)
+	INT_34XX_BENCH_MPU_EMUL,
+#elif defined(CONFIG_ARCH_IOP32X)
+	IRQ_IOP32X_CORE_PMU,
+#elif defined(CONFIG_ARCH_IOP33X)
+	IRQ_IOP33X_CORE_PMU,
+#elif defined(CONFIG_ARCH_PXA)
+	IRQ_PMU,
+#endif
+};
+
+static const struct pmu_irqs pmu_irqs = {
+	.irqs	    = irqs,
+	.num_irqs   = ARRAY_SIZE(irqs),
+};
+
+static volatile long pmu_lock;
+
+const struct pmu_irqs *
+reserve_pmu(void)
+{
+	return test_and_set_bit_lock(0, &pmu_lock) ? ERR_PTR(-EBUSY) :
+		&pmu_irqs;
+}
+EXPORT_SYMBOL_GPL(reserve_pmu);
+
+int
+release_pmu(const struct pmu_irqs *irqs)
+{
+	if (WARN_ON(irqs != &pmu_irqs))
+		return -EINVAL;
+	clear_bit_unlock(0, &pmu_lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(release_pmu);
+
+static int
+set_irq_affinity(int irq,
+		 unsigned int cpu)
+{
+#ifdef CONFIG_SMP
+	int err = irq_set_affinity(irq, cpumask_of(cpu));
+	if (err)
+		pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+			   irq, cpu);
+	return err;
+#else
+	return 0;
+#endif
+}
+
+int
+init_pmu(void)
+{
+	int i, err = 0;
+
+	for (i = 0; i < pmu_irqs.num_irqs; ++i) {
+		err = set_irq_affinity(pmu_irqs.irqs[i], i);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(init_pmu);
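The reserve_pmu()/init_pmu()/release_pmu() trio above is what armpmu_reserve_hardware() in perf_event.c uses; as a rough sketch, any other in-kernel client would be expected to follow the same claim/route/release sequence (the names below are hypothetical and not part of this patch):

/* Hypothetical client of the pmu.c interface above; illustration only. */
#include <linux/err.h>
#include <asm/pmu.h>

static const struct pmu_irqs *my_pmu_irqs;

static int my_profiler_start(void)
{
	my_pmu_irqs = reserve_pmu();	/* ERR_PTR(-EBUSY) if already claimed */
	if (IS_ERR(my_pmu_irqs))
		return PTR_ERR(my_pmu_irqs);

	init_pmu();			/* route irqs[i] to CPU i */

	/* request_irq() each my_pmu_irqs->irqs[i] and program the counters. */
	return 0;
}

static void my_profiler_stop(void)
{
	/* free_irq() each my_pmu_irqs->irqs[i] first. */
	release_pmu(my_pmu_irqs);
	my_pmu_irqs = NULL;
}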

+ 35 - 18
arch/arm/kernel/ptrace.c

@@ -499,10 +499,41 @@ static struct undef_hook thumb_break_hook = {
 	.fn		= break_trap,
 };
 
+static int thumb2_break_trap(struct pt_regs *regs, unsigned int instr)
+{
+	unsigned int instr2;
+	void __user *pc;
+
+	/* Check the second half of the instruction.  */
+	pc = (void __user *)(instruction_pointer(regs) + 2);
+
+	if (processor_mode(regs) == SVC_MODE) {
+		instr2 = *(u16 *) pc;
+	} else {
+		get_user(instr2, (u16 __user *)pc);
+	}
+
+	if (instr2 == 0xa000) {
+		ptrace_break(current, regs);
+		return 0;
+	} else {
+		return 1;
+	}
+}
+
+static struct undef_hook thumb2_break_hook = {
+	.instr_mask	= 0xffff,
+	.instr_val	= 0xf7f0,
+	.cpsr_mask	= PSR_T_BIT,
+	.cpsr_val	= PSR_T_BIT,
+	.fn		= thumb2_break_trap,
+};
+
 static int __init ptrace_break_init(void)
 {
 	register_undef_hook(&arm_break_hook);
 	register_undef_hook(&thumb_break_hook);
+	register_undef_hook(&thumb2_break_hook);
 	return 0;
 }
 
@@ -669,7 +700,7 @@ static int ptrace_getvfpregs(struct task_struct *tsk, void __user *data)
 	union vfp_state *vfp = &thread->vfpstate;
 	struct user_vfp __user *ufp = data;
 
-	vfp_sync_state(thread);
+	vfp_sync_hwstate(thread);
 
 	/* copy the floating point registers */
 	if (copy_to_user(&ufp->fpregs, &vfp->hard.fpregs,
@@ -692,7 +723,7 @@ static int ptrace_setvfpregs(struct task_struct *tsk, void __user *data)
 	union vfp_state *vfp = &thread->vfpstate;
 	struct user_vfp __user *ufp = data;
 
-	vfp_sync_state(thread);
+	vfp_sync_hwstate(thread);
 
 	/* copy the floating point registers */
 	if (copy_from_user(&vfp->hard.fpregs, &ufp->fpregs,
@@ -703,6 +734,8 @@ static int ptrace_setvfpregs(struct task_struct *tsk, void __user *data)
 	if (get_user(vfp->hard.fpscr, &ufp->fpscr))
 		return -EFAULT;
 
+	vfp_flush_hwstate(thread);
+
 	return 0;
 }
 #endif
@@ -712,26 +745,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 	int ret;
 
 	switch (request) {
-		/*
-		 * read word at location "addr" in the child process.
-		 */
-		case PTRACE_PEEKTEXT:
-		case PTRACE_PEEKDATA:
-			ret = generic_ptrace_peekdata(child, addr, data);
-			break;
-
 		case PTRACE_PEEKUSR:
 			ret = ptrace_read_user(child, addr, (unsigned long __user *)data);
 			break;
 
-		/*
-		 * write the word at location addr.
-		 */
-		case PTRACE_POKETEXT:
-		case PTRACE_POKEDATA:
-			ret = generic_ptrace_pokedata(child, addr, data);
-			break;
-
 		case PTRACE_POKEUSR:
 			ret = ptrace_write_user(child, addr, data);
 			break;

+ 32 - 48
arch/arm/kernel/setup.c

@@ -24,6 +24,7 @@
 #include <linux/interrupt.h>
 #include <linux/smp.h>
 #include <linux/fs.h>
+#include <linux/proc_fs.h>
 
 #include <asm/unified.h>
 #include <asm/cpu.h>
@@ -102,6 +103,7 @@ struct cpu_cache_fns cpu_cache;
 #endif
 #ifdef CONFIG_OUTER_CACHE
 struct outer_cache_fns outer_cache;
+EXPORT_SYMBOL(outer_cache);
 #endif
 
 struct stack {
@@ -117,7 +119,7 @@ EXPORT_SYMBOL(elf_platform);
 
 static const char *cpu_name;
 static const char *machine_name;
-static char __initdata command_line[COMMAND_LINE_SIZE];
+static char __initdata cmd_line[COMMAND_LINE_SIZE];
 
 static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
@@ -417,10 +419,11 @@ static int __init arm_add_memory(unsigned long start, unsigned long size)
  * Pick out the memory size.  We look for mem=size@start,
  * where start and size are "size[KkMm]"
  */
-static void __init early_mem(char **p)
+static int __init early_mem(char *p)
 {
 	static int usermem __initdata = 0;
 	unsigned long size, start;
+	char *endp;
 
 	/*
 	 * If the user specifies memory size, we
@@ -433,52 +436,15 @@ static void __init early_mem(char **p)
 	}
 
 	start = PHYS_OFFSET;
-	size  = memparse(*p, p);
-	if (**p == '@')
-		start = memparse(*p + 1, p);
+	size  = memparse(p, &endp);
+	if (*endp == '@')
+		start = memparse(endp + 1, NULL);
 
 	arm_add_memory(start, size);
-}
-__early_param("mem=", early_mem);
 
-/*
- * Initial parsing of the command line.
- */
-static void __init parse_cmdline(char **cmdline_p, char *from)
-{
-	char c = ' ', *to = command_line;
-	int len = 0;
-
-	for (;;) {
-		if (c == ' ') {
-			extern struct early_params __early_begin, __early_end;
-			struct early_params *p;
-
-			for (p = &__early_begin; p < &__early_end; p++) {
-				int arglen = strlen(p->arg);
-
-				if (memcmp(from, p->arg, arglen) == 0) {
-					if (to != command_line)
-						to -= 1;
-					from += arglen;
-					p->fn(&from);
-
-					while (*from != ' ' && *from != '\0')
-						from++;
-					break;
-				}
-			}
-		}
-		c = *from++;
-		if (!c)
-			break;
-		if (COMMAND_LINE_SIZE <= ++len)
-			break;
-		*to++ = c;
-	}
-	*to = '\0';
-	*cmdline_p = command_line;
+	return 0;
 }
+early_param("mem", early_mem);
 
 static void __init
 setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
@@ -739,9 +705,15 @@ void __init setup_arch(char **cmdline_p)
 	init_mm.end_data   = (unsigned long) _edata;
 	init_mm.brk	   = (unsigned long) _end;
 
-	memcpy(boot_command_line, from, COMMAND_LINE_SIZE);
-	boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
-	parse_cmdline(cmdline_p, from);
+	/* parse_early_param needs a boot_command_line */
+	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);
+
+	/* populate cmd_line too for later use, preserving boot_command_line */
+	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
+	*cmdline_p = cmd_line;
+
+	parse_early_param();
+
 	paging_init(mdesc);
 	request_standard_resources(&meminfo, mdesc);
 
@@ -782,9 +754,21 @@ static int __init topology_init(void)
 
 	return 0;
 }
-
 subsys_initcall(topology_init);
 
+#ifdef CONFIG_HAVE_PROC_CPU
+static int __init proc_cpu_init(void)
+{
+	struct proc_dir_entry *res;
+
+	res = proc_mkdir("cpu", NULL);
+	if (!res)
+		return -ENOMEM;
+	return 0;
+}
+fs_initcall(proc_cpu_init);
+#endif
+
 static const char *hwcap_str[] = {
 	"swp",
 	"half",

+ 0 - 178
arch/arm/kernel/time.c

@@ -10,11 +10,6 @@
  *
  *  This file contains the ARM-specific time handling details:
  *  reading the RTC at bootup, etc...
- *
- *  1994-07-02  Alan Modra
- *              fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
- *  1998-12-20  Updated NTP code according to technical memorandum Jan '96
- *              "A Kernel Model for Precision Timekeeping" by Dave Mills
  */
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -77,11 +72,6 @@ unsigned long profile_pc(struct pt_regs *regs)
 EXPORT_SYMBOL(profile_pc);
 #endif
 
-/*
- * hook for setting the RTC's idea of the current time.
- */
-int (*set_rtc)(void);
-
 #ifndef CONFIG_GENERIC_TIME
 static unsigned long dummy_gettimeoffset(void)
 {
@@ -89,140 +79,6 @@ static unsigned long dummy_gettimeoffset(void)
 }
 #endif
 
-static unsigned long next_rtc_update;
-
-/*
- * If we have an externally synchronized linux clock, then update
- * CMOS clock accordingly every ~11 minutes.  set_rtc() has to be
- * called as close as possible to 500 ms before the new second
- * starts.
- */
-static inline void do_set_rtc(void)
-{
-	if (!ntp_synced() || set_rtc == NULL)
-		return;
-
-	if (next_rtc_update &&
-	    time_before((unsigned long)xtime.tv_sec, next_rtc_update))
-		return;
-
-	if (xtime.tv_nsec < 500000000 - ((unsigned) tick_nsec >> 1) &&
-	    xtime.tv_nsec >= 500000000 + ((unsigned) tick_nsec >> 1))
-		return;
-
-	if (set_rtc())
-		/*
-		 * rtc update failed.  Try again in 60s
-		 */
-		next_rtc_update = xtime.tv_sec + 60;
-	else
-		next_rtc_update = xtime.tv_sec + 660;
-}
-
-#ifdef CONFIG_LEDS
-
-static void dummy_leds_event(led_event_t evt)
-{
-}
-
-void (*leds_event)(led_event_t) = dummy_leds_event;
-
-struct leds_evt_name {
-	const char	name[8];
-	int		on;
-	int		off;
-};
-
-static const struct leds_evt_name evt_names[] = {
-	{ "amber", led_amber_on, led_amber_off },
-	{ "blue",  led_blue_on,  led_blue_off  },
-	{ "green", led_green_on, led_green_off },
-	{ "red",   led_red_on,   led_red_off   },
-};
-
-static ssize_t leds_store(struct sys_device *dev,
-			struct sysdev_attribute *attr,
-			const char *buf, size_t size)
-{
-	int ret = -EINVAL, len = strcspn(buf, " ");
-
-	if (len > 0 && buf[len] == '\0')
-		len--;
-
-	if (strncmp(buf, "claim", len) == 0) {
-		leds_event(led_claim);
-		ret = size;
-	} else if (strncmp(buf, "release", len) == 0) {
-		leds_event(led_release);
-		ret = size;
-	} else {
-		int i;
-
-		for (i = 0; i < ARRAY_SIZE(evt_names); i++) {
-			if (strlen(evt_names[i].name) != len ||
-			    strncmp(buf, evt_names[i].name, len) != 0)
-				continue;
-			if (strncmp(buf+len, " on", 3) == 0) {
-				leds_event(evt_names[i].on);
-				ret = size;
-			} else if (strncmp(buf+len, " off", 4) == 0) {
-				leds_event(evt_names[i].off);
-				ret = size;
-			}
-			break;
-		}
-	}
-	return ret;
-}
-
-static SYSDEV_ATTR(event, 0200, NULL, leds_store);
-
-static int leds_suspend(struct sys_device *dev, pm_message_t state)
-{
-	leds_event(led_stop);
-	return 0;
-}
-
-static int leds_resume(struct sys_device *dev)
-{
-	leds_event(led_start);
-	return 0;
-}
-
-static int leds_shutdown(struct sys_device *dev)
-{
-	leds_event(led_halted);
-	return 0;
-}
-
-static struct sysdev_class leds_sysclass = {
-	.name		= "leds",
-	.shutdown	= leds_shutdown,
-	.suspend	= leds_suspend,
-	.resume		= leds_resume,
-};
-
-static struct sys_device leds_device = {
-	.id		= 0,
-	.cls		= &leds_sysclass,
-};
-
-static int __init leds_init(void)
-{
-	int ret;
-	ret = sysdev_class_register(&leds_sysclass);
-	if (ret == 0)
-		ret = sysdev_register(&leds_device);
-	if (ret == 0)
-		ret = sysdev_create_file(&leds_device, &attr_event);
-	return ret;
-}
-
-device_initcall(leds_init);
-
-EXPORT_SYMBOL(leds_event);
-#endif
-
 #ifdef CONFIG_LEDS_TIMER
 static inline void do_leds(void)
 {
@@ -295,39 +151,6 @@ int do_settimeofday(struct timespec *tv)
 EXPORT_SYMBOL(do_settimeofday);
 #endif /* !CONFIG_GENERIC_TIME */
 
-/**
- * save_time_delta - Save the offset between system time and RTC time
- * @delta: pointer to timespec to store delta
- * @rtc: pointer to timespec for current RTC time
- *
- * Return a delta between the system time and the RTC time, such
- * that system time can be restored later with restore_time_delta()
- */
-void save_time_delta(struct timespec *delta, struct timespec *rtc)
-{
-	set_normalized_timespec(delta,
-				xtime.tv_sec - rtc->tv_sec,
-				xtime.tv_nsec - rtc->tv_nsec);
-}
-EXPORT_SYMBOL(save_time_delta);
-
-/**
- * restore_time_delta - Restore the current system time
- * @delta: delta returned by save_time_delta()
- * @rtc: pointer to timespec for current RTC time
- */
-void restore_time_delta(struct timespec *delta, struct timespec *rtc)
-{
-	struct timespec ts;
-
-	set_normalized_timespec(&ts,
-				delta->tv_sec + rtc->tv_sec,
-				delta->tv_nsec + rtc->tv_nsec);
-
-	do_settimeofday(&ts);
-}
-EXPORT_SYMBOL(restore_time_delta);
-
 #ifndef CONFIG_GENERIC_CLOCKEVENTS
 /*
  * Kernel system timer support.
@@ -336,7 +159,6 @@ void timer_tick(void)
 {
 	profile_tick(CPU_PROFILING);
 	do_leds();
-	do_set_rtc();
 	write_seqlock(&xtime_lock);
 	do_timer(1);
 	write_sequnlock(&xtime_lock);

+ 25 - 10
arch/arm/kernel/traps.c

@@ -12,15 +12,17 @@
  *  'linux/arch/arm/lib/traps.S'.  Mostly a debugging aid, but will probably
  *  kill the offending process.
  */
-#include <linux/module.h>
 #include <linux/signal.h>
-#include <linux/spinlock.h>
 #include <linux/personality.h>
 #include <linux/kallsyms.h>
-#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
 #include <linux/hardirq.h>
+#include <linux/kdebug.h>
+#include <linux/module.h>
+#include <linux/kexec.h>
+#include <linux/delay.h>
 #include <linux/init.h>
-#include <linux/uaccess.h>
 
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
@@ -224,14 +226,21 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
 #define S_SMP ""
 #endif
 
-static void __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs)
+static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs)
 {
 	struct task_struct *tsk = thread->task;
 	static int die_counter;
+	int ret;
 
 	printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
 	       str, err, ++die_counter);
 	sysfs_printk_last_file();
+
+	/* trap and error numbers are mostly meaningless on ARM */
+	ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
+	if (ret == NOTIFY_STOP)
+		return ret;
+
 	print_modules();
 	__show_regs(regs);
 	printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
@@ -243,6 +252,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
 		dump_backtrace(regs, tsk);
 		dump_instr(KERN_EMERG, regs);
 	}
+
+	return ret;
 }
 
 DEFINE_SPINLOCK(die_lock);
@@ -250,16 +261,21 @@ DEFINE_SPINLOCK(die_lock);
 /*
  * This function is protected against re-entrancy.
  */
-NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
+void die(const char *str, struct pt_regs *regs, int err)
 {
 	struct thread_info *thread = current_thread_info();
+	int ret;
 
 	oops_enter();
 
 	spin_lock_irq(&die_lock);
 	console_verbose();
 	bust_spinlocks(1);
-	__die(str, err, thread, regs);
+	ret = __die(str, err, thread, regs);
+
+	if (regs && kexec_should_crash(thread->task))
+		crash_kexec(regs);
+
 	bust_spinlocks(0);
 	add_taint(TAINT_DIE);
 	spin_unlock_irq(&die_lock);
@@ -267,11 +283,10 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
 
 	if (in_interrupt())
 		panic("Fatal exception in interrupt");
-
 	if (panic_on_oops)
 		panic("Fatal exception");
-
-	do_exit(SIGSEGV);
+	if (ret != NOTIFY_STOP)
+		do_exit(SIGSEGV);
 }
 
 void arm_notify_die(const char *str, struct pt_regs *regs,
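
With __die() now raising DIE_OOPS on the generic die notifier chain, other code can observe ARM oopses without hooking the ARM code directly. A minimal sketch of such a consumer (nothing here is part of this diff; register_die_notifier() and struct die_args come from <linux/kdebug.h>, and the example_* names are made up):

	#include <linux/kdebug.h>
	#include <linux/notifier.h>
	#include <linux/kernel.h>
	#include <linux/init.h>

	static int example_oops_event(struct notifier_block *nb,
				      unsigned long action, void *data)
	{
		struct die_args *args = data;

		if (action == DIE_OOPS)
			printk(KERN_EMERG "oops observed: %s (err %ld)\n",
			       args->str, args->err);

		return NOTIFY_DONE;	/* do not stop die() from completing */
	}

	static struct notifier_block example_oops_nb = {
		.notifier_call = example_oops_event,
	};

	static int __init example_oops_init(void)
	{
		return register_die_notifier(&example_oops_nb);
	}
	arch_initcall(example_oops_init);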

+ 0 - 4
arch/arm/kernel/vmlinux.lds.S

@@ -43,10 +43,6 @@ SECTIONS
 
 		INIT_SETUP(16)
 
-		__early_begin = .;
-			*(.early_param.init)
-		__early_end = .;
-
 		INIT_CALLS
 		CON_INITCALL
 		SECURITY_INITCALL

+ 23 - 0
arch/arm/mach-at91/Kconfig

@@ -89,6 +89,12 @@ config ARCH_AT91CAP9
 	select GENERIC_CLOCKEVENTS
 	select HAVE_FB_ATMEL
 
+config ARCH_AT572D940HF
+	bool "AT572D940HF"
+	select CPU_ARM926T
+	select GENERIC_TIME
+	select GENERIC_CLOCKEVENTS
+
 config ARCH_AT91X40
 	bool "AT91x40"
 
@@ -390,6 +396,23 @@ endif
 
 # ----------------------------------------------------------
 
+if ARCH_AT572D940HF
+
+comment "AT572D940HF Board Type"
+
+config MACH_AT572D940HFEB
+	bool "AT572D940HF-EK"
+	depends on ARCH_AT572D940HF
+	select HAVE_AT91_DATAFLASH_CARD
+	select HAVE_NAND_ATMEL_BUSWIDTH_16
+	help
+	  Select this if you are using Atmel's AT572D940HF-EK evaluation kit.
+	  <http://www.atmel.com/products/diopsis/default.asp>
+
+endif
+
+# ----------------------------------------------------------
+
 if ARCH_AT91X40
 
 comment "AT91X40 Board Type"

+ 4 - 0
arch/arm/mach-at91/Makefile

@@ -19,6 +19,7 @@ obj-$(CONFIG_ARCH_AT91SAM9RL)	+= at91sam9rl.o at91sam926x_time.o at91sam9rl_devi
 obj-$(CONFIG_ARCH_AT91SAM9G20)	+= at91sam9260.o at91sam926x_time.o at91sam9260_devices.o  sam9_smc.o
  obj-$(CONFIG_ARCH_AT91SAM9G45)	+= at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91CAP9)	+= at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o
+obj-$(CONFIG_ARCH_AT572D940HF)  += at572d940hf.o at91sam926x_time.o at572d940hf_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91X40)	+= at91x40.o at91x40_time.o
 
 # AT91RM9200 board-specific support
@@ -69,6 +70,9 @@ obj-$(CONFIG_MACH_AT91SAM9G45EKES) += board-sam9m10g45ek.o
 # AT91CAP9 board-specific support
 obj-$(CONFIG_MACH_AT91CAP9ADK)	+= board-cap9adk.o
 
+# AT572D940HF board-specific support
+obj-$(CONFIG_MACH_AT572D940HFEB) += board-at572d940hf_ek.o
+
 # AT91X40 board-specific support
 obj-$(CONFIG_MACH_AT91EB01)	+= board-eb01.o
 

+ 377 - 0
arch/arm/mach-at91/at572d940hf.c

@@ -0,0 +1,377 @@
+/*
+ * arch/arm/mach-at91/at572d940hf.c
+ *
+ * Antonio R. Costa <costa.antonior@gmail.com>
+ * Copyright (C) 2008 Atmel
+ *
+ * Copyright (C) 2005 SAN People
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/module.h>
+
+#include <asm/mach/irq.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <mach/at572d940hf.h>
+#include <mach/at91_pmc.h>
+#include <mach/at91_rstc.h>
+
+#include "generic.h"
+#include "clock.h"
+
+static struct map_desc at572d940hf_io_desc[] __initdata = {
+	{
+		.virtual	= AT91_VA_BASE_SYS,
+		.pfn		= __phys_to_pfn(AT91_BASE_SYS),
+		.length		= SZ_16K,
+		.type		= MT_DEVICE,
+	}, {
+		.virtual	= AT91_IO_VIRT_BASE - AT572D940HF_SRAM_SIZE,
+		.pfn		= __phys_to_pfn(AT572D940HF_SRAM_BASE),
+		.length		= AT572D940HF_SRAM_SIZE,
+		.type		= MT_DEVICE,
+	},
+};
+
+/* --------------------------------------------------------------------
+ *  Clocks
+ * -------------------------------------------------------------------- */
+
+/*
+ * The peripheral clocks.
+ */
+static struct clk pioA_clk = {
+	.name		= "pioA_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_PIOA,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+static struct clk pioB_clk = {
+	.name		= "pioB_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_PIOB,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+static struct clk pioC_clk = {
+	.name		= "pioC_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_PIOC,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+static struct clk macb_clk = {
+	.name		= "macb_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_EMAC,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+static struct clk usart0_clk = {
+	.name		= "usart0_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_US0,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+static struct clk usart1_clk = {
+	.name		= "usart1_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_US1,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+static struct clk usart2_clk = {
+	.name		= "usart2_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_US2,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+static struct clk mmc_clk = {
+	.name		= "mci_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_MCI,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+static struct clk udc_clk = {
+	.name		= "udc_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_UDP,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+static struct clk twi0_clk = {
+	.name		= "twi0_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_TWI0,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+static struct clk spi0_clk = {
+	.name		= "spi0_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_SPI0,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+static struct clk spi1_clk = {
+	.name		= "spi1_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_SPI1,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+static struct clk ssc0_clk = {
+	.name		= "ssc0_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_SSC0,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+static struct clk ssc1_clk = {
+	.name		= "ssc1_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_SSC1,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+static struct clk ssc2_clk = {
+	.name		= "ssc2_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_SSC2,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+static struct clk tc0_clk = {
+	.name		= "tc0_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_TC0,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+static struct clk tc1_clk = {
+	.name		= "tc1_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_TC1,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+static struct clk tc2_clk = {
+	.name		= "tc2_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_TC2,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+static struct clk ohci_clk = {
+	.name		= "ohci_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_UHP,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+static struct clk ssc3_clk = {
+	.name		= "ssc3_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_SSC3,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+static struct clk twi1_clk = {
+	.name		= "twi1_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_TWI1,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+static struct clk can0_clk = {
+	.name		= "can0_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_CAN0,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+static struct clk can1_clk = {
+	.name		= "can1_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_CAN1,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+static struct clk mAgicV_clk = {
+	.name		= "mAgicV_clk",
+	.pmc_mask	= 1 << AT572D940HF_ID_MSIRQ0,
+	.type		= CLK_TYPE_PERIPHERAL,
+};
+
+
+static struct clk *periph_clocks[] __initdata = {
+	&pioA_clk,
+	&pioB_clk,
+	&pioC_clk,
+	&macb_clk,
+	&usart0_clk,
+	&usart1_clk,
+	&usart2_clk,
+	&mmc_clk,
+	&udc_clk,
+	&twi0_clk,
+	&spi0_clk,
+	&spi1_clk,
+	&ssc0_clk,
+	&ssc1_clk,
+	&ssc2_clk,
+	&tc0_clk,
+	&tc1_clk,
+	&tc2_clk,
+	&ohci_clk,
+	&ssc3_clk,
+	&twi1_clk,
+	&can0_clk,
+	&can1_clk,
+	&mAgicV_clk,
+	/* irq0 .. irq2 */
+};
+
+/*
+ * The five programmable clocks.
+ * You must configure pin multiplexing to bring these signals out.
+ */
+static struct clk pck0 = {
+	.name		= "pck0",
+	.pmc_mask	= AT91_PMC_PCK0,
+	.type		= CLK_TYPE_PROGRAMMABLE,
+	.id		= 0,
+};
+static struct clk pck1 = {
+	.name		= "pck1",
+	.pmc_mask	= AT91_PMC_PCK1,
+	.type		= CLK_TYPE_PROGRAMMABLE,
+	.id		= 1,
+};
+static struct clk pck2 = {
+	.name		= "pck2",
+	.pmc_mask	= AT91_PMC_PCK2,
+	.type		= CLK_TYPE_PROGRAMMABLE,
+	.id		= 2,
+};
+static struct clk pck3 = {
+	.name		= "pck3",
+	.pmc_mask	= AT91_PMC_PCK3,
+	.type		= CLK_TYPE_PROGRAMMABLE,
+	.id		= 3,
+};
+
+static struct clk mAgicV_mem_clk = {
+	.name		= "mAgicV_mem_clk",
+	.pmc_mask	= AT91_PMC_PCK4,
+	.type		= CLK_TYPE_PROGRAMMABLE,
+	.id		= 4,
+};
+
+/* HClocks */
+static struct clk hck0 = {
+	.name		= "hck0",
+	.pmc_mask	= AT91_PMC_HCK0,
+	.type		= CLK_TYPE_SYSTEM,
+	.id		= 0,
+};
+static struct clk hck1 = {
+	.name		= "hck1",
+	.pmc_mask	= AT91_PMC_HCK1,
+	.type		= CLK_TYPE_SYSTEM,
+	.id		= 1,
+};
+
+static void __init at572d940hf_register_clocks(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
+		clk_register(periph_clocks[i]);
+
+	clk_register(&pck0);
+	clk_register(&pck1);
+	clk_register(&pck2);
+	clk_register(&pck3);
+	clk_register(&mAgicV_mem_clk);
+
+	clk_register(&hck0);
+	clk_register(&hck1);
+}
+
+/* --------------------------------------------------------------------
+ *  GPIO
+ * -------------------------------------------------------------------- */
+
+static struct at91_gpio_bank at572d940hf_gpio[] = {
+	{
+		.id		= AT572D940HF_ID_PIOA,
+		.offset		= AT91_PIOA,
+		.clock		= &pioA_clk,
+	}, {
+		.id		= AT572D940HF_ID_PIOB,
+		.offset		= AT91_PIOB,
+		.clock		= &pioB_clk,
+	}, {
+		.id		= AT572D940HF_ID_PIOC,
+		.offset		= AT91_PIOC,
+		.clock		= &pioC_clk,
+	}
+};
+
+static void at572d940hf_reset(void)
+{
+	at91_sys_write(AT91_RSTC_CR, AT91_RSTC_KEY | AT91_RSTC_PROCRST | AT91_RSTC_PERRST);
+}
+
+
+/* --------------------------------------------------------------------
+ *  AT572D940HF processor initialization
+ * -------------------------------------------------------------------- */
+
+void __init at572d940hf_initialize(unsigned long main_clock)
+{
+	/* Map peripherals */
+	iotable_init(at572d940hf_io_desc, ARRAY_SIZE(at572d940hf_io_desc));
+
+	at91_arch_reset = at572d940hf_reset;
+	at91_extern_irq = (1 << AT572D940HF_ID_IRQ0) | (1 << AT572D940HF_ID_IRQ1)
+			| (1 << AT572D940HF_ID_IRQ2);
+
+	/* Init clock subsystem */
+	at91_clock_init(main_clock);
+
+	/* Register the processor-specific clocks */
+	at572d940hf_register_clocks();
+
+	/* Register GPIO subsystem */
+	at91_gpio_init(at572d940hf_gpio, 3);
+}
+
+/* --------------------------------------------------------------------
+ *  Interrupt initialization
+ * -------------------------------------------------------------------- */
+
+/*
+ * The default interrupt priority levels (0 = lowest, 7 = highest).
+ */
+static unsigned int at572d940hf_default_irq_priority[NR_AIC_IRQS] __initdata = {
+	7,	/* Advanced Interrupt Controller */
+	7,	/* System Peripherals */
+	0,	/* Parallel IO Controller A */
+	0,	/* Parallel IO Controller B */
+	0,	/* Parallel IO Controller C */
+	3,	/* Ethernet */
+	6,	/* USART 0 */
+	6,	/* USART 1 */
+	6,	/* USART 2 */
+	0,	/* Multimedia Card Interface */
+	4,	/* USB Device Port */
+	0,	/* Two-Wire Interface 0 */
+	6,	/* Serial Peripheral Interface 0 */
+	6,	/* Serial Peripheral Interface 1 */
+	5,	/* Serial Synchronous Controller 0 */
+	5,	/* Serial Synchronous Controller 1 */
+	5,	/* Serial Synchronous Controller 2 */
+	0,	/* Timer Counter 0 */
+	0,	/* Timer Counter 1 */
+	0,	/* Timer Counter 2 */
+	3,	/* USB Host port */
+	3,	/* Serial Synchronous Controller 3 */
+	0,	/* Two-Wire Interface 1 */
+	0,	/* CAN Controller 0 */
+	0,	/* CAN Controller 1 */
+	0,	/* mAgicV HALT line */
+	0,	/* mAgicV SIRQ0 line */
+	0,	/* mAgicV exception line */
+	0,	/* mAgicV end of DMA line */
+	0,	/* Advanced Interrupt Controller */
+	0,	/* Advanced Interrupt Controller */
+	0,	/* Advanced Interrupt Controller */
+};
+
+void __init at572d940hf_init_interrupts(unsigned int priority[NR_AIC_IRQS])
+{
+	if (!priority)
+		priority = at572d940hf_default_irq_priority;
+
+	/* Initialize the AIC interrupt controller */
+	at91_aic_init(priority);
+
+	/* Enable GPIO interrupts */
+	at91_gpio_irq_setup();
+}
+

+ 970 - 0
arch/arm/mach-at91/at572d940hf_devices.c

@@ -0,0 +1,970 @@
+/*
+ * arch/arm/mach-at91/at572d940hf_devices.c
+ *
+ * Copyright (C) 2008 Atmel Antonio R. Costa <costa.antonior@gmail.com>
+ * Copyright (C) 2005 Thibaut VARENE <varenet@parisc-linux.org>
+ * Copyright (C) 2005 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+
+#include <mach/board.h>
+#include <mach/gpio.h>
+#include <mach/at572d940hf.h>
+#include <mach/at572d940hf_matrix.h>
+#include <mach/at91sam9_smc.h>
+
+#include "generic.h"
+#include "sam9_smc.h"
+
+
+/* --------------------------------------------------------------------
+ *  USB Host
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+static u64 ohci_dmamask = DMA_BIT_MASK(32);
+static struct at91_usbh_data usbh_data;
+
+static struct resource usbh_resources[] = {
+	[0] = {
+		.start	= AT572D940HF_UHP_BASE,
+		.end	= AT572D940HF_UHP_BASE + SZ_1M - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= AT572D940HF_ID_UHP,
+		.end	= AT572D940HF_ID_UHP,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device at572d940hf_usbh_device = {
+	.name		= "at91_ohci",
+	.id		= -1,
+	.dev		= {
+				.dma_mask		= &ohci_dmamask,
+				.coherent_dma_mask	= DMA_BIT_MASK(32),
+				.platform_data		= &usbh_data,
+	},
+	.resource	= usbh_resources,
+	.num_resources	= ARRAY_SIZE(usbh_resources),
+};
+
+void __init at91_add_device_usbh(struct at91_usbh_data *data)
+{
+	if (!data)
+		return;
+
+	usbh_data = *data;
+	platform_device_register(&at572d940hf_usbh_device);
+
+}
+#else
+void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ *  USB Device (Gadget)
+ * -------------------------------------------------------------------- */
+
+#ifdef CONFIG_USB_GADGET_AT91
+static struct at91_udc_data udc_data;
+
+static struct resource udc_resources[] = {
+	[0] = {
+		.start	= AT572D940HF_BASE_UDP,
+		.end	= AT572D940HF_BASE_UDP + SZ_16K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= AT572D940HF_ID_UDP,
+		.end	= AT572D940HF_ID_UDP,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device at572d940hf_udc_device = {
+	.name		= "at91_udc",
+	.id		= -1,
+	.dev		= {
+				.platform_data		= &udc_data,
+	},
+	.resource	= udc_resources,
+	.num_resources	= ARRAY_SIZE(udc_resources),
+};
+
+void __init at91_add_device_udc(struct at91_udc_data *data)
+{
+	if (!data)
+		return;
+
+	if (data->vbus_pin) {
+		at91_set_gpio_input(data->vbus_pin, 0);
+		at91_set_deglitch(data->vbus_pin, 1);
+	}
+
+	/* Pullup pin is handled internally */
+
+	udc_data = *data;
+	platform_device_register(&at572d940hf_udc_device);
+}
+#else
+void __init at91_add_device_udc(struct at91_udc_data *data) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ *  Ethernet
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_MACB) || defined(CONFIG_MACB_MODULE)
+static u64 eth_dmamask = DMA_BIT_MASK(32);
+static struct at91_eth_data eth_data;
+
+static struct resource eth_resources[] = {
+	[0] = {
+		.start	= AT572D940HF_BASE_EMAC,
+		.end	= AT572D940HF_BASE_EMAC + SZ_16K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= AT572D940HF_ID_EMAC,
+		.end	= AT572D940HF_ID_EMAC,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device at572d940hf_eth_device = {
+	.name		= "macb",
+	.id		= -1,
+	.dev		= {
+			.dma_mask		= &eth_dmamask,
+			.coherent_dma_mask	= DMA_BIT_MASK(32),
+			.platform_data		= &eth_data,
+	},
+	.resource	= eth_resources,
+	.num_resources	= ARRAY_SIZE(eth_resources),
+};
+
+void __init at91_add_device_eth(struct at91_eth_data *data)
+{
+	if (!data)
+		return;
+
+	if (data->phy_irq_pin) {
+		at91_set_gpio_input(data->phy_irq_pin, 0);
+		at91_set_deglitch(data->phy_irq_pin, 1);
+	}
+
+	/* Only RMII is supported */
+	data->is_rmii = 1;
+
+	/* Pins used for RMII */
+	at91_set_A_periph(AT91_PIN_PA16, 0);	/* ETXCK_EREFCK */
+	at91_set_A_periph(AT91_PIN_PA17, 0);	/* ERXDV */
+	at91_set_A_periph(AT91_PIN_PA18, 0);	/* ERX0 */
+	at91_set_A_periph(AT91_PIN_PA19, 0);	/* ERX1 */
+	at91_set_A_periph(AT91_PIN_PA20, 0);	/* ERXER */
+	at91_set_A_periph(AT91_PIN_PA23, 0);	/* ETXEN */
+	at91_set_A_periph(AT91_PIN_PA21, 0);	/* ETX0 */
+	at91_set_A_periph(AT91_PIN_PA22, 0);	/* ETX1 */
+	at91_set_A_periph(AT91_PIN_PA13, 0);	/* EMDIO */
+	at91_set_A_periph(AT91_PIN_PA14, 0);	/* EMDC */
+
+	eth_data = *data;
+	platform_device_register(&at572d940hf_eth_device);
+}
+#else
+void __init at91_add_device_eth(struct at91_eth_data *data) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ *  MMC / SD
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_MMC_AT91) || defined(CONFIG_MMC_AT91_MODULE)
+static u64 mmc_dmamask = DMA_BIT_MASK(32);
+static struct at91_mmc_data mmc_data;
+
+static struct resource mmc_resources[] = {
+	[0] = {
+		.start	= AT572D940HF_BASE_MCI,
+		.end	= AT572D940HF_BASE_MCI + SZ_16K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= AT572D940HF_ID_MCI,
+		.end	= AT572D940HF_ID_MCI,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device at572d940hf_mmc_device = {
+	.name		= "at91_mci",
+	.id		= -1,
+	.dev		= {
+				.dma_mask		= &mmc_dmamask,
+				.coherent_dma_mask	= DMA_BIT_MASK(32),
+				.platform_data		= &mmc_data,
+	},
+	.resource	= mmc_resources,
+	.num_resources	= ARRAY_SIZE(mmc_resources),
+};
+
+void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
+{
+	if (!data)
+		return;
+
+	/* input/irq */
+	if (data->det_pin) {
+		at91_set_gpio_input(data->det_pin, 1);
+		at91_set_deglitch(data->det_pin, 1);
+	}
+	if (data->wp_pin)
+		at91_set_gpio_input(data->wp_pin, 1);
+	if (data->vcc_pin)
+		at91_set_gpio_output(data->vcc_pin, 0);
+
+	/* CLK */
+	at91_set_A_periph(AT91_PIN_PC22, 0);
+
+	/* CMD */
+	at91_set_A_periph(AT91_PIN_PC23, 1);
+
+	/* DAT0, maybe DAT1..DAT3 */
+	at91_set_A_periph(AT91_PIN_PC24, 1);
+	if (data->wire4) {
+		at91_set_A_periph(AT91_PIN_PC25, 1);
+		at91_set_A_periph(AT91_PIN_PC26, 1);
+		at91_set_A_periph(AT91_PIN_PC27, 1);
+	}
+
+	mmc_data = *data;
+	platform_device_register(&at572d940hf_mmc_device);
+}
+#else
+void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ *  NAND / SmartMedia
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_MTD_NAND_ATMEL) || defined(CONFIG_MTD_NAND_ATMEL_MODULE)
+static struct atmel_nand_data nand_data;
+
+#define NAND_BASE	AT91_CHIPSELECT_3
+
+static struct resource nand_resources[] = {
+	{
+		.start	= NAND_BASE,
+		.end	= NAND_BASE + SZ_256M - 1,
+		.flags	= IORESOURCE_MEM,
+	}
+};
+
+static struct platform_device at572d940hf_nand_device = {
+	.name		= "atmel_nand",
+	.id		= -1,
+	.dev		= {
+				.platform_data	= &nand_data,
+	},
+	.resource	= nand_resources,
+	.num_resources	= ARRAY_SIZE(nand_resources),
+};
+
+void __init at91_add_device_nand(struct atmel_nand_data *data)
+{
+	unsigned long csa;
+
+	if (!data)
+		return;
+
+	csa = at91_sys_read(AT91_MATRIX_EBICSA);
+	at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_CS3A_SMC_SMARTMEDIA);
+
+	/* enable pin */
+	if (data->enable_pin)
+		at91_set_gpio_output(data->enable_pin, 1);
+
+	/* ready/busy pin */
+	if (data->rdy_pin)
+		at91_set_gpio_input(data->rdy_pin, 1);
+
+	/* card detect pin */
+	if (data->det_pin)
+		at91_set_gpio_input(data->det_pin, 1);
+
+	at91_set_A_periph(AT91_PIN_PB28, 0);		/* A[22] */
+	at91_set_B_periph(AT91_PIN_PA28, 0);		/* NANDOE */
+	at91_set_B_periph(AT91_PIN_PA29, 0);		/* NANDWE */
+
+	nand_data = *data;
+	platform_device_register(&at572d940hf_nand_device);
+}
+
+#else
+void __init at91_add_device_nand(struct atmel_nand_data *data) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ *  TWI (i2c)
+ * -------------------------------------------------------------------- */
+
+/*
+ * Prefer the GPIO code since the TWI controller isn't robust
+ * (gets overruns and underruns under load) and can only issue
+ * repeated STARTs in one scenario (the driver doesn't yet handle them).
+ */
+
+#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)
+
+static struct i2c_gpio_platform_data pdata = {
+	.sda_pin		= AT91_PIN_PC7,
+	.sda_is_open_drain	= 1,
+	.scl_pin		= AT91_PIN_PC8,
+	.scl_is_open_drain	= 1,
+	.udelay			= 2,		/* ~100 kHz */
+};
+
+static struct platform_device at572d940hf_twi_device = {
+	.name			= "i2c-gpio",
+	.id			= -1,
+	.dev.platform_data	= &pdata,
+};
+
+void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices)
+{
+	at91_set_GPIO_periph(AT91_PIN_PC7, 1);		/* TWD (SDA) */
+	at91_set_multi_drive(AT91_PIN_PC7, 1);
+
+	at91_set_GPIO_periph(AT91_PIN_PC8, 1);		/* TWCK (SCL) */
+	at91_set_multi_drive(AT91_PIN_PC8, 1);
+
+	i2c_register_board_info(0, devices, nr_devices);
+	platform_device_register(&at572d940hf_twi_device);
+}
+
+#elif defined(CONFIG_I2C_AT91) || defined(CONFIG_I2C_AT91_MODULE)
+
+static struct resource twi0_resources[] = {
+	[0] = {
+		.start	= AT572D940HF_BASE_TWI0,
+		.end	= AT572D940HF_BASE_TWI0 + SZ_16K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= AT572D940HF_ID_TWI0,
+		.end	= AT572D940HF_ID_TWI0,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device at572d940hf_twi0_device = {
+	.name		= "at91_i2c",
+	.id		= 0,
+	.resource	= twi0_resources,
+	.num_resources	= ARRAY_SIZE(twi0_resources),
+};
+
+static struct resource twi1_resources[] = {
+	[0] = {
+		.start	= AT572D940HF_BASE_TWI1,
+		.end	= AT572D940HF_BASE_TWI1 + SZ_16K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= AT572D940HF_ID_TWI1,
+		.end	= AT572D940HF_ID_TWI1,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device at572d940hf_twi1_device = {
+	.name		= "at91_i2c",
+	.id		= 1,
+	.resource	= twi1_resources,
+	.num_resources	= ARRAY_SIZE(twi1_resources),
+};
+
+void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices)
+{
+	/* pins used for TWI0 interface */
+	at91_set_A_periph(AT91_PIN_PC7, 0);		/* TWD */
+	at91_set_multi_drive(AT91_PIN_PC7, 1);
+
+	at91_set_A_periph(AT91_PIN_PC8, 0);		/* TWCK */
+	at91_set_multi_drive(AT91_PIN_PC8, 1);
+
+	/* pins used for TWI1 interface */
+	at91_set_A_periph(AT91_PIN_PC20, 0);		/* TWD */
+	at91_set_multi_drive(AT91_PIN_PC20, 1);
+
+	at91_set_A_periph(AT91_PIN_PC21, 0);		/* TWCK */
+	at91_set_multi_drive(AT91_PIN_PC21, 1);
+
+	i2c_register_board_info(0, devices, nr_devices);
+	platform_device_register(&at572d940hf_twi0_device);
+	platform_device_register(&at572d940hf_twi1_device);
+}
+#else
+void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ *  SPI
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE)
+static u64 spi_dmamask = DMA_BIT_MASK(32);
+
+static struct resource spi0_resources[] = {
+	[0] = {
+		.start	= AT572D940HF_BASE_SPI0,
+		.end	= AT572D940HF_BASE_SPI0 + SZ_16K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= AT572D940HF_ID_SPI0,
+		.end	= AT572D940HF_ID_SPI0,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device at572d940hf_spi0_device = {
+	.name		= "atmel_spi",
+	.id		= 0,
+	.dev		= {
+				.dma_mask		= &spi_dmamask,
+				.coherent_dma_mask	= DMA_BIT_MASK(32),
+	},
+	.resource	= spi0_resources,
+	.num_resources	= ARRAY_SIZE(spi0_resources),
+};
+
+static const unsigned spi0_standard_cs[4] = { AT91_PIN_PA3, AT91_PIN_PA4, AT91_PIN_PA5, AT91_PIN_PA6 };
+
+static struct resource spi1_resources[] = {
+	[0] = {
+		.start	= AT572D940HF_BASE_SPI1,
+		.end	= AT572D940HF_BASE_SPI1 + SZ_16K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= AT572D940HF_ID_SPI1,
+		.end	= AT572D940HF_ID_SPI1,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device at572d940hf_spi1_device = {
+	.name		= "atmel_spi",
+	.id		= 1,
+	.dev		= {
+				.dma_mask		= &spi_dmamask,
+				.coherent_dma_mask	= DMA_BIT_MASK(32),
+	},
+	.resource	= spi1_resources,
+	.num_resources	= ARRAY_SIZE(spi1_resources),
+};
+
+static const unsigned spi1_standard_cs[4] = { AT91_PIN_PC3, AT91_PIN_PC4, AT91_PIN_PC5, AT91_PIN_PC6 };
+
+void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
+{
+	int i;
+	unsigned long cs_pin;
+	short enable_spi0 = 0;
+	short enable_spi1 = 0;
+
+	/* Choose SPI chip-selects */
+	for (i = 0; i < nr_devices; i++) {
+		if (devices[i].controller_data)
+			cs_pin = (unsigned long) devices[i].controller_data;
+		else if (devices[i].bus_num == 0)
+			cs_pin = spi0_standard_cs[devices[i].chip_select];
+		else
+			cs_pin = spi1_standard_cs[devices[i].chip_select];
+
+		if (devices[i].bus_num == 0)
+			enable_spi0 = 1;
+		else
+			enable_spi1 = 1;
+
+		/* enable chip-select pin */
+		at91_set_gpio_output(cs_pin, 1);
+
+		/* pass chip-select pin to driver */
+		devices[i].controller_data = (void *) cs_pin;
+	}
+
+	spi_register_board_info(devices, nr_devices);
+
+	/* Configure SPI bus(es) */
+	if (enable_spi0) {
+		at91_set_A_periph(AT91_PIN_PA0, 0);	/* SPI0_MISO */
+		at91_set_A_periph(AT91_PIN_PA1, 0);	/* SPI0_MOSI */
+		at91_set_A_periph(AT91_PIN_PA2, 0);	/* SPI0_SPCK */
+
+		at91_clock_associate("spi0_clk", &at572d940hf_spi0_device.dev, "spi_clk");
+		platform_device_register(&at572d940hf_spi0_device);
+	}
+	if (enable_spi1) {
+		at91_set_A_periph(AT91_PIN_PC0, 0);	/* SPI1_MISO */
+		at91_set_A_periph(AT91_PIN_PC1, 0);	/* SPI1_MOSI */
+		at91_set_A_periph(AT91_PIN_PC2, 0);	/* SPI1_SPCK */
+
+		at91_clock_associate("spi1_clk", &at572d940hf_spi1_device.dev, "spi_clk");
+		platform_device_register(&at572d940hf_spi1_device);
+	}
+}
+#else
+void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ *  Timer/Counter blocks
+ * -------------------------------------------------------------------- */
+
+#ifdef CONFIG_ATMEL_TCLIB
+
+static struct resource tcb_resources[] = {
+	[0] = {
+		.start	= AT572D940HF_BASE_TCB,
+		.end	= AT572D940HF_BASE_TCB + SZ_16K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= AT572D940HF_ID_TC0,
+		.end	= AT572D940HF_ID_TC0,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[2] = {
+		.start	= AT572D940HF_ID_TC1,
+		.end	= AT572D940HF_ID_TC1,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[3] = {
+		.start	= AT572D940HF_ID_TC2,
+		.end	= AT572D940HF_ID_TC2,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device at572d940hf_tcb_device = {
+	.name		= "atmel_tcb",
+	.id		= 0,
+	.resource	= tcb_resources,
+	.num_resources	= ARRAY_SIZE(tcb_resources),
+};
+
+static void __init at91_add_device_tc(void)
+{
+	/* this chip has a separate clock and irq for each TC channel */
+	at91_clock_associate("tc0_clk", &at572d940hf_tcb_device.dev, "t0_clk");
+	at91_clock_associate("tc1_clk", &at572d940hf_tcb_device.dev, "t1_clk");
+	at91_clock_associate("tc2_clk", &at572d940hf_tcb_device.dev, "t2_clk");
+	platform_device_register(&at572d940hf_tcb_device);
+}
+#else
+static void __init at91_add_device_tc(void) { }
+#endif
+
+
+/* --------------------------------------------------------------------
+ *  RTT
+ * -------------------------------------------------------------------- */
+
+static struct resource rtt_resources[] = {
+	{
+		.start	= AT91_BASE_SYS + AT91_RTT,
+		.end	= AT91_BASE_SYS + AT91_RTT + SZ_16 - 1,
+		.flags	= IORESOURCE_MEM,
+	}
+};
+
+static struct platform_device at572d940hf_rtt_device = {
+	.name		= "at91_rtt",
+	.id		= 0,
+	.resource	= rtt_resources,
+	.num_resources	= ARRAY_SIZE(rtt_resources),
+};
+
+static void __init at91_add_device_rtt(void)
+{
+	platform_device_register(&at572d940hf_rtt_device);
+}
+
+
+/* --------------------------------------------------------------------
+ *  Watchdog
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_AT91SAM9X_WATCHDOG) || defined(CONFIG_AT91SAM9X_WATCHDOG_MODULE)
+static struct platform_device at572d940hf_wdt_device = {
+	.name		= "at91_wdt",
+	.id		= -1,
+	.num_resources	= 0,
+};
+
+static void __init at91_add_device_watchdog(void)
+{
+	platform_device_register(&at572d940hf_wdt_device);
+}
+#else
+static void __init at91_add_device_watchdog(void) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ *  UART
+ * -------------------------------------------------------------------- */
+
+#if defined(CONFIG_SERIAL_ATMEL)
+static struct resource dbgu_resources[] = {
+	[0] = {
+		.start	= AT91_VA_BASE_SYS + AT91_DBGU,
+		.end	= AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= AT91_ID_SYS,
+		.end	= AT91_ID_SYS,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct atmel_uart_data dbgu_data = {
+	.use_dma_tx	= 0,
+	.use_dma_rx	= 0,		/* DBGU not capable of receive DMA */
+	.regs		= (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
+};
+
+static u64 dbgu_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device at572d940hf_dbgu_device = {
+	.name		= "atmel_usart",
+	.id		= 0,
+	.dev		= {
+				.dma_mask		= &dbgu_dmamask,
+				.coherent_dma_mask	= DMA_BIT_MASK(32),
+				.platform_data		= &dbgu_data,
+	},
+	.resource	= dbgu_resources,
+	.num_resources	= ARRAY_SIZE(dbgu_resources),
+};
+
+static inline void configure_dbgu_pins(void)
+{
+	at91_set_A_periph(AT91_PIN_PC31, 1);		/* DTXD */
+	at91_set_A_periph(AT91_PIN_PC30, 0);		/* DRXD */
+}
+
+static struct resource uart0_resources[] = {
+	[0] = {
+		.start	= AT572D940HF_BASE_US0,
+		.end	= AT572D940HF_BASE_US0 + SZ_16K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= AT572D940HF_ID_US0,
+		.end	= AT572D940HF_ID_US0,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct atmel_uart_data uart0_data = {
+	.use_dma_tx	= 1,
+	.use_dma_rx	= 1,
+};
+
+static u64 uart0_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device at572d940hf_uart0_device = {
+	.name		= "atmel_usart",
+	.id		= 1,
+	.dev		= {
+				.dma_mask		= &uart0_dmamask,
+				.coherent_dma_mask	= DMA_BIT_MASK(32),
+				.platform_data		= &uart0_data,
+	},
+	.resource	= uart0_resources,
+	.num_resources	= ARRAY_SIZE(uart0_resources),
+};
+
+static inline void configure_usart0_pins(unsigned pins)
+{
+	at91_set_A_periph(AT91_PIN_PA8, 1);		/* TXD0 */
+	at91_set_A_periph(AT91_PIN_PA7, 0);		/* RXD0 */
+
+	if (pins & ATMEL_UART_RTS)
+		at91_set_A_periph(AT91_PIN_PA10, 0);	/* RTS0 */
+	if (pins & ATMEL_UART_CTS)
+		at91_set_A_periph(AT91_PIN_PA9, 0);	/* CTS0 */
+}
+
+static struct resource uart1_resources[] = {
+	[0] = {
+		.start	= AT572D940HF_BASE_US1,
+		.end	= AT572D940HF_BASE_US1 + SZ_16K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= AT572D940HF_ID_US1,
+		.end	= AT572D940HF_ID_US1,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct atmel_uart_data uart1_data = {
+	.use_dma_tx	= 1,
+	.use_dma_rx	= 1,
+};
+
+static u64 uart1_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device at572d940hf_uart1_device = {
+	.name		= "atmel_usart",
+	.id		= 2,
+	.dev		= {
+				.dma_mask		= &uart1_dmamask,
+				.coherent_dma_mask	= DMA_BIT_MASK(32),
+				.platform_data		= &uart1_data,
+	},
+	.resource	= uart1_resources,
+	.num_resources	= ARRAY_SIZE(uart1_resources),
+};
+
+static inline void configure_usart1_pins(unsigned pins)
+{
+	at91_set_A_periph(AT91_PIN_PC10, 1);		/* TXD1 */
+	at91_set_A_periph(AT91_PIN_PC9, 0);		/* RXD1 */
+
+	if (pins & ATMEL_UART_RTS)
+		at91_set_A_periph(AT91_PIN_PC12, 0);	/* RTS1 */
+	if (pins & ATMEL_UART_CTS)
+		at91_set_A_periph(AT91_PIN_PC11, 0);	/* CTS1 */
+}
+
+static struct resource uart2_resources[] = {
+	[0] = {
+		.start	= AT572D940HF_BASE_US2,
+		.end	= AT572D940HF_BASE_US2 + SZ_16K - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= AT572D940HF_ID_US2,
+		.end	= AT572D940HF_ID_US2,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct atmel_uart_data uart2_data = {
+	.use_dma_tx	= 1,
+	.use_dma_rx	= 1,
+};
+
+static u64 uart2_dmamask = DMA_BIT_MASK(32);
+
+static struct platform_device at572d940hf_uart2_device = {
+	.name		= "atmel_usart",
+	.id		= 3,
+	.dev		= {
+				.dma_mask		= &uart2_dmamask,
+				.coherent_dma_mask	= DMA_BIT_MASK(32),
+				.platform_data		= &uart2_data,
+	},
+	.resource	= uart2_resources,
+	.num_resources	= ARRAY_SIZE(uart2_resources),
+};
+
+static inline void configure_usart2_pins(unsigned pins)
+{
+	at91_set_A_periph(AT91_PIN_PC15, 1);		/* TXD2 */
+	at91_set_A_periph(AT91_PIN_PC14, 0);		/* RXD2 */
+
+	if (pins & ATMEL_UART_RTS)
+		at91_set_A_periph(AT91_PIN_PC17, 0);	/* RTS2 */
+	if (pins & ATMEL_UART_CTS)
+		at91_set_A_periph(AT91_PIN_PC16, 0);	/* CTS2 */
+}
+
+static struct platform_device *__initdata at91_uarts[ATMEL_MAX_UART];	/* the UARTs to use */
+struct platform_device *atmel_default_console_device;	/* the serial console device */
+
+void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
+{
+	struct platform_device *pdev;
+
+	switch (id) {
+		case 0:		/* DBGU */
+			pdev = &at572d940hf_dbgu_device;
+			configure_dbgu_pins();
+			at91_clock_associate("mck", &pdev->dev, "usart");
+			break;
+		case AT572D940HF_ID_US0:
+			pdev = &at572d940hf_uart0_device;
+			configure_usart0_pins(pins);
+			at91_clock_associate("usart0_clk", &pdev->dev, "usart");
+			break;
+		case AT572D940HF_ID_US1:
+			pdev = &at572d940hf_uart1_device;
+			configure_usart1_pins(pins);
+			at91_clock_associate("usart1_clk", &pdev->dev, "usart");
+			break;
+		case AT572D940HF_ID_US2:
+			pdev = &at572d940hf_uart2_device;
+			configure_usart2_pins(pins);
+			at91_clock_associate("usart2_clk", &pdev->dev, "usart");
+			break;
+		default:
+			return;
+	}
+	pdev->id = portnr;		/* update to mapped ID */
+
+	if (portnr < ATMEL_MAX_UART)
+		at91_uarts[portnr] = pdev;
+}
+
+void __init at91_set_serial_console(unsigned portnr)
+{
+	if (portnr < ATMEL_MAX_UART)
+		atmel_default_console_device = at91_uarts[portnr];
+}
+
+void __init at91_add_device_serial(void)
+{
+	int i;
+
+	for (i = 0; i < ATMEL_MAX_UART; i++) {
+		if (at91_uarts[i])
+			platform_device_register(at91_uarts[i]);
+	}
+
+	if (!atmel_default_console_device)
+		printk(KERN_INFO "AT91: No default serial console defined.\n");
+}
+
+#else
+void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) {}
+void __init at91_set_serial_console(unsigned portnr) {}
+void __init at91_add_device_serial(void) {}
+#endif
+
+
+/* --------------------------------------------------------------------
+ *  mAgic
+ * -------------------------------------------------------------------- */
+
+#ifdef CONFIG_MAGICV
+static struct resource mAgic_resources[] = {
+	{
+		.start = AT91_MAGIC_PM_BASE,
+		.end   = AT91_MAGIC_PM_BASE + AT91_MAGIC_PM_SIZE - 1,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = AT91_MAGIC_DM_I_BASE,
+		.end   = AT91_MAGIC_DM_I_BASE + AT91_MAGIC_DM_I_SIZE - 1,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = AT91_MAGIC_DM_F_BASE,
+		.end   = AT91_MAGIC_DM_F_BASE + AT91_MAGIC_DM_F_SIZE - 1,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = AT91_MAGIC_DM_DB_BASE,
+		.end   = AT91_MAGIC_DM_DB_BASE + AT91_MAGIC_DM_DB_SIZE - 1,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = AT91_MAGIC_REGS_BASE,
+		.end   = AT91_MAGIC_REGS_BASE + AT91_MAGIC_REGS_SIZE - 1,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = AT91_MAGIC_EXTPAGE_BASE,
+		.end   = AT91_MAGIC_EXTPAGE_BASE + AT91_MAGIC_EXTPAGE_SIZE - 1,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start  = AT572D940HF_ID_MSIRQ0,
+		.end    = AT572D940HF_ID_MSIRQ0,
+		.flags  = IORESOURCE_IRQ,
+	},
+	{
+		.start  = AT572D940HF_ID_MHALT,
+		.end    = AT572D940HF_ID_MHALT,
+		.flags  = IORESOURCE_IRQ,
+	},
+	{
+		.start  = AT572D940HF_ID_MEXC,
+		.end    = AT572D940HF_ID_MEXC,
+		.flags  = IORESOURCE_IRQ,
+	},
+	{
+		.start  = AT572D940HF_ID_MEDMA,
+		.end    = AT572D940HF_ID_MEDMA,
+		.flags  = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device mAgic_device = {
+	.name           = "mAgic",
+	.id             = -1,
+	.num_resources  = ARRAY_SIZE(mAgic_resources),
+	.resource       = mAgic_resources,
+};
+
+void __init at91_add_device_mAgic(void)
+{
+	platform_device_register(&mAgic_device);
+}
+#else
+void __init at91_add_device_mAgic(void) {}
+#endif
+
+
+/* -------------------------------------------------------------------- */
+
+/*
+ * These devices are always present and don't need any board-specific
+ * setup.
+ */
+static int __init at91_add_standard_devices(void)
+{
+	at91_add_device_rtt();
+	at91_add_device_watchdog();
+	at91_add_device_tc();
+	return 0;
+}
+
+arch_initcall(at91_add_standard_devices);

+ 328 - 0
arch/arm/mach-at91/board-at572d940hf_ek.c

@@ -0,0 +1,328 @@
+/*
+ * linux/arch/arm/mach-at91/board-at572d940hf_ek.c
+ *
+ * Copyright (C) 2008 Atmel Antonio R. Costa <costa.antonior@gmail.com>
+ * Copyright (C) 2005 SAN People
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/ds1305.h>
+#include <linux/irq.h>
+#include <linux/mtd/physmap.h>
+
+#include <mach/hardware.h>
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/irq.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <mach/board.h>
+#include <mach/gpio.h>
+#include <mach/at91sam9_smc.h>
+
+#include "sam9_smc.h"
+#include "generic.h"
+
+
+static void __init eb_map_io(void)
+{
+	/* Initialize processor: 12.000 MHz crystal */
+	at572d940hf_initialize(12000000);
+
+	/* DBGU on ttyS0. (Rx & Tx only) */
+	at91_register_uart(0, 0, 0);
+
+	/* USART0 on ttyS1. (Rx & Tx only) */
+	at91_register_uart(AT572D940HF_ID_US0, 1, 0);
+
+	/* USART1 on ttyS2. (Rx & Tx only) */
+	at91_register_uart(AT572D940HF_ID_US1, 2, 0);
+
+	/* USART2 on ttyS3. (Rx & Tx only) */
+	at91_register_uart(AT572D940HF_ID_US2, 3, 0);
+
+	/* set serial console to ttyS0 (ie, DBGU) */
+	at91_set_serial_console(0);
+}
+
+static void __init eb_init_irq(void)
+{
+	at572d940hf_init_interrupts(NULL);
+}
+
+
+/*
+ * USB Host Port
+ */
+static struct at91_usbh_data __initdata eb_usbh_data = {
+	.ports		= 2,
+};
+
+
+/*
+ * USB Device Port
+ */
+static struct at91_udc_data __initdata eb_udc_data = {
+	.vbus_pin	= 0,		/* no VBUS detection, UDC always on */
+	.pullup_pin	= 0,		/* pull-up driven by UDC */
+};
+
+
+/*
+ * MCI (SD/MMC)
+ */
+static struct at91_mmc_data __initdata eb_mmc_data = {
+	.wire4		= 1,
+/*	.det_pin	= ... not connected */
+/*	.wp_pin		= ... not connected */
+/*	.vcc_pin	= ... not connected */
+};
+
+
+/*
+ * MACB Ethernet device
+ */
+static struct at91_eth_data __initdata eb_eth_data = {
+	.phy_irq_pin	= AT91_PIN_PB25,
+	.is_rmii	= 1,
+};
+
+/*
+ * NOR flash
+ */
+
+static struct mtd_partition eb_nor_partitions[] = {
+	{
+		.name		= "Raw Environment",
+		.offset		= 0,
+		.size		= SZ_4M,
+		.mask_flags	= 0,
+	},
+	{
+		.name		= "OS FS",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= 3 * SZ_1M,
+		.mask_flags	= 0,
+	},
+	{
+		.name		= "APP FS",
+		.offset		= MTDPART_OFS_APPEND,
+		.size		= MTDPART_SIZ_FULL,
+		.mask_flags	= 0,
+	},
+};
+
+static void nor_flash_set_vpp(struct map_info *mi, int i)
+{
+}
+
+static struct physmap_flash_data nor_flash_data = {
+	.width		= 4,
+	.parts		= eb_nor_partitions,
+	.nr_parts	= ARRAY_SIZE(eb_nor_partitions),
+	.set_vpp	= nor_flash_set_vpp,
+};
+
+static struct resource nor_flash_resources[] = {
+	{
+		.start	= AT91_CHIPSELECT_0,
+		.end	= AT91_CHIPSELECT_0 + SZ_16M - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device nor_flash = {
+	.name		= "physmap-flash",
+	.id		= 0,
+	.dev		= {
+				.platform_data = &nor_flash_data,
+			},
+	.resource	= nor_flash_resources,
+	.num_resources	= ARRAY_SIZE(nor_flash_resources),
+};
+
+static struct sam9_smc_config __initdata eb_nor_smc_config = {
+	.ncs_read_setup		= 1,
+	.nrd_setup		= 1,
+	.ncs_write_setup	= 1,
+	.nwe_setup		= 1,
+
+	.ncs_read_pulse		= 7,
+	.nrd_pulse		= 7,
+	.ncs_write_pulse	= 7,
+	.nwe_pulse		= 7,
+
+	.read_cycle		= 9,
+	.write_cycle		= 9,
+
+	.mode			= AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE | AT91_SMC_BAT_WRITE | AT91_SMC_DBW_32,
+	.tdf_cycles		= 1,
+};
+
+static void __init eb_add_device_nor(void)
+{
+	/* configure chip-select 0 (NOR) */
+	sam9_smc_configure(0, &eb_nor_smc_config);
+	platform_device_register(&nor_flash);
+}
+
+/*
+ * NAND flash
+ */
+static struct mtd_partition __initdata eb_nand_partition[] = {
+	{
+		.name	= "Partition 1",
+		.offset	= 0,
+		.size	= SZ_16M,
+	},
+	{
+		.name	= "Partition 2",
+		.offset = MTDPART_OFS_NXTBLK,
+		.size	= MTDPART_SIZ_FULL,
+	}
+};
+
+static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
+{
+	*num_partitions = ARRAY_SIZE(eb_nand_partition);
+	return eb_nand_partition;
+}
+
+static struct atmel_nand_data __initdata eb_nand_data = {
+	.ale		= 22,
+	.cle		= 21,
+/*	.det_pin	= ... not connected */
+/*	.rdy_pin	= AT91_PIN_PC16, */
+	.enable_pin	= AT91_PIN_PA15,
+	.partition_info	= nand_partitions,
+#if defined(CONFIG_MTD_NAND_AT91_BUSWIDTH_16)
+	.bus_width_16	= 1,
+#else
+	.bus_width_16	= 0,
+#endif
+};
+
+static struct sam9_smc_config __initdata eb_nand_smc_config = {
+	.ncs_read_setup		= 0,
+	.nrd_setup		= 0,
+	.ncs_write_setup	= 1,
+	.nwe_setup		= 1,
+
+	.ncs_read_pulse		= 3,
+	.nrd_pulse		= 3,
+	.ncs_write_pulse	= 3,
+	.nwe_pulse		= 3,
+
+	.read_cycle		= 5,
+	.write_cycle		= 5,
+
+	.mode			= AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE,
+	.tdf_cycles		= 12,
+};
+
+static void __init eb_add_device_nand(void)
+{
+	/* setup bus-width (8 or 16) */
+	if (eb_nand_data.bus_width_16)
+		eb_nand_smc_config.mode |= AT91_SMC_DBW_16;
+	else
+		eb_nand_smc_config.mode |= AT91_SMC_DBW_8;
+
+	/* configure chip-select 3 (NAND) */
+	sam9_smc_configure(3, &eb_nand_smc_config);
+
+	at91_add_device_nand(&eb_nand_data);
+}
+
+
+/*
+ * SPI devices
+ */
+static struct resource rtc_resources[] = {
+	[0] = {
+		.start	= AT572D940HF_ID_IRQ1,
+		.end	= AT572D940HF_ID_IRQ1,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct ds1305_platform_data ds1306_data = {
+	.is_ds1306	= true,
+	.en_1hz		= false,
+};
+
+static struct spi_board_info eb_spi_devices[] = {
+	{	/* RTC Dallas DS1306 */
+		.modalias	= "rtc-ds1305",
+		.chip_select	= 3,
+		.mode		= SPI_CS_HIGH | SPI_CPOL | SPI_CPHA,
+		.max_speed_hz	= 500000,
+		.bus_num	= 0,
+		.irq		= AT572D940HF_ID_IRQ1,
+		.platform_data	= (void *) &ds1306_data,
+	},
+#if defined(CONFIG_MTD_AT91_DATAFLASH_CARD)
+	{	/* Dataflash card */
+		.modalias	= "mtd_dataflash",
+		.chip_select	= 0,
+		.max_speed_hz	= 15 * 1000 * 1000,
+		.bus_num	= 0,
+	},
+#endif
+};
+
+static void __init eb_board_init(void)
+{
+	/* Serial */
+	at91_add_device_serial();
+	/* USB Host */
+	at91_add_device_usbh(&eb_usbh_data);
+	/* USB Device */
+	at91_add_device_udc(&eb_udc_data);
+	/* I2C */
+	at91_add_device_i2c(NULL, 0);
+	/* NOR */
+	eb_add_device_nor();
+	/* NAND */
+	eb_add_device_nand();
+	/* SPI */
+	at91_add_device_spi(eb_spi_devices, ARRAY_SIZE(eb_spi_devices));
+	/* MMC */
+	at91_add_device_mmc(0, &eb_mmc_data);
+	/* Ethernet */
+	at91_add_device_eth(&eb_eth_data);
+	/* mAgic */
+	at91_add_device_mAgic();
+}
+
+MACHINE_START(AT572D940HFEB, "Atmel AT572D940HF-EK")
+	/* Maintainer: Atmel <costa.antonior@gmail.com> */
+	.phys_io	= AT91_BASE_SYS,
+	.io_pg_offst	= (AT91_VA_BASE_SYS >> 18) & 0xfffc,
+	.boot_params	= AT91_SDRAM_BASE + 0x100,
+	.timer		= &at91sam926x_timer,
+	.map_io		= eb_map_io,
+	.init_irq	= eb_init_irq,
+	.init_machine	= eb_board_init,
+MACHINE_END

+ 5 - 3
arch/arm/mach-at91/clock.c

@@ -29,6 +29,7 @@
 #include <mach/cpu.h>
 
 #include "clock.h"
+#include "generic.h"
 
 
 /*
@@ -628,7 +629,7 @@ static void __init at91_pllb_usbfs_clock_init(unsigned long main_clock)
 		at91_sys_write(AT91_PMC_SCER, AT91RM9200_PMC_MCKUDP);
 	} else if (cpu_is_at91sam9260() || cpu_is_at91sam9261() ||
 		   cpu_is_at91sam9263() || cpu_is_at91sam9g20() ||
-		   cpu_is_at91sam9g10()) {
+		   cpu_is_at91sam9g10() || cpu_is_at572d940hf()) {
 		uhpck.pmc_mask = AT91SAM926x_PMC_UHP;
 		udpck.pmc_mask = AT91SAM926x_PMC_UDP;
 	} else if (cpu_is_at91cap9()) {
@@ -711,12 +712,13 @@ int __init at91_clock_init(unsigned long main_clock)
 	/*
 	 * USB HS clock init
 	 */
-	if (cpu_has_utmi())
+	if (cpu_has_utmi()) {
 		/*
 		 * multiplier is hard-wired to 40
 		 * (obtain the USB High Speed 480 MHz when input is 12 MHz)
 		 */
 		utmi_clk.rate_hz = 40 * utmi_clk.parent->rate_hz;
+	}
 
 	/*
 	 * USB FS clock init
@@ -746,7 +748,7 @@ int __init at91_clock_init(unsigned long main_clock)
 		mck.rate_hz = (mckr & AT91_PMC_MDIV) == AT91SAM9_PMC_MDIV_3 ?
 			freq / 3 : freq / (1 << ((mckr & AT91_PMC_MDIV) >> 8));	/* mdiv */
 	} else {
-		mck.rate_hz = freq / (1 << ((mckr & AT91_PMC_MDIV) >> 8));      /* mdiv */
+		mck.rate_hz = freq / (1 << ((mckr & AT91_PMC_MDIV) >> 8));		/* mdiv */
 	}
 
 	/* Register the PMC's standard clocks */

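The two rate computations touched in the clock.c hunks are plain arithmetic: the UTMI PLL output is the main oscillator multiplied by the hard-wired factor of 40, and (outside the special divide-by-3 case handled in the first branch) MCK is the incoming clock divided by 2^MDIV. A self-contained sketch with example numbers; the 12 MHz crystal and 200 MHz input clock are assumptions used only to make the values concrete.

/* Illustrative only: the arithmetic behind the clock hunks above. */
#include <stdio.h>

int main(void)
{
	unsigned long main_hz = 12000000UL;		/* assumed 12 MHz main oscillator */
	unsigned long utmi_hz = 40 * main_hz;		/* 480 MHz -> USB High Speed */

	unsigned long freq = 200000000UL;		/* assumed input clock */
	unsigned int mdiv = 1;				/* MDIV field from PMC_MCKR */
	unsigned long mck_hz = freq / (1UL << mdiv);	/* 100 MHz master clock */

	printf("UTMI %lu Hz, MCK %lu Hz\n", utmi_hz, mck_hz);
	return 0;
}
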
+ 1 - 1
arch/arm/mach-at91/clock.h

@@ -22,7 +22,7 @@ struct clk {
 	struct clk	*parent;
 	u32		pmc_mask;
 	void		(*mode)(struct clk *, int);
-	unsigned	id:2;		/* PCK0..3, or 32k/main/a/b */
+	unsigned	id:3;		/* PCK0..4, or 32k/main/a/b */
 	unsigned	type;		/* clock type */
 	u16		users;
 };

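The clock.h change widens the per-clock id bitfield because a 2-bit unsigned field can only hold the values 0..3: enough to enumerate PCK0..3, but not the extra programmable clock (PCK4) that now needs id value 4. A tiny illustration of that limit (the struct names are hypothetical):

/* Illustrative only: an N-bit unsigned bitfield holds 0 .. (1 << N) - 1. */
struct clk_id2 { unsigned id:2; };	/* values 0..3: PCK4 (id == 4) would not fit */
struct clk_id3 { unsigned id:3; };	/* values 0..7: PCK0..4 fit with room to spare */
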
+ 2 - 0
arch/arm/mach-at91/generic.h

@@ -17,6 +17,7 @@ extern void __init at91sam9rl_initialize(unsigned long main_clock);
 extern void __init at91sam9g45_initialize(unsigned long main_clock);
 extern void __init at91x40_initialize(unsigned long main_clock);
 extern void __init at91cap9_initialize(unsigned long main_clock);
+extern void __init at572d940hf_initialize(unsigned long main_clock);
 
  /* Interrupts */
 extern void __init at91rm9200_init_interrupts(unsigned int priority[]);
@@ -27,6 +28,7 @@ extern void __init at91sam9rl_init_interrupts(unsigned int priority[]);
 extern void __init at91sam9g45_init_interrupts(unsigned int priority[]);
 extern void __init at91x40_init_interrupts(unsigned int priority[]);
 extern void __init at91cap9_init_interrupts(unsigned int priority[]);
+extern void __init at572d940hf_init_interrupts(unsigned int priority[]);
 extern void __init at91_aic_init(unsigned int priority[]);
 
  /* Timer */

+ 123 - 0
arch/arm/mach-at91/include/mach/at572d940hf.h

@@ -0,0 +1,123 @@
+/*
+ * include/mach/at572d940hf.h
+ *
+ * Antonio R. Costa <costa.antonior@gmail.com>
+ * Copyright (C) 2008 Atmel
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#ifndef AT572D940HF_H
+#define AT572D940HF_H
+
+/*
+ * Peripheral identifiers/interrupts.
+ */
+#define AT91_ID_FIQ		0	/* Advanced Interrupt Controller (FIQ) */
+#define AT91_ID_SYS		1	/* System Peripherals */
+#define AT572D940HF_ID_PIOA	2	/* Parallel IO Controller A */
+#define AT572D940HF_ID_PIOB	3	/* Parallel IO Controller B */
+#define AT572D940HF_ID_PIOC	4	/* Parallel IO Controller C */
+#define AT572D940HF_ID_EMAC	5	/* MACB ethernet controller */
+#define AT572D940HF_ID_US0	6	/* USART 0 */
+#define AT572D940HF_ID_US1	7	/* USART 1 */
+#define AT572D940HF_ID_US2	8	/* USART 2 */
+#define AT572D940HF_ID_MCI	9	/* Multimedia Card Interface */
+#define AT572D940HF_ID_UDP	10	/* USB Device Port */
+#define AT572D940HF_ID_TWI0	11	/* Two-Wire Interface 0 */
+#define AT572D940HF_ID_SPI0	12	/* Serial Peripheral Interface 0 */
+#define AT572D940HF_ID_SPI1	13	/* Serial Peripheral Interface 1 */
+#define AT572D940HF_ID_SSC0	14	/* Serial Synchronous Controller 0 */
+#define AT572D940HF_ID_SSC1	15	/* Serial Synchronous Controller 1 */
+#define AT572D940HF_ID_SSC2	16	/* Serial Synchronous Controller 2 */
+#define AT572D940HF_ID_TC0	17	/* Timer Counter 0 */
+#define AT572D940HF_ID_TC1	18	/* Timer Counter 1 */
+#define AT572D940HF_ID_TC2	19	/* Timer Counter 2 */
+#define AT572D940HF_ID_UHP	20	/* USB Host port */
+#define AT572D940HF_ID_SSC3	21	/* Serial Synchronous Controller 3 */
+#define AT572D940HF_ID_TWI1	22	/* Two-Wire Interface 1 */
+#define AT572D940HF_ID_CAN0	23	/* CAN Controller 0 */
+#define AT572D940HF_ID_CAN1	24	/* CAN Controller 1 */
+#define AT572D940HF_ID_MHALT	25	/* mAgicV HALT line */
+#define AT572D940HF_ID_MSIRQ0	26	/* mAgicV SIRQ0 line */
+#define AT572D940HF_ID_MEXC	27	/* mAgicV exception line */
+#define AT572D940HF_ID_MEDMA	28	/* mAgicV end of DMA line */
+#define AT572D940HF_ID_IRQ0	29	/* External Interrupt Source (IRQ0) */
+#define AT572D940HF_ID_IRQ1	30	/* External Interrupt Source (IRQ1) */
+#define AT572D940HF_ID_IRQ2	31	/* External Interrupt Source (IRQ2) */
+
+
+/*
+ * User Peripheral physical base addresses.
+ */
+#define AT572D940HF_BASE_TCB	0xfffa0000
+#define AT572D940HF_BASE_TC0	0xfffa0000
+#define AT572D940HF_BASE_TC1	0xfffa0040
+#define AT572D940HF_BASE_TC2	0xfffa0080
+#define AT572D940HF_BASE_UDP	0xfffa4000
+#define AT572D940HF_BASE_MCI	0xfffa8000
+#define AT572D940HF_BASE_TWI0	0xfffac000
+#define AT572D940HF_BASE_US0	0xfffb0000
+#define AT572D940HF_BASE_US1	0xfffb4000
+#define AT572D940HF_BASE_US2	0xfffb8000
+#define AT572D940HF_BASE_SSC0	0xfffbc000
+#define AT572D940HF_BASE_SSC1	0xfffc0000
+#define AT572D940HF_BASE_SSC2	0xfffc4000
+#define AT572D940HF_BASE_SPI0	0xfffc8000
+#define AT572D940HF_BASE_SPI1	0xfffcc000
+#define AT572D940HF_BASE_SSC3	0xfffd0000
+#define AT572D940HF_BASE_TWI1	0xfffd4000
+#define AT572D940HF_BASE_EMAC	0xfffd8000
+#define AT572D940HF_BASE_CAN0	0xfffdc000
+#define AT572D940HF_BASE_CAN1	0xfffe0000
+#define AT91_BASE_SYS		0xffffea00
+
+
+/*
+ * System Peripherals (offset from AT91_BASE_SYS)
+ */
+#define AT91_SDRAMC	(0xffffea00 - AT91_BASE_SYS)
+#define AT91_SMC	(0xffffec00 - AT91_BASE_SYS)
+#define AT91_MATRIX	(0xffffee00 - AT91_BASE_SYS)
+#define AT91_AIC	(0xfffff000 - AT91_BASE_SYS)
+#define AT91_DBGU	(0xfffff200 - AT91_BASE_SYS)
+#define AT91_PIOA	(0xfffff400 - AT91_BASE_SYS)
+#define AT91_PIOB	(0xfffff600 - AT91_BASE_SYS)
+#define AT91_PIOC	(0xfffff800 - AT91_BASE_SYS)
+#define AT91_PMC	(0xfffffc00 - AT91_BASE_SYS)
+#define AT91_RSTC	(0xfffffd00 - AT91_BASE_SYS)
+#define AT91_RTT	(0xfffffd20 - AT91_BASE_SYS)
+#define AT91_PIT	(0xfffffd30 - AT91_BASE_SYS)
+#define AT91_WDT	(0xfffffd40 - AT91_BASE_SYS)
+
+#define AT91_USART0	AT572D940HF_ID_US0
+#define AT91_USART1	AT572D940HF_ID_US1
+#define AT91_USART2	AT572D940HF_ID_US2
+
+
+/*
+ * Internal Memory.
+ */
+#define AT572D940HF_SRAM_BASE	0x00300000	/* Internal SRAM base address */
+#define AT572D940HF_SRAM_SIZE	(48 * SZ_1K)	/* Internal SRAM size (48Kb) */
+
+#define AT572D940HF_ROM_BASE	0x00400000	/* Internal ROM base address */
+#define AT572D940HF_ROM_SIZE	SZ_32K		/* Internal ROM size (32Kb) */
+
+#define AT572D940HF_UHP_BASE	0x00500000	/* USB Host controller */
+
+
+#endif

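The system-peripheral defines near the end of the new header follow the usual AT91 convention: each value is an offset from AT91_BASE_SYS, consumed by the generic at91_sys_read()/at91_sys_write() accessors (the same accessors the clock.c hunk above uses for AT91_PMC_SCER). A minimal sketch of that pattern; the EXAMPLE_ names are hypothetical and only mirror how the existing AT91 register defines are built.

/* Illustrative only: an AT91_BASE_SYS-relative define in use.
 * AT91_PMC comes from the header above; SCER sits at offset 0x00
 * within the PMC, matching the at91_sys_write(AT91_PMC_SCER, ...)
 * call visible in the clock.c hunk.
 */
#define EXAMPLE_PMC_SCER	(AT91_PMC + 0x00)	/* System Clock Enable Register */

static inline void example_enable_system_clock(unsigned long mask)
{
	at91_sys_write(EXAMPLE_PMC_SCER, mask);	/* resolves to AT91_VA_BASE_SYS + offset */
}
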
Some files were not shown because too many files changed in this diff