
Merge tag 'tegra-for-3.10-fixes-for-mmc' of git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra into mmc-next

ARM: tegra: DT-related fixes needed by the MMC tree

In order to convert the Tegra MMC driver to using mmc_of_parse(), some
bugs in the Tegra device-tree content need to be fixed first; it's
currently wrong but unused, and mmc_of_parse() causes that data to be
used for the first time.
Chris Ball, 12 years ago
commit 2c06aeb25c
100 changed files with 7521 additions and 175 deletions
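
For context on the mmc_of_parse() conversion mentioned in the commit message: the helper reads the standard MMC device-tree properties (bus-width, cd-gpios, wp-gpios, non-removable, and so on) from the host controller's node and applies them to the mmc_host, which is why previously unused but incorrect DT content becomes visible once a driver starts calling it. Below is a minimal, hedged sketch of where the call sits in a host driver's probe path; the function name and surrounding details are illustrative, not taken from the actual Tegra driver.

	/* Illustrative sketch only; not the real sdhci-tegra probe path. */
	#include <linux/mmc/host.h>
	#include <linux/platform_device.h>

	static int example_sdhci_probe(struct platform_device *pdev)
	{
		struct mmc_host *mmc;

		mmc = mmc_alloc_host(0, &pdev->dev);
		if (!mmc)
			return -ENOMEM;

		/*
		 * Parses bus-width, cd-gpios, wp-gpios, non-removable, ...
		 * from the DT node attached to &pdev->dev.  Wrong DT content
		 * turns into wrong host capabilities here, which is why the
		 * DT fixes have to land before the driver conversion.
		 */
		mmc_of_parse(mmc);

		/* ... controller-specific setup, then mmc_add_host(mmc) ... */
		return 0;
	}
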
  1. 2 0
      Documentation/00-INDEX
  2. 42 3
      Documentation/ABI/testing/sysfs-bus-fcoe
  3. 83 0
      Documentation/ABI/testing/sysfs-platform-msi-laptop
  4. 5 4
      Documentation/DMA-API-HOWTO.txt
  5. 12 6
      Documentation/IPMI.txt
  6. 58 0
      Documentation/block/cfq-iosched.txt
  7. 4 34
      Documentation/blockdev/nbd.txt
  8. 25 12
      Documentation/cgroups/blkio-controller.txt
  9. 4 0
      Documentation/coccinelle.txt
  10. 77 0
      Documentation/device-mapper/cache-policies.txt
  11. 243 0
      Documentation/device-mapper/cache.txt
  12. 24 0
      Documentation/devicetree/bindings/arc/interrupts.txt
  13. 6 0
      Documentation/devicetree/bindings/arm/armadeus.txt
  14. 8 0
      Documentation/devicetree/bindings/arm/fsl.txt
  15. 1 0
      Documentation/devicetree/bindings/clock/imx5-clock.txt
  16. 2 0
      Documentation/devicetree/bindings/clock/imx6q-clock.txt
  17. 36 34
      Documentation/devicetree/bindings/dma/snps-dma.txt
  18. 82 0
      Documentation/devicetree/bindings/metag/meta-intc.txt
  19. 47 0
      Documentation/devicetree/bindings/mips/cpu_irq.txt
  20. 16 0
      Documentation/devicetree/bindings/mtd/elm.txt
  21. 3 0
      Documentation/devicetree/bindings/mtd/mtd-physmap.txt
  22. 16 0
      Documentation/devicetree/bindings/serial/lantiq_asc.txt
  23. 18 0
      Documentation/devicetree/bindings/thermal/dove-thermal.txt
  24. 15 0
      Documentation/devicetree/bindings/thermal/kirkwood-thermal.txt
  25. 29 0
      Documentation/devicetree/bindings/thermal/rcar-thermal.txt
  26. 7 4
      Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt
  27. 19 0
      Documentation/devicetree/bindings/w1/fsl-imx-owire.txt
  28. 9 0
      Documentation/devicetree/bindings/watchdog/atmel-at91rm9200-wdt.txt
  29. 4 0
      Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
  30. 5 0
      Documentation/devicetree/bindings/watchdog/marvel.txt
  31. 4 0
      Documentation/devicetree/bindings/watchdog/pnx4008-wdt.txt
  32. 13 0
      Documentation/devicetree/bindings/watchdog/qca-ar7130-wdt.txt
  33. 3 0
      Documentation/devicetree/bindings/watchdog/samsung-wdt.txt
  34. 5 1
      Documentation/dma-buf-sharing.txt
  35. 0 23
      Documentation/kbuild/kconfig-language.txt
  36. 6 0
      Documentation/kbuild/kconfig.txt
  37. 9 36
      Documentation/kernel-parameters.txt
  38. 4 0
      Documentation/metag/00-INDEX
  39. 256 0
      Documentation/metag/kernel-ABI.txt
  40. 9 0
      Documentation/scsi/ChangeLog.megaraid_sas
  41. 53 0
      Documentation/thermal/exynos_thermal_emulation
  42. 307 0
      Documentation/thermal/intel_powerclamp.txt
  43. 16 2
      Documentation/thermal/sysfs-api.txt
  44. 13 1
      Documentation/watchdog/watchdog-kernel-api.txt
  45. 43 10
      MAINTAINERS
  46. 5 5
      Makefile
  47. 23 0
      arch/Kconfig
  48. 1 0
      arch/alpha/Kconfig
  49. 2 0
      arch/arc/Kbuild
  50. 453 0
      arch/arc/Kconfig
  51. 34 0
      arch/arc/Kconfig.debug
  52. 126 0
      arch/arc/Makefile
  53. 26 0
      arch/arc/boot/Makefile
  54. 13 0
      arch/arc/boot/dts/Makefile
  55. 55 0
      arch/arc/boot/dts/angel4.dts
  56. 10 0
      arch/arc/boot/dts/skeleton.dts
  57. 37 0
      arch/arc/boot/dts/skeleton.dtsi
  58. 61 0
      arch/arc/configs/fpga_defconfig
  59. 49 0
      arch/arc/include/asm/Kbuild
  60. 433 0
      arch/arc/include/asm/arcregs.h
  61. 9 0
      arch/arc/include/asm/asm-offsets.h
  62. 232 0
      arch/arc/include/asm/atomic.h
  63. 42 0
      arch/arc/include/asm/barrier.h
  64. 516 0
      arch/arc/include/asm/bitops.h
  65. 37 0
      arch/arc/include/asm/bug.h
  66. 75 0
      arch/arc/include/asm/cache.h
  67. 67 0
      arch/arc/include/asm/cacheflush.h
  68. 101 0
      arch/arc/include/asm/checksum.h
  69. 22 0
      arch/arc/include/asm/clk.h
  70. 143 0
      arch/arc/include/asm/cmpxchg.h
  71. 32 0
      arch/arc/include/asm/current.h
  72. 56 0
      arch/arc/include/asm/defines.h
  73. 68 0
      arch/arc/include/asm/delay.h
  74. 116 0
      arch/arc/include/asm/disasm.h
  75. 221 0
      arch/arc/include/asm/dma-mapping.h
  76. 14 0
      arch/arc/include/asm/dma.h
  77. 78 0
      arch/arc/include/asm/elf.h
  78. 724 0
      arch/arc/include/asm/entry.h
  79. 15 0
      arch/arc/include/asm/exec.h
  80. 151 0
      arch/arc/include/asm/futex.h
  81. 105 0
      arch/arc/include/asm/io.h
  82. 25 0
      arch/arc/include/asm/irq.h
  83. 153 0
      arch/arc/include/asm/irqflags.h
  84. 19 0
      arch/arc/include/asm/kdebug.h
  85. 61 0
      arch/arc/include/asm/kgdb.h
  86. 62 0
      arch/arc/include/asm/kprobes.h
  87. 63 0
      arch/arc/include/asm/linkage.h
  88. 87 0
      arch/arc/include/asm/mach_desc.h
  89. 23 0
      arch/arc/include/asm/mmu.h
  90. 213 0
      arch/arc/include/asm/mmu_context.h
  91. 28 0
      arch/arc/include/asm/module.h
  92. 18 0
      arch/arc/include/asm/mutex.h
  93. 109 0
      arch/arc/include/asm/page.h
  94. 13 0
      arch/arc/include/asm/perf_event.h
  95. 134 0
      arch/arc/include/asm/pgalloc.h
  96. 405 0
      arch/arc/include/asm/pgtable.h
  97. 151 0
      arch/arc/include/asm/processor.h
  98. 14 0
      arch/arc/include/asm/prom.h
  99. 130 0
      arch/arc/include/asm/ptrace.h
  100. 18 0
      arch/arc/include/asm/sections.h

+ 2 - 0
Documentation/00-INDEX

@@ -299,6 +299,8 @@ memory-hotplug.txt
 	- Hotpluggable memory support, how to use and current status.
 memory.txt
 	- info on typical Linux memory problems.
+metag/
+	- directory with info about Linux on Meta architecture.
 mips/
 	- directory with info about Linux on MIPS architecture.
 misc-devices/

+ 42 - 3
Documentation/ABI/testing/sysfs-bus-fcoe

@@ -1,14 +1,53 @@
-What:		/sys/bus/fcoe/ctlr_X
+What:		/sys/bus/fcoe/
+Date:		August 2012
+KernelVersion:	TBD
+Contact:	Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
+Description:	The FCoE bus. Attributes in this directory are control interfaces.
+Attributes:
+
+	ctlr_create: 'FCoE Controller' instance creation interface. Writing an
+		     <ifname> to this file will allocate and populate sysfs with a
+		     fcoe_ctlr_device (ctlr_X). The user can then configure any
+		     per-port settings and finally write to the fcoe_ctlr_device's
+		     'start' attribute to begin the kernel's discovery and login
+		     process.
+
+	ctlr_destroy: 'FCoE Controller' instance removal interface. Writing a
+		       fcoe_ctlr_device's sysfs name to this file will log the
+		       fcoe_ctlr_device out of the fabric or otherwise connected
+		       FCoE devices. It will also free all kernel memory allocated
+		       for this fcoe_ctlr_device and any structures associated
+		       with it; this includes the scsi_host.
+
+What:		/sys/bus/fcoe/devices/ctlr_X
 Date:		March 2012
 KernelVersion:	TBD
 Contact:	Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
-Description:	'FCoE Controller' instances on the fcoe bus
+Description:	'FCoE Controller' instances on the fcoe bus.
+		The FCoE Controller now has a three-stage creation process:
+		1) Write an interface name to ctlr_create 2) Configure the FCoE
+		Controller (ctlr_X) 3) Enable the FCoE Controller to begin
+		discovery and login. The FCoE Controller is destroyed by
+		writing its name, i.e. ctlr_X, to the ctlr_destroy file.
+
 Attributes:
 
 	fcf_dev_loss_tmo: Device loss timeout peroid (see below). Changing
 			  this value will change the dev_loss_tmo for all
 			  FCFs discovered by this controller.
 
+	mode:		  Display or change the FCoE Controller's mode. Possible
+			  modes are 'Fabric' and 'VN2VN'. If a FCoE Controller
+			  is started in 'Fabric' mode then FIP FCF discovery is
+			  initiated and ultimately a fabric login is attempted.
+			  If a FCoE Controller is started in 'VN2VN' mode then
+			  FIP VN2VN discovery and login is performed. A FCoE
+			  Controller only supports one mode at a time.
+
+	enabled:	  Whether an FCoE controller is enabled or disabled.
+			  0 if disabled, 1 if enabled. Writing either 0 or 1
+			  to this file will enable or disable the FCoE controller.
+
 	lesb/link_fail:   Link Error Status Block (LESB) link failure count.
 
 	lesb/vlink_fail:  Link Error Status Block (LESB) virtual link
@@ -26,7 +65,7 @@ Attributes:
 
 Notes: ctlr_X (global increment starting at 0)
 
-What:		/sys/bus/fcoe/fcf_X
+What:		/sys/bus/fcoe/devices/fcf_X
 Date:		March 2012
 KernelVersion:	TBD
 Contact:	Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org

+ 83 - 0
Documentation/ABI/testing/sysfs-platform-msi-laptop

@@ -0,0 +1,83 @@
+What:		/sys/devices/platform/msi-laptop-pf/lcd_level
+Date:		Oct 2006
+KernelVersion:	2.6.19
+Contact:	"Lennart Poettering <mzxreary@0pointer.de>"
+Description:
+		Screen brightness: contains a single integer in the range 0..8.
+
+What:		/sys/devices/platform/msi-laptop-pf/auto_brightness
+Date:		Oct 2006
+KernelVersion:	2.6.19
+Contact:	"Lennart Poettering <mzxreary@0pointer.de>"
+Description:
+		Enable automatic brightness control: contains either 0 or 1. If
+		set to 1 the hardware adjusts the screen brightness
+		automatically when the power cord is plugged/unplugged.
+
+What:		/sys/devices/platform/msi-laptop-pf/wlan
+Date:		Oct 2006
+KernelVersion:	2.6.19
+Contact:	"Lennart Poettering <mzxreary@0pointer.de>"
+Description:
+		WLAN subsystem enabled: contains either 0 or 1.
+
+What:		/sys/devices/platform/msi-laptop-pf/bluetooth
+Date:		Oct 2006
+KernelVersion:	2.6.19
+Contact:	"Lennart Poettering <mzxreary@0pointer.de>"
+Description:
+		Bluetooth subsystem enabled: contains either 0 or 1. Please
+		note that this file is constantly 0 if no Bluetooth hardware is
+		available.
+
+What:		/sys/devices/platform/msi-laptop-pf/touchpad
+Date:		Nov 2012
+KernelVersion:	3.8
+Contact:	"Maxim Mikityanskiy <maxtram95@gmail.com>"
+Description:
+		Contains either 0 or 1 and indicates if touchpad is turned on.
+		Touchpad state can only be toggled by pressing Fn+F3.
+
+What:		/sys/devices/platform/msi-laptop-pf/turbo_mode
+Date:		Nov 2012
+KernelVersion:	3.8
+Contact:	"Maxim Mikityanskiy <maxtram95@gmail.com>"
+Description:
+		Contains either 0 or 1 and indicates if turbo mode is turned
+		on. In turbo mode the power LED is orange and the processor
+		is overclocked. Turbo mode is available only while charging.
+		The turbo mode state can only be toggled by pressing Fn+F10,
+		and there is a cooldown of a few seconds between subsequent
+		toggles. If the user presses Fn+F10 too frequently, the turbo
+		mode state is not changed.
+
+What:		/sys/devices/platform/msi-laptop-pf/eco_mode
+Date:		Nov 2012
+KernelVersion:	3.8
+Contact:	"Maxim Mikityanskiy <maxtram95@gmail.com>"
+Description:
+		Contains either 0 or 1 and indicates if ECO mode is turned on.
+		In ECO mode the power LED is green and userspace should do some
+		powersaving actions. ECO mode is available only on battery
+		power. ECO mode can only be toggled by pressing Fn+F10.
+
+What:		/sys/devices/platform/msi-laptop-pf/turbo_cooldown
+Date:		Nov 2012
+KernelVersion:	3.8
+Contact:	"Maxim Mikityanskiy <maxtram95@gmail.com>"
+Description:
+		Contains value in range 0..3:
+			* 0 -> Turbo mode is off
+			* 1 -> Turbo mode is on, cannot be turned off yet
+			* 2 -> Turbo mode is off, cannot be turned on yet
+			* 3 -> Turbo mode is on
+
+What:		/sys/devices/platform/msi-laptop-pf/auto_fan
+Date:		Nov 2012
+KernelVersion:	3.8
+Contact:	"Maxim Mikityanskiy <maxtram95@gmail.com>"
+Description:
+		Contains either 0 or 1 and indicates if fan speed is controlled
+		automatically (1) or fan runs at maximal speed (0). Can be
+		toggled in software.
+

+ 5 - 4
Documentation/DMA-API-HOWTO.txt

@@ -488,9 +488,10 @@ will invoke the generic mapping error check interface. Doing so will ensure
 that the mapping code will work correctly on all dma implementations without
 any dependency on the specifics of the underlying implementation. Using the
 returned address without checking for errors could result in failures ranging
-from panics to silent data corruption. Couple of example of incorrect ways to
-check for errors that make assumptions about the underlying dma implementation
-are as follows and these are applicable to dma_map_page() as well.
+from panics to silent data corruption. A couple of examples of incorrect ways
+to check for errors that make assumptions about the underlying dma
+implementation are as follows and these are applicable to dma_map_page() as
+well.
 
 Incorrect example 1:
 	dma_addr_t dma_handle;
@@ -751,7 +752,7 @@ Example 1:
 		dma_unmap_single(dma_handle1);
 	map_error_handling1:
 
-Example 2: (if buffers are allocated a loop, unmap all mapped buffers when
+Example 2: (if buffers are allocated in a loop, unmap all mapped buffers when
 	    mapping error is detected in the middle)
 
 	dma_addr_t dma_addr;

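A short reference sketch of the generic error check recommended in the hunk above; it relies only on dma_mapping_error() rather than on any implementation-specific test, and the variable names are illustrative.

	dma_addr_t dma_handle;

	dma_handle = dma_map_single(dev, addr, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_handle)) {
		/*
		 * reduce current DMA mapping usage,
		 * delay and try again later or
		 * reset driver.
		 */
		goto map_error_handling;
	}
	...
	dma_unmap_single(dev, dma_handle, size, DMA_TO_DEVICE);
	map_error_handling:
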
+ 12 - 6
Documentation/IPMI.txt

@@ -348,34 +348,40 @@ You can change this at module load time (for a module) with:
 
   modprobe ipmi_si.o type=<type1>,<type2>....
        ports=<port1>,<port2>... addrs=<addr1>,<addr2>...
-       irqs=<irq1>,<irq2>... trydefaults=[0|1]
+       irqs=<irq1>,<irq2>...
        regspacings=<sp1>,<sp2>,... regsizes=<size1>,<size2>,...
        regshifts=<shift1>,<shift2>,...
        slave_addrs=<addr1>,<addr2>,...
        force_kipmid=<enable1>,<enable2>,...
        kipmid_max_busy_us=<ustime1>,<ustime2>,...
        unload_when_empty=[0|1]
+       trydefaults=[0|1] trydmi=[0|1] tryacpi=[0|1]
+       tryplatform=[0|1] trypci=[0|1]
 
-Each of these except si_trydefaults is a list, the first item for the
+Each of these, except the try... items, is a list; the first item for the
 first interface, second item for the second interface, etc.
 
 The si_type may be either "kcs", "smic", or "bt".  If you leave it blank, it
 defaults to "kcs".
 
-If you specify si_addrs as non-zero for an interface, the driver will
+If you specify addrs as non-zero for an interface, the driver will
 use the memory address given as the address of the device.  This
 overrides si_ports.
 
-If you specify si_ports as non-zero for an interface, the driver will
+If you specify ports as non-zero for an interface, the driver will
 use the I/O port given as the device address.
 
-If you specify si_irqs as non-zero for an interface, the driver will
+If you specify irqs as non-zero for an interface, the driver will
 attempt to use the given interrupt for the device.
 
-si_trydefaults sets whether the standard IPMI interface at 0xca2 and
+trydefaults sets whether the standard IPMI interface at 0xca2 and
 any interfaces specified by ACPE are tried.  By default, the driver
 tries it, set this value to zero to turn this off.
 
+The other try... items disable discovery by the corresponding
+mechanisms.  These are all enabled by default; set them to zero to
+disable them.  The tryplatform parameter controls OpenFirmware discovery.
+
 The next three parameters have to do with register layout.  The
 registers used by the interfaces may not appear at successive
 locations and they may not be in 8-bit registers.  These parameters

+ 58 - 0
Documentation/block/cfq-iosched.txt

@@ -102,6 +102,64 @@ processing of request. Therefore, increasing the value can imporve the
 performace although this can cause the latency of some I/O to increase due
 to more number of requests.
 
+CFQ Group scheduling
+====================
+
+CFQ supports blkio cgroup and has "blkio." prefixed files in each
+blkio cgroup directory. It is weight-based and there are four knobs
+for configuration - weight[_device] and leaf_weight[_device].
+Internal cgroup nodes (the ones with children) can also have tasks in
+them, so the former two configure how much proportion the cgroup as a
+whole is entitled to at its parent's level while the latter two
+configure how much proportion the tasks in the cgroup have compared to
+its direct children.
+
+Another way to think about it is assuming that each internal node has
+an implicit leaf child node which hosts all the tasks whose weight is
+configured by leaf_weight[_device]. Let's assume a blkio hierarchy
+composed of five cgroups - root, A, B, AA and AB - with the following
+weights where the names represent the hierarchy.
+
+        weight leaf_weight
+ root :  125    125
+ A    :  500    750
+ B    :  250    500
+ AA   :  500    500
+ AB   : 1000    500
+
+root never has a parent, making its weight meaningless. For backward
+compatibility, weight is always kept in sync with leaf_weight. B, AA
+and AB have no children and thus their tasks have no child cgroups to
+compete with. They always get 100% of what the cgroup won at the
+parent level. Considering only the weights which matter, the hierarchy
+looks like the following.
+
+          root
+       /    |   \
+      A     B    leaf
+     500   250   125
+   /  |  \
+  AA  AB  leaf
+ 500 1000 750
+
+If all cgroups have active IOs and are competing with each other, disk
+time will be distributed like the following.
+
+Distribution below root. The total active weight at this level is
+A:500 + B:250 + root-leaf:125 = 875.
+
+ root-leaf :   125 /  875      =~ 14%
+ A         :   500 /  875      =~ 57%
+ B(-leaf)  :   250 /  875      =~ 28%
+
+A has children and further distributes its 57% among the children and
+the implicit leaf node. The total active weight at this level is
+AA:500 + AB:1000 + A-leaf:750 = 2250.
+
+ A-leaf    : ( 750 / 2250) * A =~ 19%
+ AA(-leaf) : ( 500 / 2250) * A =~ 12%
+ AB(-leaf) : (1000 / 2250) * A =~ 25%
+
 CFQ IOPS Mode for group scheduling
 ===================================
 Basic CFQ design is to provide priority based time slices. Higher priority

+ 4 - 34
Documentation/blockdev/nbd.txt

@@ -4,43 +4,13 @@
    can use a remote server as one of its block devices. So every time
    the client computer wants to read, e.g., /dev/nb0, it sends a
    request over TCP to the server, which will reply with the data read.
-   This can be used for stations with low disk space (or even diskless -
-   if you boot from floppy) to borrow disk space from another computer.
-   Unlike NFS, it is possible to put any filesystem on it, etc. It should
-   even be possible to use NBD as a root filesystem (I've never tried),
-   but it requires a user-level program to be in the initrd to start.
-   It also allows you to run block-device in user land (making server
-   and client physically the same computer, communicating using loopback).
-   
-   Current state: It currently works. Network block device is stable.
-   I originally thought that it was impossible to swap over TCP. It
-   turned out not to be true - swapping over TCP now works and seems
-   to be deadlock-free, but it requires heavy patches into Linux's
-   network layer.
-   
+   This can be used for stations with low disk space (or even diskless)
+   to borrow disk space from another computer.
+   Unlike NFS, it is possible to put any filesystem on it, etc.
+
    For more information, or to download the nbd-client and nbd-server
    tools, go to http://nbd.sf.net/.
 
-   Howto: To setup nbd, you can simply do the following:
-
-   First, serve a device or file from a remote server:
-
-   nbd-server <port-number> <device-or-file-to-serve-to-client>
-
-   e.g.,
-	root@server1 # nbd-server 1234 /dev/sdb1
-
-	(serves sdb1 partition on TCP port 1234)
-
-   Then, on the local (client) system:
-
-   nbd-client <server-name-or-IP> <server-port-number> /dev/nb[0-n]
-
-   e.g.,
-	root@client1 # nbd-client server1 1234 /dev/nb0
-
-	(creates the nb0 device on client1)
-
    The nbd kernel module need only be installed on the client
    system, as the nbd-server is completely in userspace. In fact,
    the nbd-server has been successfully ported to other operating

+ 25 - 12
Documentation/cgroups/blkio-controller.txt

@@ -75,7 +75,7 @@ Throttling/Upper Limit policy
         mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
 
 - Specify a bandwidth rate on particular device for root group. The format
-  for policy is "<major>:<minor>  <byes_per_second>".
+  for policy is "<major>:<minor>  <bytes_per_second>".
 
         echo "8:16  1048576" > /sys/fs/cgroup/blkio/blkio.throttle.read_bps_device
 
@@ -94,13 +94,11 @@ Throttling/Upper Limit policy
 
 Hierarchical Cgroups
 ====================
-- Currently none of the IO control policy supports hierarchical groups. But
-  cgroup interface does allow creation of hierarchical cgroups and internally
-  IO policies treat them as flat hierarchy.
+- Currently only CFQ supports hierarchical groups. For throttling,
+  the cgroup interface does allow creation of hierarchical cgroups and
+  internally it treats them as a flat hierarchy.
 
-  So this patch will allow creation of cgroup hierarchcy but at the backend
-  everything will be treated as flat. So if somebody created a hierarchy like
-  as follows.
+  If somebody created a hierarchy as follows:
 
 			root
 			/  \
@@ -108,16 +106,20 @@ Hierarchical Cgroups
 			|
 		     test3
 
-  CFQ and throttling will practically treat all groups at same level.
+  CFQ will handle the hierarchy correctly but throttling will
+  practically treat all groups at the same level. For details on CFQ
+  hierarchy support, refer to Documentation/block/cfq-iosched.txt.
+  Throttling will treat the hierarchy as if it looks like the
+  following.
 
 				pivot
 			     /  /   \  \
 			root  test1 test2  test3
 
-  Down the line we can implement hierarchical accounting/control support
-  and also introduce a new cgroup file "use_hierarchy" which will control
-  whether cgroup hierarchy is viewed as flat or hierarchical by the policy..
-  This is how memory controller also has implemented the things.
+  Nesting cgroups, while allowed, isn't officially supported and blkio
+  generates a warning when cgroups nest. Once throttling implements
+  hierarchy support, hierarchy will be supported and the warning will
+  be removed.
 
 Various user visible config options
 ===================================
@@ -172,6 +174,12 @@ Proportional weight policy files
 	  dev     weight
 	  8:16    300
 
+- blkio.leaf_weight[_device]
+	- Equivalents of blkio.weight[_device] for the purpose of
+          deciding how much weight the tasks in the given cgroup have while
+          competing with the cgroup's child cgroups. For details,
+          please refer to Documentation/block/cfq-iosched.txt.
+
 - blkio.time
 	- disk time allocated to cgroup per device in milliseconds. First
 	  two fields specify the major and minor number of the device and
@@ -279,6 +287,11 @@ Proportional weight policy files
 	  and minor number of the device and third field specifies the number
 	  of times a group was dequeued from a particular device.
 
+- blkio.*_recursive
+	- Recursive version of various stats. These files show the
+          same information as their non-recursive counterparts but
+          include stats from all the descendant cgroups.
+
 Throttling/Upper limit policy files
 -----------------------------------
 - blkio.throttle.read_bps_device

+ 4 - 0
Documentation/coccinelle.txt

@@ -87,6 +87,10 @@ As any static code analyzer, Coccinelle produces false
 positives. Thus, reports must be carefully checked, and patches
 reviewed.
 
+To enable verbose messages set the V= variable, for example:
+
+   make coccicheck MODE=report V=1
+
 
  Using Coccinelle with a single semantic patch
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

+ 77 - 0
Documentation/device-mapper/cache-policies.txt

@@ -0,0 +1,77 @@
+Guidance for writing policies
+=============================
+
+Try to keep transactionality out of it.  The core is careful to
+avoid asking about anything that is migrating.  This is a pain, but
+makes it easier to write the policies.
+
+Mappings are loaded into the policy at construction time.
+
+Every bio that is mapped by the target is referred to the policy.
+The policy can return a simple HIT or MISS or issue a migration.
+
+Currently there's no way for the policy to issue background work,
+e.g. to start writing back dirty blocks that are going to be evicted
+soon.
+
+Because we map bios, rather than requests, it's easy for the policy
+to get fooled by many small bios.  For this reason the core target
+issues periodic ticks to the policy.  It's suggested that the policy
+doesn't update states (eg, hit counts) for a block more than once
+for each tick.  The core ticks by watching bios complete, and so
+tries to see when the io scheduler has let the ios run.
+
+
+Overview of supplied cache replacement policies
+===============================================
+
+multiqueue
+----------
+
+This policy is the default.
+
+The multiqueue policy has two sets of 16 queues: one set for entries
+waiting for the cache and another one for those in the cache.
+Cache entries in the queues are aged based on logical time. Entry into
+the cache is based on variable thresholds and queue selection is based
+on hit count on entry. The policy aims to take different cache miss
+costs into account and to adjust to varying load patterns automatically.
+
+Message and constructor argument pairs are:
+	'sequential_threshold <#nr_sequential_ios>' and
+	'random_threshold <#nr_random_ios>'.
+
+The sequential threshold indicates the number of contiguous I/Os
+required before a stream is treated as sequential.  The random threshold
+is the number of intervening non-contiguous I/Os that must be seen
+before the stream is treated as random again.
+
+The sequential and random thresholds default to 512 and 4 respectively.
+
+Large, sequential ios are probably better left on the origin device
+since spindles tend to have good bandwidth. The io_tracker counts
+contiguous I/Os to try to spot when the io is in one of these sequential
+modes.
+
+cleaner
+-------
+
+The cleaner writes back all dirty blocks in a cache to decommission it.
+
+Examples
+========
+
+The syntax for a table is:
+	cache <metadata dev> <cache dev> <origin dev> <block size>
+	<#feature_args> [<feature arg>]*
+	<policy> <#policy_args> [<policy arg>]*
+
+The syntax to send a message using the dmsetup command is:
+	dmsetup message <mapped device> 0 sequential_threshold 1024
+	dmsetup message <mapped device> 0 random_threshold 8
+
+Using dmsetup:
+	dmsetup create blah --table "0 268435456 cache /dev/sdb /dev/sdc \
+	    /dev/sdd 512 0 mq 4 sequential_threshold 1024 random_threshold 8"
+	creates a 128GB large mapped device named 'blah' with the
+	sequential threshold set to 1024 and the random_threshold set to 8.

+ 243 - 0
Documentation/device-mapper/cache.txt

@@ -0,0 +1,243 @@
+Introduction
+============
+
+dm-cache is a device mapper target written by Joe Thornber, Heinz
+Mauelshagen, and Mike Snitzer.
+
+It aims to improve performance of a block device (eg, a spindle) by
+dynamically migrating some of its data to a faster, smaller device
+(eg, an SSD).
+
+This device-mapper solution allows us to insert this caching at
+different levels of the dm stack, for instance above the data device for
+a thin-provisioning pool.  Caching solutions that are integrated more
+closely with the virtual memory system should give better performance.
+
+The target reuses the metadata library used in the thin-provisioning
+library.
+
+The decision as to what data to migrate and when is left to a plug-in
+policy module.  Several of these have been written as we experiment,
+and we hope other people will contribute others for specific io
+scenarios (eg. a vm image server).
+
+Glossary
+========
+
+  Migration -  Movement of the primary copy of a logical block from one
+	       device to the other.
+  Promotion -  Migration from slow device to fast device.
+  Demotion  -  Migration from fast device to slow device.
+
+The origin device always contains a copy of the logical block, which
+may be out of date or kept in sync with the copy on the cache device
+(depending on policy).
+
+Design
+======
+
+Sub-devices
+-----------
+
+The target is constructed by passing three devices to it (along with
+other parameters detailed later):
+
+1. An origin device - the big, slow one.
+
+2. A cache device - the small, fast one.
+
+3. A small metadata device - records which blocks are in the cache,
+   which are dirty, and extra hints for use by the policy object.
+   This information could be put on the cache device, but having it
+   separate allows the volume manager to configure it differently,
+   e.g. as a mirror for extra robustness.
+
+Fixed block size
+----------------
+
+The origin is divided up into blocks of a fixed size.  This block size
+is configurable when you first create the cache.  Typically we've been
+using block sizes of 256k - 1024k.
+
+Having a fixed block size simplifies the target a lot.  But it is
+something of a compromise.  For instance, a small part of a block may be
+getting hit a lot, yet the whole block will be promoted to the cache.
+So large block sizes are bad because they waste cache space.  And small
+block sizes are bad because they increase the amount of metadata (both
+in core and on disk).
+
+Writeback/writethrough
+----------------------
+
+The cache has two modes, writeback and writethrough.
+
+If writeback, the default, is selected then a write to a block that is
+cached will go only to the cache and the block will be marked dirty in
+the metadata.
+
+If writethrough is selected then a write to a cached block will not
+complete until it has hit both the origin and cache devices.  Clean
+blocks should remain clean.
+
+A simple cleaner policy is provided, which will clean (write back) all
+dirty blocks in a cache.  Useful for decommissioning a cache.
+
+Migration throttling
+--------------------
+
+Migrating data between the origin and cache device uses bandwidth.
+The user can set a throttle to prevent more than a certain amount of
+migration occurring at any one time.  Currently we're not taking any
+account of normal io traffic going to the devices.  More work needs
+doing here to avoid migrating during those peak io moments.
+
+For the time being, a message "migration_threshold <#sectors>"
+can be used to set the maximum number of sectors being migrated,
+the default being 204800 sectors (or 100MB).
+
+Updating on-disk metadata
+-------------------------
+
+On-disk metadata is committed every time a REQ_SYNC or REQ_FUA bio is
+written.  If no such requests are made then commits will occur every
+second.  This means the cache behaves like a physical disk that has a
+write cache (the same is true of the thin-provisioning target).  If
+power is lost you may lose some recent writes.  The metadata should
+always be consistent in spite of any crash.
+
+The 'dirty' state for a cache block changes far too frequently for us
+to keep updating it on the fly.  So we treat it as a hint.  In normal
+operation it will be written when the dm device is suspended.  If the
+system crashes all cache blocks will be assumed dirty when restarted.
+
+Per-block policy hints
+----------------------
+
+Policy plug-ins can store a chunk of data per cache block.  It's up to
+the policy how big this chunk is, but it should be kept small.  Like the
+dirty flags this data is lost if there's a crash so a safe fallback
+value should always be possible.
+
+For instance, the 'mq' policy, which is currently the default policy,
+uses this facility to store the hit count of the cache blocks.  If
+there's a crash this information will be lost, which means the cache
+may be less efficient until those hit counts are regenerated.
+
+Policy hints affect performance, not correctness.
+
+Policy messaging
+----------------
+
+Policies will have different tunables, specific to each one, so we
+need a generic way of getting and setting these.  Device-mapper
+messages are used.  Refer to cache-policies.txt.
+
+Discard bitset resolution
+-------------------------
+
+We can avoid copying data during migration if we know the block has
+been discarded.  A prime example of this is when mkfs discards the
+whole block device.  We store a bitset tracking the discard state of
+blocks.  However, we allow this bitset to have a different block size
+from the cache blocks.  This is because we need to track the discard
+state for all of the origin device (compare with the dirty bitset
+which is just for the smaller cache device).
+
+Target interface
+================
+
+Constructor
+-----------
+
+ cache <metadata dev> <cache dev> <origin dev> <block size>
+       <#feature args> [<feature arg>]*
+       <policy> <#policy args> [policy args]*
+
+ metadata dev    : fast device holding the persistent metadata
+ cache dev	 : fast device holding cached data blocks
+ origin dev	 : slow device holding original data blocks
+ block size      : cache unit size in sectors
+
+ #feature args   : number of feature arguments passed
+ feature args    : writethrough.  (The default is writeback.)
+
+ policy          : the replacement policy to use
+ #policy args    : an even number of arguments corresponding to
+                   key/value pairs passed to the policy
+ policy args     : key/value pairs passed to the policy
+		   E.g. 'sequential_threshold 1024'
+		   See cache-policies.txt for details.
+
+Optional feature arguments are:
+   writethrough  : write through caching that prohibits cache block
+		   content from being different from origin block content.
+		   Without this argument, the default behaviour is to write
+		   back cache block contents later for performance reasons,
+		   so they may differ from the corresponding origin blocks.
+
+A policy called 'default' is always registered.  This is an alias for
+the policy we currently think is giving best all round performance.
+
+As the default policy could vary between kernels, if you are relying on
+the characteristics of a specific policy, always request it by name.
+
+Status
+------
+
+<#used metadata blocks>/<#total metadata blocks> <#read hits> <#read misses>
+<#write hits> <#write misses> <#demotions> <#promotions> <#blocks in cache>
+<#dirty> <#features> <features>* <#core args> <core args>* <#policy args>
+<policy args>*
+
+#used metadata blocks    : Number of metadata blocks used
+#total metadata blocks   : Total number of metadata blocks
+#read hits               : Number of times a READ bio has been mapped
+			     to the cache
+#read misses             : Number of times a READ bio has been mapped
+			     to the origin
+#write hits              : Number of times a WRITE bio has been mapped
+			     to the cache
+#write misses            : Number of times a WRITE bio has been
+			     mapped to the origin
+#demotions               : Number of times a block has been removed
+			     from the cache
+#promotions              : Number of times a block has been moved to
+			     the cache
+#blocks in cache         : Number of blocks resident in the cache
+#dirty                   : Number of blocks in the cache that differ
+			     from the origin
+#feature args            : Number of feature args to follow
+feature args             : 'writethrough' (optional)
+#core args               : Number of core arguments (must be even)
+core args                : Key/value pairs for tuning the core
+			     e.g. migration_threshold
+#policy args             : Number of policy arguments to follow (must be even)
+policy args              : Key/value pairs
+			     e.g. 'sequential_threshold 1024'
+
+Messages
+--------
+
+Policies will have different tunables, specific to each one, so we
+need a generic way of getting and setting these.  Device-mapper
+messages are used.  (A sysfs interface would also be possible.)
+
+The message format is:
+
+   <key> <value>
+
+E.g.
+   dmsetup message my_cache 0 sequential_threshold 1024
+
+Examples
+========
+
+The test suite can be found here:
+
+https://github.com/jthornber/thinp-test-suite
+
+dmsetup create my_cache --table '0 41943040 cache /dev/mapper/metadata \
+	/dev/mapper/ssd /dev/mapper/origin 512 1 writeback default 0'
+dmsetup create my_cache --table '0 41943040 cache /dev/mapper/metadata \
+	/dev/mapper/ssd /dev/mapper/origin 1024 1 writeback \
+	mq 4 sequential_threshold 1024 random_threshold 8'

+ 24 - 0
Documentation/devicetree/bindings/arc/interrupts.txt

@@ -0,0 +1,24 @@
+* ARC700 incore Interrupt Controller
+
+  The core interrupt controller provides 32 prioritised interrupts (2 levels)
+  to ARC700 core.
+
+Properties:
+
+- compatible: "snps,arc700-intc"
+- interrupt-controller: This is an interrupt controller.
+- #interrupt-cells: Must be <1>.
+
+  The single-cell "interrupts" property of a device specifies the IRQ number,
+  in the range 0 to 31.
+
+  The intc is accessed via the special ARC AUX register interface, hence the
+  "reg" property is not specified.
+
+Example:
+
+	intc: interrupt-controller {
+		compatible = "snps,arc700-intc";
+		interrupt-controller;
+		#interrupt-cells = <1>;
+	};

+ 6 - 0
Documentation/devicetree/bindings/arm/armadeus.txt

@@ -0,0 +1,6 @@
+Armadeus i.MX Platforms Device Tree Bindings
+-----------------------------------------------
+
+APF51: i.MX51 based module.
+Required root node properties:
+    - compatible = "armadeus,imx51-apf51", "fsl,imx51";

+ 8 - 0
Documentation/devicetree/bindings/arm/fsl.txt

@@ -5,6 +5,14 @@ i.MX23 Evaluation Kit
 Required root node properties:
     - compatible = "fsl,imx23-evk", "fsl,imx23";
 
+i.MX25 Product Development Kit
+Required root node properties:
+    - compatible = "fsl,imx25-pdk", "fsl,imx25";
+
+i.MX27 Product Development Kit
+Required root node properties:
+    - compatible = "fsl,imx27-pdk", "fsl,imx27";
+
 i.MX28 Evaluation Kit
 Required root node properties:
     - compatible = "fsl,imx28-evk", "fsl,imx28";

+ 1 - 0
Documentation/devicetree/bindings/clock/imx5-clock.txt

@@ -171,6 +171,7 @@ clocks and IDs.
 	can_sel			156
 	can1_serial_gate	157
 	can1_ipg_gate		158
+	owire_gate		159
 
 Examples (for mx53):
 

+ 2 - 0
Documentation/devicetree/bindings/clock/imx6q-clock.txt

@@ -203,6 +203,8 @@ clocks and IDs.
 	pcie_ref		188
 	pcie_ref_125m		189
 	enet_ref		190
+	usbphy1_gate		191
+	usbphy2_gate		192
 
 Examples:
 

+ 36 - 34
Documentation/devicetree/bindings/dma/snps-dma.txt

@@ -3,59 +3,61 @@
 Required properties:
 - compatible: "snps,dma-spear1340"
 - reg: Address range of the DMAC registers
-- interrupt-parent: Should be the phandle for the interrupt controller
-  that services interrupts for this device
 - interrupt: Should contain the DMAC interrupt number
-- nr_channels: Number of channels supported by hardware
-- is_private: The device channels should be marked as private and not for by the
-  general purpose DMA channel allocator. False if not passed.
+- dma-channels: Number of channels supported by hardware
+- dma-requests: Number of DMA request lines supported, up to 16
+- dma-masters: Number of AHB masters supported by the controller
+- #dma-cells: must be <3>
 - chan_allocation_order: order of allocation of channel, 0 (default): ascending,
   1: descending
 - chan_priority: priority of channels. 0 (default): increase from chan 0->n, 1:
   increase from chan n->0
 - block_size: Maximum block size supported by the controller
-- nr_masters: Number of AHB masters supported by the controller
 - data_width: Maximum data width supported by hardware per AHB master
   (0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
-- slave_info:
-	- bus_id: name of this device channel, not just a device name since
-	  devices may have more than one channel e.g. "foo_tx". For using the
-	  dw_generic_filter(), slave drivers must pass exactly this string as
-	  param to filter function.
-	- cfg_hi: Platform-specific initializer for the CFG_HI register
-	- cfg_lo: Platform-specific initializer for the CFG_LO register
-	- src_master: src master for transfers on allocated channel.
-	- dst_master: dest master for transfers on allocated channel.
+
+
+Optional properties:
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- is_private: The device channels should be marked as private and not for use
+  by the general purpose DMA channel allocator. False if not passed.
 
 Example:
 
-	dma@fc000000 {
+	dmahost: dma@fc000000 {
 		compatible = "snps,dma-spear1340";
 		reg = <0xfc000000 0x1000>;
 		interrupt-parent = <&vic1>;
 		interrupts = <12>;
 
-		nr_channels = <8>;
+		dma-channels = <8>;
+		dma-requests = <16>;
+		dma-masters = <2>;
+		#dma-cells = <3>;
 		chan_allocation_order = <1>;
 		chan_priority = <1>;
 		block_size = <0xfff>;
-		nr_masters = <2>;
 		data_width = <3 3 0 0>;
+	};
 
-		slave_info {
-			uart0-tx {
-				bus_id = "uart0-tx";
-				cfg_hi = <0x4000>;	/* 0x8 << 11 */
-				cfg_lo = <0>;
-				src_master = <0>;
-				dst_master = <1>;
-			};
-			spi0-tx {
-				bus_id = "spi0-tx";
-				cfg_hi = <0x2000>;	/* 0x4 << 11 */
-				cfg_lo = <0>;
-				src_master = <0>;
-				dst_master = <0>;
-			};
-		};
+DMA clients connected to the Designware DMA controller must use the format
+described in the dma.txt file, using a four-cell specifier for each channel.
+The four cells in order are:
+
+1. A phandle pointing to the DMA controller
+2. The DMA request line number
+3. Source master for transfers on allocated channel
+4. Destination master for transfers on allocated channel
+
+Example:
+	
+	serial@e0000000 {
+		compatible = "arm,pl011", "arm,primecell";
+		reg = <0xe0000000 0x1000>;
+		interrupts = <0 35 0x4>;
+		status = "disabled";
+		dmas = <&dmahost 12 0 1>,
+			<&dmahost 13 0 1>;
+		dma-names = "rx", "rx";
 	};

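On the consumer side, a client driver would look its channel up by the name given in dma-names; a hedged sketch using the generic dmaengine helper of this era (everything except the dmaengine calls is illustrative):

	#include <linux/device.h>
	#include <linux/dmaengine.h>

	static int example_request_dma(struct device *dev)
	{
		struct dma_chan *chan;

		/*
		 * Resolves the "rx" entry of dma-names; the controller's
		 * #dma-cells = <3> specifier (request line, source master,
		 * destination master) is interpreted by the DMA controller
		 * driver when the channel is translated.
		 */
		chan = dma_request_slave_channel(dev, "rx");
		if (!chan)
			return -ENODEV;	/* fall back to PIO, or defer probing */

		/* ... dmaengine_slave_config(), descriptor setup, etc. ... */

		dma_release_channel(chan);
		return 0;
	}
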
+ 82 - 0
Documentation/devicetree/bindings/metag/meta-intc.txt

@@ -0,0 +1,82 @@
+* Meta External Trigger Controller Binding
+
+This binding specifies what properties must be available in the device tree
+representation of a Meta external trigger controller.
+
+Required properties:
+
+    - compatible: Specifies the compatibility list for the interrupt controller.
+      The type shall be <string> and the value shall include "img,meta-intc".
+
+    - num-banks: Specifies the number of interrupt banks (each of which can
+      handle 32 interrupt sources).
+
+    - interrupt-controller: The presence of this property identifies the node
+      as an interrupt controller. No property value shall be defined.
+
+    - #interrupt-cells: Specifies the number of cells needed to encode an
+      interrupt source. The type shall be a <u32> and the value shall be 2.
+
+    - #address-cells: Specifies the number of cells needed to encode an
+      address. The type shall be <u32> and the value shall be 0. As such,
+      'interrupt-map' nodes do not have to specify a parent unit address.
+
+Optional properties:
+
+    - no-mask: The controller doesn't have any mask registers.
+
+* Interrupt Specifier Definition
+
+  Interrupt specifiers consist of 2 cells encoded as follows:
+
+    - <1st-cell>: The interrupt-number that identifies the interrupt source.
+
+    - <2nd-cell>: The Linux interrupt flags containing level-sense information,
+                  encoded as follows:
+                    1 = edge triggered
+                    4 = level-sensitive
+
+* Examples
+
+Example 1:
+
+	/*
+	 * Meta external trigger block
+	 */
+	intc: intc {
+		// This is an interrupt controller node.
+		interrupt-controller;
+
+		// No address cells so that 'interrupt-map' nodes which
+		// reference this interrupt controller node do not need a parent
+		// address specifier.
+		#address-cells = <0>;
+
+		// Two cells to encode interrupt sources.
+		#interrupt-cells = <2>;
+
+		// Number of interrupt banks
+		num-banks = <2>;
+
+		// No HWMASKEXT is available (specify on Chorus2 and Comet ES1)
+		no-mask;
+
+		// Compatible with Meta hardware trigger block.
+		compatible = "img,meta-intc";
+	};
+
+Example 2:
+
+	/*
+	 * An interrupt generating device that is wired to a Meta external
+	 * trigger block.
+	 */
+	uart1: uart@0x02004c00 {
+		// Interrupt source '5' that is level-sensitive.
+		// Note that there are only two cells as specified in the
+		// interrupt parent's '#interrupt-cells' property.
+		interrupts = <5 4 /* level */>;
+
+		// The interrupt controller that this device is wired to.
+		interrupt-parent = <&intc>;
+	};

+ 47 - 0
Documentation/devicetree/bindings/mips/cpu_irq.txt

@@ -0,0 +1,47 @@
+MIPS CPU interrupt controller
+
+On MIPS the mips_cpu_intc_init() helper can be used to initialize the 8 CPU
+IRQs from a devicetree file and create an irq_domain for the IRQ controller.
+
+With the irq_domain in place we can describe how the 8 IRQs are wired to the
+platform's internal interrupt controller cascade.
+
+Below is an example of a platform describing the cascade inside the devicetree
+and the code used to load it inside arch_init_irq().
+
+Required properties:
+- compatible : Should be "mti,cpu-interrupt-controller"
+
+Example devicetree:
+	cpu-irq: cpu-irq@0 {
+		#address-cells = <0>;
+
+		interrupt-controller;
+		#interrupt-cells = <1>;
+
+		compatible = "mti,cpu-interrupt-controller";
+	};
+
+	intc: intc@200 {
+		compatible = "ralink,rt2880-intc";
+		reg = <0x200 0x100>;
+
+		interrupt-controller;
+		#interrupt-cells = <1>;
+
+		interrupt-parent = <&cpu-irq>;
+		interrupts = <2>;
+	};
+
+
+Example platform irq.c:
+static struct of_device_id __initdata of_irq_ids[] = {
+	{ .compatible = "mti,cpu-interrupt-controller", .data = mips_cpu_intc_init },
+	{ .compatible = "ralink,rt2880-intc", .data = intc_of_init },
+	{},
+};
+
+void __init arch_init_irq(void)
+{
+	of_irq_init(of_irq_ids);
+}

+ 16 - 0
Documentation/devicetree/bindings/mtd/elm.txt

@@ -0,0 +1,16 @@
+Error location module
+
+Required properties:
+- compatible: Must be "ti,am33xx-elm"
+- reg: physical base address and size of the registers map.
+- interrupts: Interrupt number for the elm.
+
+Optional properties:
+- ti,hwmods: Name of the hwmod associated to the elm
+
+Example:
+elm: elm@0 {
+	compatible = "ti,am3352-elm";
+	reg = <0x48080000 0x2000>;
+	interrupts = <4>;
+};

+ 3 - 0
Documentation/devicetree/bindings/mtd/mtd-physmap.txt

@@ -26,6 +26,9 @@ file systems on embedded devices.
  - linux,mtd-name: allow to specify the mtd name for retro capability with
    physmap-flash drivers as boot loader pass the mtd partition via the old
    device name physmap-flash.
+ - use-advanced-sector-protection: boolean to enable support for the
+   advanced sector protection (Spansion: PPB - Persistent Protection
+   Bits) locking.
 
 For JEDEC compatible devices, the following additional properties
 are defined:

+ 16 - 0
Documentation/devicetree/bindings/serial/lantiq_asc.txt

@@ -0,0 +1,16 @@
+Lantiq SoC ASC serial controller
+
+Required properties:
+- compatible : Should be "lantiq,asc"
+- reg : Address and length of the register set for the device
+- interrupts: the 3 (tx rx err) interrupt numbers. The interrupt specifier
+  depends on the interrupt-parent interrupt controller.
+
+Example:
+
+asc1: serial@E100C00 {
+	compatible = "lantiq,asc";
+	reg = <0xE100C00 0x400>;
+	interrupt-parent = <&icu0>;
+	interrupts = <112 113 114>;
+};

+ 18 - 0
Documentation/devicetree/bindings/thermal/dove-thermal.txt

@@ -0,0 +1,18 @@
+* Dove Thermal
+
+This driver is for Dove SoCs which contain a thermal sensor.
+
+Required properties:
+- compatible : "marvell,dove-thermal"
+- reg : Address range of the thermal registers
+
+The reg property should contain two ranges. The first is for the
+three Thermal Manager registers, while the second range contains the
+Thermal Diode Control Registers.
+
+Example:
+
+	thermal@10078 {
+		compatible = "marvell,dove-thermal";
+		reg = <0xd001c 0x0c>, <0xd005c 0x08>;
+	};

+ 15 - 0
Documentation/devicetree/bindings/thermal/kirkwood-thermal.txt

@@ -0,0 +1,15 @@
+* Kirkwood Thermal
+
+This version is for Kirkwood 88F8262 & 88F6283 SoCs. Other kirkwoods
+don't contain a thermal sensor.
+
+Required properties:
+- compatible : "marvell,kirkwood-thermal"
+- reg : Address range of the thermal registers
+
+Example:
+
+	thermal@10078 {
+		compatible = "marvell,kirkwood-thermal";
+		reg = <0x10078 0x4>;
+	};

+ 29 - 0
Documentation/devicetree/bindings/thermal/rcar-thermal.txt

@@ -0,0 +1,29 @@
+* Renesas R-Car Thermal
+
+Required properties:
+- compatible		: "renesas,rcar-thermal"
+- reg			: Address range of the thermal registers.
+			  The 1st reg range is treated as the common register
+			  block when "interrupts" is present.
+
+Optional properties:
+
+- interrupts		: use interrupt
+
+Example (non interrupt support):
+
+thermal@e61f0100 {
+	compatible = "renesas,rcar-thermal";
+	reg = <0xe61f0100 0x38>;
+};
+
+Example (interrupt support):
+
+thermal@e61f0000 {
+	compatible = "renesas,rcar-thermal";
+	reg = <0xe61f0000 0x14
+		0xe61f0100 0x38
+		0xe61f0200 0x38
+		0xe61f0300 0x38>;
+	interrupts = <0 69 4>;
+};

+ 7 - 4
Documentation/devicetree/bindings/arm/armada-370-xp-timer.txt → Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt

@@ -1,10 +1,13 @@
-Marvell Armada 370 and Armada XP Global Timers
-----------------------------------------------
+Marvell Armada 370 and Armada XP Timers
+---------------------------------------
 
 Required properties:
 - compatible: Should be "marvell,armada-370-xp-timer"
-- interrupts: Should contain the list of Global Timer interrupts
-- reg: Should contain the base address of the Global Timer registers
+- interrupts: Should contain the list of Global Timer interrupts and
+  then local timer interrupts
+- reg: Should contain the location and length of the timer registers. The
+  first pair is for the Global Timer registers, the second pair for the
+  local/private timers.
 - clocks: clock driving the timer hardware
 
 Optional properties:

+ 19 - 0
Documentation/devicetree/bindings/w1/fsl-imx-owire.txt

@@ -0,0 +1,19 @@
+* Freescale i.MX One wire bus master controller
+
+Required properties:
+- compatible : should be "fsl,imx21-owire"
+- reg : Address and length of the register set for the device
+
+Optional properties:
+- clocks : phandle of clock that supplies the module (required if platform
+		clock bindings use device tree)
+
+Example:
+
+- From imx53.dtsi:
+owire: owire@63fa4000 {
+	compatible = "fsl,imx53-owire", "fsl,imx21-owire";
+	reg = <0x63fa4000 0x4000>;
+	clocks = <&clks 159>;
+	status = "disabled";
+};

+ 9 - 0
Documentation/devicetree/bindings/watchdog/atmel-at91rm9200-wdt.txt

@@ -0,0 +1,9 @@
+Atmel AT91RM9200 System Timer Watchdog
+
+Required properties:
+- compatible: must be "atmel,at91rm9200-wdt".
+
+Example:
+	watchdog@fffffd00 {
+		compatible = "atmel,at91rm9200-wdt";
+	};

+ 4 - 0
Documentation/devicetree/bindings/watchdog/atmel-wdt.txt

@@ -7,9 +7,13 @@ Required properties:
 - reg: physical base address of the controller and length of memory mapped
   region.
 
+Optional properties:
+- timeout-sec: contains the watchdog timeout in seconds.
+
 Example:
 
 	watchdog@fffffd40 {
 		compatible = "atmel,at91sam9260-wdt";
 		reg = <0xfffffd40 0x10>;
+		timeout-sec = <10>;
 	};

+ 5 - 0
Documentation/devicetree/bindings/watchdog/marvel.txt

@@ -5,10 +5,15 @@ Required Properties:
 - Compatibility : "marvell,orion-wdt"
 - reg		: Address of the timer registers
 
+Optional properties:
+
+- timeout-sec	: Contains the watchdog timeout in seconds
+
 Example:
 
 	wdt@20300 {
 		compatible = "marvell,orion-wdt";
 		reg = <0x20300 0x28>;
+		timeout-sec = <10>;
 		status = "okay";
 	};

+ 4 - 0
Documentation/devicetree/bindings/watchdog/pnx4008-wdt.txt

@@ -5,9 +5,13 @@ Required properties:
 - reg: physical base address of the controller and length of memory mapped
   region.
 
+Optional properties:
+- timeout-sec: contains the watchdog timeout in seconds.
+
 Example:
 
 	watchdog@4003C000 {
 		compatible = "nxp,pnx4008-wdt";
 		reg = <0x4003C000 0x1000>;
+		timeout-sec = <10>;
 	};

+ 13 - 0
Documentation/devicetree/bindings/watchdog/qca-ar7130-wdt.txt

@@ -0,0 +1,13 @@
+* Qualcomm Atheros AR7130 Watchdog Timer (WDT) Controller
+
+Required properties:
+- compatible: must be "qca,ar7130-wdt"
+- reg: physical base address of the controller and length of memory mapped
+  region.
+
+Example:
+
+wdt@18060008 {
+	compatible = "qca,ar9330-wdt", "qca,ar7130-wdt";
+	reg = <0x18060008 0x8>;
+};

+ 3 - 0
Documentation/devicetree/bindings/watchdog/samsung-wdt.txt

@@ -9,3 +9,6 @@ Required properties:
 - reg : base physical address of the controller and length of memory mapped
 	region.
 - interrupts : interrupt number to the cpu.
+
+Optional properties:
+- timeout-sec : contains the watchdog timeout in seconds.

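The timeout-sec property added across these watchdog bindings is normally consumed through the watchdog core rather than parsed by hand; a hedged sketch, assuming the watchdog_init_timeout() helper of this kernel generation is available (driver name, timeout range, and the module parameter are illustrative):

	#include <linux/platform_device.h>
	#include <linux/watchdog.h>

	static unsigned int heartbeat;	/* optional module parameter, 0 = unset */
	static struct watchdog_device example_wdd;

	static int example_wdt_probe(struct platform_device *pdev)
	{
		example_wdd.min_timeout = 1;
		example_wdd.max_timeout = 255;

		/*
		 * Uses the module parameter if it is valid, otherwise falls
		 * back to the "timeout-sec" device-tree property, otherwise
		 * keeps whatever default the driver already set.
		 */
		watchdog_init_timeout(&example_wdd, heartbeat, &pdev->dev);

		/* ... set example_wdd.ops, then watchdog_register_device() ... */
		return 0;
	}
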
+ 5 - 1
Documentation/dma-buf-sharing.txt

@@ -302,7 +302,11 @@ Access to a dma_buf from the kernel context involves three steps:
       void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
 
    The vmap call can fail if there is no vmap support in the exporter, or if it
-   runs out of vmalloc space. Fallback to kmap should be implemented.
+   runs out of vmalloc space. Fallback to kmap should be implemented. Note that
+   the dma-buf layer keeps a reference count for all vmap access and calls down
+   into the exporter's vmap function only when no vmapping exists, and only
+   unmaps it once. Protection against concurrent vmap/vunmap calls is provided
+   by taking the dma_buf->lock mutex.
 
 3. Finish access
 

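A hedged sketch of the kmap fallback recommended in the hunk above. Steps 1 and 3 (dma_buf_begin_cpu_access()/dma_buf_end_cpu_access()) are elided, the function name is illustrative, and dma_buf_kmap()/dma_buf_kunmap() are assumed to be the page-wise access interfaces of this era:

	#include <linux/dma-buf.h>

	static void example_cpu_access(struct dma_buf *buf)
	{
		unsigned long pg, npages = buf->size >> PAGE_SHIFT;
		void *vaddr;

		vaddr = dma_buf_vmap(buf);
		if (vaddr) {
			/*
			 * Whole buffer is contiguously mapped; the dma-buf
			 * layer reference counts the vmapping as described
			 * in the hunk above.
			 */
			/* ... access vaddr[0 .. buf->size - 1] ... */
			dma_buf_vunmap(buf, vaddr);
			return;
		}

		/*
		 * No vmap support in the exporter, or vmalloc space is
		 * exhausted: fall back to mapping one page at a time.
		 */
		for (pg = 0; pg < npages; pg++) {
			void *kaddr = dma_buf_kmap(buf, pg);

			/* ... access this page ... */
			dma_buf_kunmap(buf, pg, kaddr);
		}
	}
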
+ 0 - 23
Documentation/kbuild/kconfig-language.txt

@@ -388,26 +388,3 @@ config FOO
 	depends on BAR && m
 
 limits FOO to module (=m) or disabled (=n).
-
-Kconfig symbol existence
-~~~~~~~~~~~~~~~~~~~~~~~~
-The following two methods produce the same kconfig symbol dependencies
-but differ greatly in kconfig symbol existence (production) in the
-generated config file.
-
-case 1:
-
-config FOO
-	tristate "about foo"
-	depends on BAR
-
-vs. case 2:
-
-if BAR
-config FOO
-	tristate "about foo"
-endif
-
-In case 1, the symbol FOO will always exist in the config file (given
-no other dependencies).  In case 2, the symbol FOO will only exist in
-the config file if BAR is enabled.

+ 6 - 0
Documentation/kbuild/kconfig.txt

@@ -46,6 +46,12 @@ KCONFIG_OVERWRITECONFIG
 If you set KCONFIG_OVERWRITECONFIG in the environment, Kconfig will not
 break symlinks when .config is a symlink to somewhere else.
 
+CONFIG_
+--------------------------------------------------
+If you set CONFIG_ in the environment, Kconfig will prefix all symbols
+with its value when saving the configuration, instead of using the default,
+"CONFIG_".
+
 ______________________________________________________________________
 Environment variables for '{allyes/allmod/allno/rand}config'
 

+ 9 - 36
Documentation/kernel-parameters.txt

@@ -564,6 +564,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			UART at the specified I/O port or MMIO address,
 			switching to the matching ttyS device later.  The
 			options are the same as for ttyS, above.
+		hvc<n>	Use the hypervisor console device <n>. This is for
+			both Xen and PowerPC hypervisors.
 
                 If the device connected to the port is not a TTY but a braille
                 device, prepend "brl," before the device type, for instance
@@ -757,6 +759,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
 	earlyprintk=	[X86,SH,BLACKFIN]
 			earlyprintk=vga
+			earlyprintk=xen
 			earlyprintk=serial[,ttySn[,baudrate]]
 			earlyprintk=ttySn[,baudrate]
 			earlyprintk=dbgp[debugController#]
@@ -774,6 +777,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			The VGA output is eventually overwritten by the real
 			console.
 
+			The xen output can only be used by Xen PV guests.
+
 	ekgdboc=	[X86,KGDB] Allow early kernel console debugging
 			ekgdboc=kbd
 
@@ -973,6 +978,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			       If specified, z/VM IUCV HVC accepts connections
 			       from listed z/VM user IDs only.
 
+	hwthread_map=	[METAG] Comma-separated list of Linux cpu id to
+			        hardware thread id mappings.
+				Format: <cpu>:<hwthread>
+
 	keep_bootcon	[KNL]
 			Do not unregister boot console at start. This is only
 			useful for debugging when something happens in the window
@@ -1640,42 +1649,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			that the amount of memory usable for all allocations
 			is not too small.
 
-	movablemem_map=acpi
-			[KNL,X86,IA-64,PPC] This parameter is similar to
-			memmap except it specifies the memory map of
-			ZONE_MOVABLE.
-			This option inform the kernel to use Hot Pluggable bit
-			in flags from SRAT from ACPI BIOS to determine which
-			memory devices could be hotplugged. The corresponding
-			memory ranges will be set as ZONE_MOVABLE.
-			NOTE: Whatever node the kernel resides in will always
-			      be un-hotpluggable.
-
-	movablemem_map=nn[KMG]@ss[KMG]
-			[KNL,X86,IA-64,PPC] This parameter is similar to
-			memmap except it specifies the memory map of
-			ZONE_MOVABLE.
-			If user specifies memory ranges, the info in SRAT will
-			be ingored. And it works like the following:
-			- If more ranges are all within one node, then from
-			  lowest ss to the end of the node will be ZONE_MOVABLE.
-			- If a range is within a node, then from ss to the end
-			  of the node will be ZONE_MOVABLE.
-			- If a range covers two or more nodes, then from ss to
-			  the end of the 1st node will be ZONE_MOVABLE, and all
-			  the rest nodes will only have ZONE_MOVABLE.
-			If memmap is specified at the same time, the
-			movablemem_map will be limited within the memmap
-			areas. If kernelcore or movablecore is also specified,
-			movablemem_map will have higher priority to be
-			satisfied. So the administrator should be careful that
-			the amount of movablemem_map areas are not too large.
-			Otherwise kernel won't have enough memory to start.
-			NOTE: We don't stop users specifying the node the
-			      kernel resides in as hotpluggable so that this
-			      option can be used as a workaround of firmware
-                              bugs.
-
 	MTD_Partition=	[MTD]
 			Format: <name>,<region-number>,<size>,<offset>
 

+ 4 - 0
Documentation/metag/00-INDEX

@@ -0,0 +1,4 @@
+00-INDEX
+	- this file
+kernel-ABI.txt
+	- Documents metag ABI details

+ 256 - 0
Documentation/metag/kernel-ABI.txt

@@ -0,0 +1,256 @@
+			==========================
+			KERNEL ABIS FOR METAG ARCH
+			==========================
+
+This document describes the Linux ABIs for the metag architecture, and has the
+following sections:
+
+ (*) Outline of registers
+ (*) Userland registers
+ (*) Kernel registers
+ (*) System call ABI
+ (*) Calling conventions
+
+
+====================
+OUTLINE OF REGISTERS
+====================
+
+The main Meta core registers are arranged in units:
+
+	UNIT	Type	DESCRIPTION	GP	EXT	PRIV	GLOBAL
+	=======	=======	===============	=======	=======	=======	=======
+	CT	Special	Control unit
+	D0	General	Data unit 0	0-7	8-15	16-31	16-31
+	D1	General	Data unit 1	0-7	8-15	16-31	16-31
+	A0	General	Address unit 0	0-3	4-7	 8-15	 8-15
+	A1	General	Address unit 1	0-3	4-7	 8-15	 8-15
+	PC	Special	PC unit		0		 1
+	PORT	Special	Ports
+	TR	Special	Trigger unit			 0-7
+	TT	Special	Trace unit			 0-5
+	FX	General	FP unit			0-15
+
+GP registers form part of the main context.
+
+Extended context registers (EXT) may not be present on all hardware threads and
+can be context switched if support is enabled and the appropriate bits are set
+in e.g. the D0.8 register to indicate what extended state to preserve.
+
+Global registers are shared between threads and are privilege protected.
+
+See arch/metag/include/asm/metag_regs.h for definitions relating to core
+registers and the fields and bits they contain. See the TRMs for further details
+about special registers.
+
+Several special registers are preserved in the main context; these are the
+interesting ones:
+
+	REG	(ALIAS)		PURPOSE
+	=======================	===============================================
+	CT.1	(TXMODE)	Processor mode bits (particularly for DSP)
+	CT.2	(TXSTATUS)	Condition flags and LSM_STEP (MGET/MSET step)
+	CT.3	(TXRPT)		Branch repeat counter
+	PC.0	(PC)		Program counter
+
+Some of the general registers have special purposes in the ABI and therefore
+have aliases:
+
+	D0 REG	(ALIAS)	PURPOSE		D1 REG	(ALIAS)	PURPOSE
+	===============	===============	===============	=======================
+	D0.0	(D0Re0)	32bit result	D1.0	(D1Re0)	Top half of 64bit result
+	D0.1	(D0Ar6)	Argument 6	D1.1	(D1Ar5)	Argument 5
+	D0.2	(D0Ar4)	Argument 4	D1.2	(D1Ar3)	Argument 3
+	D0.3	(D0Ar2)	Argument 2	D1.3	(D1Ar1)	Argument 1
+	D0.4	(D0FrT)	Frame temp	D1.4	(D1RtP)	Return pointer
+	D0.5		Call preserved	D1.5		Call preserved
+	D0.6		Call preserved	D1.6		Call preserved
+	D0.7		Call preserved	D1.7		Call preserved
+
+	A0 REG	(ALIAS)	PURPOSE		A1 REG	(ALIAS)	PURPOSE
+	===============	===============	===============	=======================
+	A0.0	(A0StP)	Stack pointer	A1.0	(A1GbP)	Global base pointer
+	A0.1	(A0FrP)	Frame pointer	A1.1	(A1LbP)	Local base pointer
+	A0.2				A1.2
+	A0.3				A1.3
+
+
+==================
+USERLAND REGISTERS
+==================
+
+All the general purpose D0, D1, A0, A1 registers are preserved when entering the
+kernel (including asynchronous events such as interrupts and timer ticks) except
+the following which have special purposes in the ABI:
+
+	REGISTERS	WHEN	STATUS		PURPOSE
+	===============	=======	===============	===============================
+	D0.8		DSP	Preserved	ECH, determines what extended
+						DSP state to preserve.
+	A0.0	(A0StP)	ALWAYS	Preserved	Stack >= A0StP may be clobbered
+						at any time by the creation of a
+						signal frame.
+	A1.0	(A1GbP)	SMP	Clobbered	Used as temporary for loading
+						kernel stack pointer and saving
+						core context.
+	A0.15		!SMP	Protected	Stores kernel stack pointer.
+	A1.15		ALWAYS	Protected	Stores kernel base pointer.
+
+On UP, A0.15 is used to store the kernel stack pointer for storing the userland
+context. A0.15 is global between hardware threads, however, which means it
+cannot be used on SMP for this purpose. Since no protected local registers are
+available, A1GbP is reserved for use as a temporary to allow a per-CPU stack
+pointer to be loaded for storing the rest of the context.
+
+
+================
+KERNEL REGISTERS
+================
+
+When in the kernel the following registers have special purposes in the ABI:
+
+	REGISTERS	WHEN	STATUS		PURPOSE
+	===============	=======	===============	===============================
+	A0.0	(A0StP)	ALWAYS	Preserved	Stack >= A0StP may be clobbered
+						at any time by the creation of
+						an irq signal frame.
+	A1.0	(A1GbP)	ALWAYS	Preserved	Reserved (kernel base pointer).
+
+
+===============
+SYSTEM CALL ABI
+===============
+
+When a system call is made, the following registers are effective:
+
+	REGISTERS	CALL			RETURN
+	===============	=======================	===============================
+	D0.0	(D0Re0)				Return value (or -errno)
+	D1.0	(D1Re0)	System call number	Clobbered
+	D0.1	(D0Ar6)	Syscall arg #6		Preserved
+	D1.1	(D1Ar5)	Syscall arg #5		Preserved
+	D0.2	(D0Ar4)	Syscall arg #4		Preserved
+	D1.2	(D1Ar3)	Syscall arg #3		Preserved
+	D0.3	(D0Ar2)	Syscall arg #2		Preserved
+	D1.3	(D1Ar1)	Syscall arg #1		Preserved
+
+Due to the limited number of argument registers and some system calls with badly
+aligned 64-bit arguments, 64-bit values are always packed in consecutive
+arguments, even if this is contrary to the normal calling conventions (where the
+two halves would go in a matching pair of data registers).
+
+For example fadvise64_64 usually has the signature:
+
+	long sys_fadvise64_64(i32 fd, i64 offs, i64 len, i32 advice);
+
+But for metag fadvise64_64 is wrapped so that the 64-bit arguments are packed:
+
+	long sys_fadvise64_64_metag(i32 fd,      i32 offs_lo,
+				    i32 offs_hi, i32 len_lo,
+				    i32 len_hi,  i32 advice)
+
+So the arguments are packed in the registers like this:
+
+	D0 REG	(ALIAS)	VALUE		D1 REG	(ALIAS)	VALUE
+	===============	===============	===============	=======================
+	D0.1	(D0Ar6)	advice		D1.1	(D1Ar5)	hi(len)
+	D0.2	(D0Ar4)	lo(len)		D1.2	(D1Ar3)	hi(offs)
+	D0.3	(D0Ar2)	lo(offs)	D1.3	(D1Ar1)	fd
+
+
+===================
+CALLING CONVENTIONS
+===================
+
+These calling conventions apply to both user and kernel code. The stack grows
+from low addresses to high addresses in the metag ABI. The stack pointer (A0StP)
+should always point to the next free address on the stack and should at all
+times be 64-bit aligned. The following registers are effective at the point of a
+call:
+
+	REGISTERS	CALL			RETURN
+	===============	=======================	===============================
+	D0.0	(D0Re0)				32bit return value
+	D1.0	(D1Re0)				Upper half of 64bit return value
+	D0.1	(D0Ar6)	32bit argument #6	Clobbered
+	D1.1	(D1Ar5)	32bit argument #5	Clobbered
+	D0.2	(D0Ar4)	32bit argument #4	Clobbered
+	D1.2	(D1Ar3)	32bit argument #3	Clobbered
+	D0.3	(D0Ar2)	32bit argument #2	Clobbered
+	D1.3	(D1Ar1)	32bit argument #1	Clobbered
+	D0.4	(D0FrT)				Clobbered
+	D1.4	(D1RtP)	Return pointer		Clobbered
+	D{0-1}.{5-7}				Preserved
+	A0.0	(A0StP)	Stack pointer		Preserved
+	A1.0	(A1GbP)				Preserved
+	A0.1	(A0FrP)	Frame pointer		Preserved
+	A1.1	(A1LbP)				Preserved
+	A{0-1}.{2-3}				Clobbered
+
+64-bit arguments are placed in matching pairs of registers (i.e. the same
+register number in both D0 and D1 units), with the least significant half in D0
+and the most significant half in D1, leaving a gap where necessary. Further
+arguments are stored on the stack in reverse order (earlier arguments at higher
+addresses):
+
+	ADDRESS		0     1     2     3	4     5     6     7
+	===============	===== ===== ===== =====	===== ===== ===== =====
+	A0StP       -->
+	A0StP-0x08	32bit argument #8	32bit argument #7
+	A0StP-0x10	32bit argument #10	32bit argument #9
+
+Function prologues tend to look a bit like this:
+
+	/* If frame pointer in use, move it to frame temp register so it can be
+	   easily pushed onto stack */
+	MOV	D0FrT,A0FrP
+
+	/* If frame pointer in use, set it to stack pointer */
+	ADD	A0FrP,A0StP,#0
+
+	/* Preserve D0FrT, D1RtP, D{0-1}.{5-7} on stack, incrementing A0StP */
+	MSETL	[A0StP++],D0FrT,D0.5,D0.6,D0.7
+
+	/* Allocate some stack space for local variables */
+	ADD	A0StP,A0StP,#0x10
+
+At this point the stack would look like this:
+
+	ADDRESS		0     1     2     3	4     5     6     7
+	===============	===== ===== ===== =====	===== ===== ===== =====
+	A0StP       -->
+	A0StP-0x08
+	A0StP-0x10
+	A0StP-0x18	Old D0.7		Old D1.7
+	A0StP-0x20	Old D0.6		Old D1.6
+	A0StP-0x28	Old D0.5		Old D1.5
+	A0FrP       -->	Old A0FrP (frame ptr)	Old D1RtP (return ptr)
+	A0FrP-0x08	32bit argument #8	32bit argument #7
+	A0FrP-0x10	32bit argument #10	32bit argument #9
+
+Function epilogues tend to differ depending on the use of a frame pointer. An
+example of a frame pointer epilogue:
+
+	/* Restore D0FrT, D1RtP, D{0-1}.{5-7} from stack, incrementing A0FrP */
+	MGETL	D0FrT,D0.5,D0.6,D0.7,[A0FrP++]
+	/* Restore stack pointer to where frame pointer was before increment */
+	SUB	A0StP,A0FrP,#0x20
+	/* Restore frame pointer from frame temp */
+	MOV	A0FrP,D0FrT
+	/* Return to caller via restored return pointer */
+	MOV	PC,D1RtP
+
+If the function hasn't touched the frame pointer, MGETL cannot be safely used
+with A0StP as it always increments and that would expose the stack to clobbering
+by interrupts (kernel) or signals (user). Therefore it's common to see the MGETL
+split into separate GETL instructions:
+
+	/* Restore D0FrT, D1RtP, D{0-1}.{5-7} from stack */
+	GETL	D0FrT,D1RtP,[A0StP+#-0x30]
+	GETL	D0.5,D1.5,[A0StP+#-0x28]
+	GETL	D0.6,D1.6,[A0StP+#-0x20]
+	GETL	D0.7,D1.7,[A0StP+#-0x18]
+	/* Restore stack pointer */
+	SUB	A0StP,A0StP,#0x30
+	/* Return to caller via restored return pointer */
+	MOV	PC,D1RtP

+ 9 - 0
Documentation/scsi/ChangeLog.megaraid_sas

@@ -1,3 +1,12 @@
+Release Date    : Sat. Feb 9, 2013 17:00:00 PST 2013 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Adam Radford
+Current Version : 06.506.00.00-rc1
+Old Version     : 06.504.01.00-rc1
+    1. Add 4k FastPath DIF support.
+    2. Don't load DevHandle unless FastPath enabled.
+    3. Version and Changelog update.
+-------------------------------------------------------------------------------
 Release Date    : Mon. Oct 1, 2012 17:00:00 PST 2012 -
 			(emaild-id:megaraidlinux@lsi.com)
 			Adam Radford

+ 53 - 0
Documentation/thermal/exynos_thermal_emulation

@@ -0,0 +1,53 @@
+EXYNOS EMULATION MODE
+========================
+
+Copyright (C) 2012 Samsung Electronics
+
+Written by Jonghwa Lee <jonghwa3.lee@samsung.com>
+
+Description
+-----------
+
+Exynos 4x12 (4212, 4412) and 5 series SoCs provide an emulation mode for the thermal
+management unit (TMU). Thermal emulation mode supports software debugging of the TMU's
+operation. The user can set the temperature manually in software, and the TMU will then
+report that user-supplied value as the current temperature instead of the sensor's value.
+
+Enabling the CONFIG_EXYNOS_THERMAL_EMUL option makes this support available.
+When it is enabled, a sysfs node named 'emulation' will be created under
+/sys/bus/platform/devices/'exynos device name'/.
+
+The 'emulation' sysfs node contains the value 0 in its initial state. Writing any
+temperature to the node automatically enables emulation mode, and the current
+temperature is changed to that value.
+(Exynos also supports a user-changeable delay before the temperature change takes
+ effect. However, this node always uses the same delay as the real sensing time, 938us.)
+
+Exynos emulation mode requires the value change and the enable to happen together:
+whenever you want to update the delay or the next temperature, you have to enable
+emulation mode at the same time (or keep the mode enabled). If you don't, the update
+fails and the last successful value keeps being used. That is why this node only gives
+users the right to change the temperature; having just one interface makes it simpler
+to use.
+
+Disabling emulation mode only requires writing value 0 to sysfs node.
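+
+For example (the platform device name below is illustrative and will differ
+per board):
+
+  # echo 55 > /sys/bus/platform/devices/exynos-tmu/emulation	(emulate 55 C)
+  # echo 0 > /sys/bus/platform/devices/exynos-tmu/emulation	(disable emulation)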
+
+
+TEMP	120 |
+	    |
+	100 |
+	    |
+	 80 |
+	    |		     	 	 +-----------
+	 60 |      		     	 |	    |
+	    |	           +-------------|          |
+	 40 |              |         	 |          |
+	    |		   |	     	 |          |
+	 20 |		   |	     	 |          +----------
+	    |	 	   |	     	 |          |          |
+	  0 |______________|_____________|__________|__________|_________
+		   A	    	 A	    A	   	       A     TIME
+		   |<----->|	 |<----->|  |<----->|	       |
+		   | 938us |  	 |	 |  |       |          |
+emulation    :  0  50	   |  	 70      |  20      |          0
+current temp :   sensor   50		 70         20	      sensor

+ 307 - 0
Documentation/thermal/intel_powerclamp.txt

@@ -0,0 +1,307 @@
+			 =======================
+			 INTEL POWERCLAMP DRIVER
+			 =======================
+By: Arjan van de Ven <arjan@linux.intel.com>
+    Jacob Pan <jacob.jun.pan@linux.intel.com>
+
+Contents:
+	(*) Introduction
+	    - Goals and Objectives
+
+	(*) Theory of Operation
+	    - Idle Injection
+	    - Calibration
+
+	(*) Performance Analysis
+	    - Effectiveness and Limitations
+	    - Power vs Performance
+	    - Scalability
+	    - Calibration
+	    - Comparison with Alternative Techniques
+
+	(*) Usage and Interfaces
+	    - Generic Thermal Layer (sysfs)
+	    - Kernel APIs (TBD)
+
+============
+INTRODUCTION
+============
+
+Consider the situation where a system’s power consumption must be
+reduced at runtime, due to power budget, thermal constraint, or noise
+level, and where active cooling is not preferred. Software managed
+passive power reduction must be performed to prevent the hardware
+actions that are designed for catastrophic scenarios.
+
+Currently, P-states, T-states (clock modulation), and CPU offlining
+are used for CPU throttling.
+
+On Intel CPUs, C-states provide effective power reduction, but so far
+they’re only used opportunistically, based on workload. With the
+development of intel_powerclamp driver, the method of synchronizing
+idle injection across all online CPU threads was introduced. The goal
+is to achieve forced and controllable C-state residency.
+
+Test/Analysis has been made in the areas of power, performance,
+scalability, and user experience. In many cases, clear advantage is
+shown over taking the CPU offline or modulating the CPU clock.
+
+
+===================
+THEORY OF OPERATION
+===================
+
+Idle Injection
+--------------
+
+On modern Intel processors (Nehalem or later), package level C-state
+residency is available in MSRs, thus also available to the kernel.
+
+These MSRs are:
+      #define MSR_PKG_C2_RESIDENCY	0x60D
+      #define MSR_PKG_C3_RESIDENCY	0x3F8
+      #define MSR_PKG_C6_RESIDENCY	0x3F9
+      #define MSR_PKG_C7_RESIDENCY	0x3FA
+
+If the kernel can also inject idle time to the system, then a
+closed-loop control system can be established that manages package
+level C-state. The intel_powerclamp driver is conceived as such a
+control system, where the target set point is a user-selected idle
+ratio (based on power reduction), and the error is the difference
+between the actual package level C-state residency ratio and the target idle
+ratio.
+
+Injection is controlled by high priority kernel threads, spawned for
+each online CPU.
+
+These kernel threads, with SCHED_FIFO class, are created to perform
+clamping actions of controlled duty ratio and duration. Each per-CPU
+thread synchronizes its idle time and duration, based on the rounding
+of jiffies, so accumulated errors can be prevented to avoid a jittery
+effect. Threads are also bound to the CPU such that they cannot be
+migrated, unless the CPU is taken offline. In this case, threads
+belong to the offlined CPUs will be terminated immediately.
+
+Running as SCHED_FIFO at relatively high priority also allows such a
+scheme to work for both preemptible and non-preemptible kernels.
+Alignment of idle time around jiffies ensures scalability across HZ
+values. This effect can be better visualized using a Perf timechart.
+The following diagram shows the behavior of kernel thread
+kidle_inject/cpu. During idle injection, it runs monitor/mwait idle
+for a given "duration", then relinquishes the CPU to other tasks,
+until the next time interval.
+
+The NOHZ schedule tick is disabled during idle time, but interrupts
+are not masked. Tests show that the extra wakeups from scheduler tick
+have a dramatic impact on the effectiveness of the powerclamp driver
+on large scale systems (Westmere system with 80 processors).
+
+CPU0
+		  ____________          ____________
+kidle_inject/0   |   sleep    |  mwait |  sleep     |
+	_________|            |________|            |_______
+			       duration
+CPU1
+		  ____________          ____________
+kidle_inject/1   |   sleep    |  mwait |  sleep     |
+	_________|            |________|            |_______
+			      ^
+			      |
+			      |
+			      roundup(jiffies, interval)
+
+Only one CPU is allowed to collect statistics and update global
+control parameters. This CPU is referred to as the controlling CPU in
+this document. The controlling CPU is elected at runtime, with a
+policy that favors BSP, taking into account the possibility of a CPU
+hot-plug.
+
+In terms of dynamics of the idle control system, package level idle
+time is considered largely as a non-causal system where its behavior
+cannot be based on the past or current input. Therefore, the
+intel_powerclamp driver attempts to enforce the desired idle time
+instantly as given input (target idle ratio). After injection,
+powerclamp monitors the actual idle time for a given time window and adjusts
+the next injection accordingly to avoid over/under correction.
+
+When used in a causal control system, such as a temperature control,
+it is up to the user of this driver to implement algorithms where
+past samples and outputs are included in the feedback. For example, a
+PID-based thermal controller can use the powerclamp driver to
+maintain a desired target temperature, based on integral and
+derivative gains of the past samples.
+
+
+
+Calibration
+-----------
+During scalability testing, it is observed that synchronized actions
+among CPUs become challenging as the number of cores grows. This is
+also true for the ability of a system to enter package level C-states.
+
+To make sure the intel_powerclamp driver scales well, online
+calibration is implemented. The goals for doing such a calibration
+are:
+
+a) determine the effective range of idle injection ratio
+b) determine the amount of compensation needed at each target ratio
+
+Compensation to each target ratio consists of two parts:
+
+        a) steady state error compensation
+	This is to offset the error occurring when the system can
+	enter idle without extra wakeups (such as external interrupts).
+
+	b) dynamic error compensation
+	When an excessive amount of wakeups occurs during idle, an
+	additional idle ratio can be added to quiet interrupts, by
+	slowing down CPU activities.
+
+A debugfs file is provided for the user to examine compensation
+progress and results, as in this example from a Westmere system:
+[jacob@nex01 ~]$ cat /sys/kernel/debug/intel_powerclamp/powerclamp_calib
+controlling cpu: 0
+pct confidence steady dynamic (compensation)
+0	0	0	0
+1	1	0	0
+2	1	1	0
+3	3	1	0
+4	3	1	0
+5	3	1	0
+6	3	1	0
+7	3	1	0
+8	3	1	0
+...
+30	3	2	0
+31	3	2	0
+32	3	1	0
+33	3	2	0
+34	3	1	0
+35	3	2	0
+36	3	1	0
+37	3	2	0
+38	3	1	0
+39	3	2	0
+40	3	3	0
+41	3	1	0
+42	3	2	0
+43	3	1	0
+44	3	1	0
+45	3	2	0
+46	3	3	0
+47	3	0	0
+48	3	2	0
+49	3	3	0
+
+Calibration occurs during runtime. No offline method is available.
+Steady state compensation is used only when confidence levels of all
+adjacent ratios have reached a satisfactory level. A confidence level
+is accumulated based on clean data collected at runtime. Data
+collected during a period without extra interrupts is considered
+clean.
+
+To compensate for excessive amounts of wakeup during idle, additional
+idle time is injected when such a condition is detected. Currently,
+we have a simple algorithm to double the injection ratio. A possible
+enhancement might be to throttle the offending IRQ, such as delaying
+EOI for level triggered interrupts. But it is a challenge to be
+non-intrusive to the scheduler or the IRQ core code.
+
+
+CPU Online/Offline
+------------------
+Per-CPU kernel threads are started/stopped upon receiving
+notifications of CPU hotplug activities. The intel_powerclamp driver
+keeps track of clamping kernel threads, even after they are migrated
+to other CPUs, after a CPU offline event.
+
+
+=====================
+Performance Analysis
+=====================
+This section describes the general performance data collected on
+multiple systems, including Westmere (80P) and Ivy Bridge (4P, 8P).
+
+Effectiveness and Limitations
+-----------------------------
+The maximum idle injection ratio allowed is capped at 50
+percent. As mentioned earlier, since interrupts are allowed during
+forced idle time, excessive interrupts could result in less
+effectiveness. The extreme case would be running ping -f to generate
+flooded network interrupts without much CPU acknowledgement. In this
+case, little can be done from the idle injection threads. In most
+normal cases, such as scp'ing a large file, applications can be throttled
+by the powerclamp driver, since slowing down the CPU also slows down
+network protocol processing, which in turn reduces interrupts.
+
+When control parameters are changed at runtime by the controlling CPU, it
+may take an additional period for the rest of the CPUs to catch up
+with the changes. During this time, idle injection is out of sync,
+and the package cannot enter C-states at the expected ratio. But
+this effect is minor, in that in most cases the target
+ratio is updated much less frequently than the idle injection
+frequency.
+
+Scalability
+-----------
+Tests also show a minor, but measurable, difference between the 4P/8P
+Ivy Bridge system and the 80P Westmere server under 50% idle ratio.
+More compensation is needed on Westmere for the same amount of
+target idle ratio. The compensation also increases as the idle ratio
+gets larger. This is the reason the calibration code is
+needed.
+
+On the IVB 8P system, compared to taking a CPU offline, powerclamp can
+achieve up to 40% better performance per watt (measured by a spin
+counter summed over per-CPU counting threads spawned for all running
+CPUs).
+
+====================
+Usage and Interfaces
+====================
+The powerclamp driver is registered to the generic thermal layer as a
+cooling device. Currently, it’s not bound to any thermal zones.
+
+jacob@chromoly:/sys/class/thermal/cooling_device14$ grep . *
+cur_state:0
+max_state:50
+type:intel_powerclamp
+
+Example usage:
+- To inject 25% idle time
+$ sudo sh -c "echo 25 > /sys/class/thermal/cooling_device80/cur_state"
+
+If the system is not busy and has more than 25% idle time already,
+then the powerclamp driver will not start idle injection. Running top
+will not show any idle injection kernel threads.
+
+If the system is busy (spin test below) and has less than 25% natural
+idle time, powerclamp kernel threads will do idle injection; they
+appear as running to the scheduler. But the overall system idle time is
+still reflected. In this example, 24.1% idle is shown. This helps the
+system admin or user determine the cause of a slowdown when the
+powerclamp driver is in action.
+
+
+Tasks: 197 total,   1 running, 196 sleeping,   0 stopped,   0 zombie
+Cpu(s): 71.2%us,  4.7%sy,  0.0%ni, 24.1%id,  0.0%wa,  0.0%hi,  0.0%si,  0.0%st
+Mem:   3943228k total,  1689632k used,  2253596k free,    74960k buffers
+Swap:  4087804k total,        0k used,  4087804k free,   945336k cached
+
+  PID USER      PR  NI  VIRT  RES  SHR S %CPU %MEM    TIME+  COMMAND
+ 3352 jacob     20   0  262m  644  428 S  286  0.0   0:17.16 spin
+ 3341 root     -51   0     0    0    0 D   25  0.0   0:01.62 kidle_inject/0
+ 3344 root     -51   0     0    0    0 D   25  0.0   0:01.60 kidle_inject/3
+ 3342 root     -51   0     0    0    0 D   25  0.0   0:01.61 kidle_inject/1
+ 3343 root     -51   0     0    0    0 D   25  0.0   0:01.60 kidle_inject/2
+ 2935 jacob     20   0  696m 125m  35m S    5  3.3   0:31.11 firefox
+ 1546 root      20   0  158m  20m 6640 S    3  0.5   0:26.97 Xorg
+ 2100 jacob     20   0 1223m  88m  30m S    3  2.3   0:23.68 compiz
+
+Tests have shown that by using the powerclamp driver as a cooling
+device, a PID-based userspace thermal controller can
+control CPU temperature effectively when no other thermal influence
+is added. For example, an Ultrabook user can compile the kernel while
+staying under a certain temperature (below the most active trip points).

+ 16 - 2
Documentation/thermal/sysfs-api.txt

@@ -55,6 +55,8 @@ temperature) and throttle appropriate devices.
 	.get_trip_type: get the type of certain trip point.
 	.get_trip_temp: get the temperature above which the certain trip point
 			will be fired.
+	.set_emul_temp: set the emulation temperature which helps in debugging
+			different threshold temperature points.
 
 1.1.2 void thermal_zone_device_unregister(struct thermal_zone_device *tz)
 
@@ -153,6 +155,7 @@ Thermal zone device sys I/F, created once it's registered:
     |---trip_point_[0-*]_temp:	Trip point temperature
     |---trip_point_[0-*]_type:	Trip point type
     |---trip_point_[0-*]_hyst:	Hysteresis value for this trip point
+    |---emul_temp:		Emulated temperature set node
 
 Thermal cooling device sys I/F, created once it's registered:
 /sys/class/thermal/cooling_device[0-*]:
@@ -252,6 +255,16 @@ passive
 	Valid values: 0 (disabled) or greater than 1000
 	RW, Optional
 
+emul_temp
+	Interface to set an emulated temperature in the thermal zone
+	(sensor). After setting this temperature, the thermal zone may pass
+	it to a platform emulation function if one is registered, or
+	cache it locally. This is useful for debugging different temperature
+	thresholds and their associated cooling actions. This is a write-only
+	node, and writing 0 to it should disable emulation.
+	Unit: millidegree Celsius
+	WO, Optional
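+	For example (the zone number is illustrative):
+	  # echo 45000 > /sys/class/thermal/thermal_zone0/emul_temp
+	  # echo 0 > /sys/class/thermal/thermal_zone0/emul_temp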
+
 *****************************
 * Cooling device attributes *
 *****************************
@@ -329,8 +342,9 @@ The framework includes a simple notification mechanism, in the form of a
 netlink event. Netlink socket initialization is done during the _init_
 of the framework. Drivers which intend to use the notification mechanism
 just need to call thermal_generate_netlink_event() with two arguments viz
-(originator, event). Typically the originator will be an integer assigned
-to a thermal_zone_device when it registers itself with the framework. The
+(originator, event). The originator is a pointer to struct thermal_zone_device
+from which the event originated. An integer which represents the
+thermal zone device will be used in the message to identify the zone. The
 event will be one of:{THERMAL_AUX0, THERMAL_AUX1, THERMAL_CRITICAL,
 THERMAL_DEV_FAULT}. Notification can be sent when the current temperature
 crosses any of the configured thresholds.
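A minimal sketch of a driver-side call, assuming "tz" is the zone's
struct thermal_zone_device pointer obtained at registration time:

	/* notify userspace that a critical trip point has been crossed */
	thermal_generate_netlink_event(tz, THERMAL_CRITICAL);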

+ 13 - 1
Documentation/watchdog/watchdog-kernel-api.txt

@@ -1,6 +1,6 @@
 The Linux WatchDog Timer Driver Core kernel API.
 ===============================================
-Last reviewed: 22-May-2012
+Last reviewed: 12-Feb-2013
 
 Wim Van Sebroeck <wim@iguana.be>
 
@@ -212,3 +212,15 @@ driver specific data to and a pointer to the data itself.
 The watchdog_get_drvdata function allows you to retrieve driver specific data.
 The argument of this function is the watchdog device where you want to retrieve
 data from. The function returns the pointer to the driver specific data.
+
+To initialize the timeout field, the following function can be used:
+
+extern int watchdog_init_timeout(struct watchdog_device *wdd,
+                                  unsigned int timeout_parm, struct device *dev);
+
+The watchdog_init_timeout function allows you to initialize the timeout field
+using the module timeout parameter or by retrieving the timeout-sec property from
+the device tree (if the module timeout parameter is invalid). Best practice is
+to set the default timeout value in the watchdog_device and
+then use this function to override it with the user's "preferred" timeout value.
+This routine returns zero on success and a negative errno code for failure.
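+
+A minimal sketch of the intended usage (the names "my_wdd", "my_wdt_ops" and
+the "timeout" module parameter are illustrative, not taken from a real driver):
+
+static unsigned int timeout;		/* module parameter; 0 means unset */
+module_param(timeout, uint, 0);
+
+static struct watchdog_device my_wdd = {
+	.ops = &my_wdt_ops,
+	.timeout = 30,			/* driver default, in seconds */
+	.min_timeout = 1,
+	.max_timeout = 120,
+};
+
+static int my_wdt_probe(struct platform_device *pdev)
+{
+	/* use the module parameter if valid, otherwise fall back to the
+	 * "timeout-sec" devicetree property, otherwise keep the default above */
+	watchdog_init_timeout(&my_wdd, timeout, &pdev->dev);
+
+	return watchdog_register_device(&my_wdd);
+}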

+ 43 - 10
MAINTAINERS

@@ -97,12 +97,13 @@ Descriptions of section entries:
 	   X:	net/ipv6/
 	   matches all files in and below net excluding net/ipv6/
 	K: Keyword perl extended regex pattern to match content in a
-	   patch or file.  For instance:
+	   patch or file, or an affected filename.  For instance:
 	   K: of_get_profile
-	      matches patches or files that contain "of_get_profile"
+	      matches patch or file content, or filenames, that contain
+	      "of_get_profile"
 	   K: \b(printk|pr_(info|err))\b
-	      matches patches or files that contain one or more of the words
-	      printk, pr_info or pr_err
+	      matches patch or file content, or filenames, that contain one or
+	      more of the words printk, pr_info or pr_err
 	   One regex pattern per line.  Multiple K: lines acceptable.
 
 Note: For the hard of thinking, this list is meant to remain in alphabetical
@@ -1799,7 +1800,8 @@ F:	drivers/bcma/
 F:	include/linux/bcma/
 
 BROCADE BFA FC SCSI DRIVER
-M:	Krishna C Gudipati <kgudipat@brocade.com>
+M:	Anil Gurumurthy <agurumur@brocade.com>
+M:	Vijaya Mohan Guvva <vmohan@brocade.com>
 L:	linux-scsi@vger.kernel.org
 S:	Supported
 F:	drivers/scsi/bfa/
@@ -2073,8 +2075,8 @@ S:	Maintained
 F:	include/linux/clk.h
 
 CISCO FCOE HBA DRIVER
-M:	Abhijeet Joglekar <abjoglek@cisco.com>
-M:	Venkata Siva Vijayendra Bhamidipati <vbhamidi@cisco.com>
+M:	Hiral Patel <hiralpat@cisco.com>
+M:	Suma Ramars <sramars@cisco.com>
 M:	Brian Uchino <buchino@cisco.com>
 L:	linux-scsi@vger.kernel.org
 S:	Supported
@@ -2902,6 +2904,13 @@ W:	bluesmoke.sourceforge.net
 S:	Maintained
 F:	drivers/edac/e7xxx_edac.c
 
+EDAC-GHES
+M:	Mauro Carvalho Chehab <mchehab@redhat.com>
+L:	linux-edac@vger.kernel.org
+W:	bluesmoke.sourceforge.net
+S:	Maintained
+F:	drivers/edac/ghes-edac.c
+
 EDAC-I82443BXGX
 M:	Tim Small <tim@buttersideup.com>
 L:	linux-edac@vger.kernel.org
@@ -5195,6 +5204,18 @@ F:	drivers/mtd/
 F:	include/linux/mtd/
 F:	include/uapi/mtd/
 
+METAG ARCHITECTURE
+M:	James Hogan <james.hogan@imgtec.com>
+S:	Supported
+F:	arch/metag/
+F:	Documentation/metag/
+F:	Documentation/devicetree/bindings/metag/
+F:	drivers/clocksource/metag_generic.c
+F:	drivers/irqchip/irq-metag.c
+F:	drivers/irqchip/irq-metag-ext.c
+F:	drivers/tty/metag_da.c
+F:	fs/imgdafs/
+
 MICROBLAZE ARCHITECTURE
 M:	Michal Simek <monstr@monstr.eu>
 L:	microblaze-uclinux@itee.uq.edu.au (moderated for non-subscribers)
@@ -5437,6 +5458,7 @@ F:	net/netrom/
 NETWORK BLOCK DEVICE (NBD)
 M:	Paul Clements <Paul.Clements@steeleye.com>
 S:	Maintained
+L:	nbd-general@lists.sourceforge.net
 F:	Documentation/blockdev/nbd.txt
 F:	drivers/block/nbd.c
 F:	include/linux/nbd.h
@@ -6512,6 +6534,12 @@ S:	Maintained
 F:	Documentation/blockdev/ramdisk.txt
 F:	drivers/block/brd.c
 
+RAMSAN DRIVER (IBM RamSan 70/80 PCI SSD Flash Card)
+M:	Joshua Morris <josh.h.morris@us.ibm.com>
+M:	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+S:	Maintained
+F:	drivers/block/rsxx/
+
 RANDOM NUMBER DRIVER
M:	"Theodore Ts'o" <tytso@mit.edu>
 S:	Maintained
@@ -7539,6 +7567,7 @@ STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec)
 M:	Julian Andres Klode <jak@jak-linux.org>
 M:	Marc Dietrich <marvin24@gmx.de>
 L:	ac100@lists.launchpad.net (moderated for non-subscribers)
+L:	linux-tegra@vger.kernel.org
 S:	Maintained
 F:	drivers/staging/nvec/
 
@@ -7665,6 +7694,12 @@ F:	lib/swiotlb.c
 F:	arch/*/kernel/pci-swiotlb.c
 F:	include/linux/swiotlb.h
 
+SYNOPSYS ARC ARCHITECTURE
+M:	Vineet Gupta <vgupta@synopsys.com>
+L:	linux-snps-arc@vger.kernel.org
+S:	Supported
+F:	arch/arc/
+
 SYSV FILESYSTEM
 M:	Christoph Hellwig <hch@infradead.org>
 S:	Maintained
@@ -7831,9 +7866,7 @@ L:	linux-tegra@vger.kernel.org
 Q:	http://patchwork.ozlabs.org/project/linux-tegra/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra.git
 S:	Supported
-F:	arch/arm/mach-tegra
-F:	arch/arm/boot/dts/tegra*
-F:	arch/arm/configs/tegra_defconfig
+K:	(?i)[^a-z]tegra
 
 TEHUTI ETHERNET DRIVER
 M:	Andy Gospodarek <andy@greyhouse.net>

+ 5 - 5
Makefile

@@ -1,7 +1,7 @@
 VERSION = 3
-PATCHLEVEL = 8
+PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
@@ -192,7 +192,6 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
 # "make" in the configured kernel build directory always uses that.
 # Default value for CROSS_COMPILE is not to prefix executables
 # Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
-export KBUILD_BUILDHOST := $(SUBARCH)
 ARCH		?= $(SUBARCH)
 CROSS_COMPILE	?= $(CONFIG_CROSS_COMPILE:"%"=%)
 
@@ -620,7 +619,8 @@ KBUILD_AFLAGS	+= -gdwarf-2
 endif
 
 ifdef CONFIG_DEBUG_INFO_REDUCED
-KBUILD_CFLAGS 	+= $(call cc-option, -femit-struct-debug-baseonly)
+KBUILD_CFLAGS 	+= $(call cc-option, -femit-struct-debug-baseonly) \
+		   $(call cc-option,-fno-var-tracking)
 endif
 
 ifdef CONFIG_FUNCTION_TRACER
@@ -1398,7 +1398,7 @@ quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN   $(wildcard $(rm-files))
 # Run depmod only if we have System.map and depmod is executable
 quiet_cmd_depmod = DEPMOD  $(KERNELRELEASE)
       cmd_depmod = $(CONFIG_SHELL) $(srctree)/scripts/depmod.sh $(DEPMOD) \
-                   $(KERNELRELEASE)
+                   $(KERNELRELEASE) "$(patsubst "%",%,$(CONFIG_SYMBOL_PREFIX))"
 
 # Create temporary dir for module support files
 # clean it up only when building all modules

+ 23 - 0
arch/Kconfig

@@ -103,6 +103,22 @@ config UPROBES
 
 	  If in doubt, say "N".
 
+config HAVE_64BIT_ALIGNED_ACCESS
+	def_bool 64BIT && !HAVE_EFFICIENT_UNALIGNED_ACCESS
+	help
+	  Some architectures require 64 bit accesses to be 64 bit
+	  aligned, which also requires structs containing 64 bit values
+	  to be 64 bit aligned too. This includes some 32 bit
+	  architectures which can do 64 bit accesses, as well as 64 bit
+	  architectures without unaligned access.
+
+	  This symbol should be selected by an architecture if 64 bit
+	  accesses are required to be 64 bit aligned in this way even
+	  though it is not a 64 bit architecture.
+
+	  See Documentation/unaligned-memory-access.txt for more
+	  information on the topic of unaligned memory accesses.
+
 config HAVE_EFFICIENT_UNALIGNED_ACCESS
 	bool
 	help
@@ -303,6 +319,13 @@ config ARCH_WANT_OLD_COMPAT_IPC
 	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
 	bool
 
+config HAVE_VIRT_TO_BUS
+	bool
+	help
+	  An architecture should select this if it implements the
+	  deprecated interface virt_to_bus().  All new architectures
+	  should probably not select this.
+
 config HAVE_ARCH_SECCOMP_FILTER
 	bool
 	help

+ 1 - 0
arch/alpha/Kconfig

@@ -9,6 +9,7 @@ config ALPHA
 	select HAVE_PERF_EVENTS
 	select HAVE_DMA_ATTRS
 	select HAVE_GENERIC_HARDIRQS
+	select HAVE_VIRT_TO_BUS
 	select GENERIC_IRQ_PROBE
 	select AUTO_IRQ_AFFINITY if SMP
 	select GENERIC_IRQ_SHOW

+ 2 - 0
arch/arc/Kbuild

@@ -0,0 +1,2 @@
+obj-y += kernel/
+obj-y += mm/

+ 453 - 0
arch/arc/Kconfig

@@ -0,0 +1,453 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+config ARC
+	def_bool y
+	select CLONE_BACKWARDS
+	# ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev
+	select DEVTMPFS if !INITRAMFS_SOURCE=""
+	select GENERIC_ATOMIC64
+	select GENERIC_CLOCKEVENTS
+	select GENERIC_FIND_FIRST_BIT
+	# for now, we don't need GENERIC_IRQ_PROBE, CONFIG_GENERIC_IRQ_CHIP
+	select GENERIC_IRQ_SHOW
+	select GENERIC_KERNEL_EXECVE
+	select GENERIC_KERNEL_THREAD
+	select GENERIC_PENDING_IRQ if SMP
+	select GENERIC_SMP_IDLE_THREAD
+	select HAVE_ARCH_KGDB
+	select HAVE_ARCH_TRACEHOOK
+	select HAVE_GENERIC_HARDIRQS
+	select HAVE_IOREMAP_PROT
+	select HAVE_KPROBES
+	select HAVE_KRETPROBES
+	select HAVE_MEMBLOCK
+	select HAVE_MOD_ARCH_SPECIFIC if ARC_DW2_UNWIND
+	select HAVE_OPROFILE
+	select HAVE_PERF_EVENTS
+	select IRQ_DOMAIN
+	select MODULES_USE_ELF_RELA
+	select NO_BOOTMEM
+	select OF
+	select OF_EARLY_FLATTREE
+	select PERF_USE_VMALLOC
+
+config SCHED_OMIT_FRAME_POINTER
+	def_bool y
+
+config GENERIC_CSUM
+	def_bool y
+
+config RWSEM_GENERIC_SPINLOCK
+	def_bool y
+
+config ARCH_FLATMEM_ENABLE
+	def_bool y
+
+config MMU
+	def_bool y
+
+config NO_IOPORT
+	def_bool y
+
+config GENERIC_CALIBRATE_DELAY
+	def_bool y
+
+config GENERIC_HWEIGHT
+	def_bool y
+
+config BINFMT_ELF
+	def_bool y
+
+config STACKTRACE_SUPPORT
+	def_bool y
+	select STACKTRACE
+
+config HAVE_LATENCYTOP_SUPPORT
+	def_bool y
+
+config NO_DMA
+	def_bool n
+
+source "init/Kconfig"
+source "kernel/Kconfig.freezer"
+
+menu "ARC Architecture Configuration"
+
+menu "ARC Platform/SoC/Board"
+
+source "arch/arc/plat-arcfpga/Kconfig"
+#New platform adds here
+
+endmenu
+
+menu "ARC CPU Configuration"
+
+choice
+	prompt "ARC Core"
+	default ARC_CPU_770
+
+config ARC_CPU_750D
+	bool "ARC750D"
+	help
+	  Support for ARC750 core
+
+config ARC_CPU_770
+	bool "ARC770"
+	select ARC_CPU_REL_4_10
+	help
+	  Support for ARC770 core introduced with Rel 4.10 (Summer 2011)
+	  This core has a bunch of cool new features:
+	  -MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4)
+                   Shared Address Spaces (for sharing TLB entries in MMU)
+	  -Caches: New Prog Model, Region Flush
+	  -Insns: endian swap, load-locked/store-conditional, time-stamp-ctr
+
+endchoice
+
+config CPU_BIG_ENDIAN
+	bool "Enable Big Endian Mode"
+	default n
+	help
+	  Build kernel for Big Endian Mode of ARC CPU
+
+# If a platform can't work with 0x8000_0000 based dma_addr_t
+config ARC_PLAT_NEEDS_CPU_TO_DMA
+	bool
+
+config SMP
+	bool "Symmetric Multi-Processing (Incomplete)"
+	default n
+	select USE_GENERIC_SMP_HELPERS
+	help
+	  This enables support for systems with more than one CPU. If you have
+	  a system with only one CPU, like most personal computers, say N. If
+	  you have a system with more than one CPU, say Y.
+
+if SMP
+
+config ARC_HAS_COH_CACHES
+	def_bool n
+
+config ARC_HAS_COH_LLSC
+	def_bool n
+
+config ARC_HAS_COH_RTSC
+	def_bool n
+
+config ARC_HAS_REENTRANT_IRQ_LV2
+	def_bool n
+
+endif
+
+config NR_CPUS
+	int "Maximum number of CPUs (2-32)"
+	range 2 32
+	depends on SMP
+	default "2"
+
+menuconfig ARC_CACHE
+	bool "Enable Cache Support"
+	default y
+	# if SMP, cache enabled ONLY if ARC implementation has cache coherency
+	depends on !SMP || ARC_HAS_COH_CACHES
+
+if ARC_CACHE
+
+config ARC_CACHE_LINE_SHIFT
+	int "Cache Line Length (as power of 2)"
+	range 5 7
+	default "6"
+	help
+	  Starting with ARC700 4.9, the cache line length is configurable.
+	  This option specifies "N", with line length = 2 power N,
+	  so line lengths of 32, 64, 128 are specified by 5, 6, 7 respectively.
+	  Linux only supports the same line length for I and D caches.
+
+config ARC_HAS_ICACHE
+	bool "Use Instruction Cache"
+	default y
+
+config ARC_HAS_DCACHE
+	bool "Use Data Cache"
+	default y
+
+config ARC_CACHE_PAGES
+	bool "Per Page Cache Control"
+	default y
+	depends on ARC_HAS_ICACHE || ARC_HAS_DCACHE
+	help
+	  This can be used to over-ride the global I/D Cache Enable on a
+	  per-page basis (but only for pages accessed via the MMU, such as
+	  Kernel Virtual addresses or User Virtual addresses).
+	  TLB entries have a per-page Cache Enable Bit.
+	  Note that Global I/D ENABLE + Per Page DISABLE works, but the corollary,
+	  Global DISABLE + Per Page ENABLE, won't work.
+
+endif	#ARC_CACHE
+
+config ARC_HAS_ICCM
+	bool "Use ICCM"
+	help
+	  Single Cycle RAMS to store Fast Path Code
+	default n
+
+config ARC_ICCM_SZ
+	int "ICCM Size in KB"
+	default "64"
+	depends on ARC_HAS_ICCM
+
+config ARC_HAS_DCCM
+	bool "Use DCCM"
+	help
+	  Single Cycle RAMS to store Fast Path Data
+	default n
+
+config ARC_DCCM_SZ
+	int "DCCM Size in KB"
+	default "64"
+	depends on ARC_HAS_DCCM
+
+config ARC_DCCM_BASE
+	hex "DCCM map address"
+	default "0xA0000000"
+	depends on ARC_HAS_DCCM
+
+config ARC_HAS_HW_MPY
+	bool "Use Hardware Multiplier (Normal or Faster XMAC)"
+	default y
+	help
+	  Influences how gcc generates code for MPY operations.
+	  If enabled, MPYxx insns are generated, provided by the Standard/XMAC
+	  Multiplier. Otherwise the software multiply lib is used.
+
+choice
+	prompt "ARC700 MMU Version"
+	default ARC_MMU_V3 if ARC_CPU_770
+	default ARC_MMU_V2 if ARC_CPU_750D
+
+config ARC_MMU_V1
+	bool "MMU v1"
+	help
+	  Orig ARC700 MMU
+
+config ARC_MMU_V2
+	bool "MMU v2"
+	help
+	  Fixed the deficiency of v1 - possible thrashing in a memcpy scenario
+	  when 2 D-TLB and 1 I-TLB entries index into the same 2-way set.
+
+config ARC_MMU_V3
+	bool "MMU v3"
+	depends on ARC_CPU_770
+	help
+	  Introduced with ARC700 4.10: New Features
+	  Variable Page size (1k-16k), var JTLB size 128 x (2 or 4)
+	  Shared Address Spaces (SASID)
+
+endchoice
+
+
+choice
+	prompt "MMU Page Size"
+	default ARC_PAGE_SIZE_8K
+
+config ARC_PAGE_SIZE_8K
+	bool "8KB"
+	help
+	  Choose between 8k vs 16k
+
+config ARC_PAGE_SIZE_16K
+	bool "16KB"
+	depends on ARC_MMU_V3
+
+config ARC_PAGE_SIZE_4K
+	bool "4KB"
+	depends on ARC_MMU_V3
+
+endchoice
+
+config ARC_COMPACT_IRQ_LEVELS
+	bool "ARCompact IRQ Priorities: High(2)/Low(1)"
+	default n
+	# Timer HAS to be high priority, for any other high priority config
+	select ARC_IRQ3_LV2
+	# if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
+	depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2
+
+if ARC_COMPACT_IRQ_LEVELS
+
+config ARC_IRQ3_LV2
+	bool
+
+config ARC_IRQ5_LV2
+	bool
+
+config ARC_IRQ6_LV2
+	bool
+
+endif
+
+config ARC_FPU_SAVE_RESTORE
+	bool "Enable FPU state persistence across context switch"
+	default n
+	help
+	  The Double Precision Floating Point unit has dedicated regs which
+	  need to be saved/restored across context-switch.
+	  Note that the ARC FPU is overly simplistic, unlike say x86, which has
+	  hardware pieces to allow software to conditionally save/restore,
+	  based on actual usage of the FPU by a task. Thus our implementation does
+	  this for all tasks in the system.
+
+menuconfig ARC_CPU_REL_4_10
+	bool "Enable support for Rel 4.10 features"
+	default n
+	help
+	  -ARC770 (and dependent features) enabled
+	  -ARC750 also shares some of the new features with 770
+
+config ARC_HAS_LLSC
+	bool "Insn: LLOCK/SCOND (efficient atomic ops)"
+	default y
+	depends on ARC_CPU_770
+	# if SMP, enable LLSC ONLY if ARC implementation has coherent atomics
+	depends on !SMP || ARC_HAS_COH_LLSC
+
+config ARC_HAS_SWAPE
+	bool "Insn: SWAPE (endian-swap)"
+	default y
+	depends on ARC_CPU_REL_4_10
+
+config ARC_HAS_RTSC
+	bool "Insn: RTSC (64-bit r/o cycle counter)"
+	default y
+	depends on ARC_CPU_REL_4_10
+	# if SMP, enable RTSC only if counter is coherent across cores
+	depends on !SMP || ARC_HAS_COH_RTSC
+
+endmenu   # "ARC CPU Configuration"
+
+config LINUX_LINK_BASE
+	hex "Linux Link Address"
+	default "0x80000000"
+	help
+	  ARC700 divides the 32 bit phy address space into two equal halves
+	  -Lower 2G (0 - 0x7FFF_FFFF ) is user virtual, translated by MMU
+	  -Upper 2G (0x8000_0000 onwards) is untranslated, for kernel
+	  Typically the Linux kernel is linked at the start of the untranslated
+	  address space, hence the default value of 0x8000_0000.
+	  However some customers have peripherals mapped at this addr, so
+	  Linux needs to be scooted a bit.
+	  If you don't know what the above means, leave this setting alone.
+
+config ARC_CURR_IN_REG
+	bool "Dedicate Register r25 for current_task pointer"
+	default y
+	help
+	  This reserves Register R25 to point to the Current Task in
+	  kernel mode. This saves a memory access for each such reference.
+
+
+config ARC_MISALIGN_ACCESS
+	bool "Emulate unaligned memory access (userspace only)"
+	default n
+	select SYSCTL_ARCH_UNALIGN_NO_WARN
+	select SYSCTL_ARCH_UNALIGN_ALLOW
+	help
+	  This enables misaligned 16 & 32 bit memory access from user space.
+	  Use ONLY-IF-ABS-NECESSARY as it will be very slow and also can hide
+	  potential bugs in code
+
+config ARC_STACK_NONEXEC
+	bool "Make stack non-executable"
+	default n
+	help
+	  Disables the execute permissions on the stack/heap of processes,
+	  which are enabled by default.
+
+config HZ
+	int "Timer Frequency"
+	default 100
+
+config ARC_METAWARE_HLINK
+	bool "Support for Metaware debugger assisted Host access"
+	default n
+	help
+	  This option allows Linux userland apps to directly access the
+	  host file system (open/creat/read/write etc) with help from the
+	  Metaware Debugger. This can come in handy for Linux-host communication
+	  when there is no real usable peripheral such as an EMAC.
+
+menuconfig ARC_DBG
+	bool "ARC debugging"
+	default y
+
+config ARC_DW2_UNWIND
+	bool "Enable DWARF specific kernel stack unwind"
+	depends on ARC_DBG
+	default y
+	select KALLSYMS
+	help
+	  Compiles the kernel with DWARF unwind information and can be used
+	  to get stack backtraces.
+
+	  If you say Y here the resulting kernel image will be slightly larger
+	  but not slower, and it will give very useful debugging information.
+	  If you don't debug the kernel, you can say N, but we may not be able
+	  to solve problems without frame unwind information
+
+config ARC_DBG_TLB_PARANOIA
+	bool "Paranoia Checks in Low Level TLB Handlers"
+	depends on ARC_DBG
+	default n
+
+config ARC_DBG_TLB_MISS_COUNT
+	bool "Profile TLB Misses"
+	default n
+	select DEBUG_FS
+	depends on ARC_DBG
+	help
+	  Counts number of I and D TLB Misses and exports them via Debugfs
+	  The counters can be cleared via Debugfs as well
+
+config CMDLINE
+	string "Kernel command line to built-in"
+	default "print-fatal-signals=1"
+	help
+	  The default command line which will be appended to the optional
+	  u-boot provided command line (see below)
+
+config CMDLINE_UBOOT
+	bool "Support U-boot kernel command line passing"
+	default n
+	help
+	  If you are using U-boot (www.denx.de) and wish to pass the kernel
+	  command line from the U-boot environment to the Linux kernel then
+	  switch this option on.
+	  ARC U-boot will setup the cmdline in RAM/flash and set r2 to point
+	  to it. kernel startup code will copy the string into cmdline buffer
+	  and also append CONFIG_CMDLINE.
+
+config ARC_BUILTIN_DTB_NAME
+	string "Built in DTB"
+	help
+	  Set the name of the DTB to embed in the vmlinux binary
+	  Leaving it blank selects the minimal "skeleton" dtb
+
+source "kernel/Kconfig.preempt"
+
+endmenu	 # "ARC Architecture Configuration"
+
+source "mm/Kconfig"
+source "net/Kconfig"
+source "drivers/Kconfig"
+source "fs/Kconfig"
+source "arch/arc/Kconfig.debug"
+source "security/Kconfig"
+source "crypto/Kconfig"
+source "lib/Kconfig"

+ 34 - 0
arch/arc/Kconfig.debug

@@ -0,0 +1,34 @@
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+config EARLY_PRINTK
+	bool "Early printk" if EMBEDDED
+	default y
+	help
+	  Write kernel log output directly into the VGA buffer or to a serial
+	  port.
+
+	  This is useful for kernel debugging when your machine crashes very
+	  early before the console code is initialized. For normal operation
+	  it is not recommended because it looks ugly and doesn't cooperate
+	  with klogd/syslogd or the X server. You should normally say N here,
+	  unless you want to debug such a crash.
+
+config DEBUG_STACKOVERFLOW
+	bool "Check for stack overflows"
+	depends on DEBUG_KERNEL
+	help
+	  This option will cause messages to be printed if free stack space
+	  drops below a certain limit.
+
+config 16KSTACKS
+	bool "Use 16Kb for kernel stacks instead of 8Kb"
+	help
+	  If you say Y here the kernel will use a 16Kb stack size for the
+	  kernel stack attached to each process/thread. The default is 8K.
+	  This increases the resident kernel footprint and will cause fewer
+	  threads to run on the system and also increase the pressure
+	  on the VM subsystem for higher order allocations.
+
+endmenu

+ 126 - 0
arch/arc/Makefile

@@ -0,0 +1,126 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+UTS_MACHINE := arc
+
+KBUILD_DEFCONFIG := fpga_defconfig
+
+cflags-y	+= -mA7 -fno-common -pipe -fno-builtin -D__linux__
+
+LINUXINCLUDE	+=  -include ${src}/arch/arc/include/asm/defines.h
+
+ifdef CONFIG_ARC_CURR_IN_REG
+# For a global register definition, make sure it gets passed to every file
+# We had a customer reported bug where some code built in kernel was NOT using
+# any kernel headers, and missing the r25 global register
+# Can't do unconditionally (like above) because of recursive include issues
+# due to <linux/thread_info.h>
+LINUXINCLUDE	+=  -include ${src}/arch/arc/include/asm/current.h
+endif
+
+atleast_gcc44 :=  $(call cc-ifversion, -gt, 0402, y)
+cflags-$(atleast_gcc44)			+= -fsection-anchors
+
+cflags-$(CONFIG_ARC_HAS_LLSC)		+= -mlock
+cflags-$(CONFIG_ARC_HAS_SWAPE)		+= -mswape
+cflags-$(CONFIG_ARC_HAS_RTSC)		+= -mrtsc
+cflags-$(CONFIG_ARC_DW2_UNWIND)		+= -fasynchronous-unwind-tables
+
+ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
+# Generic build system uses -O2, we want -O3
+cflags-y  += -O3
+endif
+
+# small data is default for elf32 tool-chain. If not usable, disable it
+# This also allows repurposing GP as scratch reg to gcc reg allocator
+disable_small_data := y
+cflags-$(disable_small_data)		+= -mno-sdata -fcall-used-gp
+
+cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= -mbig-endian
+ldflags-$(CONFIG_CPU_BIG_ENDIAN)	+= -EB
+
+# STAR 9000518362:
+# arc-linux-uclibc-ld (buildroot) or arceb-elf32-ld (EZChip) don't accept
+# --build-id w/o "-marclinux".
+# Default arc-elf32-ld is OK
+ldflags-y				+= -marclinux
+
+ARC_LIBGCC				:= -mA7
+cflags-$(CONFIG_ARC_HAS_HW_MPY)		+= -multcost=16
+
+ifndef CONFIG_ARC_HAS_HW_MPY
+	cflags-y	+= -mno-mpy
+
+# newlib for ARC700 assumes MPY to be always present, which is generally true
+# However, if someone really doesn't want MPY, we need to use the 600 ver
+# which coupled with -mno-mpy will use mpy emulation
+# With gcc 4.4.7, -mno-mpy is enough to make any other related adjustments,
+# e.g. increased cost of MPY. With gcc 4.2.1 this had to be explicitly hinted
+
+	ARC_LIBGCC		:= -marc600
+	ifneq ($(atleast_gcc44),y)
+		cflags-y	+= -multcost=30
+	endif
+endif
+
+LIBGCC	:= $(shell $(CC) $(ARC_LIBGCC) $(cflags-y) --print-libgcc-file-name)
+
+# Modules with short calls might break for calls into builtin-kernel
+KBUILD_CFLAGS_MODULE	+= -mlong-calls
+
+# Finally dump everything into the kernel build system
+KBUILD_CFLAGS	+= $(cflags-y)
+KBUILD_AFLAGS	+= $(KBUILD_CFLAGS)
+LDFLAGS		+= $(ldflags-y)
+
+head-y		:= arch/arc/kernel/head.o
+
+# See arch/arc/Kbuild for content of core part of the kernel
+core-y		+= arch/arc/
+
+# w/o this dtb won't embed into kernel binary
+core-y		+= arch/arc/boot/dts/
+
+core-$(CONFIG_ARC_PLAT_FPGA_LEGACY)	+= arch/arc/plat-arcfpga/
+
+drivers-$(CONFIG_OPROFILE)	+= arch/arc/oprofile/
+
+libs-y		+= arch/arc/lib/ $(LIBGCC)
+
+# Default target for make without any arguments.
+KBUILD_IMAGE := bootpImage
+
+all:	$(KBUILD_IMAGE)
+boot	:= arch/arc/boot
+
+bootpImage: vmlinux
+
+uImage: vmlinux
+	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+%.dtb %.dtb.S %.dtb.o: scripts
+	$(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@
+
+dtbs: scripts
+	$(Q)$(MAKE) $(build)=$(boot)/dts dtbs
+
+archclean:
+	$(Q)$(MAKE) $(clean)=$(boot)
+
+# Hacks to enable final link due to absence of link-time branch relaxation
+# and gcc choosing optimal (shorter) branches at -O3
+#
+# vineetg Feb 2010: -mlong-calls switched off for overall kernel build
+# However lib/decompress_inflate.o (.init.text) calls
+# zlib_inflate_workspacesize (.text) causing relocation errors.
+# Thus forcing all extern calls in this file to be long calls
+export CFLAGS_decompress_inflate.o = -mmedium-calls
+export CFLAGS_initramfs.o = -mmedium-calls
+ifdef CONFIG_SMP
+export CFLAGS_core.o = -mmedium-calls
+endif

+ 26 - 0
arch/arc/boot/Makefile

@@ -0,0 +1,26 @@
+targets := vmlinux.bin vmlinux.bin.gz uImage
+
+# uImage build relies on mkimage being available on your host for ARC target
+# You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage
+# and make sure it's reachable from your PATH
+MKIMAGE := $(srctree)/scripts/mkuboot.sh
+
+OBJCOPYFLAGS= -O binary -R .note -R .note.gnu.build-id -R .comment -S
+
+LINUX_START_TEXT = $$(readelf -h vmlinux | \
+			grep "Entry point address" | grep -o 0x.*)
+
+UIMAGE_LOADADDR    = $(CONFIG_LINUX_LINK_BASE)
+UIMAGE_ENTRYADDR   = $(LINUX_START_TEXT)
+UIMAGE_COMPRESSION = gzip
+
+$(obj)/vmlinux.bin: vmlinux FORCE
+	$(call if_changed,objcopy)
+
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,gzip)
+
+$(obj)/uImage: $(obj)/vmlinux.bin.gz FORCE
+	$(call if_changed,uimage)
+
+PHONY += FORCE

+ 13 - 0
arch/arc/boot/dts/Makefile

@@ -0,0 +1,13 @@
+# Built-in dtb
+builtindtb-y		:= angel4
+
+ifneq ($(CONFIG_ARC_BUILTIN_DTB_NAME),"")
+	builtindtb-y	:= $(patsubst "%",%,$(CONFIG_ARC_BUILTIN_DTB_NAME))
+endif
+
+obj-y   += $(builtindtb-y).dtb.o
+targets += $(builtindtb-y).dtb
+
+dtbs:  $(addprefix  $(obj)/, $(builtindtb-y).dtb)
+
+clean-files := *.dtb

+ 55 - 0
arch/arc/boot/dts/angel4.dts

@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "skeleton.dtsi"
+
+/ {
+	compatible = "snps,arc-angel4";
+	clock-frequency = <80000000>;	/* 80 MHZ */
+	#address-cells = <1>;
+	#size-cells = <1>;
+	interrupt-parent = <&intc>;
+
+	chosen {
+		bootargs = "console=ttyARC0,115200n8";
+	};
+
+	aliases {
+		serial0 = &arcuart0;
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>;	/* 256M */
+	};
+
+	fpga {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		/* child and parent address space 1:1 mapped */
+		ranges;
+
+		intc: interrupt-controller {
+			compatible = "snps,arc700-intc";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+		};
+
+		arcuart0: serial@c0fc1000 {
+			compatible = "snps,arc-uart";
+			reg = <0xc0fc1000 0x100>;
+			interrupts = <5>;
+			clock-frequency = <80000000>;
+			current-speed = <115200>;
+			status = "okay";
+		};
+	};
+};

+ 10 - 0
arch/arc/boot/dts/skeleton.dts

@@ -0,0 +1,10 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "skeleton.dtsi"

+ 37 - 0
arch/arc/boot/dts/skeleton.dtsi

@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Skeleton device tree; the bare minimum needed to boot; just include and
+ * add a compatible value.
+ */
+
+/ {
+	compatible = "snps,arc";
+	clock-frequency = <80000000>;	/* 80 MHZ */
+	#address-cells = <1>;
+	#size-cells = <1>;
+	chosen { };
+	aliases { };
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			device_type = "cpu";
+			compatible = "snps,arc770d";
+			reg = <0>;
+		};
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>;	/* 256M */
+	};
+};

+ 61 - 0
arch/arc/configs/fpga_defconfig

@@ -0,0 +1,61 @@
+CONFIG_CROSS_COMPILE="arc-elf32-"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_DEFAULT_HOSTNAME="ARCLinux"
+# CONFIG_SWAP is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../arc_initramfs"
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARC_PLAT_FPGA_LEGACY=y
+CONFIG_ARC_BOARD_ML509=y
+# CONFIG_ARC_HAS_RTSC is not set
+CONFIG_ARC_BUILTIN_DTB_NAME="angel4"
+# CONFIG_COMPACTION is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IPV6 is not set
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_BLK_DEV is not set
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_ARC=y
+CONFIG_SERIAL_ARC_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_XZ_DEC=y

+ 49 - 0
arch/arc/include/asm/Kbuild

@@ -0,0 +1,49 @@
+generic-y += auxvec.h
+generic-y += bugs.h
+generic-y += bitsperlong.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += fb.h
+generic-y += ftrace.h
+generic-y += hardirq.h
+generic-y += hw_irq.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kmap_types.h
+generic-y += kvm_para.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += parport.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += siginfo.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += types.h
+generic-y += ucontext.h
+generic-y += user.h
+generic-y += vga.h
+generic-y += xor.h

+ 433 - 0
arch/arc/include/asm/arcregs.h

@@ -0,0 +1,433 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_ARCREGS_H
+#define _ASM_ARC_ARCREGS_H
+
+#ifdef __KERNEL__
+
+/* Build Configuration Registers */
+#define ARC_REG_DCCMBASE_BCR	0x61	/* DCCM Base Addr */
+#define ARC_REG_CRC_BCR		0x62
+#define ARC_REG_DVFB_BCR	0x64
+#define ARC_REG_EXTARITH_BCR	0x65
+#define ARC_REG_VECBASE_BCR	0x68
+#define ARC_REG_PERIBASE_BCR	0x69
+#define ARC_REG_FP_BCR		0x6B	/* Single-Precision FPU */
+#define ARC_REG_DPFP_BCR	0x6C	/* Dbl Precision FPU */
+#define ARC_REG_MMU_BCR		0x6f
+#define ARC_REG_DCCM_BCR	0x74	/* DCCM Present + SZ */
+#define ARC_REG_TIMERS_BCR	0x75
+#define ARC_REG_ICCM_BCR	0x78
+#define ARC_REG_XY_MEM_BCR	0x79
+#define ARC_REG_MAC_BCR		0x7a
+#define ARC_REG_MUL_BCR		0x7b
+#define ARC_REG_SWAP_BCR	0x7c
+#define ARC_REG_NORM_BCR	0x7d
+#define ARC_REG_MIXMAX_BCR	0x7e
+#define ARC_REG_BARREL_BCR	0x7f
+#define ARC_REG_D_UNCACH_BCR	0x6A
+
+/* status32 Bits Positions */
+#define STATUS_H_BIT		0	/* CPU Halted */
+#define STATUS_E1_BIT		1	/* Int 1 enable */
+#define STATUS_E2_BIT		2	/* Int 2 enable */
+#define STATUS_A1_BIT		3	/* Int 1 active */
+#define STATUS_A2_BIT		4	/* Int 2 active */
+#define STATUS_AE_BIT		5	/* Exception active */
+#define STATUS_DE_BIT		6	/* PC is in delay slot */
+#define STATUS_U_BIT		7	/* User/Kernel mode */
+#define STATUS_L_BIT		12	/* Loop inhibit */
+
+/* These masks correspond to the status word(STATUS_32) bits */
+#define STATUS_H_MASK		(1<<STATUS_H_BIT)
+#define STATUS_E1_MASK		(1<<STATUS_E1_BIT)
+#define STATUS_E2_MASK		(1<<STATUS_E2_BIT)
+#define STATUS_A1_MASK		(1<<STATUS_A1_BIT)
+#define STATUS_A2_MASK		(1<<STATUS_A2_BIT)
+#define STATUS_AE_MASK		(1<<STATUS_AE_BIT)
+#define STATUS_DE_MASK		(1<<STATUS_DE_BIT)
+#define STATUS_U_MASK		(1<<STATUS_U_BIT)
+#define STATUS_L_MASK		(1<<STATUS_L_BIT)
+
+/*
+ * ECR: Exception Cause Reg bits-n-pieces
+ * [23:16] = Exception Vector
+ * [15: 8] = Exception Cause Code
+ * [ 7: 0] = Exception Parameters (for certain types only)
+ */
+#define ECR_VEC_MASK			0xff0000
+#define ECR_CODE_MASK			0x00ff00
+#define ECR_PARAM_MASK			0x0000ff
+
+/* Exception Cause Vector Values */
+#define ECR_V_INSN_ERR			0x02
+#define ECR_V_MACH_CHK			0x20
+#define ECR_V_ITLB_MISS			0x21
+#define ECR_V_DTLB_MISS			0x22
+#define ECR_V_PROTV			0x23
+
+/* Protection Violation Exception Cause Code Values */
+#define ECR_C_PROTV_INST_FETCH		0x00
+#define ECR_C_PROTV_LOAD		0x01
+#define ECR_C_PROTV_STORE		0x02
+#define ECR_C_PROTV_XCHG		0x03
+#define ECR_C_PROTV_MISALIG_DATA	0x04
+
+/* DTLB Miss Exception Cause Code Values */
+#define ECR_C_BIT_DTLB_LD_MISS		8
+#define ECR_C_BIT_DTLB_ST_MISS		9
+
+
+/* Auxiliary registers */
+#define AUX_IDENTITY		4
+#define AUX_INTR_VEC_BASE	0x25
+#define AUX_IRQ_LEV		0x200	/* IRQ Priority: L1 or L2 */
+#define AUX_IRQ_HINT		0x201	/* For generating Soft Interrupts */
+#define AUX_IRQ_LV12		0x43	/* interrupt level register */
+
+#define AUX_IENABLE		0x40c
+#define AUX_ITRIGGER		0x40d
+#define AUX_IPULSE		0x415
+
+/* Timer related Aux registers */
+#define ARC_REG_TIMER0_LIMIT	0x23	/* timer 0 limit */
+#define ARC_REG_TIMER0_CTRL	0x22	/* timer 0 control */
+#define ARC_REG_TIMER0_CNT	0x21	/* timer 0 count */
+#define ARC_REG_TIMER1_LIMIT	0x102	/* timer 1 limit */
+#define ARC_REG_TIMER1_CTRL	0x101	/* timer 1 control */
+#define ARC_REG_TIMER1_CNT	0x100	/* timer 1 count */
+
+#define TIMER_CTRL_IE		(1 << 0) /* Interrupt when Count reaches limit */
+#define TIMER_CTRL_NH		(1 << 1) /* Count only when CPU NOT halted */
+
+/* MMU Management regs */
+#define ARC_REG_TLBPD0		0x405
+#define ARC_REG_TLBPD1		0x406
+#define ARC_REG_TLBINDEX	0x407
+#define ARC_REG_TLBCOMMAND	0x408
+#define ARC_REG_PID		0x409
+#define ARC_REG_SCRATCH_DATA0	0x418
+
+/* Bits in MMU PID register */
+#define MMU_ENABLE		(1 << 31)	/* Enable MMU for process */
+
+/* Error code if probe fails */
+#define TLB_LKUP_ERR		0x80000000
+
+/* TLB Commands */
+#define TLBWrite    0x1
+#define TLBRead     0x2
+#define TLBGetIndex 0x3
+#define TLBProbe    0x4
+
+#if (CONFIG_ARC_MMU_VER >= 2)
+#define TLBWriteNI  0x5		/* write JTLB without inv uTLBs */
+#define TLBIVUTLB   0x6		/* explicitly inv uTLBs */
+#else
+#undef TLBWriteNI		/* These cmds don't exist on older MMU */
+#undef TLBIVUTLB
+#endif
+
+/* Instruction cache related Auxiliary registers */
+#define ARC_REG_IC_BCR		0x77	/* Build Config reg */
+#define ARC_REG_IC_IVIC		0x10
+#define ARC_REG_IC_CTRL		0x11
+#define ARC_REG_IC_IVIL		0x19
+#if (CONFIG_ARC_MMU_VER > 2)
+#define ARC_REG_IC_PTAG		0x1E
+#endif
+
+/* Bit val in IC_CTRL */
+#define IC_CTRL_CACHE_DISABLE   0x1
+
+/* Data cache related Auxiliary registers */
+#define ARC_REG_DC_BCR		0x72
+#define ARC_REG_DC_IVDC		0x47
+#define ARC_REG_DC_CTRL		0x48
+#define ARC_REG_DC_IVDL		0x4A
+#define ARC_REG_DC_FLSH		0x4B
+#define ARC_REG_DC_FLDL		0x4C
+#if (CONFIG_ARC_MMU_VER > 2)
+#define ARC_REG_DC_PTAG		0x5C
+#endif
+
+/* Bit val in DC_CTRL */
+#define DC_CTRL_INV_MODE_FLUSH  0x40
+#define DC_CTRL_FLUSH_STATUS    0x100
+
+/* MMU Management regs */
+#define ARC_REG_PID		0x409
+#define ARC_REG_SCRATCH_DATA0	0x418
+
+/* Bits in MMU PID register */
+#define MMU_ENABLE		(1 << 31)	/* Enable MMU for process */
+
+/*
+ * Floating Pt Registers
+ * Status regs are read-only (build-time) so need not be saved/restored
+ */
+#define ARC_AUX_FP_STAT         0x300
+#define ARC_AUX_DPFP_1L         0x301
+#define ARC_AUX_DPFP_1H         0x302
+#define ARC_AUX_DPFP_2L         0x303
+#define ARC_AUX_DPFP_2H         0x304
+#define ARC_AUX_DPFP_STAT       0x305
+
+#ifndef __ASSEMBLY__
+
+/*
+ ******************************************************************
+ *      Inline ASM macros to read/write AUX Regs
+ *      Essentially invocation of lr/sr insns from "C"
+ */
+
+#if 1
+
+#define read_aux_reg(reg)	__builtin_arc_lr(reg)
+
+/* gcc builtin sr needs reg param to be long immediate */
+#define write_aux_reg(reg_immed, val)		\
+		__builtin_arc_sr((unsigned int)val, reg_immed)
+
+#else
+
+#define read_aux_reg(reg)		\
+({					\
+	unsigned int __ret;		\
+	__asm__ __volatile__(		\
+	"	lr    %0, [%1]"		\
+	: "=r"(__ret)			\
+	: "i"(reg));			\
+	__ret;				\
+})
+
+/*
+ * Aux Reg address is specified as long immediate by caller
+ * e.g.
+ *    write_aux_reg(0x69, some_val);
+ * This generates tightest code.
+ */
+#define write_aux_reg(reg_imm, val)	\
+({					\
+	__asm__ __volatile__(		\
+	"	sr   %0, [%1]	\n"	\
+	:				\
+	: "ir"(val), "i"(reg_imm));	\
+})
+
+/*
+ * Aux Reg address is specified in a variable
+ *  * e.g.
+ *      reg_num = 0x69
+ *      write_aux_reg2(reg_num, some_val);
+ * This has to generate glue code to load the reg num from
+ *  memory to a reg hence not recommended.
+ */
+#define write_aux_reg2(reg_in_var, val)		\
+({						\
+	unsigned int tmp;			\
+						\
+	__asm__ __volatile__(			\
+	"	ld   %0, [%2]	\n\t"		\
+	"	sr   %1, [%0]	\n\t"		\
+	: "=&r"(tmp)				\
+	: "r"(val), "memory"(&reg_in_var));	\
+})
+
+#endif
+
+#define READ_BCR(reg, into)				\
+{							\
+	unsigned int tmp;				\
+	tmp = read_aux_reg(reg);			\
+	if (sizeof(tmp) == sizeof(into)) {		\
+		into = *((typeof(into) *)&tmp);		\
+	} else {					\
+		extern void bogus_undefined(void);	\
+		bogus_undefined();			\
+	}						\
+}
+
+#define WRITE_BCR(reg, into)				\
+{							\
+	unsigned int tmp;				\
+	if (sizeof(tmp) == sizeof(into)) {		\
+		tmp = (*(unsigned int *)(into));	\
+		write_aux_reg(reg, tmp);		\
+	} else  {					\
+		extern void bogus_undefined(void);	\
+		bogus_undefined();			\
+	}						\
+}
+
+/* Helpers */
+#define TO_KB(bytes)		((bytes) >> 10)
+#define TO_MB(bytes)		(TO_KB(bytes) >> 10)
+#define PAGES_TO_KB(n_pages)	((n_pages) << (PAGE_SHIFT - 10))
+#define PAGES_TO_MB(n_pages)	(PAGES_TO_KB(n_pages) >> 10)
+
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+/* These DPFP regs need to be saved/restored across ctx-sw */
+struct arc_fpu {
+	struct {
+		unsigned int l, h;
+	} aux_dpfp[2];
+};
+#endif
+
+/*
+ ***************************************************************
+ * Build Configuration Registers, with encoded hardware config
+ */
+struct bcr_identity {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int chip_id:16, cpu_id:8, family:8;
+#else
+	unsigned int family:8, cpu_id:8, chip_id:16;
+#endif
+};
+
+struct bcr_mmu_1_2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
+#else
+	unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
+#endif
+};
+
+struct bcr_mmu_3 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
+		     u_itlb:4, u_dtlb:4;
+#else
+	unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4,
+		     ways:4, ver:8;
+#endif
+};
+
+#define EXTN_SWAP_VALID     0x1
+#define EXTN_NORM_VALID     0x2
+#define EXTN_MINMAX_VALID   0x2
+#define EXTN_BARREL_VALID   0x2
+
+struct bcr_extn {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int pad:20, crc:1, ext_arith:2, mul:2, barrel:2, minmax:2,
+		     norm:2, swap:1;
+#else
+	unsigned int swap:1, norm:2, minmax:2, barrel:2, mul:2, ext_arith:2,
+		     crc:1, pad:20;
+#endif
+};
+
+/* DSP Options Ref Manual */
+struct bcr_extn_mac_mul {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int pad:16, type:8, ver:8;
+#else
+	unsigned int ver:8, type:8, pad:16;
+#endif
+};
+
+struct bcr_extn_xymem {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int ram_org:2, num_banks:4, bank_sz:4, ver:8;
+#else
+	unsigned int ver:8, bank_sz:4, num_banks:4, ram_org:2;
+#endif
+};
+
+struct bcr_cache {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
+#else
+	unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
+#endif
+};
+
+struct bcr_perip {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int start:8, pad2:8, sz:8, pad:8;
+#else
+	unsigned int pad:8, sz:8, pad2:8, start:8;
+#endif
+};
+struct bcr_iccm {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int base:16, pad:5, sz:3, ver:8;
+#else
+	unsigned int ver:8, sz:3, pad:5, base:16;
+#endif
+};
+
+/* DCCM Base Address Register: ARC_REG_DCCMBASE_BCR */
+struct bcr_dccm_base {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int addr:24, ver:8;
+#else
+	unsigned int ver:8, addr:24;
+#endif
+};
+
+/* DCCM RAM Configuration Register: ARC_REG_DCCM_BCR */
+struct bcr_dccm {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int res:21, sz:3, ver:8;
+#else
+	unsigned int ver:8, sz:3, res:21;
+#endif
+};
+
+/* Both SP and DP FPU BCRs have same format */
+struct bcr_fp {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int fast:1, ver:8;
+#else
+	unsigned int ver:8, fast:1;
+#endif
+};
+
+/*
+ *******************************************************************
+ * Generic structures to hold build configuration used at runtime
+ */
+
+struct cpuinfo_arc_mmu {
+	unsigned int ver, pg_sz, sets, ways, u_dtlb, u_itlb, num_tlb;
+};
+
+struct cpuinfo_arc_cache {
+	unsigned int has_aliasing, sz, line_len, assoc, ver;
+};
+
+struct cpuinfo_arc_ccm {
+	unsigned int base_addr, sz;
+};
+
+struct cpuinfo_arc {
+	struct cpuinfo_arc_cache icache, dcache;
+	struct cpuinfo_arc_mmu mmu;
+	struct bcr_identity core;
+	unsigned int timers;
+	unsigned int vec_base;
+	unsigned int uncached_base;
+	struct cpuinfo_arc_ccm iccm, dccm;
+	struct bcr_extn extn;
+	struct bcr_extn_xymem extn_xymem;
+	struct bcr_extn_mac_mul extn_mac_mul;
+	struct bcr_fp fp, dpfp;
+};
+
+extern struct cpuinfo_arc cpuinfo_arc700[];
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_ARC_ARCREGS_H */
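As a rough illustration of how these pieces fit together (a sketch only; the helper name and the pr_info call are illustrative, not part of this file), a caller can pull the IDENTITY aux register into the bcr_identity overlay via READ_BCR and split an ECR value with the masks defined above:

    static void decode_core_and_ecr(unsigned int ecr)
    {
        struct bcr_identity ident;
        unsigned int vec, code, param;

        /* read the IDENTITY aux reg into the bitfield overlay */
        READ_BCR(AUX_IDENTITY, ident);

        /* split an Exception Cause Register value using the masks above */
        vec   = (ecr & ECR_VEC_MASK)  >> 16;  /* exception vector  [23:16] */
        code  = (ecr & ECR_CODE_MASK) >> 8;   /* cause code        [15: 8] */
        param =  ecr & ECR_PARAM_MASK;        /* parameters        [ 7: 0] */

        pr_info("ARC family %x cpu %x: ECR vec=%x code=%x param=%x\n",
                ident.family, ident.cpu_id, vec, code, param);
    }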

+ 9 - 0
arch/arc/include/asm/asm-offsets.h

@@ -0,0 +1,9 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <generated/asm-offsets.h>

+ 232 - 0
arch/arc/include/asm/atomic.h

@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_ATOMIC_H
+#define _ASM_ARC_ATOMIC_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+#include <asm/smp.h>
+
+#define atomic_read(v)  ((v)->counter)
+
+#ifdef CONFIG_ARC_HAS_LLSC
+
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+	unsigned int temp;
+
+	__asm__ __volatile__(
+	"1:	llock   %0, [%1]	\n"
+	"	add     %0, %0, %2	\n"
+	"	scond   %0, [%1]	\n"
+	"	bnz     1b		\n"
+	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */
+	: "r"(&v->counter), "ir"(i)
+	: "cc");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	unsigned int temp;
+
+	__asm__ __volatile__(
+	"1:	llock   %0, [%1]	\n"
+	"	sub     %0, %0, %2	\n"
+	"	scond   %0, [%1]	\n"
+	"	bnz     1b		\n"
+	: "=&r"(temp)
+	: "r"(&v->counter), "ir"(i)
+	: "cc");
+}
+
+/* add and also return the new value */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned int temp;
+
+	__asm__ __volatile__(
+	"1:	llock   %0, [%1]	\n"
+	"	add     %0, %0, %2	\n"
+	"	scond   %0, [%1]	\n"
+	"	bnz     1b		\n"
+	: "=&r"(temp)
+	: "r"(&v->counter), "ir"(i)
+	: "cc");
+
+	return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned int temp;
+
+	__asm__ __volatile__(
+	"1:	llock   %0, [%1]	\n"
+	"	sub     %0, %0, %2	\n"
+	"	scond   %0, [%1]	\n"
+	"	bnz     1b		\n"
+	: "=&r"(temp)
+	: "r"(&v->counter), "ir"(i)
+	: "cc");
+
+	return temp;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+	unsigned int temp;
+
+	__asm__ __volatile__(
+	"1:	llock   %0, [%1]	\n"
+	"	bic     %0, %0, %2	\n"
+	"	scond   %0, [%1]	\n"
+	"	bnz     1b		\n"
+	: "=&r"(temp)
+	: "r"(addr), "ir"(mask)
+	: "cc");
+}
+
+#else	/* !CONFIG_ARC_HAS_LLSC */
+
+#ifndef CONFIG_SMP
+
+ /* violating the atomic_xxx API locking protocol in UP for optimization's sake */
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+#else
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+	/*
+	 * Independent of hardware support, all of the atomic_xxx() APIs need
+	 * to follow the same locking rules to make sure that a "hardware"
+	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
+	 * sequence
+	 *
+	 * Thus atomic_set() despite being 1 insn (and seemingly atomic)
+	 * requires the locking.
+	 */
+	unsigned long flags;
+
+	atomic_ops_lock(flags);
+	v->counter = i;
+	atomic_ops_unlock(flags);
+}
+#endif
+
+/*
+ * Non hardware assisted Atomic-R-M-W
+ * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
+ */
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+	unsigned long flags;
+
+	atomic_ops_lock(flags);
+	v->counter += i;
+	atomic_ops_unlock(flags);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	unsigned long flags;
+
+	atomic_ops_lock(flags);
+	v->counter -= i;
+	atomic_ops_unlock(flags);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned long flags;
+	unsigned long temp;
+
+	atomic_ops_lock(flags);
+	temp = v->counter;
+	temp += i;
+	v->counter = temp;
+	atomic_ops_unlock(flags);
+
+	return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned long flags;
+	unsigned long temp;
+
+	atomic_ops_lock(flags);
+	temp = v->counter;
+	temp -= i;
+	v->counter = temp;
+	atomic_ops_unlock(flags);
+
+	return temp;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+	unsigned long flags;
+
+	atomic_ops_lock(flags);
+	*addr &= ~mask;
+	atomic_ops_unlock(flags);
+}
+
+#endif /* !CONFIG_ARC_HAS_LLSC */
+
+/**
+ * __atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns the old value of @v
+ */
+#define __atomic_add_unless(v, a, u)					\
+({									\
+	int c, old;							\
+	c = atomic_read(v);						\
+	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
+		c = old;						\
+	c;								\
+})
+
+#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
+
+#define atomic_inc(v)			atomic_add(1, v)
+#define atomic_dec(v)			atomic_sub(1, v)
+
+#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v)		atomic_add_return(1, (v))
+#define atomic_dec_return(v)		atomic_sub_return(1, (v))
+#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)
+
+#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)
+
+#define ATOMIC_INIT(i)			{ (i) }
+
+#include <asm-generic/atomic64.h>
+
+#endif
+
+#endif
+
+#endif
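A brief usage sketch of the API above (the object and its teardown are hypothetical, and kfree assumes <linux/slab.h>): atomic_inc_not_zero() takes a reference only while the count is still non-zero, and atomic_dec_and_test() detects the final put:

    struct obj {
        atomic_t refcnt;            /* hypothetical refcounted object */
    };

    static int obj_get(struct obj *o)
    {
        /* returns 0 once refcnt has already dropped to zero */
        return atomic_inc_not_zero(&o->refcnt);
    }

    static void obj_put(struct obj *o)
    {
        if (atomic_dec_and_test(&o->refcnt))
            kfree(o);               /* last reference dropped */
    }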

+ 42 - 0
arch/arc/include/asm/barrier.h

@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+/* TODO-vineetg: Need to see what this does, don't we need sync anywhere */
+#define mb() __asm__ __volatile__ ("" : : : "memory")
+#define rmb() mb()
+#define wmb() mb()
+#define set_mb(var, value)  do { var = value; mb(); } while (0)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+#define read_barrier_depends()  mb()
+
+/* TODO-vineetg verify the correctness of macros here */
+#ifdef CONFIG_SMP
+#define smp_mb()        mb()
+#define smp_rmb()       rmb()
+#define smp_wmb()       wmb()
+#else
+#define smp_mb()        barrier()
+#define smp_rmb()       barrier()
+#define smp_wmb()       barrier()
+#endif
+
+#define smp_mb__before_atomic_dec()	barrier()
+#define smp_mb__after_atomic_dec()	barrier()
+#define smp_mb__before_atomic_inc()	barrier()
+#define smp_mb__after_atomic_inc()	barrier()
+
+#define smp_read_barrier_depends()      do { } while (0)
+
+#endif
+
+#endif

+ 516 - 0
arch/arc/include/asm/bitops.h

@@ -0,0 +1,516 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_BITOPS_H
+#define _ASM_BITOPS_H
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+/*
+ * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
+ * The Kconfig glue ensures that in SMP, this is only set if the container
+ * SoC/platform has cross-core coherent LLOCK/SCOND
+ */
+#if defined(CONFIG_ARC_HAS_LLSC)
+
+static inline void set_bit(unsigned long nr, volatile unsigned long *m)
+{
+	unsigned int temp;
+
+	m += nr >> 5;
+
+	if (__builtin_constant_p(nr))
+		nr &= 0x1f;
+
+	__asm__ __volatile__(
+	"1:	llock   %0, [%1]	\n"
+	"	bset    %0, %0, %2	\n"
+	"	scond   %0, [%1]	\n"
+	"	bnz     1b	\n"
+	: "=&r"(temp)
+	: "r"(m), "ir"(nr)
+	: "cc");
+}
+
+static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+	unsigned int temp;
+
+	m += nr >> 5;
+
+	if (__builtin_constant_p(nr))
+		nr &= 0x1f;
+
+	__asm__ __volatile__(
+	"1:	llock   %0, [%1]	\n"
+	"	bclr    %0, %0, %2	\n"
+	"	scond   %0, [%1]	\n"
+	"	bnz     1b	\n"
+	: "=&r"(temp)
+	: "r"(m), "ir"(nr)
+	: "cc");
+}
+
+static inline void change_bit(unsigned long nr, volatile unsigned long *m)
+{
+	unsigned int temp;
+
+	m += nr >> 5;
+
+	if (__builtin_constant_p(nr))
+		nr &= 0x1f;
+
+	__asm__ __volatile__(
+	"1:	llock   %0, [%1]	\n"
+	"	bxor    %0, %0, %2	\n"
+	"	scond   %0, [%1]	\n"
+	"	bnz     1b		\n"
+	: "=&r"(temp)
+	: "r"(m), "ir"(nr)
+	: "cc");
+}
+
+/*
+ * Semantically:
+ *    Test the bit
+ *    if clear
+ *        set it and return 0 (old value)
+ *    else
+ *        return 1 (old value).
+ *
+ * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
+ * and the old value of bit is returned
+ */
+static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+{
+	unsigned long old, temp;
+
+	m += nr >> 5;
+
+	if (__builtin_constant_p(nr))
+		nr &= 0x1f;
+
+	__asm__ __volatile__(
+	"1:	llock   %0, [%2]	\n"
+	"	bset    %1, %0, %3	\n"
+	"	scond   %1, [%2]	\n"
+	"	bnz     1b		\n"
+	: "=&r"(old), "=&r"(temp)
+	: "r"(m), "ir"(nr)
+	: "cc");
+
+	return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+	unsigned int old, temp;
+
+	m += nr >> 5;
+
+	if (__builtin_constant_p(nr))
+		nr &= 0x1f;
+
+	__asm__ __volatile__(
+	"1:	llock   %0, [%2]	\n"
+	"	bclr    %1, %0, %3	\n"
+	"	scond   %1, [%2]	\n"
+	"	bnz     1b		\n"
+	: "=&r"(old), "=&r"(temp)
+	: "r"(m), "ir"(nr)
+	: "cc");
+
+	return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+{
+	unsigned int old, temp;
+
+	m += nr >> 5;
+
+	if (__builtin_constant_p(nr))
+		nr &= 0x1f;
+
+	__asm__ __volatile__(
+	"1:	llock   %0, [%2]	\n"
+	"	bxor    %1, %0, %3	\n"
+	"	scond   %1, [%2]	\n"
+	"	bnz     1b		\n"
+	: "=&r"(old), "=&r"(temp)
+	: "r"(m), "ir"(nr)
+	: "cc");
+
+	return (old & (1 << nr)) != 0;
+}
+
+#else	/* !CONFIG_ARC_HAS_LLSC */
+
+#include <asm/smp.h>
+
+/*
+ * Non hardware assisted Atomic-R-M-W
+ * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
+ *
+ * There's "significant" micro-optimization in writing our own variants of
+ * bitops (over generic variants)
+ *
+ * (1) The generic APIs have "signed" @nr while we have it "unsigned"
+ *     This avoids extra code being generated for pointer arithmetic, since
+ *     the compiler is "not sure" that the index is NOT -ve
+ * (2) Utilize the fact that ARCompact bit fiddling insns (BSET/BCLR/ASL) etc.
+ *     only consider bottom 5 bits of @nr, so NO need to mask them off.
+ *     (GCC Quirk: however for constant @nr we still need to do the masking
+ *             at compile time)
+ */
+
+static inline void set_bit(unsigned long nr, volatile unsigned long *m)
+{
+	unsigned long temp, flags;
+	m += nr >> 5;
+
+	if (__builtin_constant_p(nr))
+		nr &= 0x1f;
+
+	bitops_lock(flags);
+
+	temp = *m;
+	*m = temp | (1UL << nr);
+
+	bitops_unlock(flags);
+}
+
+static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+	unsigned long temp, flags;
+	m += nr >> 5;
+
+	if (__builtin_constant_p(nr))
+		nr &= 0x1f;
+
+	bitops_lock(flags);
+
+	temp = *m;
+	*m = temp & ~(1UL << nr);
+
+	bitops_unlock(flags);
+}
+
+static inline void change_bit(unsigned long nr, volatile unsigned long *m)
+{
+	unsigned long temp, flags;
+	m += nr >> 5;
+
+	if (__builtin_constant_p(nr))
+		nr &= 0x1f;
+
+	bitops_lock(flags);
+
+	temp = *m;
+	*m = temp ^ (1UL << nr);
+
+	bitops_unlock(flags);
+}
+
+static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+{
+	unsigned long old, flags;
+	m += nr >> 5;
+
+	if (__builtin_constant_p(nr))
+		nr &= 0x1f;
+
+	bitops_lock(flags);
+
+	old = *m;
+	*m = old | (1 << nr);
+
+	bitops_unlock(flags);
+
+	return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+	unsigned long old, flags;
+	m += nr >> 5;
+
+	if (__builtin_constant_p(nr))
+		nr &= 0x1f;
+
+	bitops_lock(flags);
+
+	old = *m;
+	*m = old & ~(1 << nr);
+
+	bitops_unlock(flags);
+
+	return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+{
+	unsigned long old, flags;
+	m += nr >> 5;
+
+	if (__builtin_constant_p(nr))
+		nr &= 0x1f;
+
+	bitops_lock(flags);
+
+	old = *m;
+	*m = old ^ (1 << nr);
+
+	bitops_unlock(flags);
+
+	return (old & (1 << nr)) != 0;
+}
+
+#endif /* CONFIG_ARC_HAS_LLSC */
+
+/***************************************
+ * Non atomic variants
+ **************************************/
+
+static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
+{
+	unsigned long temp;
+	m += nr >> 5;
+
+	if (__builtin_constant_p(nr))
+		nr &= 0x1f;
+
+	temp = *m;
+	*m = temp | (1UL << nr);
+}
+
+static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+	unsigned long temp;
+	m += nr >> 5;
+
+	if (__builtin_constant_p(nr))
+		nr &= 0x1f;
+
+	temp = *m;
+	*m = temp & ~(1UL << nr);
+}
+
+static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
+{
+	unsigned long temp;
+	m += nr >> 5;
+
+	if (__builtin_constant_p(nr))
+		nr &= 0x1f;
+
+	temp = *m;
+	*m = temp ^ (1UL << nr);
+}
+
+static inline int
+__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+{
+	unsigned long old;
+	m += nr >> 5;
+
+	if (__builtin_constant_p(nr))
+		nr &= 0x1f;
+
+	old = *m;
+	*m = old | (1 << nr);
+
+	return (old & (1 << nr)) != 0;
+}
+
+static inline int
+__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+	unsigned long old;
+	m += nr >> 5;
+
+	if (__builtin_constant_p(nr))
+		nr &= 0x1f;
+
+	old = *m;
+	*m = old & ~(1 << nr);
+
+	return (old & (1 << nr)) != 0;
+}
+
+static inline int
+__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+{
+	unsigned long old;
+	m += nr >> 5;
+
+	if (__builtin_constant_p(nr))
+		nr &= 0x1f;
+
+	old = *m;
+	*m = old ^ (1 << nr);
+
+	return (old & (1 << nr)) != 0;
+}
+
+/*
+ * This routine doesn't need to be atomic.
+ */
+static inline int
+__constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
+{
+	return ((1UL << (nr & 31)) &
+		(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
+}
+
+static inline int
+__test_bit(unsigned int nr, const volatile unsigned long *addr)
+{
+	unsigned long mask;
+
+	addr += nr >> 5;
+
+	/* ARC700 only considers 5 bits in bit-fiddling insn */
+	mask = 1 << nr;
+
+	return ((mask & *addr) != 0);
+}
+
+#define test_bit(nr, addr)	(__builtin_constant_p(nr) ? \
+					__constant_test_bit((nr), (addr)) : \
+					__test_bit((nr), (addr)))
+
+/*
+ * Count the number of zeros, starting from MSB
+ * Helper for fls( ) friends
+ * This is a pure count, so (1-32) or (0-31) doesn't apply
+ * It could be 0 to 32, based on num of 0's in there
+ * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF)=0, clz(0) = 32, clz(1) = 31
+ */
+static inline __attribute__ ((const)) int clz(unsigned int x)
+{
+	unsigned int res;
+
+	__asm__ __volatile__(
+	"	norm.f  %0, %1		\n"
+	"	mov.n   %0, 0		\n"
+	"	add.p   %0, %0, 1	\n"
+	: "=r"(res)
+	: "r"(x)
+	: "cc");
+
+	return res;
+}
+
+static inline int constant_fls(int x)
+{
+	int r = 32;
+
+	if (!x)
+		return 0;
+	if (!(x & 0xffff0000u)) {
+		x <<= 16;
+		r -= 16;
+	}
+	if (!(x & 0xff000000u)) {
+		x <<= 8;
+		r -= 8;
+	}
+	if (!(x & 0xf0000000u)) {
+		x <<= 4;
+		r -= 4;
+	}
+	if (!(x & 0xc0000000u)) {
+		x <<= 2;
+		r -= 2;
+	}
+	if (!(x & 0x80000000u)) {
+		x <<= 1;
+		r -= 1;
+	}
+	return r;
+}
+
+/*
+ * fls = Find Last Set in word
+ * @result: [1-32]
+ * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
+ */
+static inline __attribute__ ((const)) int fls(unsigned long x)
+{
+	if (__builtin_constant_p(x))
+	       return constant_fls(x);
+
+	return 32 - clz(x);
+}
+
+/*
+ * __fls: Similar to fls, but zero based (0-31)
+ */
+static inline __attribute__ ((const)) int __fls(unsigned long x)
+{
+	if (!x)
+		return 0;
+	else
+		return fls(x) - 1;
+}
+
+/*
+ * ffs = Find First Set in word (LSB to MSB)
+ * @result: [1-32], 0 if all 0's
+ */
+#define ffs(x)	({ unsigned long __t = (x); fls(__t & -__t); })
+
+/*
+ * __ffs: Similar to ffs, but zero based (0-31)
+ */
+static inline __attribute__ ((const)) int __ffs(unsigned long word)
+{
+	if (!word)
+		return word;
+
+	return ffs(word) - 1;
+}
+
+/*
+ * ffz = Find First Zero in word.
+ * @return:[0-31], 32 if all 1's
+ */
+#define ffz(x)	__ffs(~(x))
+
+/* TODO does this affect uni-processor code */
+#define smp_mb__before_clear_bit()  barrier()
+#define smp_mb__after_clear_bit()   barrier()
+
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/lock.h>
+
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif
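The conventions spelled out above (fls() is 1-based, __fls()/__ffs() are 0-based, ffz() finds the first zero bit) can be checked against a few constants; a sketch of a hypothetical self-test, not part of this file:

    static void bitops_selftest(void)
    {
        BUG_ON(fls(0) != 0);
        BUG_ON(fls(1) != 1);
        BUG_ON(fls(0x80000000UL) != 32);
        BUG_ON(__fls(0x80000000UL) != 31);
        BUG_ON(ffs(0x8UL) != 4);         /* 1-based position of lowest set bit */
        BUG_ON(__ffs(0x8UL) != 3);       /* 0-based variant */
        BUG_ON(ffz(0xfffffff7UL) != 3);  /* first zero bit, 0-based */
    }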

+ 37 - 0
arch/arc/include/asm/bug.h

@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_BUG_H
+#define _ASM_ARC_BUG_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/ptrace.h>
+
+struct task_struct;
+
+void show_regs(struct pt_regs *regs);
+void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs);
+void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
+			    unsigned long address, unsigned long cause_reg);
+void die(const char *str, struct pt_regs *regs, unsigned long address,
+	 unsigned long cause_reg);
+
+#define BUG()	do {				\
+	dump_stack();					\
+	pr_warn("Kernel BUG in %s: %s: %d!\n",	\
+		__FILE__, __func__,  __LINE__);	\
+} while (0)
+
+#define HAVE_ARCH_BUG
+
+#include <asm-generic/bug.h>
+
+#endif	/* !__ASSEMBLY__ */
+
+#endif

+ 75 - 0
arch/arc/include/asm/cache.h

@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_CACHE_H
+#define __ARC_ASM_CACHE_H
+
+/* In case cache ($$) line size is not configured, set up a dummy value for the rest of the kernel */
+#ifndef CONFIG_ARC_CACHE_LINE_SHIFT
+#define L1_CACHE_SHIFT		6
+#else
+#define L1_CACHE_SHIFT		CONFIG_ARC_CACHE_LINE_SHIFT
+#endif
+
+#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+
+#define ARC_ICACHE_WAYS	2
+#define ARC_DCACHE_WAYS	4
+
+/* Helpers */
+#define ARC_ICACHE_LINE_LEN	L1_CACHE_BYTES
+#define ARC_DCACHE_LINE_LEN	L1_CACHE_BYTES
+
+#define ICACHE_LINE_MASK	(~(ARC_ICACHE_LINE_LEN - 1))
+#define DCACHE_LINE_MASK	(~(ARC_DCACHE_LINE_LEN - 1))
+
+#if ARC_ICACHE_LINE_LEN != ARC_DCACHE_LINE_LEN
+#error "Need to fix some code as I/D cache lines not same"
+#else
+#define is_not_cache_aligned(p)	((unsigned long)p & (~DCACHE_LINE_MASK))
+#endif
+
+#ifndef __ASSEMBLY__
+
+/* Uncached access macros */
+#define arc_read_uncached_32(ptr)	\
+({					\
+	unsigned int __ret;		\
+	__asm__ __volatile__(		\
+	"	ld.di %0, [%1]	\n"	\
+	: "=r"(__ret)			\
+	: "r"(ptr));			\
+	__ret;				\
+})
+
+#define arc_write_uncached_32(ptr, data)\
+({					\
+	__asm__ __volatile__(		\
+	"	st.di %0, [%1]	\n"	\
+	:				\
+	: "r"(data), "r"(ptr));		\
+})
+
+/* used to give SHMLBA a value to avoid Cache Aliasing */
+extern unsigned int ARC_shmlba;
+
+#define ARCH_DMA_MINALIGN      L1_CACHE_BYTES
+
+/*
+ * ARC700 doesn't cache any access in top 256M.
+ * Ideal for wiring memory mapped peripherals as we don't need to do
+ * explicit uncached accesses (LD.di/ST.di) hence more portable drivers
+ */
+#define ARC_UNCACHED_ADDR_SPACE	0xc0000000
+
+extern void arc_cache_init(void);
+extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
+extern void __init read_decode_cache_bcr(void);
+#endif
+
+#endif /* __ARC_ASM_CACHE_H */

+ 67 - 0
arch/arc/include/asm/cacheflush.h

@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
+ *   -flush_cache_dup_mm (fork)
+ *   -likewise for flush_cache_mm (exit/execve)
+ *   -likewise for flush_cache_{range,page} (munmap, exit, COW-break)
+ *
+ *  vineetg: April 2008
+ *   -Added a critical CacheLine flush to copy_to_user_page( ) which
+ *     was causing gdbserver to not setup breakpoints consistently
+ */
+
+#ifndef _ASM_CACHEFLUSH_H
+#define _ASM_CACHEFLUSH_H
+
+#include <linux/mm.h>
+
+void flush_cache_all(void);
+
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr,
+				     int len);
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+
+void flush_dcache_page(struct page *page);
+
+void dma_cache_wback_inv(unsigned long start, unsigned long sz);
+void dma_cache_inv(unsigned long start, unsigned long sz);
+void dma_cache_wback(unsigned long start, unsigned long sz);
+
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+
+/* TBD: optimize this */
+#define flush_cache_vmap(start, end)		flush_cache_all()
+#define flush_cache_vunmap(start, end)		flush_cache_all()
+
+/*
+ * VM callbacks when entire/range of user-space V-P mappings are
+ * torn-down/get-invalidated
+ *
+ * Currently we don't support D$ aliasing configs for our VIPT caches
+ * NOPS for VIPT Cache with non-aliasing D$ configurations only
+ */
+#define flush_cache_dup_mm(mm)			/* called on fork */
+#define flush_cache_mm(mm)			/* called on munmap/exit */
+#define flush_cache_range(mm, u_vstart, u_vend)
+#define flush_cache_page(vma, u_vaddr, pfn)	/* PF handling/COW-break */
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len)		\
+do {									\
+	memcpy(dst, src, len);						\
+	if (vma->vm_flags & VM_EXEC)					\
+		flush_icache_range_vaddr((unsigned long)(dst), vaddr, len);\
+} while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
+	memcpy(dst, src, len);						\
+
+#endif

+ 101 - 0
arch/arc/include/asm/checksum.h

@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Joern Rennecke  <joern.rennecke@embecosm.com>: Jan 2012
+ *  -Insn Scheduling improvements to csum core routines.
+ *      = csum_fold( ) largely derived from ARM version.
+ *      = ip_fast_csum( ) to have modulo scheduling
+ *  -gcc 4.4.x broke networking. Alias analysis needed to be primed.
+ *   worked around by adding memory clobber to ip_fast_csum( )
+ *
+ * vineetg: May 2010
+ *  -Rewrote ip_fast_csum( ) and csum_fold( ) with fast inline asm
+ */
+
+#ifndef _ASM_ARC_CHECKSUM_H
+#define _ASM_ARC_CHECKSUM_H
+
+/*
+ *	Fold a partial checksum
+ *
+ *  The two 16-bit half-words comprising the 32-bit sum are added, any carry
+ *  out of bit 15 is added back in, and the final 16-bit result is inverted.
+ */
+static inline __sum16 csum_fold(__wsum s)
+{
+	unsigned r = s << 16 | s >> 16;	/* ror */
+	s = ~s;
+	s -= r;
+	return s >> 16;
+}
+
+/*
+ *	This is a version of ip_compute_csum() optimized for IP headers,
+ *	which always checksum on 4 octet boundaries.
+ */
+static inline __sum16
+ip_fast_csum(const void *iph, unsigned int ihl)
+{
+	const void *ptr = iph;
+	unsigned int tmp, tmp2, sum;
+
+	__asm__(
+	"	ld.ab  %0, [%3, 4]		\n"
+	"	ld.ab  %2, [%3, 4]		\n"
+	"	sub    %1, %4, 2		\n"
+	"	lsr.f  lp_count, %1, 1		\n"
+	"	bcc    0f			\n"
+	"	add.f  %0, %0, %2		\n"
+	"	ld.ab  %2, [%3, 4]		\n"
+	"0:	lp     1f			\n"
+	"	ld.ab  %1, [%3, 4]		\n"
+	"	adc.f  %0, %0, %2		\n"
+	"	ld.ab  %2, [%3, 4]		\n"
+	"	adc.f  %0, %0, %1		\n"
+	"1:	adc.f  %0, %0, %2		\n"
+	"	add.cs %0,%0,1			\n"
+	: "=&r"(sum), "=r"(tmp), "=&r"(tmp2), "+&r" (ptr)
+	: "r"(ihl)
+	: "cc", "lp_count", "memory");
+
+	return csum_fold(sum);
+}
+
+/*
+ * TCP pseudo Header is 12 bytes:
+ * SA [4], DA [4], zeroes [1], Proto[1], TCP Seg(hdr+data) Len [2]
+ */
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+		   unsigned short proto, __wsum sum)
+{
+	__asm__ __volatile__(
+	"	add.f %0, %0, %1	\n"
+	"	adc.f %0, %0, %2	\n"
+	"	adc.f %0, %0, %3	\n"
+	"	adc.f %0, %0, %4	\n"
+	"	adc   %0, %0, 0		\n"
+	: "+&r"(sum)
+	: "r"(saddr), "r"(daddr),
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	  "r"(len),
+#else
+	  "r"(len << 8),
+#endif
+	  "r"(htons(proto))
+	: "cc");
+
+	return sum;
+}
+
+#define csum_fold csum_fold
+#define ip_fast_csum ip_fast_csum
+#define csum_tcpudp_nofold csum_tcpudp_nofold
+
+#include <asm-generic/checksum.h>
+
+#endif /* _ASM_ARC_CHECKSUM_H */
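For reference, the fold performed by the hand-scheduled asm above is equivalent to the usual portable form; a sketch of what csum_fold() computes (a hypothetical reference helper, not part of this file):

    static inline __sum16 csum_fold_ref(__wsum partial)
    {
        u32 sum = (__force u32)partial;

        sum = (sum & 0xffff) + (sum >> 16);   /* add the two half-words */
        sum = (sum & 0xffff) + (sum >> 16);   /* fold any carry back in */
        return (__force __sum16)~sum;         /* invert the 16-bit result */
    }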

+ 22 - 0
arch/arc/include/asm/clk.h

@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_CLK_H
+#define _ASM_ARC_CLK_H
+
+/* Although we can't really hide core_freq, the accessor is still the better way */
+extern unsigned long core_freq;
+
+static inline unsigned long arc_get_core_freq(void)
+{
+	return core_freq;
+}
+
+extern int arc_set_core_freq(unsigned long);
+
+#endif

+ 143 - 0
arch/arc/include/asm/cmpxchg.h

@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_CMPXCHG_H
+#define __ASM_ARC_CMPXCHG_H
+
+#include <linux/types.h>
+#include <asm/smp.h>
+
+#ifdef CONFIG_ARC_HAS_LLSC
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+{
+	unsigned long prev;
+
+	__asm__ __volatile__(
+	"1:	llock   %0, [%1]	\n"
+	"	brne    %0, %2, 2f	\n"
+	"	scond   %3, [%1]	\n"
+	"	bnz     1b		\n"
+	"2:				\n"
+	: "=&r"(prev)
+	: "r"(ptr), "ir"(expected),
+	  "r"(new) /* can't be "ir". scond can't take limm for "b" */
+	: "cc");
+
+	return prev;
+}
+
+#else
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+{
+	unsigned long flags;
+	int prev;
+	volatile unsigned long *p = ptr;
+
+	atomic_ops_lock(flags);
+	prev = *p;
+	if (prev == expected)
+		*p = new;
+	atomic_ops_unlock(flags);
+	return prev;
+}
+
+#endif /* CONFIG_ARC_HAS_LLSC */
+
+#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
+				(unsigned long)(o), (unsigned long)(n)))
+
+/*
+ * Since not supported natively, ARC cmpxchg() uses atomic_ops_lock (UP/SMP)
+ * just to guarantee semantics.
+ * atomic_cmpxchg() needs to use the same locks as its other atomic siblings
+ * which also happens to be atomic_ops_lock.
+ *
+ * Thus despite being semantically different, the implementation of atomic_cmpxchg()
+ * is the same as cmpxchg().
+ */
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
+
+/*
+ * xchg (reg with memory) based on "Native atomic" EX insn
+ */
+static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
+				   int size)
+{
+	extern unsigned long __xchg_bad_pointer(void);
+
+	switch (size) {
+	case 4:
+		__asm__ __volatile__(
+		"	ex  %0, [%1]	\n"
+		: "+r"(val)
+		: "r"(ptr)
+		: "memory");
+
+		return val;
+	}
+	return __xchg_bad_pointer();
+}
+
+#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
+						 sizeof(*(ptr))))
+
+/*
+ * On ARC700, EX insn is inherently atomic, so by default "vanilla" xchg() need
+ * not require any locking. However there's a quirk.
+ * ARC lacks a native CMPXCHG, thus it is emulated (see above) using external locking -
+ * incidentally it "reuses" the same atomic_ops_lock used by the atomic APIs.
+ * Now, the llist code uses cmpxchg() and xchg() on the same data, so xchg() needs to
+ * abide by the same serializing rules, and thus ends up using atomic_ops_lock as well.
+ *
+ * This however is only relevant if SMP and/or ARC lacks LLSC
+ *   if (UP or LLSC)
+ *      xchg doesn't need serialization
+ *   else <==> !(UP or LLSC) <==> (!UP and !LLSC) <==> (SMP and !LLSC)
+ *      xchg needs serialization
+ */
+
+#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)
+
+#define xchg(ptr, with)			\
+({					\
+	unsigned long flags;		\
+	typeof(*(ptr)) old_val;		\
+					\
+	atomic_ops_lock(flags);		\
+	old_val = _xchg(ptr, with);	\
+	atomic_ops_unlock(flags);	\
+	old_val;			\
+})
+
+#else
+
+#define xchg(ptr, with)  _xchg(ptr, with)
+
+#endif
+
+/*
+ * "atomic" variant of xchg()
+ * REQ: It needs to follow the same serialization rules as other atomic_xxx()
+ * Since xchg() doesn't always do that, it would seem that the following definition
+ * is incorrect. But here's the rationale:
+ *   SMP : Even xchg() takes the atomic_ops_lock, so OK.
+ *   LLSC: atomic_ops_lock is not relevant at all (even in SMP, since LLSC
+ *         is natively "SMP safe", no serialization required).
+ *   UP  : other atomics disable IRQs, so there is no way an atomic_xchg() in a
+ *         different context could clobber them. atomic_xchg() itself would be 1
+ *         insn, so it can't be clobbered by others. Thus no serialization required when
+ *         atomic_xchg is involved.
+ */
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+#endif
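A short usage sketch of the primitives above (hypothetical helpers, not part of this file): a cmpxchg() retry loop that increments a counter without exceeding a cap, and xchg() used as a trivial test-and-set:

    /* bump *p by one, but never past 'max'; returns the value we observed */
    static inline unsigned long bounded_inc(unsigned long *p, unsigned long max)
    {
        unsigned long cur = *p, old;

        while (cur < max) {
            old = cmpxchg(p, cur, cur + 1);
            if (old == cur)
                break;          /* our update won the race */
            cur = old;          /* somebody else changed it, retry */
        }
        return cur;
    }

    /* crude test-and-set built on the EX-based xchg(): 1 if we grabbed the flag */
    static inline int try_grab(unsigned long *flag)
    {
        return xchg(flag, 1UL) == 0;
    }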

+ 32 - 0
arch/arc/include/asm/current.h

@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: May 16th, 2008
+ *  - Current macro is now implemented as "global register" r25
+ */
+
+#ifndef _ASM_ARC_CURRENT_H
+#define _ASM_ARC_CURRENT_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+register struct task_struct *curr_arc asm("r25");
+#define current (curr_arc)
+
+#else
+#include <asm-generic/current.h>
+#endif /* ! CONFIG_ARC_CURR_IN_REG */
+
+#endif /* ! __ASSEMBLY__ */
+
+#endif	/* __KERNEL__ */
+
+#endif /* _ASM_ARC_CURRENT_H */

+ 56 - 0
arch/arc/include/asm/defines.h

@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_DEFINES_H__
+#define __ARC_ASM_DEFINES_H__
+
+#if defined(CONFIG_ARC_MMU_V1)
+#define CONFIG_ARC_MMU_VER 1
+#elif defined(CONFIG_ARC_MMU_V2)
+#define CONFIG_ARC_MMU_VER 2
+#elif defined(CONFIG_ARC_MMU_V3)
+#define CONFIG_ARC_MMU_VER 3
+#endif
+
+#ifdef CONFIG_ARC_HAS_LLSC
+#define __CONFIG_ARC_HAS_LLSC_VAL 1
+#else
+#define __CONFIG_ARC_HAS_LLSC_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_HAS_SWAPE
+#define __CONFIG_ARC_HAS_SWAPE_VAL 1
+#else
+#define __CONFIG_ARC_HAS_SWAPE_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_HAS_RTSC
+#define __CONFIG_ARC_HAS_RTSC_VAL 1
+#else
+#define __CONFIG_ARC_HAS_RTSC_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_MMU_SASID
+#define __CONFIG_ARC_MMU_SASID_VAL 1
+#else
+#define __CONFIG_ARC_MMU_SASID_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+#define __CONFIG_ARC_HAS_ICACHE	1
+#else
+#define __CONFIG_ARC_HAS_ICACHE	0
+#endif
+
+#ifdef CONFIG_ARC_HAS_DCACHE
+#define __CONFIG_ARC_HAS_DCACHE	1
+#else
+#define __CONFIG_ARC_HAS_DCACHE	0
+#endif
+
+#endif /* __ARC_ASM_DEFINES_H__ */

+ 68 - 0
arch/arc/include/asm/delay.h

@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Delay routines using pre computed loops_per_jiffy value.
+ *
+ * vineetg: Feb 2012
+ *  -Rewrote in "C" to avoid dealing with availability of H/w MPY
+ *  -Also reduced the num of MPY operations from 3 to 2
+ *
+ * Amit Bhor: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_UDELAY_H
+#define __ASM_ARC_UDELAY_H
+
+#include <asm/param.h>		/* HZ */
+
+static inline void __delay(unsigned long loops)
+{
+	__asm__ __volatile__(
+	"1:	sub.f %0, %0, 1	\n"
+	"	jpnz 1b		\n"
+	: "+r"(loops)
+	:
+	: "cc");
+}
+
+extern void __bad_udelay(void);
+
+/*
+ * Normal Math for computing loops in "N" usecs
+ *  -we have precomputed @loops_per_jiffy
+ *  -1 sec has HZ jiffies
+ * loops per "N" usecs = ((loops_per_jiffy * HZ / 1000000) * N)
+ *
+ * Approximate Division by multiplication:
+ *  -Mathematically if we multiply and divide a number by the same value the
+ *   result remains unchanged:  In this case, we use 2^32
+ *  -> (loops_per_N_usec * 2^32 ) / 2^32
+ *  -> (((loops_per_jiffy * HZ / 1000000) * N) * 2^32) / 2^32
+ *  -> (loops_per_jiffy * HZ * N * 4295) / 2^32
+ *
+ *  -Divide by 2^32 is very simply right shift by 32
+ *  -We simply need to ensure that the multiply per above eqn happens in
+ *   64-bit precision (if CPU doesn't support it - gcc can emulate it)
+ */
+
+static inline void __udelay(unsigned long usecs)
+{
+	unsigned long loops;
+
+	/* (long long) cast ensures 64 bit MPY - real or emulated
+	 * HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
+	 */
+	loops = ((long long)(usecs * 4295 * HZ) *
+		 (long long)(loops_per_jiffy)) >> 32;
+
+	__delay(loops);
+}
+
+#define udelay(n) (__builtin_constant_p(n) ? ((n) > 20000 ? __bad_udelay() \
+				: __udelay(n)) : __udelay(n))
+
+#endif /* __ASM_ARC_UDELAY_H */
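To make the approximation above concrete with made-up numbers: assume HZ = 100 and loops_per_jiffy = 50000, i.e. 5 delay loops per microsecond, so __udelay(10) should spin for 50 loops. The scaled computation gives (10 * 4295 * 100 * 50000) >> 32 = 214,750,000,000 / 2^32, which truncates to 50, so replacing the divide by 10^6 with a multiply by 4295 and a 32-bit right shift costs essentially no accuracy while avoiding a runtime division.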

+ 116 - 0
arch/arc/include/asm/disasm.h

@@ -0,0 +1,116 @@
+/*
+ * several functions that help interpret ARC instructions
+ * used for unaligned accesses, kprobes and kgdb
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_DISASM_H__
+#define __ARC_DISASM_H__
+
+enum {
+	op_Bcc = 0, op_BLcc = 1, op_LD = 2, op_ST = 3, op_MAJOR_4 = 4,
+	op_MAJOR_5 = 5, op_LD_ADD = 12, op_ADD_SUB_SHIFT = 13,
+	op_ADD_MOV_CMP = 14, op_S = 15, op_LD_S = 16, op_LDB_S = 17,
+	op_LDW_S = 18, op_LDWX_S = 19, op_ST_S = 20, op_STB_S = 21,
+	op_STW_S = 22, op_Su5 = 23, op_SP = 24, op_GP = 25,
+	op_Pcl = 26, op_MOV_S = 27, op_ADD_CMP = 28, op_BR_S = 29,
+	op_B_S = 30, op_BL_S = 31
+};
+
+enum flow {
+	noflow,
+	direct_jump,
+	direct_call,
+	indirect_jump,
+	indirect_call,
+	invalid_instr
+};
+
+#define IS_BIT(word, n)		((word) & (1<<n))
+#define BITS(word, s, e)	(((word) >> (s)) & (~((-2) << ((e) - (s)))))
+
+#define MAJOR_OPCODE(word)	(BITS((word), 27, 31))
+#define MINOR_OPCODE(word)	(BITS((word), 16, 21))
+#define FIELD_A(word)		(BITS((word), 0, 5))
+#define FIELD_B(word)		((BITS((word), 12, 14)<<3) | \
+				(BITS((word), 24, 26)))
+#define FIELD_C(word)		(BITS((word), 6, 11))
+#define FIELD_u6(word)		FIELD_C(word)
+#define FIELD_s12(word)		sign_extend(((BITS((word), 0, 5) << 6) | \
+					BITS((word), 6, 11)), 12)
+
+/* note that for BL/BRcc these two macro's need another AND statement to mask
+ * out bit 1 (make the result a multiple of 4) */
+#define FIELD_s9(word)		sign_extend(((BITS(word, 15, 15) << 8) | \
+					BITS(word, 16, 23)), 9)
+#define FIELD_s21(word)		sign_extend(((BITS(word, 6, 15) << 11) | \
+					(BITS(word, 17, 26) << 1)), 12)
+#define FIELD_s25(word)		sign_extend(((BITS(word, 0, 3) << 21) | \
+					(BITS(word, 6, 15) << 11) | \
+					(BITS(word, 17, 26) << 1)), 12)
+
+/* note: these operate on 16 bits! */
+#define FIELD_S_A(word)		((BITS((word), 2, 2)<<3) | BITS((word), 0, 2))
+#define FIELD_S_B(word)		((BITS((word), 10, 10)<<3) | \
+				BITS((word), 8, 10))
+#define FIELD_S_C(word)		((BITS((word), 7, 7)<<3) | BITS((word), 5, 7))
+#define FIELD_S_H(word)		((BITS((word), 0, 2)<<3) | BITS((word), 5, 8))
+#define FIELD_S_u5(word)	(BITS((word), 0, 4))
+#define FIELD_S_u6(word)	(BITS((word), 0, 4) << 1)
+#define FIELD_S_u7(word)	(BITS((word), 0, 4) << 2)
+#define FIELD_S_u10(word)	(BITS((word), 0, 7) << 2)
+#define FIELD_S_s7(word)	sign_extend(BITS((word), 0, 5) << 1, 9)
+#define FIELD_S_s8(word)	sign_extend(BITS((word), 0, 7) << 1, 9)
+#define FIELD_S_s9(word)	sign_extend(BITS((word), 0, 8), 9)
+#define FIELD_S_s10(word)	sign_extend(BITS((word), 0, 8) << 1, 10)
+#define FIELD_S_s11(word)	sign_extend(BITS((word), 0, 8) << 2, 11)
+#define FIELD_S_s13(word)	sign_extend(BITS((word), 0, 10) << 2, 13)
+
+#define STATUS32_L		0x00000100
+#define REG_LIMM		62
+
+struct disasm_state {
+	/* generic info */
+	unsigned long words[2];
+	int instr_len;
+	int major_opcode;
+	/* info for branch/jump */
+	int is_branch;
+	int target;
+	int delay_slot;
+	enum flow flow;
+	/* info for load/store */
+	int src1, src2, src3, dest, wb_reg;
+	int zz, aa, x, pref, di;
+	int fault, write;
+};
+
+static inline int sign_extend(int value, int bits)
+{
+	if (IS_BIT(value, (bits - 1)))
+		value |= (0xffffffff << bits);
+
+	return value;
+}
+
+static inline int is_short_instr(unsigned long addr)
+{
+	uint16_t word = *((uint16_t *)addr);
+	int opcode = (word >> 11) & 0x1F;
+	return (opcode >= 0x0B);
+}
+
+void disasm_instr(unsigned long addr, struct disasm_state *state,
+	int userspace, struct pt_regs *regs, struct callee_regs *cregs);
+int disasm_next_pc(unsigned long pc, struct pt_regs *regs, struct callee_regs
+	*cregs, unsigned long *fall_thru, unsigned long *target);
+long get_reg(int reg, struct pt_regs *regs, struct callee_regs *cregs);
+void set_reg(int reg, long val, struct pt_regs *regs,
+		struct callee_regs *cregs);
+
+#endif	/* __ARC_DISASM_H__ */
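A quick illustration of the field extractors above (a sketch with an arbitrary instruction word, not part of this file): BITS() pulls an inclusive bit range out of a word and sign_extend() widens it to a signed value:

    static int example_field(unsigned long word)
    {
        /* bits [11:0] of 'word'; e.g. 0x800 when word == 0x800 */
        int raw = BITS(word, 0, 11);

        /* interpret those 12 bits as signed: 0x800 -> -2048 */
        return sign_extend(raw, 12);
    }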

+ 221 - 0
arch/arc/include/asm/dma-mapping.h

@@ -0,0 +1,221 @@
+/*
+ * DMA Mapping glue for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_ARC_DMA_MAPPING_H
+#define ASM_ARC_DMA_MAPPING_H
+
+#include <asm-generic/dma-coherent.h>
+#include <asm/cacheflush.h>
+
+#ifndef CONFIG_ARC_PLAT_NEEDS_CPU_TO_DMA
+/*
+ * dma_map_* API take cpu addresses, which is kernel logical address in the
+ * untranslated address space (0x8000_0000) based. The dma address (bus addr)
+ * ideally needs to be 0x0000_0000 based hence these glue routines.
+ * However given that intermediate bus bridges can ignore the high bit, we can
+ * do with these routines being no-ops.
+ * If a platform/device comes up which strictly requires a 0-based bus addr
+ * (e.g. AHB-PCI bridge on Angel4 board), then it can provide its own versions
+ */
+#define plat_dma_addr_to_kernel(dev, addr) ((unsigned long)(addr))
+#define plat_kernel_addr_to_dma(dev, ptr) ((dma_addr_t)(ptr))
+
+#else
+#include <plat/dma_addr.h>
+#endif
+
+void *dma_alloc_noncoherent(struct device *dev, size_t size,
+			    dma_addr_t *dma_handle, gfp_t gfp);
+
+void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+			  dma_addr_t dma_handle);
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+			 dma_addr_t *dma_handle, gfp_t gfp);
+
+void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
+		       dma_addr_t dma_handle);
+
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+				  void *cpu_addr, dma_addr_t dma_addr,
+				  size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
+/*
+ * streaming DMA Mapping API...
+ * CPU accesses page via normal paddr, thus needs to explicitly made
+ * consistent before each use
+ */
+
+static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size,
+					   enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_FROM_DEVICE:
+		dma_cache_inv(paddr, size);
+		break;
+	case DMA_TO_DEVICE:
+		dma_cache_wback(paddr, size);
+		break;
+	case DMA_BIDIRECTIONAL:
+		dma_cache_wback_inv(paddr, size);
+		break;
+	default:
+		pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
+	}
+}
+
+void __arc_dma_cache_sync(unsigned long paddr, size_t size,
+			  enum dma_data_direction dir);
+
+#define _dma_cache_sync(addr, sz, dir)			\
+do {							\
+	if (__builtin_constant_p(dir))			\
+		__inline_dma_cache_sync(addr, sz, dir);	\
+	else						\
+		__arc_dma_cache_sync(addr, sz, dir);	\
+}							\
+while (0);
+
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+	       enum dma_data_direction dir)
+{
+	_dma_cache_sync((unsigned long)cpu_addr, size, dir);
+	return plat_kernel_addr_to_dma(dev, cpu_addr);
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+		 size_t size, enum dma_data_direction dir)
+{
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size,
+	     enum dma_data_direction dir)
+{
+	unsigned long paddr = page_to_phys(page) + offset;
+	return dma_map_single(dev, (void *)paddr, size, dir);
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+	       size_t size, enum dma_data_direction dir)
+{
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg,
+	   int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i)
+		sg->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+					       s->length, dir);
+
+	return nents;
+}
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+	     int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i)
+		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+			size_t size, enum dma_data_direction dir)
+{
+	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
+			DMA_FROM_DEVICE);
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+			   size_t size, enum dma_data_direction dir)
+{
+	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
+			DMA_TO_DEVICE);
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+			      unsigned long offset, size_t size,
+			      enum dma_data_direction direction)
+{
+	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
+			size, DMA_FROM_DEVICE);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+				 unsigned long offset, size_t size,
+				 enum dma_data_direction direction)
+{
+	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
+			size, DMA_TO_DEVICE);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+		    enum dma_data_direction dir)
+{
+	int i;
+
+	for (i = 0; i < nelems; i++, sg++)
+		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+		       enum dma_data_direction dir)
+{
+	int i;
+
+	for (i = 0; i < nelems; i++, sg++)
+		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static inline int dma_supported(struct device *dev, u64 dma_mask)
+{
+	/* Support 32 bit DMA mask exclusively */
+	return dma_mask == DMA_BIT_MASK(32);
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return 0;
+}
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+		return -EIO;
+
+	*dev->dma_mask = dma_mask;
+
+	return 0;
+}
+
+#endif
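
For orientation, a minimal usage sketch of the streaming DMA API declared above -- hypothetical driver code, not part of this commit; names such as xmit_buffer are made up:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Map a CPU buffer for a device read, then tear the mapping down again. */
static int xmit_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Write back the CPU cache so the device sees up-to-date data */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -EIO;

	/* ... program the device with "handle" and start the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}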

+ 14 - 0
arch/arc/include/asm/dma.h

@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_ARC_DMA_H
+#define ASM_ARC_DMA_H
+
+#define MAX_DMA_ADDRESS 0xC0000000
+
+#endif

+ 78 - 0
arch/arc/include/asm/elf.h

@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_ELF_H
+#define __ASM_ARC_ELF_H
+
+#include <linux/types.h>
+#include <uapi/asm/elf.h>
+
+/* These ELF defines belong to uapi but libc elf.h already defines them */
+#define EM_ARCOMPACT		93
+
+/* ARC Relocations (kernel Modules only) */
+#define  R_ARC_32		0x4
+#define  R_ARC_32_ME		0x1B
+#define  R_ARC_S25H_PCREL	0x10
+#define  R_ARC_S25W_PCREL	0x11
+
+/*to set parameters in the core dumps */
+#define ELF_ARCH		EM_ARCOMPACT
+#define ELF_CLASS		ELFCLASS32
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define ELF_DATA		ELFDATA2MSB
+#else
+#define ELF_DATA		ELFDATA2LSB
+#endif
+
+/*
+ * To ensure that
+ *  -we don't load something for the wrong architecture.
+ *  -The userspace is using the correct syscall ABI
+ */
+struct elf32_hdr;
+extern int elf_check_arch(const struct elf32_hdr *);
+#define elf_check_arch	elf_check_arch
+
+#define CORE_DUMP_USE_REGSET
+
+#define ELF_EXEC_PAGESIZE	PAGE_SIZE
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader.  We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE		(2 * TASK_SIZE / 3)
+
+/*
+ * When the program starts, a1 contains a pointer to a function to be
+ * registered with atexit, as per the SVR4 ABI.  A value of 0 means we
+ * have no such handler.
+ */
+#define ELF_PLAT_INIT(_r, load_addr)	((_r)->r0 = 0)
+
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this cpu supports.
+ */
+#define ELF_HWCAP	(0)
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization.  This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
+#define ELF_PLATFORM	(NULL)
+
+#define SET_PERSONALITY(ex) \
+	set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
+
+#endif

+ 724 - 0
arch/arc/include/asm/entry.h

@@ -0,0 +1,724 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
+ *  Stack switching code can no longer reliably rely on the fact that
+ *  if we are NOT in user mode, stack is switched to kernel mode.
+ *  e.g. L2 IRQ interrupted a L1 ISR which had not yet completed
+ *  its prologue, including stack switching from user mode
+ *
+ * Vineetg: Aug 28th 2008: Bug #94984
+ *  -Zero Overhead Loop Context should be cleared when entering IRQ/Excp/Trap
+ *   Normally CPU does this automatically, however when doing FAKE rtie,
+ *   we also need to explicitly do this. The problem in macros
+ *   FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
+ *   was being "CLEARED" rather than "SET". Actually "SET" clears ZOL context
+ *
+ * Vineetg: May 5th 2008
+ *  -Modified CALLEE_REG save/restore macros to handle the fact that
+ *      r25 contains the kernel current task ptr
+ *  - Defined Stack Switching Macro to be reused in all intr/excp hdlrs
+ *  - Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
+ *      address Write back load ld.ab instead of separate ld/add instn
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_ENTRY_H
+#define __ASM_ARC_ENTRY_H
+
+#ifdef __ASSEMBLY__
+#include <asm/unistd.h>		/* For NR_syscalls definition */
+#include <asm/asm-offsets.h>
+#include <asm/arcregs.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>	/* For VMALLOC_START */
+#include <asm/thread_info.h>	/* For THREAD_SIZE */
+
+/* Note on the LD/ST addr modes with addr reg wback
+ *
+ * LD.a same as LD.aw
+ *
+ * LD.a    reg1, [reg2, x]  => Pre Incr
+ *      Eff Addr for load = [reg2 + x]
+ *
+ * LD.ab   reg1, [reg2, x]  => Post Incr
+ *      Eff Addr for load = [reg2]
+ */
+
+/*--------------------------------------------------------------
+ * Save caller saved registers (scratch registers) ( r0 - r12 )
+ * Registers are pushed / popped in the order defined in struct ptregs
+ * in asm/ptrace.h
+ *-------------------------------------------------------------*/
+.macro  SAVE_CALLER_SAVED
+	st.a    r0, [sp, -4]
+	st.a    r1, [sp, -4]
+	st.a    r2, [sp, -4]
+	st.a    r3, [sp, -4]
+	st.a    r4, [sp, -4]
+	st.a    r5, [sp, -4]
+	st.a    r6, [sp, -4]
+	st.a    r7, [sp, -4]
+	st.a    r8, [sp, -4]
+	st.a    r9, [sp, -4]
+	st.a    r10, [sp, -4]
+	st.a    r11, [sp, -4]
+	st.a    r12, [sp, -4]
+.endm
+
+/*--------------------------------------------------------------
+ * Restore caller saved registers (scratch registers)
+ *-------------------------------------------------------------*/
+.macro RESTORE_CALLER_SAVED
+	ld.ab   r12, [sp, 4]
+	ld.ab   r11, [sp, 4]
+	ld.ab   r10, [sp, 4]
+	ld.ab   r9, [sp, 4]
+	ld.ab   r8, [sp, 4]
+	ld.ab   r7, [sp, 4]
+	ld.ab   r6, [sp, 4]
+	ld.ab   r5, [sp, 4]
+	ld.ab   r4, [sp, 4]
+	ld.ab   r3, [sp, 4]
+	ld.ab   r2, [sp, 4]
+	ld.ab   r1, [sp, 4]
+	ld.ab   r0, [sp, 4]
+.endm
+
+
+/*--------------------------------------------------------------
+ * Save callee saved registers (non scratch registers) ( r13 - r25 )
+ *  on kernel stack.
+ * User mode callee regs need to be saved in case of
+ *    -fork and friends for replicating from parent to child
+ *    -before going into do_signal( ) for ptrace/core-dump
+ * Special case handling is required for r25 in case it is used by kernel
+ *  for caching task ptr. Low level exception/ISR code saves user mode r25
+ *  into task->thread.user_r25. So it needs to be retrieved from there and
+ *  saved onto the kernel stack with the rest of the callee reg-file
+ *-------------------------------------------------------------*/
+.macro SAVE_CALLEE_SAVED_USER
+	st.a    r13, [sp, -4]
+	st.a    r14, [sp, -4]
+	st.a    r15, [sp, -4]
+	st.a    r16, [sp, -4]
+	st.a    r17, [sp, -4]
+	st.a    r18, [sp, -4]
+	st.a    r19, [sp, -4]
+	st.a    r20, [sp, -4]
+	st.a    r21, [sp, -4]
+	st.a    r22, [sp, -4]
+	st.a    r23, [sp, -4]
+	st.a    r24, [sp, -4]
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+	; Retrieve orig r25 and save it on stack
+	ld      r12, [r25, TASK_THREAD + THREAD_USER_R25]
+	st.a    r12, [sp, -4]
+#else
+	st.a    r25, [sp, -4]
+#endif
+
+	/* move up by 1 word to "create" callee_regs->"stack_place_holder" */
+	sub sp, sp, 4
+.endm
+
+/*--------------------------------------------------------------
+ * Save callee saved registers (non scratch registers) ( r13 - r25 )
+ * kernel mode callee regs needed to be saved in case of context switch
+ * If r25 is used for caching task pointer then that need not be saved
+ * as it can be re-created from current task global
+ *-------------------------------------------------------------*/
+.macro SAVE_CALLEE_SAVED_KERNEL
+	st.a    r13, [sp, -4]
+	st.a    r14, [sp, -4]
+	st.a    r15, [sp, -4]
+	st.a    r16, [sp, -4]
+	st.a    r17, [sp, -4]
+	st.a    r18, [sp, -4]
+	st.a    r19, [sp, -4]
+	st.a    r20, [sp, -4]
+	st.a    r21, [sp, -4]
+	st.a    r22, [sp, -4]
+	st.a    r23, [sp, -4]
+	st.a    r24, [sp, -4]
+#ifdef CONFIG_ARC_CURR_IN_REG
+	sub     sp, sp, 8
+#else
+	st.a    r25, [sp, -4]
+	sub     sp, sp, 4
+#endif
+.endm
+
+/*--------------------------------------------------------------
+ * RESTORE_CALLEE_SAVED_KERNEL:
+ * Loads callee (non scratch) Reg File by popping from Kernel mode stack.
+ *  This is reverse of SAVE_CALLEE_SAVED,
+ *
+ * NOTE:
+ * Ideally this should only be called in switch_to for loading
+ *  switched-IN task's CALLEE Reg File.
+ *  For all other cases RESTORE_CALLEE_SAVED_FAST must be used
+ *  which simply pops the stack w/o touching regs.
+ *-------------------------------------------------------------*/
+.macro RESTORE_CALLEE_SAVED_KERNEL
+
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+	add     sp, sp, 8  /* skip callee_reg gutter and user r25 placeholder */
+#else
+	add     sp, sp, 4   /* skip "callee_regs->stack_place_holder" */
+	ld.ab   r25, [sp, 4]
+#endif
+
+	ld.ab   r24, [sp, 4]
+	ld.ab   r23, [sp, 4]
+	ld.ab   r22, [sp, 4]
+	ld.ab   r21, [sp, 4]
+	ld.ab   r20, [sp, 4]
+	ld.ab   r19, [sp, 4]
+	ld.ab   r18, [sp, 4]
+	ld.ab   r17, [sp, 4]
+	ld.ab   r16, [sp, 4]
+	ld.ab   r15, [sp, 4]
+	ld.ab   r14, [sp, 4]
+	ld.ab   r13, [sp, 4]
+
+.endm
+
+/*--------------------------------------------------------------
+ * RESTORE_CALLEE_SAVED_USER:
+ * This is called after do_signal where tracer might have changed callee regs
+ * thus we need to restore the reg file.
+ * Special case handling is required for r25 in case it is used by kernel
+ *  for caching task ptr. Ptrace would have modified on-kernel-stack value of
+ *  r25, which needs to be shoved back into task->thread.user_r25, from where
+ *  the low level exception/ISR return code will retrieve it while populating
+ *  the rest of the callee reg-file.
+ *-------------------------------------------------------------*/
+.macro RESTORE_CALLEE_SAVED_USER
+
+	add     sp, sp, 4   /* skip "callee_regs->stack_place_holder" */
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+	ld.ab   r12, [sp, 4]
+	st      r12, [r25, TASK_THREAD + THREAD_USER_R25]
+#else
+	ld.ab   r25, [sp, 4]
+#endif
+
+	ld.ab   r24, [sp, 4]
+	ld.ab   r23, [sp, 4]
+	ld.ab   r22, [sp, 4]
+	ld.ab   r21, [sp, 4]
+	ld.ab   r20, [sp, 4]
+	ld.ab   r19, [sp, 4]
+	ld.ab   r18, [sp, 4]
+	ld.ab   r17, [sp, 4]
+	ld.ab   r16, [sp, 4]
+	ld.ab   r15, [sp, 4]
+	ld.ab   r14, [sp, 4]
+	ld.ab   r13, [sp, 4]
+.endm
+
+/*--------------------------------------------------------------
+ * Super FAST Restore callee saved regs by simply re-adjusting SP
+ *-------------------------------------------------------------*/
+.macro DISCARD_CALLEE_SAVED_USER
+	add     sp, sp, 14 * 4
+.endm
+
+/*--------------------------------------------------------------
+ * Restore User mode r25 saved in task_struct->thread.user_r25
+ *-------------------------------------------------------------*/
+.macro RESTORE_USER_R25
+	ld  r25, [r25, TASK_THREAD + THREAD_USER_R25]
+.endm
+
+/*-------------------------------------------------------------
+ * given a tsk struct, get to the base of its kernel mode stack
+ * tsk->thread_info is really a PAGE, whose bottom hoists stack
+ * which grows upwards towards thread_info
+ *------------------------------------------------------------*/
+
+.macro GET_TSK_STACK_BASE tsk, out
+
+	/* Get task->thread_info (this is essentially start of a PAGE) */
+	ld  \out, [\tsk, TASK_THREAD_INFO]
+
+	/* Go to end of page where stack begins (grows upwards) */
+	add2 \out, \out, (THREAD_SIZE - 4)/4   /* one word GUTTER */
+
+.endm
+
+/*--------------------------------------------------------------
+ * Switch to Kernel Mode stack if SP points to User Mode stack
+ *
+ * Entry   : r9 contains pre-IRQ/exception/trap status32
+ * Exit    : SP is set to kernel mode stack pointer
+ *           If CURR_IN_REG, r25 set to "current" task pointer
+ * Clobbers: r9
+ *-------------------------------------------------------------*/
+
+.macro SWITCH_TO_KERNEL_STK
+
+	/* User Mode when this happened ? Yes: Proceed to switch stack */
+	bbit1   r9, STATUS_U_BIT, 88f
+
+	/* OK we were already in kernel mode when this event happened, thus can
+	 * assume SP is kernel mode SP. _NO_ need to do any stack switching
+	 */
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+	/* However....
+	 * If Level 2 Interrupts enabled, we may end up with a corner case:
+	 * 1. User Task executing
+	 * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
+	 * 3. But before it could switch SP from USER to KERNEL stack
+	 *      a L2 IRQ "Interrupts" L1
+	 * That way although L2 IRQ happened in Kernel mode, stack is still
+	 * not switched.
+	 * To handle this, we may need to switch stack even if in kernel mode
+	 * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
+	 */
+	brlo sp, VMALLOC_START, 88f
+
+	/* TODO: vineetg:
+	 * We need to be a bit more cautious here. What if a kernel bug in
+	 * L1 ISR, caused SP to go whaco (some small value which looks like
+	 * USER stk) and then we take L2 ISR.
+	 * Above brlo alone would treat it as a valid L1-L2 scenario
+	 * instead of shouting aloud.
+	 * The only feasible way is to make sure this L2 happened in
+	 * L1 prologue ONLY i.e. ilink2 is less than a pre-set marker in
+	 * L1 ISR before it switches stack
+	 */
+
+#endif
+
+	/* Save Pre Intr/Exception KERNEL MODE SP on kernel stack
+	 * safe-keeping not really needed, but it keeps the epilogue code
+	 * (SP restore) simpler/uniform.
+	 */
+	b.d	77f
+
+	st.a	sp, [sp, -12]	; Make room for orig_r0 and orig_r8
+
+88: /*------Intr/Excp happened in user mode, "switch" stack ------ */
+
+	GET_CURR_TASK_ON_CPU   r9
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+	/* If current task pointer cached in r25, time to
+	 *  -safekeep USER r25 in task->thread_struct->user_r25
+	 *  -load r25 with current task ptr
+	 */
+	st.as	r25, [r9, (TASK_THREAD + THREAD_USER_R25)/4]
+	mov	r25, r9
+#endif
+
+	/* With current tsk in r9, get it's kernel mode stack base */
+	GET_TSK_STACK_BASE  r9, r9
+
+#ifdef PT_REGS_CANARY
+	st	0xabcdabcd, [r9, 0]
+#endif
+
+	/* Save Pre Intr/Exception User SP on kernel stack */
+	st.a    sp, [r9, -12]	; Make room for orig_r0 and orig_r8
+
+	/* CAUTION:
+	 * SP should be set at the very end when we are done with everything
+	 * In case of 2 levels of interrupt we depend on value of SP to assume
+	 * that everything else is done (loading r25 etc)
+	 */
+
+	/* set SP to point to kernel mode stack */
+	mov sp, r9
+
+77: /* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */
+
+.endm
+
+/*------------------------------------------------------------
+ * "FAKE" a rtie to return from CPU Exception context
+ * This is to re-enable Exceptions within exception
+ * Look at EV_ProtV to see how this is actually used
+ *-------------------------------------------------------------*/
+
+.macro FAKE_RET_FROM_EXCPN  reg
+
+	ld  \reg, [sp, PT_status32]
+	bic  \reg, \reg, (STATUS_U_MASK|STATUS_DE_MASK)
+	bset \reg, \reg, STATUS_L_BIT
+	sr  \reg, [erstatus]
+	mov \reg, 55f
+	sr  \reg, [eret]
+
+	rtie
+55:
+.endm
+
+/*
+ * @reg [OUT] &thread_info of "current"
+ */
+.macro GET_CURR_THR_INFO_FROM_SP  reg
+	and \reg, sp, ~(THREAD_SIZE - 1)
+.endm
+
+/*
+ * @reg [OUT] thread_info->flags of "current"
+ */
+.macro GET_CURR_THR_INFO_FLAGS  reg
+	GET_CURR_THR_INFO_FROM_SP  \reg
+	ld  \reg, [\reg, THREAD_INFO_FLAGS]
+.endm
+
+/*--------------------------------------------------------------
+ * For early Exception Prologue, a core reg is temporarily needed to
+ * code the rest of prolog (stack switching). This is done by stashing
+ * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
+ *
+ * Before saving the full regfile - this reg is restored back, only
+ * to be saved again on kernel mode stack, as part of ptregs.
+ *-------------------------------------------------------------*/
+.macro EXCPN_PROLOG_FREEUP_REG	reg
+#ifdef CONFIG_SMP
+	sr  \reg, [ARC_REG_SCRATCH_DATA0]
+#else
+	st  \reg, [@ex_saved_reg1]
+#endif
+.endm
+
+.macro EXCPN_PROLOG_RESTORE_REG	reg
+#ifdef CONFIG_SMP
+	lr  \reg, [ARC_REG_SCRATCH_DATA0]
+#else
+	ld  \reg, [@ex_saved_reg1]
+#endif
+.endm
+
+/*--------------------------------------------------------------
+ * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
+ * Requires SP to be already switched to kernel mode Stack
+ * sp points to the next free element on the stack at exit of this macro.
+ * Registers are pushed / popped in the order defined in struct ptregs
+ * in asm/ptrace.h
+ * Note that syscalls are implemented via TRAP which is also an exception
+ * from CPU's point of view
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_EXCEPTION   marker
+
+	st      \marker, [sp, 8]
+	st      r0, [sp, 4]    /* orig_r0, needed only for sys calls */
+
+	/* Restore r9 used to code the early prologue */
+	EXCPN_PROLOG_RESTORE_REG  r9
+
+	SAVE_CALLER_SAVED
+	st.a    r26, [sp, -4]   /* gp */
+	st.a    fp, [sp, -4]
+	st.a    blink, [sp, -4]
+	lr	r9, [eret]
+	st.a    r9, [sp, -4]
+	lr	r9, [erstatus]
+	st.a    r9, [sp, -4]
+	st.a    lp_count, [sp, -4]
+	lr	r9, [lp_end]
+	st.a    r9, [sp, -4]
+	lr	r9, [lp_start]
+	st.a    r9, [sp, -4]
+	lr	r9, [erbta]
+	st.a    r9, [sp, -4]
+
+#ifdef PT_REGS_CANARY
+	mov   r9, 0xdeadbeef
+	st    r9, [sp, -4]
+#endif
+
+	/* move up by 1 word to "create" pt_regs->"stack_place_holder" */
+	sub sp, sp, 4
+.endm
+
+/*--------------------------------------------------------------
+ * Save scratch regs for exceptions
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_SYS
+	SAVE_ALL_EXCEPTION  orig_r8_IS_EXCPN
+.endm
+
+/*--------------------------------------------------------------
+ * Save scratch regs for sys calls
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_TRAP
+	/*
+	 * Setup pt_regs->orig_r8.
+	 * Encode syscall number (r8) in upper short word of event type (r9)
+	 * N.B. #1: This is already endian safe (see ptrace.h)
+	 *      #2: Only r9 can be used as scratch as it is already clobbered
+	 *          and its contents are no longer needed by the latter part
+	 *          of exception prologue
+	 */
+	lsl  r9, r8, 16
+	or   r9, r9, orig_r8_IS_SCALL
+
+	SAVE_ALL_EXCEPTION  r9
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by system call or Exceptions
+ * SP should always be pointing to the next free stack element
+ * when entering this macro.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used in that way interrupts are deferred
+ * by hardware and that is not good.
+ *-------------------------------------------------------------*/
+.macro RESTORE_ALL_SYS
+
+	add sp, sp, 4       /* hop over unused "pt_regs->stack_place_holder" */
+
+	ld.ab   r9, [sp, 4]
+	sr	r9, [erbta]
+	ld.ab   r9, [sp, 4]
+	sr	r9, [lp_start]
+	ld.ab   r9, [sp, 4]
+	sr	r9, [lp_end]
+	ld.ab   r9, [sp, 4]
+	mov	lp_count, r9
+	ld.ab   r9, [sp, 4]
+	sr	r9, [erstatus]
+	ld.ab   r9, [sp, 4]
+	sr	r9, [eret]
+	ld.ab   blink, [sp, 4]
+	ld.ab   fp, [sp, 4]
+	ld.ab   r26, [sp, 4]    /* gp */
+	RESTORE_CALLER_SAVED
+
+	ld  sp, [sp] /* restore original sp */
+	/* orig_r0 and orig_r8 skipped automatically */
+.endm
+
+
+/*--------------------------------------------------------------
+ * Save all registers used by interrupt handlers.
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_INT1
+
+	/* restore original r9 , saved in int1_saved_reg
+	* It will be saved on stack in macro: SAVE_CALLER_SAVED
+	*/
+#ifdef CONFIG_SMP
+	lr  r9, [ARC_REG_SCRATCH_DATA0]
+#else
+	ld  r9, [@int1_saved_reg]
+#endif
+
+	/* now we are ready to save the remaining context :) */
+	st      orig_r8_IS_IRQ1, [sp, 8]    /* Event Type */
+	st      0, [sp, 4]    /* orig_r0 , N/A for IRQ */
+	SAVE_CALLER_SAVED
+	st.a    r26, [sp, -4]   /* gp */
+	st.a    fp, [sp, -4]
+	st.a    blink, [sp, -4]
+	st.a    ilink1, [sp, -4]
+	lr	r9, [status32_l1]
+	st.a    r9, [sp, -4]
+	st.a    lp_count, [sp, -4]
+	lr	r9, [lp_end]
+	st.a    r9, [sp, -4]
+	lr	r9, [lp_start]
+	st.a    r9, [sp, -4]
+	lr	r9, [bta_l1]
+	st.a    r9, [sp, -4]
+
+#ifdef PT_REGS_CANARY
+	mov   r9, 0xdeadbee1
+	st    r9, [sp, -4]
+#endif
+	/* move up by 1 word to "create" pt_regs->"stack_place_holder" */
+	sub sp, sp, 4
+.endm
+
+.macro SAVE_ALL_INT2
+
+	/* TODO-vineetg: SMP we can't use global nor can we use
+	*   SCRATCH0 as we do for int1 because while int1 is using
+	*   it, int2 can come
+	*/
+	/* restore original r9, saved in sys_saved_r9 */
+	ld  r9, [@int2_saved_reg]
+
+	/* now we are ready to save the remaining context :) */
+	st      orig_r8_IS_IRQ2, [sp, 8]    /* Event Type */
+	st      0, [sp, 4]    /* orig_r0 , N/A for IRQ */
+	SAVE_CALLER_SAVED
+	st.a    r26, [sp, -4]   /* gp */
+	st.a    fp, [sp, -4]
+	st.a    blink, [sp, -4]
+	st.a    ilink2, [sp, -4]
+	lr	r9, [status32_l2]
+	st.a    r9, [sp, -4]
+	st.a    lp_count, [sp, -4]
+	lr	r9, [lp_end]
+	st.a    r9, [sp, -4]
+	lr	r9, [lp_start]
+	st.a    r9, [sp, -4]
+	lr	r9, [bta_l2]
+	st.a    r9, [sp, -4]
+
+#ifdef PT_REGS_CANARY
+	mov   r9, 0xdeadbee2
+	st    r9, [sp, -4]
+#endif
+
+	/* move up by 1 word to "create" pt_regs->"stack_place_holder" */
+	sub sp, sp, 4
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by interrupt handlers.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used in that way interrupts are deferred
+ * by hardware and that is not good.
+ *-------------------------------------------------------------*/
+
+.macro RESTORE_ALL_INT1
+	add sp, sp, 4       /* hop over unused "pt_regs->stack_place_holder" */
+
+	ld.ab   r9, [sp, 4] /* Actual reg file */
+	sr	r9, [bta_l1]
+	ld.ab   r9, [sp, 4]
+	sr	r9, [lp_start]
+	ld.ab   r9, [sp, 4]
+	sr	r9, [lp_end]
+	ld.ab   r9, [sp, 4]
+	mov	lp_count, r9
+	ld.ab   r9, [sp, 4]
+	sr	r9, [status32_l1]
+	ld.ab   r9, [sp, 4]
+	mov	ilink1, r9
+	ld.ab   blink, [sp, 4]
+	ld.ab   fp, [sp, 4]
+	ld.ab   r26, [sp, 4]    /* gp */
+	RESTORE_CALLER_SAVED
+
+	ld  sp, [sp] /* restore original sp */
+	/* orig_r0 and orig_r8 skipped automatically */
+.endm
+
+.macro RESTORE_ALL_INT2
+	add sp, sp, 4       /* hop over unused "pt_regs->stack_place_holder" */
+
+	ld.ab   r9, [sp, 4]
+	sr	r9, [bta_l2]
+	ld.ab   r9, [sp, 4]
+	sr	r9, [lp_start]
+	ld.ab   r9, [sp, 4]
+	sr	r9, [lp_end]
+	ld.ab   r9, [sp, 4]
+	mov	lp_count, r9
+	ld.ab   r9, [sp, 4]
+	sr	r9, [status32_l2]
+	ld.ab   r9, [sp, 4]
+	mov	ilink2, r9
+	ld.ab   blink, [sp, 4]
+	ld.ab   fp, [sp, 4]
+	ld.ab   r26, [sp, 4]    /* gp */
+	RESTORE_CALLER_SAVED
+
+	ld  sp, [sp] /* restore original sp */
+	/* orig_r0 and orig_r8 skipped automatically */
+
+.endm
+
+
+/* Get CPU-ID of this core */
+.macro  GET_CPU_ID  reg
+	lr  \reg, [identity]
+	lsr \reg, \reg, 8
+	bmsk \reg, \reg, 7
+.endm
+
+#ifdef CONFIG_SMP
+
+/*-------------------------------------------------
+ * Retrieve the current running task on this CPU
+ * 1. Determine curr CPU id.
+ * 2. Use it to index into _current_task[ ]
+ */
+.macro  GET_CURR_TASK_ON_CPU   reg
+	GET_CPU_ID  \reg
+	ld.as  \reg, [@_current_task, \reg]
+.endm
+
+/*-------------------------------------------------
+ * Save a new task as the "current" task on this CPU
+ * 1. Determine curr CPU id.
+ * 2. Use it to index into _current_task[ ]
+ *
+ * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS)
+ * because ST r0, [r1, offset] can ONLY have s9 @offset
+ * while   LD can take s9 (4 byte insn) or LIMM (8 byte insn)
+ */
+
+.macro  SET_CURR_TASK_ON_CPU    tsk, tmp
+	GET_CPU_ID  \tmp
+	add2 \tmp, @_current_task, \tmp
+	st   \tsk, [\tmp]
+#ifdef CONFIG_ARC_CURR_IN_REG
+	mov r25, \tsk
+#endif
+
+.endm
+
+
+#else   /* Uniprocessor implementation of macros */
+
+.macro  GET_CURR_TASK_ON_CPU    reg
+	ld  \reg, [@_current_task]
+.endm
+
+.macro  SET_CURR_TASK_ON_CPU    tsk, tmp
+	st  \tsk, [@_current_task]
+#ifdef CONFIG_ARC_CURR_IN_REG
+	mov r25, \tsk
+#endif
+.endm
+
+#endif /* SMP / UNI */
+
+/* ------------------------------------------------------------------
+ * Get the ptr to some field of Current Task at @off in task struct
+ *  -Uses r25 for Current task ptr if that is enabled
+ */
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+.macro GET_CURR_TASK_FIELD_PTR  off,  reg
+	add \reg, r25, \off
+.endm
+
+#else
+
+.macro GET_CURR_TASK_FIELD_PTR  off,  reg
+	GET_CURR_TASK_ON_CPU  \reg
+	add \reg, \reg, \off
+.endm
+
+#endif	/* CONFIG_ARC_CURR_IN_REG */
+
+#endif  /* __ASSEMBLY__ */
+
+#endif  /* __ASM_ARC_ENTRY_H */

+ 15 - 0
arch/arc/include/asm/exec.h

@@ -0,0 +1,15 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_EXEC_H
+#define __ASM_ARC_EXEC_H
+
+/* Align to 16b */
+#define arch_align_stack(p) ((unsigned long)(p) & ~0xf)
+
+#endif

+ 151 - 0
arch/arc/include/asm/futex.h

@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: August 2010: From Android kernel work
+ */
+
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/preempt.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
+							\
+	__asm__ __volatile__(				\
+	"1:	ld  %1, [%2]			\n"	\
+		insn				"\n"	\
+	"2:	st  %0, [%2]			\n"	\
+	"	mov %0, 0			\n"	\
+	"3:					\n"	\
+	"	.section .fixup,\"ax\"		\n"	\
+	"	.align  4			\n"	\
+	"4:	mov %0, %4			\n"	\
+	"	b   3b				\n"	\
+	"	.previous			\n"	\
+	"	.section __ex_table,\"a\"	\n"	\
+	"	.align  4			\n"	\
+	"	.word   1b, 4b			\n"	\
+	"	.word   2b, 4b			\n"	\
+	"	.previous			\n"	\
+							\
+	: "=&r" (ret), "=&r" (oldval)			\
+	: "r" (uaddr), "r" (oparg), "ir" (-EFAULT)	\
+	: "cc", "memory")
+
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+{
+	int op = (encoded_op >> 28) & 7;
+	int cmp = (encoded_op >> 24) & 15;
+	int oparg = (encoded_op << 8) >> 20;
+	int cmparg = (encoded_op << 20) >> 20;
+	int oldval = 0, ret;
+
+	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+		oparg = 1 << oparg;
+
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+		return -EFAULT;
+
+	pagefault_disable();	/* implies preempt_disable() */
+
+	switch (op) {
+	case FUTEX_OP_SET:
+		__futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ADD:
+		__futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_OR:
+		__futex_atomic_op("or  %0, %1, %3", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ANDN:
+		__futex_atomic_op("bic %0, %1, %3", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_XOR:
+		__futex_atomic_op("xor %0, %1, %3", ret, oldval, uaddr, oparg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	pagefault_enable();	/* subsumes preempt_enable() */
+
+	if (!ret) {
+		switch (cmp) {
+		case FUTEX_OP_CMP_EQ:
+			ret = (oldval == cmparg);
+			break;
+		case FUTEX_OP_CMP_NE:
+			ret = (oldval != cmparg);
+			break;
+		case FUTEX_OP_CMP_LT:
+			ret = (oldval < cmparg);
+			break;
+		case FUTEX_OP_CMP_GE:
+			ret = (oldval >= cmparg);
+			break;
+		case FUTEX_OP_CMP_LE:
+			ret = (oldval <= cmparg);
+			break;
+		case FUTEX_OP_CMP_GT:
+			ret = (oldval > cmparg);
+			break;
+		default:
+			ret = -ENOSYS;
+		}
+	}
+	return ret;
+}
+
+/* Compare-xchg with preemption disabled.
+ *  Notes:
+ *      -Best-Effort: Exchg happens only if compare succeeds.
+ *          If compare fails, returns; leaving retry/looping to upper layers
+ *      -successful cmp-xchg: return orig value in @addr (same as cmp val)
+ *      -Compare fails: return orig value in @addr
+ *      -user access r/w fails: return -EFAULT
+ */
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
+					u32 newval)
+{
+	u32 val;
+
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+		return -EFAULT;
+
+	pagefault_disable();	/* implies preempt_disable() */
+
+	/* TBD : can use llock/scond */
+	__asm__ __volatile__(
+	"1:	ld    %0, [%3]	\n"
+	"	brne  %0, %1, 3f	\n"
+	"2:	st    %2, [%3]	\n"
+	"3:	\n"
+	"	.section .fixup,\"ax\"	\n"
+	"4:	mov %0, %4	\n"
+	"	b   3b	\n"
+	"	.previous	\n"
+	"	.section __ex_table,\"a\"	\n"
+	"	.align  4	\n"
+	"	.word   1b, 4b	\n"
+	"	.word   2b, 4b	\n"
+	"	.previous\n"
+	: "=&r"(val)
+	: "r"(oldval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
+	: "cc", "memory");
+
+	pagefault_enable();	/* subsumes preempt_enable() */
+
+	*uval = val;
+	return val;
+}
+
+#endif
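
As a reference for the shift/mask arithmetic in futex_atomic_op_inuser() above, a standalone userspace decode of one encoded_op word -- an illustration only, with hypothetical packed values:

#include <stdio.h>

int main(void)
{
	/* op code 1 (FUTEX_OP_ADD), cmp code 4, oparg 1, cmparg 0,
	 * packed the way the uapi FUTEX_OP() macro lays them out */
	int encoded_op = (1 << 28) | (4 << 24) | (1 << 12) | 0;

	int op     = (encoded_op >> 28) & 7;	/* 1 */
	int cmp    = (encoded_op >> 24) & 15;	/* 4 */
	int oparg  = (encoded_op << 8) >> 20;	/* 1 */
	int cmparg = (encoded_op << 20) >> 20;	/* 0 */

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}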

+ 105 - 0
arch/arc/include/asm/io.h

@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_IO_H
+#define _ASM_ARC_IO_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+
+#define PCI_IOBASE ((void __iomem *)0)
+
+extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
+extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+				  unsigned long flags);
+extern void iounmap(const void __iomem *addr);
+
+#define ioremap_nocache(phy, sz)	ioremap(phy, sz)
+#define ioremap_wc(phy, sz)		ioremap(phy, sz)
+
+/* Change struct page to physical address */
+#define page_to_phys(page)		(page_to_pfn(page) << PAGE_SHIFT)
+
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+	u8 b;
+
+	__asm__ __volatile__(
+	"	ldb%U1 %0, %1	\n"
+	: "=r" (b)
+	: "m" (*(volatile u8 __force *)addr)
+	: "memory");
+
+	return b;
+}
+
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+	u16 s;
+
+	__asm__ __volatile__(
+	"	ldw%U1 %0, %1	\n"
+	: "=r" (s)
+	: "m" (*(volatile u16 __force *)addr)
+	: "memory");
+
+	return s;
+}
+
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+	u32 w;
+
+	__asm__ __volatile__(
+	"	ld%U1 %0, %1	\n"
+	: "=r" (w)
+	: "m" (*(volatile u32 __force *)addr)
+	: "memory");
+
+	return w;
+}
+
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
+{
+	__asm__ __volatile__(
+	"	stb%U1 %0, %1	\n"
+	:
+	: "r" (b), "m" (*(volatile u8 __force *)addr)
+	: "memory");
+}
+
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 s, volatile void __iomem *addr)
+{
+	__asm__ __volatile__(
+	"	stw%U1 %0, %1	\n"
+	:
+	: "r" (s), "m" (*(volatile u16 __force *)addr)
+	: "memory");
+
+}
+
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 w, volatile void __iomem *addr)
+{
+	__asm__ __volatile__(
+	"	st%U1 %0, %1	\n"
+	:
+	: "r" (w), "m" (*(volatile u32 __force *)addr)
+	: "memory");
+
+}
+
+#include <asm-generic/io.h>
+
+#endif /* _ASM_ARC_IO_H */
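
A usage sketch of the MMIO accessors built on top of these __raw_* helpers (via the asm-generic/io.h include) -- hypothetical driver code with a made-up register layout:

#include <linux/io.h>
#include <linux/errno.h>

#define PERIPH_PHYS	0xf0000000	/* hypothetical peripheral base */
#define CTRL_REG	0x0
#define CTRL_ENABLE	0x1

static void __iomem *regs;

static int periph_enable(void)
{
	regs = ioremap(PERIPH_PHYS, 0x1000);
	if (!regs)
		return -ENOMEM;

	/* read-modify-write the control register through the uncached mapping */
	writel(readl(regs + CTRL_REG) | CTRL_ENABLE, regs + CTRL_REG);
	return 0;
}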

+ 25 - 0
arch/arc/include/asm/irq.h

@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_IRQ_H
+#define __ASM_ARC_IRQ_H
+
+#define NR_IRQS		32
+
+/* Platform Independent IRQs */
+#define TIMER0_IRQ      3
+#define TIMER1_IRQ      4
+
+#include <asm-generic/irq.h>
+
+extern void __init arc_init_IRQ(void);
+extern int __init get_hw_config_num_irq(void);
+
+void __cpuinit arc_local_timer_setup(unsigned int cpu);
+
+#endif

+ 153 - 0
arch/arc/include/asm/irqflags.h

@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_IRQFLAGS_H
+#define __ASM_ARC_IRQFLAGS_H
+
+/* vineetg: March 2010 : local_irq_save( ) optimisation
+ *  -Remove explicit mov of current status32 into reg, that is not needed
+ *  -Use BIC  insn instead of INVERTED + AND
+ *  -Conditionally disable interrupts (if they are not enabled, don't disable)
+*/
+
+#ifdef __KERNEL__
+
+#include <asm/arcregs.h>
+
+#ifndef __ASSEMBLY__
+
+/******************************************************************
+ * IRQ Control Macros
+ ******************************************************************/
+
+/*
+ * Save IRQ state and disable IRQs
+ */
+static inline long arch_local_irq_save(void)
+{
+	unsigned long temp, flags;
+
+	__asm__ __volatile__(
+	"	lr  %1, [status32]	\n"
+	"	bic %0, %1, %2		\n"
+	"	and.f 0, %1, %2	\n"
+	"	flag.nz %0		\n"
+	: "=r"(temp), "=r"(flags)
+	: "n"((STATUS_E1_MASK | STATUS_E2_MASK))
+	: "cc");
+
+	return flags;
+}
+
+/*
+ * restore saved IRQ state
+ */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+
+	__asm__ __volatile__(
+	"	flag %0			\n"
+	:
+	: "r"(flags));
+}
+
+/*
+ * Unconditionally Enable IRQs
+ */
+extern void arch_local_irq_enable(void);
+
+/*
+ * Unconditionally Disable IRQs
+ */
+static inline void arch_local_irq_disable(void)
+{
+	unsigned long temp;
+
+	__asm__ __volatile__(
+	"	lr  %0, [status32]	\n"
+	"	and %0, %0, %1		\n"
+	"	flag %0			\n"
+	: "=&r"(temp)
+	: "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)));
+}
+
+/*
+ * save IRQ state
+ */
+static inline long arch_local_save_flags(void)
+{
+	unsigned long temp;
+
+	__asm__ __volatile__(
+	"	lr  %0, [status32]	\n"
+	: "=&r"(temp));
+
+	return temp;
+}
+
+/*
+ * Query IRQ state
+ */
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+	return !(flags & (STATUS_E1_MASK
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+			| STATUS_E2_MASK
+#endif
+		));
+}
+
+static inline int arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+static inline void arch_mask_irq(unsigned int irq)
+{
+	unsigned int ienb;
+
+	ienb = read_aux_reg(AUX_IENABLE);
+	ienb &= ~(1 << irq);
+	write_aux_reg(AUX_IENABLE, ienb);
+}
+
+static inline void arch_unmask_irq(unsigned int irq)
+{
+	unsigned int ienb;
+
+	ienb = read_aux_reg(AUX_IENABLE);
+	ienb |= (1 << irq);
+	write_aux_reg(AUX_IENABLE, ienb);
+}
+
+#else
+
+.macro IRQ_DISABLE  scratch
+	lr	\scratch, [status32]
+	bic	\scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+	flag	\scratch
+.endm
+
+.macro IRQ_DISABLE_SAVE  scratch, save
+	lr	\scratch, [status32]
+	mov	\save, \scratch		/* Make a copy */
+	bic	\scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+	flag	\scratch
+.endm
+
+.macro IRQ_ENABLE  scratch
+	lr	\scratch, [status32]
+	or	\scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+	flag	\scratch
+.endm
+
+#endif	/* __ASSEMBLY__ */
+
+#endif	/* KERNEL */
+
+#endif
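
The arch_local_irq_*() routines above back the generic local_irq_save()/local_irq_restore() pair; a typical critical-section sketch with hypothetical shared data:

#include <linux/irqflags.h>

static unsigned int event_count;	/* hypothetical state shared with an ISR */

static void bump_event_count(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* clears E1/E2 in STATUS32, saves old state */
	event_count++;
	local_irq_restore(flags);	/* puts the saved enable bits back */
}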

+ 19 - 0
arch/arc/include/asm/kdebug.h

@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_KDEBUG_H
+#define _ASM_ARC_KDEBUG_H
+
+enum die_val {
+	DIE_UNUSED,
+	DIE_TRAP,
+	DIE_IERR,
+	DIE_OOPS
+};
+
+#endif

+ 61 - 0
arch/arc/include/asm/kgdb.h

@@ -0,0 +1,61 @@
+/*
+ * kgdb support for ARC
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_KGDB_H__
+#define __ARC_KGDB_H__
+
+#ifdef CONFIG_KGDB
+
+#include <asm/user.h>
+
+/* to ensure compatibility with Linux 2.6.35, we don't implement the get/set
+ * register API yet */
+#undef DBG_MAX_REG_NUM
+
+#define GDB_MAX_REGS		39
+
+#define BREAK_INSTR_SIZE	2
+#define CACHE_FLUSH_IS_SAFE	1
+#define NUMREGBYTES		(GDB_MAX_REGS * 4)
+#define BUFMAX			2048
+
+static inline void arch_kgdb_breakpoint(void)
+{
+	__asm__ __volatile__ ("trap_s	0x4\n");
+}
+
+extern void kgdb_trap(struct pt_regs *regs, int param);
+
+enum arc700_linux_regnums {
+	_R0		= 0,
+	_R1, _R2, _R3, _R4, _R5, _R6, _R7, _R8, _R9, _R10, _R11, _R12, _R13,
+	_R14, _R15, _R16, _R17, _R18, _R19, _R20, _R21, _R22, _R23, _R24,
+	_R25, _R26,
+	_BTA		= 27,
+	_LP_START	= 28,
+	_LP_END		= 29,
+	_LP_COUNT	= 30,
+	_STATUS32	= 31,
+	_BLINK		= 32,
+	_FP		= 33,
+	__SP		= 34,
+	_EFA		= 35,
+	_RET		= 36,
+	_ORIG_R8	= 37,
+	_STOP_PC	= 38
+};
+
+#else
+static inline void kgdb_trap(struct pt_regs *regs, int param)
+{
+}
+#endif
+
+#endif	/* __ARC_KGDB_H__ */

+ 62 - 0
arch/arc/include/asm/kprobes.h

@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ARC_KPROBES_H
+#define _ARC_KPROBES_H
+
+#ifdef CONFIG_KPROBES
+
+typedef u16 kprobe_opcode_t;
+
+#define UNIMP_S_INSTRUCTION 0x79e0
+#define TRAP_S_2_INSTRUCTION 0x785e
+
+#define MAX_INSN_SIZE   8
+#define MAX_STACK_SIZE  64
+
+struct arch_specific_insn {
+	int is_short;
+	kprobe_opcode_t *t1_addr, *t2_addr;
+	kprobe_opcode_t t1_opcode, t2_opcode;
+};
+
+#define flush_insn_slot(p)  do {  } while (0)
+
+#define kretprobe_blacklist_size    0
+
+struct kprobe;
+
+void arch_remove_kprobe(struct kprobe *p);
+
+int kprobe_exceptions_notify(struct notifier_block *self,
+			     unsigned long val, void *data);
+
+struct prev_kprobe {
+	struct kprobe *kp;
+	unsigned long status;
+};
+
+struct kprobe_ctlblk {
+	unsigned int kprobe_status;
+	struct pt_regs jprobe_saved_regs;
+	char jprobes_stack[MAX_STACK_SIZE];
+	struct prev_kprobe prev_kprobe;
+};
+
+int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause);
+void kretprobe_trampoline(void);
+void trap_is_kprobe(unsigned long cause, unsigned long address,
+			   struct pt_regs *regs);
+#else
+static void trap_is_kprobe(unsigned long cause, unsigned long address,
+			   struct pt_regs *regs)
+{
+}
+#endif
+
+#endif

+ 63 - 0
arch/arc/include/asm/linkage.h

@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#ifdef __ASSEMBLY__
+
+/* Can't use the ENTRY macro in linux/linkage.h
+ * gas considers ';' as comment vs. newline
+ */
+.macro ARC_ENTRY name
+	.global \name
+	.align 4
+	\name:
+.endm
+
+.macro ARC_EXIT name
+#define ASM_PREV_SYM_ADDR(name)  .-##name
+	.size \name, ASM_PREV_SYM_ADDR(\name)
+.endm
+
+/* annotation for data we want in DCCM - if enabled in .config */
+.macro ARCFP_DATA nm
+#ifdef CONFIG_ARC_HAS_DCCM
+	.section .data.arcfp
+#else
+	.section .data
+#endif
+	.global \nm
+.endm
+
+/* annotation for data we want in DCCM - if enabled in .config */
+.macro ARCFP_CODE
+#ifdef CONFIG_ARC_HAS_ICCM
+	.section .text.arcfp, "ax",@progbits
+#else
+	.section .text, "ax",@progbits
+#endif
+.endm
+
+#else	/* !__ASSEMBLY__ */
+
+#ifdef CONFIG_ARC_HAS_ICCM
+#define __arcfp_code __attribute__((__section__(".text.arcfp")))
+#else
+#define __arcfp_code __attribute__((__section__(".text")))
+#endif
+
+#ifdef CONFIG_ARC_HAS_DCCM
+#define __arcfp_data __attribute__((__section__(".data.arcfp")))
+#else
+#define __arcfp_data __attribute__((__section__(".data")))
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif

+ 87 - 0
arch/arc/include/asm/mach_desc.h

@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * based on METAG mach/arch.h (which in turn was based on ARM)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MACH_DESC_H_
+#define _ASM_ARC_MACH_DESC_H_
+
+/**
+ * struct machine_desc - Board specific callbacks, called from ARC common code
+ *	Provided by each ARC board using MACHINE_START()/MACHINE_END(), so
+ *	a multi-platform kernel builds with array of such descriptors.
+ *	a multi-platform kernel builds with an array of such descriptors.
+ *	against the @dt_compat of all such descriptors, and one with highest
+ *	"DT score" is selected as global @machine_desc.
+ *
+ * @name:		Board/SoC name
+ * @dt_compat:		Array of device tree 'compatible' strings
+ * 			(XXX: although only 1st entry is looked at)
+ * @init_early:		Very early callback [called from setup_arch()]
+ * @init_irq:		setup external IRQ controllers [called from init_IRQ()]
+ * @init_smp:		for each CPU (e.g. setup IPI)
+ * 			[(M):init_IRQ(), (o):start_kernel_secondary()]
+ * @init_time:		platform specific clocksource/clockevent registration
+ * 			[called from time_init()]
+ * @init_machine:	arch initcall level callback (e.g. populate static
+ * 			platform devices or parse Devicetree)
+ * @init_late:		Late initcall level callback
+ *
+ */
+struct machine_desc {
+	const char		*name;
+	const char		**dt_compat;
+
+	void			(*init_early)(void);
+	void			(*init_irq)(void);
+#ifdef CONFIG_SMP
+	void			(*init_smp)(unsigned int);
+#endif
+	void			(*init_time)(void);
+	void			(*init_machine)(void);
+	void			(*init_late)(void);
+
+};
+
+/*
+ * Current machine - only accessible during boot.
+ */
+extern struct machine_desc *machine_desc;
+
+/*
+ * Machine type table - also only accessible during boot
+ */
+extern struct machine_desc __arch_info_begin[], __arch_info_end[];
+#define for_each_machine_desc(p)			\
+	for (p = __arch_info_begin; p < __arch_info_end; p++)
+
+static inline struct machine_desc *default_machine_desc(void)
+{
+	/* the default machine is the last one linked in */
+	if (__arch_info_end - 1 < __arch_info_begin)
+		return NULL;
+	return __arch_info_end - 1;
+}
+
+/*
+ * Set of macros to define architecture features.
+ * This is built into a table by the linker.
+ */
+#define MACHINE_START(_type, _name)			\
+static const struct machine_desc __mach_desc_##_type	\
+__used							\
+__attribute__((__section__(".arch.info.init"))) = {	\
+	.name		= _name,
+
+#define MACHINE_END				\
+};
+
+extern struct machine_desc *setup_machine_fdt(void *dt);
+extern void __init copy_devtree(void);
+
+#endif
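
A board registration sketch using the MACHINE_START()/MACHINE_END() helpers above -- the board name, compatible string and callbacks are all hypothetical:

#include <linux/init.h>
#include <asm/mach_desc.h>

static void __init myboard_init_early(void)
{
	/* hypothetical early platform setup */
}

static void __init myboard_init_irq(void)
{
	/* hypothetical external interrupt controller setup */
}

static const char *myboard_compat[] = {
	"vendor,myboard",		/* matched against the DT root "compatible" */
	NULL,
};

MACHINE_START(MYBOARD, "myboard")
	.dt_compat	= myboard_compat,
	.init_early	= myboard_init_early,
	.init_irq	= myboard_init_irq,
MACHINE_END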

+ 23 - 0
arch/arc/include/asm/mmu.h

@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MMU_H
+#define _ASM_ARC_MMU_H
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+	unsigned long asid;	/* Pvt Addr-Space ID for mm */
+#ifdef CONFIG_ARC_TLB_DBG
+	struct task_struct *tsk;
+#endif
+} mm_context_t;
+
+#endif
+
+#endif

+ 213 - 0
arch/arc/include/asm/mmu_context.h

@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ *  -Refactored get_new_mmu_context( ) to only handle live-mm.
+ *   retiring-mm handled in other hooks
+ *
+ * Vineetg: March 25th, 2008: Bug #92690
+ *  -Major rewrite of Core ASID allocation routine get_new_mmu_context
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_MMU_CONTEXT_H
+#define _ASM_ARC_MMU_CONTEXT_H
+
+#include <asm/arcregs.h>
+#include <asm/tlb.h>
+
+#include <asm-generic/mm_hooks.h>
+
+/*		ARC700 ASID Management
+ *
+ * ARC MMU provides 8-bit ASID (0..255) to TAG TLB entries, allowing entries
+ * with same vaddr (different tasks) to co-exit. This provides for
+ * "Fast Context Switch" i.e. no TLB flush on ctxt-switch
+ *
+ * Linux assigns each task a unique ASID. A simple round-robin allocation
+ * of H/w ASID is done using software tracker @asid_cache.
+ * When it reaches max 255, the allocation cycle starts afresh by flushing
+ * the entire TLB and wrapping ASID back to zero.
+ *
+ * For book-keeping, Linux uses a couple of data-structures:
+ *  -mm_struct has an @asid field to keep a note of task's ASID (needed at the
+ *   time of say switch_mm( )
+ *  -An array of mm structs @asid_mm_map[] for the asid->mm reverse mapping:
+ *   given an ASID, find the mm struct associated with it.
+ *
+ * The round-robin allocation algorithm allows for ASID stealing.
+ * If asid tracker is at "x-1", a new req will allocate "x", even if "x" was
+ * already assigned to another (switched-out) task. Obviously the prev owner
+ * is marked with an invalid ASID to make it request for a new ASID when it
+ * gets scheduled next time. However its TLB entries (with ASID "x") could
+ * exist, which must be cleared before the same ASID is used by the new owner.
+ * Flushing them would be plausible but costly solution. Instead we force a
+ * allocation policy quirk, which ensures that a stolen ASID won't have any
+ * TLB entries associates, alleviating the need to flush.
+ * The quirk essentially is not allowing ASID allocated in prev cycle
+ * to be used past a roll-over in the next cycle.
+ * When this happens (i.e. task ASID > asid tracker), task needs to refresh
+ * its ASID, aligning it to the current value of the tracker. If the task doesn't
+ * get scheduled past a roll-over, so that its ASID is not yet realigned with the
+ * tracker, such an ASID is anyway safely reusable because it is
+ * guaranteed that TLB entries with that ASID won't exist.
+ */
+
+#define FIRST_ASID  0
+#define MAX_ASID    255			/* 8 bit PID field in PID Aux reg */
+#define NO_ASID     (MAX_ASID + 1)	/* ASID Not alloc to mmu ctxt */
+#define NUM_ASID    ((MAX_ASID - FIRST_ASID) + 1)
+
+/* ASID to mm struct mapping */
+extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
+
+extern int asid_cache;
+
+/*
+ * Assign a new ASID to task. If the task already has an ASID, it is
+ * relinquished.
+ */
+static inline void get_new_mmu_context(struct mm_struct *mm)
+{
+	struct mm_struct *prev_owner;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	/*
+	 * Relinquish the currently owned ASID (if any).
+	 * Doing unconditionally saves a cmp-n-branch; for already unused
+	 * ASID slot, the value was/remains NULL
+	 */
+	asid_mm_map[mm->context.asid] = (struct mm_struct *)NULL;
+
+	/* move to new ASID */
+	if (++asid_cache > MAX_ASID) {	/* ASID roll-over */
+		asid_cache = FIRST_ASID;
+		flush_tlb_all();
+	}
+
+	/*
+	 * Is next ASID already owned by some-one else (we are stealing it).
+	 * If so, let the orig owner be aware of this, so when it runs, it
+	 * asks for a brand new ASID. This would only happen for a long-lived
+	 * task with ASID from prev allocation cycle (before ASID roll-over).
+	 *
+	 * This might look wrong - if we are re-using some other task's ASID,
+	 * won't we use its stale TLB entries too. Actually switch_mm( ) takes
+	 * care of such a case: it ensures that a task with an ASID from a prev alloc
+	 * cycle, when scheduled, will refresh its ASID: see switch_mm( ) below.
+	 * The stealing scenario described here will only happen if that task
+	 * didn't get a chance to refresh its ASID - implying stale entries
+	 * won't exist.
+	 */
+	prev_owner = asid_mm_map[asid_cache];
+	if (prev_owner)
+		prev_owner->context.asid = NO_ASID;
+
+	/* Assign new ASID to tsk */
+	asid_mm_map[asid_cache] = mm;
+	mm->context.asid = asid_cache;
+
+#ifdef CONFIG_ARC_TLB_DBG
+	pr_info("ARC_TLB_DBG: NewMM=0x%x OldMM=0x%x task_struct=0x%x Task: %s,"
+	       " pid:%u, assigned asid:%lu\n",
+	       (unsigned int)mm, (unsigned int)prev_owner,
+	       (unsigned int)(mm->context.tsk), (mm->context.tsk)->comm,
+	       (mm->context.tsk)->pid, mm->context.asid);
+#endif
+
+	write_aux_reg(ARC_REG_PID, asid_cache | MMU_ENABLE);
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+	mm->context.asid = NO_ASID;
+#ifdef CONFIG_ARC_TLB_DBG
+	mm->context.tsk = tsk;
+#endif
+	return 0;
+}
+
+/* Prepare the MMU for task: setup PID reg with allocated ASID
+    If task doesn't have an ASID (never alloc or stolen, get a new ASID)
+*/
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+			     struct task_struct *tsk)
+{
+#ifndef CONFIG_SMP
+	/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
+	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
+#endif
+
+	/*
+	 * Get a new ASID if task doesn't have a valid one. Possible when
+	 *  -task never had an ASID (fresh after fork)
+	 *  -its ASID was stolen - past an ASID roll-over.
+	 *  -There's a third obscure scenario (if this task is running for the
+	 *   first time after an ASID rollover), where despite having a valid
+	 *   ASID, we force a get for new ASID - see comments at top.
+	 *
+	 * Both the non-alloc scenario and first-use-after-rollover can be
+	 * detected using the single condition below:  NO_ASID = 256
+	 * while asid_cache is always a valid ASID value (0-255).
+	 */
+	if (next->context.asid > asid_cache) {
+		get_new_mmu_context(next);
+	} else {
+		/*
+		 * XXX: This will never happen given the chks above
+		 * BUG_ON(next->context.asid > MAX_ASID);
+		 */
+		write_aux_reg(ARC_REG_PID, next->context.asid | MMU_ENABLE);
+	}
+
+}
+
+static inline void destroy_context(struct mm_struct *mm)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	asid_mm_map[mm->context.asid] = NULL;
+	mm->context.asid = NO_ASID;
+
+	local_irq_restore(flags);
+}
+
+/* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
+ * for retiring-mm. However destroy_context( ) still needs to do that because
+ * between mm_release( ) => deactivate_mm( ) and
+ * mmput => .. => __mmdrop( ) => destroy_context( )
+ * there is a good chance that the task gets sched-out/in, making its ASID valid
+ * again (this teased me for a whole day).
+ */
+#define deactivate_mm(tsk, mm)   do { } while (0)
+
+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+#ifndef CONFIG_SMP
+	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
+#endif
+
+	/* Unconditionally get a new ASID */
+	get_new_mmu_context(next);
+
+}
+
+#define enter_lazy_tlb(mm, tsk)
+
+#endif /* __ASM_ARC_MMU_CONTEXT_H */
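
A standalone (userspace) sketch of the round-robin ASID tracker described in the comments above: on roll-over the whole TLB is flushed, which is why a stolen ASID can never carry stale entries. The starting value is chosen near the limit only so the demo shows a roll-over quickly:

#include <stdio.h>

#define FIRST_ASID	0
#define MAX_ASID	255

static int asid_cache = MAX_ASID - 1;	/* start near the limit for the demo */

static int alloc_asid(void)
{
	if (++asid_cache > MAX_ASID) {		/* ASID roll-over */
		asid_cache = FIRST_ASID;
		printf("roll-over: flush_tlb_all()\n");
	}
	return asid_cache;
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		printf("mm %d gets asid %d\n", i, alloc_asid());
	return 0;
}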

+ 28 - 0
arch/arc/include/asm/module.h

@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+
+ */
+
+#ifndef _ASM_ARC_MODULE_H
+#define _ASM_ARC_MODULE_H
+
+#include <asm-generic/module.h>
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+struct mod_arch_specific {
+	void *unw_info;
+	int unw_sec_idx;
+};
+#endif
+
+#define MODULE_PROC_FAMILY "ARC700"
+
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
+
+#endif /* _ASM_ARC_MODULE_H */

+ 18 - 0
arch/arc/include/asm/mutex.h

@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * xchg() based mutex fast path maintains a state of 0 or 1, as opposed to
+ * atomic dec based which can "count" any number of lock contenders.
+ * This ideally needs to be fixed in core, but for now switching to dec ver.
+ */
+#if defined(CONFIG_SMP) && (CONFIG_NR_CPUS > 2)
+#include <asm-generic/mutex-dec.h>
+#else
+#include <asm-generic/mutex-xchg.h>
+#endif

+ 109 - 0
arch/arc/include/asm/page.h

@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARC_PAGE_H
+#define __ASM_ARC_PAGE_H
+
+#include <uapi/asm/page.h>
+
+
+#ifndef __ASSEMBLY__
+
+#define get_user_page(vaddr)		__get_free_page(GFP_KERNEL)
+#define free_user_page(page, addr)	free_page(addr)
+
+/* TBD: for now don't worry about VIPT D$ aliasing */
+#define clear_page(paddr)		memset((paddr), 0, PAGE_SIZE)
+#define copy_page(to, from)		memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(addr, vaddr, pg)	clear_page(addr)
+#define copy_user_page(vto, vfrom, vaddr, pg)	copy_page(vto, vfrom)
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct {
+	unsigned long pte;
+} pte_t;
+typedef struct {
+	unsigned long pgd;
+} pgd_t;
+typedef struct {
+	unsigned long pgprot;
+} pgprot_t;
+typedef unsigned long pgtable_t;
+
+#define pte_val(x)      ((x).pte)
+#define pgd_val(x)      ((x).pgd)
+#define pgprot_val(x)   ((x).pgprot)
+
+#define __pte(x)        ((pte_t) { (x) })
+#define __pgd(x)        ((pgd_t) { (x) })
+#define __pgprot(x)     ((pgprot_t) { (x) })
+
+#define pte_pgprot(x) __pgprot(pte_val(x))
+
+#else /* !STRICT_MM_TYPECHECKS */
+
+typedef unsigned long pte_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+typedef unsigned long pgtable_t;
+
+#define pte_val(x)	(x)
+#define pgd_val(x)	(x)
+#define pgprot_val(x)	(x)
+#define __pte(x)	(x)
+#define __pgprot(x)	(x)
+#define pte_pgprot(x)	(x)
+
+#endif
+
+#define ARCH_PFN_OFFSET     (CONFIG_LINUX_LINK_BASE >> PAGE_SHIFT)
+
+#define pfn_valid(pfn)      (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+
+/*
+ * __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
+ *
+ * These macros have historically been misnamed:
+ * "virt" here means link-address/program-address as embedded in object code.
+ * So if the kernel image is linked at 0x8000_0000 onwards, 0x8010_0000 is the
+ * 128th page, and virt_to_page( ) returns the struct page corresponding to it.
+ * mem_map[ ] is an array of struct page for each page frame in the system.
+ *
+ * Independent of where Linux is linked, link-addr = physical address.
+ * So the old macro  __pa = vaddr + PAGE_OFFSET - CONFIG_LINUX_LINK_BASE
+ * would have been wrong had the kernel not been linked at 0x8000_0000.
+ */
+#define __pa(vaddr)  ((unsigned long)vaddr)
+#define __va(paddr)  ((void *)((unsigned long)(paddr)))
+
+#define virt_to_page(kaddr)	\
+	(mem_map + ((__pa(kaddr) - CONFIG_LINUX_LINK_BASE) >> PAGE_SHIFT))
+
+#define virt_addr_valid(kaddr)  pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+/* Default Permissions for page, used in mmap.c */
+#ifdef CONFIG_ARC_STACK_NONEXEC
+#define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
+#else
+#define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_EXEC | \
+				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#endif
+
+#define WANT_PAGE_VIRTUAL   1
+
+#include <asm-generic/memory_model.h>   /* page_to_pfn, pfn_to_page */
+#include <asm-generic/getorder.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
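
What the STRICT_MM_TYPECHECKS variant buys can be shown with a tiny
stand-alone sketch (illustrative only, not part of the patch): with the
struct wrappers the compiler rejects mixing raw longs with pte_t, so every
conversion has to go through __pte()/pte_val().

typedef struct { unsigned long pte; } pte_t;	/* strict variant */
#define __pte(x)	((pte_t) { (x) })
#define pte_val(x)	((x).pte)

static unsigned long demo(void)
{
	pte_t p = __pte(0x80100000 | 0x1);	/* ok: explicit wrap */
	/* pte_t q = 0x80100000;	would not compile */
	/* unsigned long r = p;		would not compile */
	return pte_val(p);			/* ok: explicit unwrap */
}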

+ 13 - 0
arch/arc/include/asm/perf_event.h

@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ASM_PERF_EVENT_H
+#define __ASM_PERF_EVENT_H
+
+#endif /* __ASM_PERF_EVENT_H */

+ 134 - 0
arch/arc/include/asm/pgalloc.h

@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: June 2011
+ *  -"/proc/meminfo | grep PageTables" kept on increasing
+ *   Recently added pgtable dtor was not getting called.
+ *
+ * vineetg: May 2011
+ *  -Variable pg-sz means that Page Tables could be variable sized themselves
+ *    So calculate it based on addr traversal split [pgd-bits:pte-bits:xxx]
+ *  -Page Table size capped to max 1 to save memory - hence verified.
+ *  -Since these deal with constants, gcc compile-time optimizes them.
+ *
+ * vineetg: Nov 2010
+ *  -Added pgtable ctor/dtor used for pgtable mem accounting
+ *
+ * vineetg: April 2010
+ *  -Switched pgtable_t from being struct page * to unsigned long
+ *      =Needed so that Page Table allocator (pte_alloc_one) is not forced
+ *       to deal with struct page. That way in future we can make it allocate
+ *       multiple Pg Tbls in one Page Frame
+ *      =sweet side effect is avoiding calls to ugly page_address( ) from the
+ *       pg-tlb allocator sub-sys (pte_alloc_one, pte_free, pmd_populate)
+ *
+ *  Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_PGALLOC_H
+#define _ASM_ARC_PGALLOC_H
+
+#include <linux/mm.h>
+#include <linux/log2.h>
+
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+{
+	pmd_set(pmd, pte);
+}
+
+static inline void
+pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep)
+{
+	pmd_set(pmd, (pte_t *) ptep);
+}
+
+static inline int __get_order_pgd(void)
+{
+	return get_order(PTRS_PER_PGD * 4);
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	int num, num2;
+	pgd_t *ret = (pgd_t *) __get_free_pages(GFP_KERNEL, __get_order_pgd());
+
+	if (ret) {
+		num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
+		memzero(ret, num * sizeof(pgd_t));
+
+		num2 = VMALLOC_SIZE / PGDIR_SIZE;
+		memcpy(ret + num, swapper_pg_dir + num, num2 * sizeof(pgd_t));
+
+		memzero(ret + num + num2,
+			       (PTRS_PER_PGD - num - num2) * sizeof(pgd_t));
+
+	}
+	return ret;
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	free_pages((unsigned long)pgd, __get_order_pgd());
+}
+
+
+/*
+ * With software-only page-tables, addr-split for traversal is tweakable and
+ * that directly governs how big tables would be at each level.
+ * Further, the MMU page size is configurable.
+ * Thus we need to programmatically assert the size constraint.
+ * All of this is const math, allowing gcc to do constant folding/propagation.
+ */
+
+static inline int __get_order_pte(void)
+{
+	return get_order(PTRS_PER_PTE * 4);
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+					unsigned long address)
+{
+	pte_t *pte;
+
+	pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO,
+					 __get_order_pte());
+
+	return pte;
+}
+
+static inline pgtable_t
+pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	pgtable_t pte_pg;
+
+	pte_pg = __get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
+	if (pte_pg) {
+		memzero((void *)pte_pg, PTRS_PER_PTE * 4);
+		pgtable_page_ctor(virt_to_page(pte_pg));
+	}
+
+	return pte_pg;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+	free_pages((unsigned long)pte, __get_order_pte()); /* takes phy addr */
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
+{
+	pgtable_page_dtor(virt_to_page(ptep));
+	free_pages(ptep, __get_order_pte());
+}
+
+#define __pte_free_tlb(tlb, pte, addr)  pte_free((tlb)->mm, pte)
+
+#define check_pgt_cache()   do { } while (0)
+#define pmd_pgtable(pmd) pmd_page_vaddr(pmd)
+
+#endif /* _ASM_ARC_PGALLOC_H */
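
The order math above is pure compile-time arithmetic. Worked through with the
default 8K-page, 11:8:13 address split set up in pgtable.h (assumed values,
shown only for illustration):

/* Assuming PAGE_SHIFT = 13 (8K pages), BITS_FOR_PGD = 11, BITS_FOR_PTE = 8:
 *
 *   PTRS_PER_PGD * 4 = 2048 * 4 = 8 KB  ->  get_order() = 0 (one 8K frame)
 *   PTRS_PER_PTE * 4 =  256 * 4 = 1 KB  ->  get_order() = 0 (one 8K frame)
 *
 * so pgd_alloc() and pte_alloc_one() each grab a single page frame, and gcc
 * folds __get_order_pgd()/__get_order_pte() down to the constant 0.
 */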

+ 405 - 0
arch/arc/include/asm/pgtable.h

@@ -0,0 +1,405 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ *  -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
+ *     They are semantically the same although in different contexts
+ *     VALID marks a TLB entry exists and it will only happen if PRESENT
+ *  - Utilise some unused free bits to confine PTE flags to 12 bits
+ *     This is a must for 4k pg-sz
+ *
+ * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
+ *  -TLB Locking never really existed, except for initial specs
+ *  -SILENT_xxx not needed for our port
+ *  -Per my request, MMU V3 changes the layout of some of the bits
+ *     to avoid a few shifts in TLB Miss handlers.
+ *
+ * vineetg: April 2010
+ *  -PGD entry no longer contains any flags. If empty it is 0, otherwise has
+ *   Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set( ) become simpler
+ *
+ * vineetg: April 2010
+ *  -Switched from 8:11:13 split for page table lookup to 11:8:13
+ *  -this speeds up page table allocation itself as we now have to memset 1K
+ *    instead of 8k per page table.
+ * -TODO: Right now page table alloc is 8K and rest 7K is unused
+ *    need to optimise it
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_PGTABLE_H
+#define _ASM_ARC_PGTABLE_H
+
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+/**************************************************************************
+ * Page Table Flags
+ *
+ * ARC700 MMU only deals with software-managed TLB entries.
+ * Page Tables are purely for Linux VM's consumption and the bits below are
+ * suited to that (uniqueness). Hence some are not implemented in the TLB and
+ * some have different values in the TLB.
+ * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in
+ *      separate PD0 and PD1, which combined form a translation entry),
+ *      while from the PTE perspective they are 8 and 9 respectively;
+ * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
+ *      (saves some bit shift ops in TLB Miss hdlrs)
+ */
+
+#if (CONFIG_ARC_MMU_VER <= 2)
+
+#define _PAGE_ACCESSED      (1<<1)	/* Page is accessed (S) */
+#define _PAGE_CACHEABLE     (1<<2)	/* Page is cached (H) */
+#define _PAGE_EXECUTE       (1<<3)	/* Page has user execute perm (H) */
+#define _PAGE_WRITE         (1<<4)	/* Page has user write perm (H) */
+#define _PAGE_READ          (1<<5)	/* Page has user read perm (H) */
+#define _PAGE_K_EXECUTE     (1<<6)	/* Page has kernel execute perm (H) */
+#define _PAGE_K_WRITE       (1<<7)	/* Page has kernel write perm (H) */
+#define _PAGE_K_READ        (1<<8)	/* Page has kernel perm (H) */
+#define _PAGE_GLOBAL        (1<<9)	/* Page is global (H) */
+#define _PAGE_MODIFIED      (1<<10)	/* Page modified (dirty) (S) */
+#define _PAGE_FILE          (1<<10)	/* page cache/ swap (S) */
+#define _PAGE_PRESENT       (1<<11)	/* TLB entry is valid (H) */
+
+#else
+
+/* PD1 */
+#define _PAGE_CACHEABLE     (1<<0)	/* Page is cached (H) */
+#define _PAGE_EXECUTE       (1<<1)	/* Page has user execute perm (H) */
+#define _PAGE_WRITE         (1<<2)	/* Page has user write perm (H) */
+#define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
+#define _PAGE_K_EXECUTE     (1<<4)	/* Page has kernel execute perm (H) */
+#define _PAGE_K_WRITE       (1<<5)	/* Page has kernel write perm (H) */
+#define _PAGE_K_READ        (1<<6)	/* Page has kernel perm (H) */
+#define _PAGE_ACCESSED      (1<<7)	/* Page is accessed (S) */
+
+/* PD0 */
+#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
+#define _PAGE_PRESENT       (1<<9)	/* TLB entry is valid (H) */
+#define _PAGE_SHARED_CODE   (1<<10)	/* Shared Code page with cmn vaddr
+					   usable for shared TLB entries (H) */
+
+#define _PAGE_MODIFIED      (1<<11)	/* Page modified (dirty) (S) */
+#define _PAGE_FILE          (1<<12)	/* page cache/ swap (S) */
+
+#define _PAGE_SHARED_CODE_H (1<<31)	/* Hardware counterpart of above */
+#endif
+
+/* Kernel allowed all permissions for all pages */
+#define _K_PAGE_PERMS  (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
+
+#ifdef CONFIG_ARC_CACHE_PAGES
+#define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE
+#else
+#define _PAGE_DEF_CACHEABLE (0)
+#endif
+
+/* Helper for every "user" page
+ * -kernel can R/W/X
+ * -by default cached, unless configured otherwise
+ * -present in memory
+ */
+#define ___DEF (_PAGE_PRESENT | _K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
+
+/* Set of bits not changed in pte_modify */
+#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
+
+/* More abbreviated helpers */
+#define PAGE_U_NONE     __pgprot(___DEF)
+#define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
+#define PAGE_U_W_R      __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
+#define PAGE_U_X_R      __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
+#define PAGE_U_X_W_R    __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
+						       _PAGE_EXECUTE)
+
+#define PAGE_SHARED	PAGE_U_W_R
+
+/* While the kernel runs from untranslated space, vmalloc/modules use a chunk of
+ * kernel vaddr space - visible in all addr spaces, but kernel mode only
+ * Thus Global, all-kernel-access, no-user-access, cached
+ */
+#define PAGE_KERNEL          __pgprot(___DEF | _PAGE_GLOBAL)
+
+/* ioremap */
+#define PAGE_KERNEL_NO_CACHE __pgprot(_PAGE_PRESENT | _K_PAGE_PERMS | \
+						     _PAGE_GLOBAL)
+
+/**************************************************************************
+ * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
+ *
+ * Certain cases have 1:1 mapping
+ *  e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
+ *       which directly corresponds to  PAGE_U_X_R
+ *
+ * Other rules which cause the divergence from 1:1 mapping
+ *
+ *  1. Although ARC700 can do exclusive execute/write protection (meaning R
+ *     can be tracked independently of X/W unlike some other CPUs), still to
+ *     keep things consistent with other archs:
+ *      -Write implies Read:   W => R
+ *      -Execute implies Read: X => R
+ *
+ *  2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
+ *     This is to enable COW mechanism
+ */
+	/* xwr */
+#define __P000  PAGE_U_NONE
+#define __P001  PAGE_U_R
+#define __P010  PAGE_U_R	/* Pvt-W => !W */
+#define __P011  PAGE_U_R	/* Pvt-W => !W */
+#define __P100  PAGE_U_X_R	/* X => R */
+#define __P101  PAGE_U_X_R
+#define __P110  PAGE_U_X_R	/* Pvt-W => !W and X => R */
+#define __P111  PAGE_U_X_R	/* Pvt-W => !W */
+
+#define __S000  PAGE_U_NONE
+#define __S001  PAGE_U_R
+#define __S010  PAGE_U_W_R	/* W => R */
+#define __S011  PAGE_U_W_R
+#define __S100  PAGE_U_X_R	/* X => R */
+#define __S101  PAGE_U_X_R
+#define __S110  PAGE_U_X_W_R	/* X => R */
+#define __S111  PAGE_U_X_W_R
+
+/****************************************************************
+ * Page Table Lookup split
+ *
+ * We implement 2 tier paging and since this is all software, we are free
+ * to customize the span of a PGD / PTE entry to suit us
+ *
+ *			32 bit virtual address
+ * -------------------------------------------------------
+ * | BITS_FOR_PGD    |  BITS_FOR_PTE    |  BITS_IN_PAGE  |
+ * -------------------------------------------------------
+ *       |                  |                |
+ *       |                  |                --> off in page frame
+ *       |		    |
+ *       |                  ---> index into Page Table
+ *       |
+ *       ----> index into Page Directory
+ */
+
+#define BITS_IN_PAGE	PAGE_SHIFT
+
+/* Optimal Sizing of Pg Tbl - based on MMU page size */
+#if defined(CONFIG_ARC_PAGE_SIZE_8K)
+#define BITS_FOR_PTE	8
+#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
+#define BITS_FOR_PTE	8
+#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
+#define BITS_FOR_PTE	9
+#endif
+
+#define BITS_FOR_PGD	(32 - BITS_FOR_PTE - BITS_IN_PAGE)
+
+#define PGDIR_SHIFT	(BITS_FOR_PTE + BITS_IN_PAGE)
+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)	/* vaddr span, not PGD sz */
+#define PGDIR_MASK	(~(PGDIR_SIZE-1))
+
+#ifdef __ASSEMBLY__
+#define	PTRS_PER_PTE	(1 << BITS_FOR_PTE)
+#define	PTRS_PER_PGD	(1 << BITS_FOR_PGD)
+#else
+#define	PTRS_PER_PTE	(1UL << BITS_FOR_PTE)
+#define	PTRS_PER_PGD	(1UL << BITS_FOR_PGD)
+#endif
+/*
+ * Number of entries a userland program uses.
+ * TASK_SIZE is the maximum vaddr that can be used by a userland program.
+ */
+#define	USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
+
+/*
+ * No special requirement for the lowest virtual address at which a user-space
+ * mapping is permitted.
+ */
+#define FIRST_USER_ADDRESS      0
+
+
+/****************************************************************
+ * Bucket load of VM Helpers
+ */
+
+#ifndef __ASSEMBLY__
+
+#define pte_ERROR(e) \
+	pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pgd_ERROR(e) \
+	pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/* the zero page used for uninitialized and anonymous pages */
+extern char empty_zero_page[PAGE_SIZE];
+#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
+
+#define pte_unmap(pte)		do { } while (0)
+#define pte_unmap_nested(pte)		do { } while (0)
+
+#define set_pte(pteptr, pteval)	((*(pteptr)) = (pteval))
+#define set_pmd(pmdptr, pmdval)	(*(pmdptr) = pmdval)
+
+/* find the page descriptor of the Page Tbl ref by PMD entry */
+#define pmd_page(pmd)		virt_to_page(pmd_val(pmd) & PAGE_MASK)
+
+/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
+#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & PAGE_MASK)
+
+/* In a 2-level sys, set up the PGD entry with the PTE table address */
+static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
+{
+	pmd_val(*pmdp) = (unsigned long)ptep;
+}
+
+#define pte_none(x)			(!pte_val(x))
+#define pte_present(x)			(pte_val(x) & _PAGE_PRESENT)
+#define pte_clear(mm, addr, ptep)	set_pte_at(mm, addr, ptep, __pte(0))
+
+#define pmd_none(x)			(!pmd_val(x))
+#define	pmd_bad(x)			((pmd_val(x) & ~PAGE_MASK))
+#define pmd_present(x)			(pmd_val(x))
+#define pmd_clear(xp)			do { pmd_val(*(xp)) = 0; } while (0)
+
+#define pte_page(x) (mem_map + \
+		(unsigned long)(((pte_val(x) - PAGE_OFFSET) >> PAGE_SHIFT)))
+
+#define mk_pte(page, pgprot)						\
+({									\
+	pte_t pte;							\
+	pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot);	\
+	pte;								\
+})
+
+/* TBD: Non linear mapping stuff */
+static inline int pte_file(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_FILE;
+}
+
+#define PTE_FILE_MAX_BITS	30
+#define pgoff_to_pte(x)         __pte(x)
+#define pte_to_pgoff(x)		(pte_val(x) >> 2)
+#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn, prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+/*
+ * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
+ * and returns ptr to PTE entry corresponding to @addr
+ */
+#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
+					 __pte_index(addr))
+
+/* No mapping of Page Tables in high mem etc, so following same as above */
+#define pte_offset_kernel(dir, addr)		pte_offset(dir, addr)
+#define pte_offset_map(dir, addr)		pte_offset(dir, addr)
+
+/* Zoo of pte_xxx function */
+#define pte_read(pte)		(pte_val(pte) & _PAGE_READ)
+#define pte_write(pte)		(pte_val(pte) & _PAGE_WRITE)
+#define pte_dirty(pte)		(pte_val(pte) & _PAGE_MODIFIED)
+#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
+#define pte_special(pte)	(0)
+
+#define PTE_BIT_FUNC(fn, op) \
+	static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+
+PTE_BIT_FUNC(wrprotect,	&= ~(_PAGE_WRITE));
+PTE_BIT_FUNC(mkwrite,	|= (_PAGE_WRITE));
+PTE_BIT_FUNC(mkclean,	&= ~(_PAGE_MODIFIED));
+PTE_BIT_FUNC(mkdirty,	|= (_PAGE_MODIFIED));
+PTE_BIT_FUNC(mkold,	&= ~(_PAGE_ACCESSED));
+PTE_BIT_FUNC(mkyoung,	|= (_PAGE_ACCESSED));
+PTE_BIT_FUNC(exprotect,	&= ~(_PAGE_EXECUTE));
+PTE_BIT_FUNC(mkexec,	|= (_PAGE_EXECUTE));
+
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+/* Macro to mark a page protection as uncacheable */
+#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, pte_t pteval)
+{
+	set_pte(ptep, pteval);
+}
+
+/*
+ * All kernel related VM pages are in init's mm.
+ */
+#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
+#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
+#define pgd_offset(mm, addr)	(((mm)->pgd)+pgd_index(addr))
+
+/*
+ * Macro to quickly access the PGD entry, utilising the fact that some
+ * arches may cache the pointer to the Page Directory of the "current" task
+ * in an MMU register
+ *
+ * Thus task->mm->pgd (3 pointer dereferences, cache misses etc.) simply
+ * becomes reading a register
+ *
+ * ********CAUTION*******:
+ * Kernel code might be dealing with an mm_struct that is NOT "current"'s.
+ * Thus use this macro only when you are certain the mm in question belongs
+ * to "current", e.g. when dealing with signal frame setup code etc
+ */
+#ifndef CONFIG_SMP
+#define pgd_offset_fast(mm, addr)	\
+({					\
+	pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0);  \
+	pgd_base + pgd_index(addr);	\
+})
+#else
+#define pgd_offset_fast(mm, addr)	pgd_offset(mm, addr)
+#endif
+
+extern void paging_init(void);
+extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+		      pte_t *ptep);
+
+/* Encode swap {type,off} tuple into PTE
+ * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
+ * both PAGE_FILE and PAGE_PRESENT are zero in a PTE holding swap "identifier"
+ */
+#define __swp_entry(type, off)	((swp_entry_t) { \
+					((type) & 0x1f) | ((off) << 13) })
+
+/* Decode a PTE containing swap "identifier" into constituents */
+#define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
+#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)
+
+/* NOPs, to keep generic kernel happy */
+#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
+
+#define kern_addr_valid(addr)	(1)
+
+/*
+ * remap a physical page `pfn' of size `size' with page protection `prot'
+ * into virtual address `from'
+ */
+#define io_remap_pfn_range(vma, from, pfn, size, prot) \
+			remap_pfn_range(vma, from, pfn, size, prot)
+
+#include <asm-generic/pgtable.h>
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init()   do { } while (0)
+
+#endif /* __ASSEMBLY__ */
+
+#endif
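
A stand-alone sketch of the 11:8:13 address split documented above, written as
user-space C for illustration (the address is arbitrary and the macros mirror
the 8K-page configuration; none of this is part of the patch):

#include <stdio.h>

#define BITS_IN_PAGE	13				/* 8K page */
#define BITS_FOR_PTE	8
#define PGDIR_SHIFT	(BITS_FOR_PTE + BITS_IN_PAGE)	/* 21 */

int main(void)
{
	unsigned long vaddr = 0x5f3a1c24;	/* arbitrary user address */

	unsigned long pgd_idx = vaddr >> PGDIR_SHIFT;
	unsigned long pte_idx = (vaddr >> BITS_IN_PAGE) & ((1UL << BITS_FOR_PTE) - 1);
	unsigned long offset  = vaddr & ((1UL << BITS_IN_PAGE) - 1);

	/* prints: pgd 0x2f9 pte 0xd0 off 0x1c24 */
	printf("pgd %#lx pte %#lx off %#lx\n", pgd_idx, pte_idx, offset);
	return 0;
}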

+ 151 - 0
arch/arc/include/asm/processor.h

@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: March 2009
+ *  -Implemented task_pt_regs( )
+ *
+ * Amit Bhor, Sameer Dhavale, Ashwin Chaugule: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_PROCESSOR_H
+#define __ASM_ARC_PROCESSOR_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <asm/arcregs.h>	/* for STATUS_E1_MASK et all */
+
+/* Arch specific stuff which needs to be saved per task.
+ * However these items are not important enough to earn a place in
+ * struct thread_info
+ */
+struct thread_struct {
+	unsigned long ksp;	/* kernel mode stack pointer */
+	unsigned long callee_reg;	/* pointer to callee regs */
+	unsigned long fault_address;	/* doubles as brkpt holder as well */
+	unsigned long cause_code;	/* Exception Cause Code (ECR) */
+#ifdef CONFIG_ARC_CURR_IN_REG
+	unsigned long user_r25;
+#endif
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+	struct arc_fpu fpu;
+#endif
+};
+
+#define INIT_THREAD  {                          \
+	.ksp = sizeof(init_stack) + (unsigned long) init_stack, \
+}
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+unsigned long thread_saved_pc(struct task_struct *t);
+
+#define task_pt_regs(p) \
+	((struct pt_regs *)(THREAD_SIZE - 4 + (void *)task_stack_page(p)) - 1)
+
+/* Free all resources held by a thread. */
+#define release_thread(thread) do { } while (0)
+
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk)    do { } while (0)
+
+/*
+ * A lot of busy-wait loops in SMP are based off of non-volatile data otherwise
+ * get optimised away by gcc
+ */
+#ifdef CONFIG_SMP
+#define cpu_relax()	__asm__ __volatile__ ("" : : : "memory")
+#else
+#define cpu_relax()	do { } while (0)
+#endif
+
+#define copy_segments(tsk, mm)      do { } while (0)
+#define release_segments(mm)        do { } while (0)
+
+#define KSTK_EIP(tsk)   (task_pt_regs(tsk)->ret)
+
+/*
+ * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode.
+ * These can't be derived from pt_regs as that would give correp user-mode val
+ */
+#define KSTK_ESP(tsk)   (tsk->thread.ksp)
+#define KSTK_BLINK(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1+1)*4)))
+#define KSTK_FP(tsk)    (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1)*4)))
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ *
+ * E1,E2 so that Interrupts are enabled in user mode
+ * L set, so Loop inhibited to begin with
+ * lp_start and lp_end seeded with bogus non-zero values so as to easily catch
+ * the ARC700 sr to lp_start hardware bug
+ */
+#define start_thread(_regs, _pc, _usp)				\
+do {								\
+	set_fs(USER_DS); /* reads from user space */		\
+	(_regs)->ret = (_pc);					\
+	/* Interrupts enabled in User Mode */			\
+	(_regs)->status32 = STATUS_U_MASK | STATUS_L_MASK	\
+		| STATUS_E1_MASK | STATUS_E2_MASK;		\
+	(_regs)->sp = (_usp);					\
+	/* bogus seed values for debugging */			\
+	(_regs)->lp_start = 0x10;				\
+	(_regs)->lp_end = 0x80;					\
+} while (0)
+
+extern unsigned int get_wchan(struct task_struct *p);
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ * Should the PC register be read instead? This macro does not seem to
+ * be used in many places so this won't be all that bad.
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l; })
+
+#endif /* !__ASSEMBLY__ */
+
+/* Kernel's Virtual memory area.
+ * Unlike other architectures (MIPS, sh, cris), ARC700 does not have a
+ * "kernel translated" region (like KSEG2 in MIPS). So we use an upper part
+ * of the translated bottom 2GB for kernel virtual memory and protect
+ * these pages from user accesses by disabling Ru, Eu and Wu.
+ */
+#define VMALLOC_SIZE	(0x10000000)	/* 256M */
+#define VMALLOC_START	(PAGE_OFFSET - VMALLOC_SIZE)
+#define VMALLOC_END	(PAGE_OFFSET)
+
+/* Most of the architectures seem to be keeping some kind of padding between
+ * userspace TASK_SIZE and PAGE_OFFSET, i.e. TASK_SIZE != PAGE_OFFSET.
+ */
+#define USER_KERNEL_GUTTER    0x10000000
+
+/* User address space:
+ * On ARC700, CPU allows the entire lower half of 32 bit address space to be
+ * translated. Thus potentially 2G (0:0x7FFF_FFFF) could be User vaddr space.
+ * However we steal 256M for kernel addr (0x7000_0000:0x7FFF_FFFF) and another
+ * 256M (0x6000_0000:0x6FFF_FFFF) is gutter between user/kernel spaces
+ * Thus total User vaddr space is (0:0x5FFF_FFFF)
+ */
+#define TASK_SIZE	(PAGE_OFFSET - VMALLOC_SIZE - USER_KERNEL_GUTTER)
+
+#define STACK_TOP       TASK_SIZE
+#define STACK_TOP_MAX   STACK_TOP
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE      (TASK_SIZE / 3)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_ARC_PROCESSOR_H */
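
Plugging in the usual PAGE_OFFSET of 0x8000_0000 (the default
CONFIG_LINUX_LINK_BASE; assumed here for illustration), the constants above
describe the following layout:

/* Assuming PAGE_OFFSET = 0x8000_0000:
 *
 *   VMALLOC_END        = 0x8000_0000   (PAGE_OFFSET)
 *   VMALLOC_START      = 0x7000_0000   (PAGE_OFFSET - 256M)
 *   USER_KERNEL_GUTTER = 0x6000_0000 .. 0x6FFF_FFFF (256M, unused)
 *   TASK_SIZE          = 0x6000_0000   (0x8000_0000 - 256M - 256M)
 *   STACK_TOP          = 0x6000_0000   (= TASK_SIZE)
 *   TASK_UNMAPPED_BASE = 0x2000_0000   (TASK_SIZE / 3)
 */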

+ 14 - 0
arch/arc/include/asm/prom.h

@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_PROM_H_
+#define _ASM_ARC_PROM_H_
+
+#define HAVE_ARCH_DEVTREE_FIXUPS
+
+#endif

+ 130 - 0
arch/arc/include/asm/ptrace.h

@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+#ifndef __ASM_ARC_PTRACE_H
+#define __ASM_ARC_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+
+#ifndef __ASSEMBLY__
+
+/* THE pt_regs: Defines how regs are saved during entry into kernel */
+
+struct pt_regs {
+	/*
+	 * 1 word gutter after reg-file has been saved
+	 * Technically not needed, since SP always points to a "full" location
+	 * (vs. "empty"). But pt_regs is shared with tools....
+	 */
+	long res;
+
+	/* Real registers */
+	long bta;	/* bta_l1, bta_l2, erbta */
+	long lp_start;
+	long lp_end;
+	long lp_count;
+	long status32;	/* status32_l1, status32_l2, erstatus */
+	long ret;	/* ilink1, ilink2 or eret */
+	long blink;
+	long fp;
+	long r26;	/* gp */
+	long r12;
+	long r11;
+	long r10;
+	long r9;
+	long r8;
+	long r7;
+	long r6;
+	long r5;
+	long r4;
+	long r3;
+	long r2;
+	long r1;
+	long r0;
+	long sp;	/* user/kernel sp depending on where we came from  */
+	long orig_r0;
+
+	/* to distinguish between exception, syscall and irq */
+	union {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+		/* so that assembly code is same for LE/BE */
+		unsigned long orig_r8:16, event:16;
+#else
+		unsigned long event:16, orig_r8:16;
+#endif
+		long orig_r8_word;
+	};
+};
+
+/* Callee saved registers - need to be saved only when you are scheduled out */
+
+struct callee_regs {
+	long res;	/* Again this is not needed */
+	long r25;
+	long r24;
+	long r23;
+	long r22;
+	long r21;
+	long r20;
+	long r19;
+	long r18;
+	long r17;
+	long r16;
+	long r15;
+	long r14;
+	long r13;
+};
+
+#define instruction_pointer(regs)	((regs)->ret)
+#define profile_pc(regs)		instruction_pointer(regs)
+
+/* return 1 if user mode or 0 if kernel mode */
+#define user_mode(regs) (regs->status32 & STATUS_U_MASK)
+
+#define user_stack_pointer(regs)\
+({  unsigned int sp;		\
+	if (user_mode(regs))	\
+		sp = (regs)->sp;\
+	else			\
+		sp = -1;	\
+	sp;			\
+})
+
+/* return 1 if PC in delay slot */
+#define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK)
+
+#define in_syscall(regs)    (regs->event & orig_r8_IS_SCALL)
+#define in_brkpt_trap(regs) (regs->event & orig_r8_IS_BRKPT)
+
+#define syscall_wont_restart(regs) (regs->event |= orig_r8_IS_SCALL_RESTARTED)
+#define syscall_restartable(regs) !(regs->event &  orig_r8_IS_SCALL_RESTARTED)
+
+#define current_pt_regs()					\
+({								\
+	/* open-coded current_thread_info() */			\
+	register unsigned long sp asm ("sp");			\
+	unsigned long pg_start = (sp & ~(THREAD_SIZE - 1));	\
+	(struct pt_regs *)(pg_start + THREAD_SIZE - 4) - 1;	\
+})
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+	return regs->r0;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#define orig_r8_IS_SCALL		0x0001
+#define orig_r8_IS_SCALL_RESTARTED	0x0002
+#define orig_r8_IS_BRKPT		0x0004
+#define orig_r8_IS_EXCPN		0x0004
+#define orig_r8_IS_IRQ1			0x0010
+#define orig_r8_IS_IRQ2			0x0020
+
+#endif /* __ASM_PTRACE_H */
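
A short sketch of how the event flags above are typically consumed, e.g. by
syscall-restart handling. This is illustrative only; the function is made up
and the real users live in the entry/signal code, which is not part of this
excerpt:

/* Hypothetical consumer of the pt_regs event encoding defined above */
static int should_restart_syscall(struct pt_regs *regs)
{
	if (!in_syscall(regs))		/* only a syscall can be restarted */
		return 0;
	if (!syscall_restartable(regs))	/* already restarted once */
		return 0;

	syscall_wont_restart(regs);	/* mark it, so this happens only once */
	return 1;
}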

+ 18 - 0
arch/arc/include/asm/sections.h

@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SECTIONS_H
+#define _ASM_ARC_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char _int_vec_base_lds[];
+extern char __arc_dccm_base[];
+extern char __dtb_start[];
+
+#endif

Some files were not shown because too many files changed in this diff