
Merge branch 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm

* 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm: (91 commits)
  ARM: 6806/1: irq: introduce entry and exit functions for chained handlers
  ARM: 6781/1: Thumb-2: Work around buggy Thumb-2 short branch relocations in gas
  ARM: 6747/1: P2V: Thumb2 support
  ARM: 6798/1: aout-core: zero thread debug registers in a.out core dump
  ARM: 6796/1: Footbridge: Fix I/O mappings for NOMMU mode
  ARM: 6784/1: errata: no automatic Store Buffer drain on Cortex-A9
  ARM: 6772/1: errata: possible fault MMU translations following an ASID switch
  ARM: 6776/1: mach-ux500: activate fix for errata 753970
  ARM: 6794/1: SPEAr: Append UL to device address macros.
  ARM: 6793/1: SPEAr: Remove unused *_SIZE macros from spear*.h files
  ARM: 6792/1: SPEAr: Replace SIZE macro's with SZ_4K macros
  ARM: 6791/1: SPEAr3xx: Declare device structures after shirq code
  ARM: 6790/1: SPEAr: Clock Framework: Rename usbd clock and align apb_clk entry
  ARM: 6789/1: SPEAr3xx: Rename sdio to sdhci
  ARM: 6788/1: SPEAr: Include mach/hardware.h instead of mach/spear.h
  ARM: 6787/1: SPEAr: Reorder #includes in .h & .c files.
  ARM: 6681/1: SPEAr: add debugfs support to clk API
  ARM: 6703/1: SPEAr: update clk API support
  ARM: 6679/1: SPEAr: make clk API functions more generic
  ARM: 6737/1: SPEAr: formalized timer support
  ...
Linus Torvalds, 14 years ago
parent commit 16d8775700
100 files changed, 2566 insertions(+), 1686 deletions(-)
  1. Documentation/arm/SH-Mobile/Makefile (+8, -0)
  2. Documentation/arm/SH-Mobile/vrl4.c (+169, -0)
  3. Documentation/arm/SH-Mobile/zboot-rom-mmcif.txt (+29, -0)
  4. arch/arm/Kconfig (+119, -15)
  5. arch/arm/Makefile (+8, -1)
  6. arch/arm/boot/Makefile (+4, -0)
  7. arch/arm/boot/compressed/Makefile (+19, -2)
  8. arch/arm/boot/compressed/head-shmobile.S (+30, -0)
  9. arch/arm/boot/compressed/head-vt8500.S (+46, -0)
  10. arch/arm/boot/compressed/head.S (+116, -135)
  11. arch/arm/boot/compressed/misc.c (+1, -1)
  12. arch/arm/boot/compressed/mmcif-sh7372.c (+87, -0)
  13. arch/arm/boot/compressed/vmlinux.lds.in (+0, -3)
  14. arch/arm/common/gic.c (+59, -13)
  15. arch/arm/include/asm/a.out-core.h (+1, -5)
  16. arch/arm/include/asm/bitops.h (+22, -38)
  17. arch/arm/include/asm/cacheflush.h (+4, -132)
  18. arch/arm/include/asm/cpu-multi32.h (+0, -69)
  19. arch/arm/include/asm/cpu-single.h (+0, -44)
  20. arch/arm/include/asm/cputype.h (+2, -1)
  21. arch/arm/include/asm/fncpy.h (+94, -0)
  22. arch/arm/include/asm/glue-cache.h (+146, -0)
  23. arch/arm/include/asm/glue-df.h (+110, -0)
  24. arch/arm/include/asm/glue-pf.h (+57, -0)
  25. arch/arm/include/asm/glue-proc.h (+264, -0)
  26. arch/arm/include/asm/glue.h (+0, -138)
  27. arch/arm/include/asm/hardware/gic.h (+1, -0)
  28. arch/arm/include/asm/highmem.h (+27, -2)
  29. arch/arm/include/asm/mach/irq.h (+31, -0)
  30. arch/arm/include/asm/memory.h (+61, -14)
  31. arch/arm/include/asm/module.h (+25, -2)
  32. arch/arm/include/asm/outercache.h (+1, -0)
  33. arch/arm/include/asm/proc-fns.h (+71, -235)
  34. arch/arm/include/asm/processor.h (+1, -13)
  35. arch/arm/include/asm/ptrace.h (+0, -2)
  36. arch/arm/include/asm/smp_scu.h (+7, -0)
  37. arch/arm/include/asm/spinlock.h (+41, -12)
  38. arch/arm/include/asm/system.h (+9, -8)
  39. arch/arm/include/asm/tls.h (+5, -6)
  40. arch/arm/include/asm/traps.h (+1, -0)
  41. arch/arm/include/asm/user.h (+1, -1)
  42. arch/arm/kernel/Makefile (+1, -0)
  43. arch/arm/kernel/armksyms.c (+10, -12)
  44. arch/arm/kernel/asm-offsets.c (+11, -0)
  45. arch/arm/kernel/bios32.c (+5, -0)
  46. arch/arm/kernel/debug.S (+1, -1)
  47. arch/arm/kernel/entry-armv.S (+2, -1)
  48. arch/arm/kernel/entry-header.S (+7, -7)
  49. arch/arm/kernel/head-common.S (+0, -90)
  50. arch/arm/kernel/head-nommu.S (+0, -3)
  51. arch/arm/kernel/head.S (+160, -31)
  52. arch/arm/kernel/irq.c (+31, -19)
  53. arch/arm/kernel/module.c (+21, -14)
  54. arch/arm/kernel/perf_event_v6.c (+2, -2)
  55. arch/arm/kernel/ptrace.c (+1, -382)
  56. arch/arm/kernel/ptrace.h (+0, -37)
  57. arch/arm/kernel/return_address.c (+1, -0)
  58. arch/arm/kernel/setup.c (+55, -13)
  59. arch/arm/kernel/signal.c (+0, -9)
  60. arch/arm/kernel/sleep.S (+134, -0)
  61. arch/arm/kernel/smp_scu.c (+23, -0)
  62. arch/arm/kernel/tcm.c (+1, -1)
  63. arch/arm/kernel/traps.c (+2, -2)
  64. arch/arm/kernel/vmlinux.lds.S (+4, -0)
  65. arch/arm/lib/bitops.h (+30, -20)
  66. arch/arm/lib/changebit.S (+2, -8)
  67. arch/arm/lib/clearbit.S (+2, -9)
  68. arch/arm/lib/setbit.S (+2, -9)
  69. arch/arm/lib/testchangebit.S (+3, -6)
  70. arch/arm/lib/testclearbit.S (+3, -6)
  71. arch/arm/lib/testsetbit.S (+3, -6)
  72. arch/arm/mach-aaec2000/include/mach/memory.h (+1, -1)
  73. arch/arm/mach-at91/board-snapper9260.c (+1, -0)
  74. arch/arm/mach-at91/include/mach/gpio.h (+2, -9)
  75. arch/arm/mach-at91/include/mach/memory.h (+1, -1)
  76. arch/arm/mach-bcmring/include/mach/hardware.h (+1, -1)
  77. arch/arm/mach-bcmring/include/mach/memory.h (+1, -1)
  78. arch/arm/mach-clps711x/include/mach/memory.h (+1, -1)
  79. arch/arm/mach-cns3xxx/include/mach/memory.h (+1, -1)
  80. arch/arm/mach-davinci/include/mach/memory.h (+2, -2)
  81. arch/arm/mach-dove/Kconfig (+1, -1)
  82. arch/arm/mach-dove/include/mach/memory.h (+1, -1)
  83. arch/arm/mach-ebsa110/include/mach/memory.h (+1, -1)
  84. arch/arm/mach-ep93xx/edb93xx.c (+116, -0)
  85. arch/arm/mach-ep93xx/gpio.c (+29, -4)
  86. arch/arm/mach-ep93xx/include/mach/gpio.h (+0, -2)
  87. arch/arm/mach-ep93xx/include/mach/memory.h (+5, -5)
  88. arch/arm/mach-footbridge/dc21285-timer.c (+66, -18)
  89. arch/arm/mach-footbridge/include/mach/hardware.h (+14, -7)
  90. arch/arm/mach-footbridge/include/mach/io.h (+8, -2)
  91. arch/arm/mach-footbridge/include/mach/memory.h (+1, -1)
  92. arch/arm/mach-footbridge/isa-timer.c (+85, -44)
  93. arch/arm/mach-gemini/board-nas4220b.c (+1, -0)
  94. arch/arm/mach-gemini/board-rut1xx.c (+1, -0)
  95. arch/arm/mach-gemini/board-wbd111.c (+1, -0)
  96. arch/arm/mach-gemini/board-wbd222.c (+1, -0)
  97. arch/arm/mach-gemini/common.h (+1, -0)
  98. arch/arm/mach-gemini/devices.c (+26, -0)
  99. arch/arm/mach-gemini/include/mach/memory.h (+2, -2)
  100. arch/arm/mach-h720x/include/mach/memory.h (+1, -1)

+ 8 - 0
Documentation/arm/SH-Mobile/Makefile

@@ -0,0 +1,8 @@
+BIN := vrl4
+
+.PHONY: all
+all: $(BIN)
+
+.PHONY: clean
+clean:
+	rm -f *.o $(BIN)

+ 169 - 0
Documentation/arm/SH-Mobile/vrl4.c

@@ -0,0 +1,169 @@
+/*
+ * vrl4 format generator
+ *
+ * Copyright (C) 2010 Simon Horman
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+/*
+ * usage: vrl4 < zImage > out
+ *	  dd if=out of=/dev/sdx bs=512 seek=1 # Write the image to sector 1
+ *
+ * Reads a zImage from stdin and writes a vrl4 image to stdout.
+ * In practice this means writing a padded vrl4 header to stdout followed
+ * by the zImage.
+ *
+ * The padding places the zImage at ALIGN bytes into the output.
+ * The vrl4 uses ALIGN + START_BASE as the start_address.
+ * This is where the mask ROM will jump to after verifying the header.
+ *
+ * The header sets copy_size to min(sizeof(zImage), MAX_BOOT_PROG_LEN) + ALIGN.
+ * That is, the mask ROM will load the padded header (ALIGN bytes)
+ * and then MAX_BOOT_PROG_LEN bytes of the image, or the entire image,
+ * whichever is smaller.
+ *
+ * The zImage is not modified in any way.
+ */
+
+#define _BSD_SOURCE
+#include <endian.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <errno.h>
+
+struct hdr {
+	uint32_t magic1;
+	uint32_t reserved1;
+	uint32_t magic2;
+	uint32_t reserved2;
+	uint16_t copy_size;
+	uint16_t boot_options;
+	uint32_t reserved3;
+	uint32_t start_address;
+	uint32_t reserved4;
+	uint32_t reserved5;
+	char     reserved6[308];
+};
+
+#define DECLARE_HDR(h)					\
+	struct hdr (h) = {				\
+		.magic1 =	htole32(0xea000000),	\
+		.reserved1 =	htole32(0x56),		\
+		.magic2 =	htole32(0xe59ff008),	\
+		.reserved3 =	htole16(0x1) }
+
+/* Align to 512 bytes, the MMCIF sector size */
+#define ALIGN_BITS	9
+#define ALIGN		(1 << ALIGN_BITS)
+
+#define START_BASE	0xe55b0000
+
+/*
+ * With an alignment of 512 the header uses the first sector.
+ * There is a 128 sector (64kbyte) limit on the data loaded by the mask ROM.
+ * So there are 127 sectors left for the boot programme. But in practice
+ * Only a small portion of a zImage is needed, 16 sectors should be more
+ * than enough.
+ *
+ * Note that this sets how much of the zImage is copied by the mask ROM.
+ * The entire zImage is present after the header and is loaded
+ * by the code in the boot program (which is the first portion of the zImage).
+ */
+#define	MAX_BOOT_PROG_LEN (16 * 512)
+
+#define ROUND_UP(x)	((x + ALIGN - 1) & ~(ALIGN - 1))
+
+ssize_t do_read(int fd, void *buf, size_t count)
+{
+	size_t offset = 0;
+	ssize_t l;
+
+	while (offset < count) {
+		l = read(fd, buf + offset, count - offset);
+		if (!l)
+			break;
+		if (l < 0) {
+			if (errno == EAGAIN || errno == EWOULDBLOCK)
+				continue;
+			perror("read");
+			return -1;
+		}
+		offset += l;
+	}
+
+	return offset;
+}
+
+ssize_t do_write(int fd, const void *buf, size_t count)
+{
+	size_t offset = 0;
+	ssize_t l;
+
+	while (offset < count) {
+		l = write(fd, buf + offset, count - offset);
+		if (l < 0) {
+			if (errno == EAGAIN || errno == EWOULDBLOCK)
+				continue;
+			perror("write");
+			return -1;
+		}
+		offset += l;
+	}
+
+	return offset;
+}
+
+ssize_t write_zero(int fd, size_t len)
+{
+	size_t i = len;
+
+	while (i--) {
+		const char x = 0;
+		if (do_write(fd, &x, 1) < 0)
+			return -1;
+	}
+
+	return len;
+}
+
+int main(void)
+{
+	DECLARE_HDR(hdr);
+	char boot_program[MAX_BOOT_PROG_LEN];
+	size_t aligned_hdr_len, aligned_prog_len;
+	ssize_t prog_len;
+
+	prog_len = do_read(0, boot_program, sizeof(boot_program));
+	if (prog_len <= 0)
+		return -1;
+
+	aligned_hdr_len = ROUND_UP(sizeof(hdr));
+	hdr.start_address = htole32(START_BASE + aligned_hdr_len);
+	aligned_prog_len = ROUND_UP(prog_len);
+	hdr.copy_size = htole16(aligned_hdr_len + aligned_prog_len);
+
+	if (do_write(1, &hdr, sizeof(hdr)) < 0)
+		return -1;
+	if (write_zero(1, aligned_hdr_len - sizeof(hdr)) < 0)
+		return -1;
+
+	if (do_write(1, boot_program, prog_len) < 0)
+		return 1;
+
+	/* Write out the rest of the kernel */
+	while (1) {
+		prog_len = do_read(0, boot_program, sizeof(boot_program));
+		if (prog_len < 0)
+			return 1;
+		if (prog_len == 0)
+			break;
+		if (do_write(1, boot_program, prog_len) < 0)
+			return 1;
+	}
+
+	return 0;
+}
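The copy_size arithmetic above is easy to sanity-check in isolation. Here is a minimal standalone sketch (not part of the patch; the 4000-byte program size is a made-up figure) that reproduces the ROUND_UP math from vrl4.c:

/* Illustration only: the same alignment math as vrl4.c, on sample numbers. */
#include <stdio.h>

#define ALIGN		(1 << 9)	/* 512-byte MMCIF sectors */
#define ROUND_UP(x)	(((x) + ALIGN - 1) & ~(ALIGN - 1))

int main(void)
{
	unsigned long hdr_len  = 344;	/* sizeof(struct hdr) above */
	unsigned long prog_len = 4000;	/* hypothetical boot program size */

	printf("aligned header:  %lu\n", ROUND_UP(hdr_len));	/* 512: one sector */
	printf("aligned program: %lu\n", ROUND_UP(prog_len));	/* 4096: 8 sectors */
	/* copy_size is what the mask ROM copies: padded header + program */
	printf("copy_size:       %lu\n", ROUND_UP(hdr_len) + ROUND_UP(prog_len));
	return 0;
}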

+ 29 - 0
Documentation/arm/SH-Mobile/zboot-rom-mmcif.txt

@@ -0,0 +1,29 @@
+ROM-able zImage boot from MMC
+-----------------------------
+
+A ROM-able zImage compiled with ZBOOT_ROM_MMCIF may be written to MMC, and
+SuperH Mobile ARM will boot directly from the MMCIF hardware block.
+
+This is achieved by the mask ROM loading the first portion of the image into
+MERAM and then jumping to it. This portion contains loader code which
+copies the entire image to SDRAM and jumps to it. From there the zImage
+boot code proceeds as normal, uncompressing the image into its final
+location and then jumping to it.
+
+This code has been tested on an AP4EB board using the developer 1A eMMC
+boot mode which is configured using the following jumper settings.
+The board used for testing required a patched mask ROM in order for
+this mode to function.
+
+   8 7 6 5 4 3 2 1
+   x|x|x|x|x| |x|
+S4 -+-+-+-+-+-+-+-
+    | | | | |x| |x on
+
+The zImage must be written to the MMC card at sector 1 (512 bytes) in
+vrl4 format. A utility vrl4 is supplied to accomplish this.
+
+e.g.
+	vrl4 < zImage | dd of=/dev/sdX bs=512 seek=1
+
+A dual-voltage MMC 4.0 card was used for testing.
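A quick way to check a written card is to read the header back from sector 1 and compare the magic words that vrl4.c emits. A minimal sketch (illustrative only, not a utility shipped with the kernel):

/* Sketch: verify a vrl4 header at sector 1 of a block device. */
#define _DEFAULT_SOURCE
#include <endian.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	uint32_t words[3];
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || pread(fd, words, sizeof(words), 512) != sizeof(words))
		return 1;
	/* magic1 at offset 0, magic2 at offset 8, as in struct hdr above */
	if (le32toh(words[0]) != 0xea000000 || le32toh(words[2]) != 0xe59ff008) {
		fprintf(stderr, "no vrl4 header found\n");
		return 1;
	}
	printf("vrl4 header OK\n");
	return 0;
}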

+ 119 - 15
arch/arm/Kconfig

@@ -7,7 +7,7 @@ config ARM
 	select HAVE_MEMBLOCK
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
-	select GENERIC_ATOMIC64 if (!CPU_32v6K || !AEABI)
+	select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI)
 	select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
 	select HAVE_ARCH_KGDB
 	select HAVE_KPROBES if (!XIP_KERNEL && !THUMB2_KERNEL)
@@ -24,7 +24,7 @@ config ARM
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_REGS_AND_STACK_ACCESS_API
-	select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V7))
+	select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
 	select HAVE_C_RECORDMCOUNT
 	select HAVE_GENERIC_HARDIRQS
 	select HAVE_SPARSE_IRQ
@@ -63,6 +63,10 @@ config GENERIC_CLOCKEVENTS_BROADCAST
 	depends on GENERIC_CLOCKEVENTS
 	default y if SMP
 
+config KTIME_SCALAR
+	bool
+	default y
+
 config HAVE_TCM
 	bool
 	select GENERIC_ALLOCATOR
@@ -178,11 +182,6 @@ config FIQ
 config ARCH_MTD_XIP
 	bool
 
-config ARM_L1_CACHE_SHIFT_6
-	bool
-	help
-	  Setting ARM L1 cache line size to 64 Bytes.
-
 config VECTORS_BASE
 	hex
 	default 0xffff0000 if MMU || CPU_HIGH_VECTOR
@@ -191,6 +190,22 @@ config VECTORS_BASE
 	help
 	  The base address of exception vectors.
 
+config ARM_PATCH_PHYS_VIRT
+	bool "Patch physical to virtual translations at runtime (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	depends on !XIP_KERNEL && MMU
+	depends on !ARCH_REALVIEW || !SPARSEMEM
+	help
+	  Patch phys-to-virt translation functions at runtime according to
+	  the position of the kernel in system memory.
+
+	  This can only be used with non-XIP with MMU kernels where
+	  the base of physical memory is at a 16MB boundary.
+
+config ARM_PATCH_PHYS_VIRT_16BIT
+	def_bool y
+	depends on ARM_PATCH_PHYS_VIRT && ARCH_MSM
+
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
@@ -346,7 +361,7 @@ config ARCH_FOOTBRIDGE
 	bool "FootBridge"
 	select CPU_SA110
 	select FOOTBRIDGE
-	select ARCH_USES_GETTIMEOFFSET
+	select GENERIC_CLOCKEVENTS
 	help
 	  Support for systems based on the DC21285 companion chip
 	  ("FootBridge"), such as the Simtec CATS and the Rebel NetWinder.
@@ -457,6 +472,7 @@ config ARCH_IXP4XX
 
 config ARCH_DOVE
 	bool "Marvell Dove"
+	select CPU_V6K
 	select PCI
 	select ARCH_REQUIRE_GPIOLIB
 	select GENERIC_CLOCKEVENTS
@@ -875,6 +891,16 @@ config PLAT_SPEAR
 	help
 	  Support for ST's SPEAr platform (SPEAr3xx, SPEAr6xx and SPEAr13xx).
 
+config ARCH_VT8500
+	bool "VIA/WonderMedia 85xx"
+	select CPU_ARM926T
+	select GENERIC_GPIO
+	select ARCH_HAS_CPUFREQ
+	select GENERIC_CLOCKEVENTS
+	select ARCH_REQUIRE_GPIOLIB
+	select HAVE_PWM
+	help
+	  Support for VIA/WonderMedia VT8500/WM85xx System-on-Chip.
 endchoice
 
 #
@@ -1007,6 +1033,8 @@ source "arch/arm/mach-versatile/Kconfig"
 
 source "arch/arm/mach-vexpress/Kconfig"
 
+source "arch/arm/mach-vt8500/Kconfig"
+
 source "arch/arm/mach-w90x900/Kconfig"
 
 # Definitions to make life easier
@@ -1048,7 +1076,7 @@ config XSCALE_PMU
 	default y
 
 config CPU_HAS_PMU
-	depends on (CPU_V6 || CPU_V7 || XSCALE_PMU) && \
+	depends on (CPU_V6 || CPU_V6K || CPU_V7 || XSCALE_PMU) && \
 		   (!ARCH_OMAP3 || OMAP3_EMU)
 	default y
 	bool
@@ -1064,7 +1092,7 @@ endif
 
 config ARM_ERRATA_411920
 	bool "ARM errata: Invalidation of the Instruction Cache operation can fail"
-	depends on CPU_V6
+	depends on CPU_V6 || CPU_V6K
 	help
 	  Invalidation of the Instruction Cache operation can
 	  fail. This erratum is present in 1136 (before r1p4), 1156 and 1176.
@@ -1140,7 +1168,7 @@ config ARM_ERRATA_742231
 
 config PL310_ERRATA_588369
 	bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
-	depends on CACHE_L2X0 && ARCH_OMAP4
+	depends on CACHE_L2X0
 	help
 	   The PL310 L2 cache controller implements three types of Clean &
 	   Invalidate maintenance operations: by Physical Address
@@ -1149,8 +1177,7 @@ config PL310_ERRATA_588369
 	   clean operation followed immediately by an invalidate operation,
 	   both performing to the same memory location. This functionality
 	   is not correctly implemented in PL310 as clean lines are not
-	   invalidated as a result of these operations. Note that this errata
-	   uses Texas Instrument's secure monitor api.
+	   invalidated as a result of these operations.
 
 config ARM_ERRATA_720789
 	bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
@@ -1164,6 +1191,17 @@ config ARM_ERRATA_720789
 	  tables. The workaround changes the TLB flushing routines to invalidate
 	  entries regardless of the ASID.
 
+config PL310_ERRATA_727915
+	bool "Background Clean & Invalidate by Way operation can cause data corruption"
+	depends on CACHE_L2X0
+	help
+	  PL310 implements the Clean & Invalidate by Way L2 cache maintenance
+	  operation (offset 0x7FC). This operation runs in background so that
+	  PL310 can handle normal accesses while it is in progress. Under very
+	  rare circumstances, due to this erratum, write data can be lost when
+	  PL310 treats a cacheable write transaction during a Clean &
+	  Invalidate by Way operation.
+
 config ARM_ERRATA_743622
 	bool "ARM errata: Faulty hazard checking in the Store Buffer may lead to data corruption"
 	depends on CPU_V7
@@ -1202,6 +1240,28 @@ config ARM_ERRATA_753970
 	  This has the same effect as the cache sync operation: store buffer
 	  drain and waiting for all buffers empty.
 
+config ARM_ERRATA_754322
+	bool "ARM errata: possible faulty MMU translations following an ASID switch"
+	depends on CPU_V7
+	help
+	  This option enables the workaround for the 754322 Cortex-A9 (r2p*,
+	  r3p*) erratum. A speculative memory access may cause a page table walk
+	  which starts prior to an ASID switch but completes afterwards. This
+	  can populate the micro-TLB with a stale entry which may be hit with
+	  the new ASID. This workaround places two dsb instructions in the mm
+	  switching code so that no page table walks can cross the ASID switch.
+
+config ARM_ERRATA_754327
+	bool "ARM errata: no automatic Store Buffer drain"
+	depends on CPU_V7 && SMP
+	help
+	  This option enables the workaround for the 754327 Cortex-A9 (prior to
+	  r2p0) erratum. The Store Buffer does not have any automatic draining
+	  mechanism and therefore a livelock may occur if an external agent
+	  continuously polls a memory location waiting to observe an update.
+	  This workaround defines cpu_relax() as smp_mb(), preventing correctly
+	  written polling loops from denying visibility of updates to memory.
+
 endmenu
 
 source "arch/arm/common/Kconfig"
@@ -1275,6 +1335,7 @@ source "kernel/time/Kconfig"
 config SMP
 	bool "Symmetric Multi-Processing (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
+	depends on CPU_V6K || CPU_V7
 	depends on GENERIC_CLOCKEVENTS
 	depends on REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP || \
 		 MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \
@@ -1386,7 +1447,7 @@ config HZ
 
 config THUMB2_KERNEL
 	bool "Compile the kernel in Thumb-2 mode (EXPERIMENTAL)"
-	depends on CPU_V7 && !CPU_V6 && EXPERIMENTAL
+	depends on CPU_V7 && !CPU_V6 && !CPU_V6K && EXPERIMENTAL
 	select AEABI
 	select ARM_ASM_UNIFIED
 	help
@@ -1396,6 +1457,37 @@ config THUMB2_KERNEL
 
 	  If unsure, say N.
 
+config THUMB2_AVOID_R_ARM_THM_JUMP11
+	bool "Work around buggy Thumb-2 short branch relocations in gas"
+	depends on THUMB2_KERNEL && MODULES
+	default y
+	help
+	  Various binutils versions can resolve Thumb-2 branches to
+	  locally-defined, preemptible global symbols as short-range "b.n"
+	  branch instructions.
+
+	  This is a problem, because there's no guarantee the final
+	  destination of the symbol, or any candidate locations for a
+	  trampoline, are within range of the branch.  For this reason, the
+	  kernel does not support fixing up the R_ARM_THM_JUMP11 (102)
+	  relocation in modules at all, and it makes little sense to add
+	  support.
+
+	  The symptom is that the kernel fails with an "unsupported
+	  relocation" error when loading some modules.
+
+	  Until fixed tools are available, passing
+	  -fno-optimize-sibling-calls to gcc should prevent gcc generating
+	  code which hits this problem, at the cost of a bit of extra runtime
+	  stack usage in some cases.
+
+	  The problem is described in more detail at:
+	      https://bugs.launchpad.net/binutils-linaro/+bug/725126
+
+	  Only Thumb-2 kernels are affected.
+
+	  Unless you are sure your tools don't have this problem, say Y.
+
 config ARM_ASM_UNIFIED
 	bool
 
@@ -1644,6 +1736,18 @@ config ZBOOT_ROM
 	  Say Y here if you intend to execute your compressed kernel image
 	  (zImage) directly from ROM or flash.  If unsure, say N.
 
+config ZBOOT_ROM_MMCIF
+	bool "Include MMCIF loader in zImage (EXPERIMENTAL)"
+	depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL
+	help
+	  Say Y here to include experimental MMCIF loading code in the
+	  ROM-able zImage. With this enabled it is possible to write the
+	  ROM-able zImage kernel image to an MMC card and boot the
+	  kernel straight from the reset vector. At reset the processor
+	  Mask ROM will load the first part of the ROM-able zImage
+	  which in turn loads the rest of the kernel image to RAM using the
+	  MMCIF hardware block.
+
 config CMDLINE
 	string "Default kernel command string"
 	default ""
@@ -1877,7 +1981,7 @@ config FPE_FASTFPE
 
 config VFP
 	bool "VFP-format floating point maths"
-	depends on CPU_V6 || CPU_ARM926T || CPU_V7 || CPU_FEROCEON
+	depends on CPU_V6 || CPU_V6K || CPU_ARM926T || CPU_V7 || CPU_FEROCEON
 	help
 	  Say Y to include VFP support code in the kernel. This is needed
 	  if your hardware includes a VFP unit.

+ 8 - 1
arch/arm/Makefile

@@ -89,6 +89,7 @@ tune-$(CONFIG_CPU_XSCALE)	:=$(call cc-option,-mtune=xscale,-mtune=strongarm110)
 tune-$(CONFIG_CPU_XSC3)		:=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
 tune-$(CONFIG_CPU_FEROCEON)	:=$(call cc-option,-mtune=marvell-f,-mtune=xscale)
 tune-$(CONFIG_CPU_V6)		:=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
+tune-$(CONFIG_CPU_V6K)		:=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
 
 ifeq ($(CONFIG_AEABI),y)
 CFLAGS_ABI	:=-mabi=aapcs-linux -mno-thumb-interwork
@@ -105,6 +106,10 @@ AFLAGS_AUTOIT	:=$(call as-option,-Wa$(comma)-mimplicit-it=always,-Wa$(comma)-mau
 AFLAGS_NOWARN	:=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
 CFLAGS_THUMB2	:=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
 AFLAGS_THUMB2	:=$(CFLAGS_THUMB2) -Wa$(comma)-mthumb
+# Work around buggy relocation from gas if requested:
+ifeq ($(CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11),y)
+CFLAGS_MODULE	+=-fno-optimize-sibling-calls
+endif
 endif
 
 # Need -Uarm for gcc < 3.x
@@ -190,6 +195,7 @@ machine-$(CONFIG_ARCH_U300)		:= u300
 machine-$(CONFIG_ARCH_U8500)		:= ux500
 machine-$(CONFIG_ARCH_VERSATILE)	:= versatile
 machine-$(CONFIG_ARCH_VEXPRESS)		:= vexpress
+machine-$(CONFIG_ARCH_VT8500)		:= vt8500
 machine-$(CONFIG_ARCH_W90X900)		:= w90x900
 machine-$(CONFIG_ARCH_NUC93X)		:= nuc93x
 machine-$(CONFIG_FOOTBRIDGE)		:= footbridge
@@ -280,7 +286,7 @@ bzImage: zImage
 zImage Image xipImage bootpImage uImage: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
-zinstall install: vmlinux
+zinstall uinstall install: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
 
 # We use MRPROPER_FILES and CLEAN_FILES now
@@ -301,6 +307,7 @@ define archhelp
   echo  '                  (supply initrd image via make variable INITRD=<path>)'
   echo  '  install       - Install uncompressed kernel'
   echo  '  zinstall      - Install compressed kernel'
+  echo  '  uinstall      - Install U-Boot wrapped compressed kernel'
   echo  '                  Install using (your) ~/bin/$(INSTALLKERNEL) or'
   echo  '                  (distribution) /sbin/$(INSTALLKERNEL) or'
   echo  '                  install to $$(INSTALL_PATH) and run lilo'

+ 4 - 0
arch/arm/boot/Makefile

@@ -99,6 +99,10 @@ zinstall: $(obj)/zImage
 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
 	$(obj)/zImage System.map "$(INSTALL_PATH)"
 
+uinstall: $(obj)/uImage
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+	$(obj)/uImage System.map "$(INSTALL_PATH)"
+
 zi:
 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
 	$(obj)/zImage System.map "$(INSTALL_PATH)"

+ 19 - 2
arch/arm/boot/compressed/Makefile

@@ -4,9 +4,20 @@
 # create a compressed vmlinuz image from the original vmlinux
 #
 
+OBJS		=
+
+# Ensure that mmcif loader code appears early in the image
+# to minimise the number of blocks that have to be read in
+# order to load it.
+ifeq ($(CONFIG_ZBOOT_ROM_MMCIF),y)
+ifeq ($(CONFIG_ARCH_SH7372),y)
+OBJS		+= mmcif-sh7372.o
+endif
+endif
+
 AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET)
 HEAD	= head.o
-OBJS	= misc.o decompress.o
+OBJS	+= misc.o decompress.o
 FONTC	= $(srctree)/drivers/video/console/font_acorn_8x8.c
 
 #
@@ -29,6 +40,10 @@ ifeq ($(CONFIG_ARCH_SA1100),y)
 OBJS		+= head-sa1100.o
 endif
 
+ifeq ($(CONFIG_ARCH_VT8500),y)
+OBJS		+= head-vt8500.o
+endif
+
 ifeq ($(CONFIG_CPU_XSCALE),y)
 OBJS		+= head-xscale.o
 endif
@@ -83,9 +98,11 @@ endif
 EXTRA_CFLAGS  := -fpic -fno-builtin
 EXTRA_AFLAGS  := -Wa,-march=all
 
+# Provide size of uncompressed kernel to the decompressor via a linker symbol.
+LDFLAGS_vmlinux = --defsym _image_size=$(shell stat -c "%s" $(obj)/../Image)
 # Supply ZRELADDR to the decompressor via a linker symbol.
 ifneq ($(CONFIG_AUTO_ZRELADDR),y)
-LDFLAGS_vmlinux := --defsym zreladdr=$(ZRELADDR)
+LDFLAGS_vmlinux += --defsym zreladdr=$(ZRELADDR)
 endif
 ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
 LDFLAGS_vmlinux += --be8

+ 30 - 0
arch/arm/boot/compressed/head-shmobile.S

@@ -25,6 +25,36 @@
 	/* load board-specific initialization code */
 #include <mach/zboot.h>
 
+#ifdef CONFIG_ZBOOT_ROM_MMCIF
+	/* Load image from MMC */
+	adr	sp, __tmp_stack + 128
+	ldr	r0, __image_start
+	ldr	r1, __image_end
+	subs	r1, r1, r0
+	ldr	r0, __load_base
+	bl	mmcif_loader
+
+	/* Jump to loaded code */
+	ldr	r0, __loaded
+	ldr	r1, __image_start
+	sub	r0, r0, r1
+	ldr	r1, __load_base
+	add	pc, r0, r1
+
+__image_start:
+	.long	_start
+__image_end:
+	.long	_got_end
+__load_base:
+	.long	CONFIG_MEMORY_START + 0x02000000 @ Load at 32Mb into SDRAM
+__loaded:
+	.long	__continue
+	.align
+__tmp_stack:
+	.space	128
+__continue:
+#endif /* CONFIG_ZBOOT_ROM_MMCIF */
+
 	b	1f
 __atags:@ tag #1
 	.long	12			@ tag->hdr.size = tag_size(tag_core);

+ 46 - 0
arch/arm/boot/compressed/head-vt8500.S

@@ -0,0 +1,46 @@
+/*
+ * linux/arch/arm/boot/compressed/head-vt8500.S
+ *
+ * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * VIA VT8500 specific tweaks. This is merged into head.S by the linker.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/mach-types.h>
+
+		.section        ".start", "ax"
+
+__VT8500_start:
+	@ Compare the SCC ID register against a list of known values
+	ldr	r1, .SCCID
+	ldr	r3, [r1]
+
+	@ VT8500 override
+	ldr	r4, .VT8500SCC
+	cmp	r3, r4
+	ldreq	r7, .ID_BV07
+	beq	.Lendvt8500
+
+	@ WM8505 override
+	ldr	r4, .WM8505SCC
+	cmp	r3, r4
+	ldreq	r7, .ID_8505
+	beq	.Lendvt8500
+
+	@ Otherwise, leave the bootloader's machine id untouched
+
+.SCCID:
+	.word	0xd8120000
+.VT8500SCC:
+	.word	0x34000102
+.WM8505SCC:
+	.word	0x34260103
+
+.ID_BV07:
+	.word	MACH_TYPE_BV07
+.ID_8505:
+	.word	MACH_TYPE_WM8505_7IN_NETBOOK
+
+.Lendvt8500:

+ 116 - 135
arch/arm/boot/compressed/head.S

@@ -21,7 +21,7 @@
 
 #if defined(CONFIG_DEBUG_ICEDCC)
 
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 		.macro	loadsp, rb, tmp
 		.endm
 		.macro	writeb, ch, rb
@@ -128,14 +128,14 @@ wait:		mrc	p14, 0, pc, c0, c1, 0
 		.arm				@ Always enter in ARM state
 start:
 		.type	start,#function
- THUMB(		adr	r12, BSYM(1f)	)
- THUMB(		bx	r12		)
- THUMB(		.rept	6		)
- ARM(		.rept	8		)
+		.rept	7
 		mov	r0, r0
 		.endr
+   ARM(		mov	r0, r0		)
+   ARM(		b	1f		)
+ THUMB(		adr	r12, BSYM(1f)	)
+ THUMB(		bx	r12		)
 
-		b	1f
 		.word	0x016f2818		@ Magic numbers to help the loader
 		.word	start			@ absolute load/run zImage address
 		.word	_edata			@ zImage end address
@@ -174,9 +174,7 @@ not_angel:
 		 */
 
 		.text
-		adr	r0, LC0
-		ldmia	r0, {r1, r2, r3, r5, r6, r11, ip}
-		ldr	sp, [r0, #28]
+
 #ifdef CONFIG_AUTO_ZRELADDR
 		@ determine final kernel image address
 		mov	r4, pc
@@ -185,35 +183,108 @@ not_angel:
 #else
 		ldr	r4, =zreladdr
 #endif
-		subs	r0, r0, r1		@ calculate the delta offset
 
-						@ if delta is zero, we are
-		beq	not_relocated		@ running at the address we
-						@ were linked at.
+		bl	cache_on
+
+restart:	adr	r0, LC0
+		ldmia	r0, {r1, r2, r3, r5, r6, r9, r11, r12}
+		ldr	sp, [r0, #32]
+
+		/*
+		 * We might be running at a different address.  We need
+		 * to fix up various pointers.
+		 */
+		sub	r0, r0, r1		@ calculate the delta offset
+		add	r5, r5, r0		@ _start
+		add	r6, r6, r0		@ _edata
 
+#ifndef CONFIG_ZBOOT_ROM
+		/* malloc space is above the relocated stack (64k max) */
+		add	sp, sp, r0
+		add	r10, sp, #0x10000
+#else
 		/*
-		 * We're running at a different address.  We need to fix
-		 * up various pointers:
-		 *   r5 - zImage base address (_start)
-		 *   r6 - size of decompressed image
-		 *   r11 - GOT start
-		 *   ip - GOT end
+		 * With ZBOOT_ROM the bss/stack is non relocatable,
+		 * but someone could still run this code from RAM,
+		 * in which case our reference is _edata.
 		 */
-		add	r5, r5, r0
+		mov	r10, r6
+#endif
+
+/*
+ * Check to see if we will overwrite ourselves.
+ *   r4  = final kernel address
+ *   r5  = start of this image
+ *   r9  = size of decompressed image
+ *   r10 = end of this image, including  bss/stack/malloc space if non XIP
+ * We basically want:
+ *   r4 >= r10 -> OK
+ *   r4 + image length <= r5 -> OK
+ */
+		cmp	r4, r10
+		bhs	wont_overwrite
+		add	r10, r4, r9
+		cmp	r10, r5
+		bls	wont_overwrite
+
+/*
+ * Relocate ourselves past the end of the decompressed kernel.
+ *   r5  = start of this image
+ *   r6  = _edata
+ *   r10 = end of the decompressed kernel
+ * Because we always copy ahead, we need to do it from the end and go
+ * backward in case the source and destination overlap.
+ */
+		/* Round up to next 256-byte boundary. */
+		add	r10, r10, #256
+		bic	r10, r10, #255
+
+		sub	r9, r6, r5		@ size to copy
+		add	r9, r9, #31		@ rounded up to a multiple
+		bic	r9, r9, #31		@ ... of 32 bytes
+		add	r6, r9, r5
+		add	r9, r9, r10
+
+1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
+		cmp	r6, r5
+		stmdb	r9!, {r0 - r3, r10 - r12, lr}
+		bhi	1b
+
+		/* Preserve offset to relocated code. */
+		sub	r6, r9, r6
+
+		bl	cache_clean_flush
+
+		adr	r0, BSYM(restart)
+		add	r0, r0, r6
+		mov	pc, r0
+
+wont_overwrite:
+/*
+ * If delta is zero, we are running at the address we were linked at.
+ *   r0  = delta
+ *   r2  = BSS start
+ *   r3  = BSS end
+ *   r4  = kernel execution address
+ *   r7  = architecture ID
+ *   r8  = atags pointer
+ *   r11 = GOT start
+ *   r12 = GOT end
+ *   sp  = stack pointer
+ */
+		teq	r0, #0
+		beq	not_relocated
 		add	r11, r11, r0
-		add	ip, ip, r0
+		add	r12, r12, r0
 
 #ifndef CONFIG_ZBOOT_ROM
 		/*
 		 * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
 		 * we need to fix up pointers into the BSS region.
-		 *   r2 - BSS start
-		 *   r3 - BSS end
-		 *   sp - stack pointer
+		 * Note that the stack pointer has already been fixed up.
 		 */
 		add	r2, r2, r0
 		add	r3, r3, r0
-		add	sp, sp, r0
 
 		/*
 		 * Relocate all entries in the GOT table.
@@ -221,7 +292,7 @@ not_angel:
 1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
 		add	r1, r1, r0		@ table.  This fixes up the
 		str	r1, [r11], #4		@ C references.
-		cmp	r11, ip
+		cmp	r11, r12
 		blo	1b
 #else
 
@@ -234,7 +305,7 @@ not_angel:
 		cmphs	r3, r1			@ _end < entry
 		addlo	r1, r1, r0		@ table.  This fixes up the
 		str	r1, [r11], #4		@ C references.
-		cmp	r11, ip
+		cmp	r11, r12
 		blo	1b
 #endif
 
@@ -246,76 +317,24 @@ not_relocated:	mov	r0, #0
 		cmp	r2, r3
 		blo	1b
 
-		/*
-		 * The C runtime environment should now be setup
-		 * sufficiently.  Turn the cache on, set up some
-		 * pointers, and start decompressing.
-		 */
-		bl	cache_on
-
-		mov	r1, sp			@ malloc space above stack
-		add	r2, sp, #0x10000	@ 64k max
-
 /*
- * Check to see if we will overwrite ourselves.
- *   r4 = final kernel address
- *   r5 = start of this image
- *   r6 = size of decompressed image
- *   r2 = end of malloc space (and therefore this image)
- * We basically want:
- *   r4 >= r2 -> OK
- *   r4 + image length <= r5 -> OK
+ * The C runtime environment should now be setup sufficiently.
+ * Set up some pointers, and start decompressing.
+ *   r4  = kernel execution address
+ *   r7  = architecture ID
+ *   r8  = atags pointer
  */
-		cmp	r4, r2
-		bhs	wont_overwrite
-		add	r0, r4, r6
-		cmp	r0, r5
-		bls	wont_overwrite
-
-		mov	r5, r2			@ decompress after malloc space
-		mov	r0, r5
+		mov	r0, r4
+		mov	r1, sp			@ malloc space above stack
+		add	r2, sp, #0x10000	@ 64k max
 		mov	r3, r7
 		bl	decompress_kernel
-
-		add	r0, r0, #127 + 128	@ alignment + stack
-		bic	r0, r0, #127		@ align the kernel length
-/*
- * r0     = decompressed kernel length
- * r1-r3  = unused
- * r4     = kernel execution address
- * r5     = decompressed kernel start
- * r7     = architecture ID
- * r8     = atags pointer
- * r9-r12,r14 = corrupted
- */
-		add	r1, r5, r0		@ end of decompressed kernel
-		adr	r2, reloc_start
-		ldr	r3, LC1
-		add	r3, r2, r3
-1:		ldmia	r2!, {r9 - r12, r14}	@ copy relocation code
-		stmia	r1!, {r9 - r12, r14}
-		ldmia	r2!, {r9 - r12, r14}
-		stmia	r1!, {r9 - r12, r14}
-		cmp	r2, r3
-		blo	1b
-		mov	sp, r1
-		add	sp, sp, #128		@ relocate the stack
-
 		bl	cache_clean_flush
- ARM(		add	pc, r5, r0		) @ call relocation code
- THUMB(		add	r12, r5, r0		)
- THUMB(		mov	pc, r12			) @ call relocation code
-
-/*
- * We're not in danger of overwriting ourselves.  Do this the simple way.
- *
- * r4     = kernel execution address
- * r7     = architecture ID
- */
-wont_overwrite:	mov	r0, r4
-		mov	r3, r7
-		bl	decompress_kernel
-		b	call_kernel
+		bl	cache_off
+		mov	r0, #0			@ must be zero
+		mov	r1, r7			@ restore architecture number
+		mov	r2, r8			@ restore atags pointer
+		mov	pc, r4			@ call kernel
 
 		.align	2
 		.type	LC0, #object
@@ -323,11 +342,11 @@ LC0:		.word	LC0			@ r1
 		.word	__bss_start		@ r2
 		.word	_end			@ r3
 		.word	_start			@ r5
-		.word	_image_size		@ r6
+		.word	_edata			@ r6
+		.word	_image_size		@ r9
 		.word	_got_start		@ r11
 		.word	_got_end		@ ip
 		.word	user_stack_end		@ sp
-LC1:		.word	reloc_end - reloc_start
 		.size	LC0, . - LC0
 
 #ifdef CONFIG_ARCH_RPC
@@ -353,7 +372,7 @@ params:		ldr	r0, =0x10000100		@ params_phys for RPC
  * On exit,
  *  r0, r1, r2, r3, r9, r10, r12 corrupted
  * This routine must preserve:
- *  r4, r5, r6, r7, r8
+ *  r4, r7, r8
  */
 		.align	5
 cache_on:	mov	r3, #8			@ cache_on function
@@ -550,43 +569,6 @@ __common_mmu_cache_on:
 		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
 #endif
 
-/*
- * All code following this line is relocatable.  It is relocated by
- * the above code to the end of the decompressed kernel image and
- * executed there.  During this time, we have no stacks.
- *
- * r0     = decompressed kernel length
- * r1-r3  = unused
- * r4     = kernel execution address
- * r5     = decompressed kernel start
- * r7     = architecture ID
- * r8     = atags pointer
- * r9-r12,r14 = corrupted
- */
-		.align	5
-reloc_start:	add	r9, r5, r0
-		sub	r9, r9, #128		@ do not copy the stack
-		debug_reloc_start
-		mov	r1, r4
-1:
-		.rept	4
-		ldmia	r5!, {r0, r2, r3, r10 - r12, r14}	@ relocate kernel
-		stmia	r1!, {r0, r2, r3, r10 - r12, r14}
-		.endr
-
-		cmp	r5, r9
-		blo	1b
-		mov	sp, r1
-		add	sp, sp, #128		@ relocate the stack
-		debug_reloc_end
-
-call_kernel:	bl	cache_clean_flush
-		bl	cache_off
-		mov	r0, #0			@ must be zero
-		mov	r1, r7			@ restore architecture number
-		mov	r2, r8			@ restore atags pointer
-		mov	pc, r4			@ call kernel
-
 /*
  * Here follow the relocatable cache support functions for the
  * various processors.  This is a generic hook for locating an
@@ -791,7 +773,7 @@ proc_types:
 * On exit,
 *  r0, r1, r2, r3, r9, r12 corrupted
 * This routine must preserve:
- *  r4, r6, r7
+ *  r4, r7, r8
 */
 		.align	5
 cache_off:	mov	r3, #12			@ cache_off function
@@ -866,7 +848,7 @@ __armv3_mmu_cache_off:
 * On exit,
 *  r1, r2, r3, r9, r10, r11, r12 corrupted
 * This routine must preserve:
- *  r0, r4, r5, r6, r7
+ *  r4, r6, r7, r8
 */
 		.align	5
 cache_clean_flush:
@@ -1088,7 +1070,6 @@ memdump:	mov	r12, r0
 #endif
 
 		.ltorg
-reloc_end:
 
 		.align
 		.section ".stack", "aw", %nobits
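The overwrite test in the new head.S comments ("r4 >= r10 -> OK, r4 + image length <= r5 -> OK") translates to straightforward C. A sketch of the same predicate (illustration only; the real code operates on the registers named above):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * r4 = kernel_addr, r5 = image_start, r9 = image_size (decompressed),
 * r10 = image_end (including bss/stack/malloc space when not XIP).
 */
static bool wont_overwrite(uintptr_t kernel_addr, uintptr_t image_start,
			   uintptr_t image_end, size_t image_size)
{
	if (kernel_addr >= image_end)			/* kernel lands above us */
		return true;
	if (kernel_addr + image_size <= image_start)	/* or entirely below us */
		return true;
	return false;	/* overlap: relocate ourselves past the kernel first */
}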

+ 1 - 1
arch/arm/boot/compressed/misc.c

@@ -36,7 +36,7 @@ extern void error(char *x);
 
 #ifdef CONFIG_DEBUG_ICEDCC
 
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 
 static void icedcc_putc(int ch)
 {

+ 87 - 0
arch/arm/boot/compressed/mmcif-sh7372.c

@@ -0,0 +1,87 @@
+/*
+ * sh7372 MMCIF loader
+ *
+ * Copyright (C) 2010 Magnus Damm
+ * Copyright (C) 2010 Simon Horman
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/mmc/sh_mmcif.h>
+#include <mach/mmcif.h>
+
+#define MMCIF_BASE      (void __iomem *)0xe6bd0000
+
+#define PORT84CR	(void __iomem *)0xe6050054
+#define PORT85CR	(void __iomem *)0xe6050055
+#define PORT86CR	(void __iomem *)0xe6050056
+#define PORT87CR	(void __iomem *)0xe6050057
+#define PORT88CR	(void __iomem *)0xe6050058
+#define PORT89CR	(void __iomem *)0xe6050059
+#define PORT90CR	(void __iomem *)0xe605005a
+#define PORT91CR	(void __iomem *)0xe605005b
+#define PORT92CR	(void __iomem *)0xe605005c
+#define PORT99CR	(void __iomem *)0xe6050063
+
+#define SMSTPCR3	(void __iomem *)0xe615013c
+
+/* SH7372 specific MMCIF loader
+ *
+ * loads the zImage from an MMC card starting from block 1.
+ *
+ * The image must start with a vrl4 header and
+ * the zImage must start at offset 512 of the image. That is,
+ * at block 2 (=byte 1024) on the media
+ *
+ * Use the following line to write the vrl4 formatted zImage
+ * to an MMC card
+ * # dd if=vrl4.out of=/dev/sdx bs=512 seek=1
+ */
+asmlinkage void mmcif_loader(unsigned char *buf, unsigned long len)
+{
+	mmcif_init_progress();
+	mmcif_update_progress(MMCIF_PROGRESS_ENTER);
+
+	/* Initialise MMC
+	 * registers: PORT84CR-PORT92CR
+	 *            (MMCD0_0-MMCD0_7,MMCCMD0 Control)
+	 * value: 0x04 - select function 4
+	 */
+	 __raw_writeb(0x04, PORT84CR);
+	 __raw_writeb(0x04, PORT85CR);
+	 __raw_writeb(0x04, PORT86CR);
+	 __raw_writeb(0x04, PORT87CR);
+	 __raw_writeb(0x04, PORT88CR);
+	 __raw_writeb(0x04, PORT89CR);
+	 __raw_writeb(0x04, PORT90CR);
+	 __raw_writeb(0x04, PORT91CR);
+	 __raw_writeb(0x04, PORT92CR);
+
+	/* Initialise MMC
+	 * registers: PORT99CR (MMCCLK0 Control)
+	 * value: 0x10 | 0x04 - enable output | select function 4
+	 */
+	__raw_writeb(0x14, PORT99CR);
+
+	/* Enable clock to MMC hardware block */
+	__raw_writel(__raw_readl(SMSTPCR3) & ~(1 << 12), SMSTPCR3);
+
+	mmcif_update_progress(MMCIF_PROGRESS_INIT);
+
+	/* setup MMCIF hardware */
+	sh_mmcif_boot_init(MMCIF_BASE);
+
+	mmcif_update_progress(MMCIF_PROGRESS_LOAD);
+
+	/* load kernel via MMCIF interface */
+	sh_mmcif_boot_do_read(MMCIF_BASE, 2, /* Kernel is at block 2 */
+			      (len + SH_MMCIF_BBS - 1) / SH_MMCIF_BBS, buf);
+
+
+	/* Disable clock to MMC hardware block */
+	__raw_writel(__raw_readl(SMSTPCR3) | (1 << 12), SMSTPCR3);
+	mmcif_update_progress(MMCIF_PROGRESS_DONE);
+}

+ 0 - 3
arch/arm/boot/compressed/vmlinux.lds.in

@@ -43,9 +43,6 @@ SECTIONS
 
   _etext = .;
 
-  /* Assume size of decompressed image is 4x the compressed image */
-  _image_size = (_etext - _text) * 4;
-
   _got_start = .;
   .got			: { *(.got) }
   _got_end = .;

+ 59 - 13
arch/arm/common/gic.c

@@ -44,6 +44,19 @@ struct gic_chip_data {
 	void __iomem *cpu_base;
 };
 
+/*
+ * Supported arch specific GIC irq extension.
+ * Default make them NULL.
+ */
+struct irq_chip gic_arch_extn = {
+	.irq_ack	= NULL,
+	.irq_mask	= NULL,
+	.irq_unmask	= NULL,
+	.irq_retrigger	= NULL,
+	.irq_set_type	= NULL,
+	.irq_set_wake	= NULL,
+};
+
 #ifndef MAX_GIC_NR
 #define MAX_GIC_NR	1
 #endif
@@ -74,6 +87,8 @@ static inline unsigned int gic_irq(struct irq_data *d)
 static void gic_ack_irq(struct irq_data *d)
 {
 	spin_lock(&irq_controller_lock);
+	if (gic_arch_extn.irq_ack)
+		gic_arch_extn.irq_ack(d);
 	writel(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
 	spin_unlock(&irq_controller_lock);
 }
@@ -84,6 +99,8 @@ static void gic_mask_irq(struct irq_data *d)
 
 	spin_lock(&irq_controller_lock);
 	writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
+	if (gic_arch_extn.irq_mask)
+		gic_arch_extn.irq_mask(d);
 	spin_unlock(&irq_controller_lock);
 }
 
@@ -92,6 +109,8 @@ static void gic_unmask_irq(struct irq_data *d)
 	u32 mask = 1 << (d->irq % 32);
 
 	spin_lock(&irq_controller_lock);
+	if (gic_arch_extn.irq_unmask)
+		gic_arch_extn.irq_unmask(d);
 	writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
 	spin_unlock(&irq_controller_lock);
 }
@@ -116,6 +135,9 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
 
 	spin_lock(&irq_controller_lock);
 
+	if (gic_arch_extn.irq_set_type)
+		gic_arch_extn.irq_set_type(d, type);
+
 	val = readl(base + GIC_DIST_CONFIG + confoff);
 	if (type == IRQ_TYPE_LEVEL_HIGH)
 		val &= ~confmask;
@@ -141,32 +163,54 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
 	return 0;
 }
 
+static int gic_retrigger(struct irq_data *d)
+{
+	if (gic_arch_extn.irq_retrigger)
+		return gic_arch_extn.irq_retrigger(d);
+
+	return -ENXIO;
+}
+
 #ifdef CONFIG_SMP
-static int
-gic_set_cpu(struct irq_data *d, const struct cpumask *mask_val, bool force)
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+			    bool force)
 {
 	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
 	unsigned int shift = (d->irq % 4) * 8;
 	unsigned int cpu = cpumask_first(mask_val);
-	u32 val;
-	struct irq_desc *desc;
+	u32 val, mask, bit;
 
-	spin_lock(&irq_controller_lock);
-	desc = irq_to_desc(d->irq);
-	if (desc == NULL) {
-		spin_unlock(&irq_controller_lock);
+	if (cpu >= 8)
 		return -EINVAL;
-	}
+
+	mask = 0xff << shift;
+	bit = 1 << (cpu + shift);
+
+	spin_lock(&irq_controller_lock);
 	d->node = cpu;
-	val = readl(reg) & ~(0xff << shift);
-	val |= 1 << (cpu + shift);
-	writel(val, reg);
+	val = readl(reg) & ~mask;
+	writel(val | bit, reg);
 	spin_unlock(&irq_controller_lock);
 
 	return 0;
 }
 #endif
 
+#ifdef CONFIG_PM
+static int gic_set_wake(struct irq_data *d, unsigned int on)
+{
+	int ret = -ENXIO;
+
+	if (gic_arch_extn.irq_set_wake)
+		ret = gic_arch_extn.irq_set_wake(d, on);
+
+	return ret;
+}
+
+#else
+#define gic_set_wake	NULL
+#endif
+
 static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
 {
 	struct gic_chip_data *chip_data = get_irq_data(irq);
@@ -202,9 +246,11 @@ static struct irq_chip gic_chip = {
 	.irq_mask		= gic_mask_irq,
 	.irq_unmask		= gic_unmask_irq,
 	.irq_set_type		= gic_set_type,
+	.irq_retrigger		= gic_retrigger,
 #ifdef CONFIG_SMP
-	.irq_set_affinity	= gic_set_cpu,
+	.irq_set_affinity	= gic_set_affinity,
 #endif
+	.irq_set_wake		= gic_set_wake,
 };
 
 void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
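With these hooks in place, a platform fills in gic_arch_extn before the GIC is set up and the core driver delegates to it automatically. A hypothetical sketch (the my_plat_* names are invented for illustration):

/* Hypothetical platform code: hook PM wake-up handling into the GIC. */
static int my_plat_irq_set_wake(struct irq_data *d, unsigned int on)
{
	/* program the platform's wake-up controller for d->irq */
	return 0;
}

static void __init my_plat_init_irq(void)
{
	/* must run before the GIC is initialised */
	gic_arch_extn.irq_set_wake = my_plat_irq_set_wake;
}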

+ 1 - 5
arch/arm/include/asm/a.out-core.h

@@ -32,11 +32,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
 	dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	dump->u_ssize = 0;
 
-	dump->u_debugreg[0] = tsk->thread.debug.bp[0].address;
-	dump->u_debugreg[1] = tsk->thread.debug.bp[1].address;
-	dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn.arm;
-	dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn.arm;
-	dump->u_debugreg[4] = tsk->thread.debug.nsaved;
+	memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg));
 
 	if (dump->start_stack < 0x04000000)
 		dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;

+ 22 - 38
arch/arm/include/asm/bitops.h

@@ -148,15 +148,19 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
  * Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0.
  * Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0.
  */
  */
 
 
+/*
+ * Native endian assembly bitops.  nr = 0 -> word 0 bit 0.
+ */
+extern void _set_bit(int nr, volatile unsigned long * p);
+extern void _clear_bit(int nr, volatile unsigned long * p);
+extern void _change_bit(int nr, volatile unsigned long * p);
+extern int _test_and_set_bit(int nr, volatile unsigned long * p);
+extern int _test_and_clear_bit(int nr, volatile unsigned long * p);
+extern int _test_and_change_bit(int nr, volatile unsigned long * p);
+
 /*
 /*
  * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
  * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
  */
  */
-extern void _set_bit_le(int nr, volatile unsigned long * p);
-extern void _clear_bit_le(int nr, volatile unsigned long * p);
-extern void _change_bit_le(int nr, volatile unsigned long * p);
-extern int _test_and_set_bit_le(int nr, volatile unsigned long * p);
-extern int _test_and_clear_bit_le(int nr, volatile unsigned long * p);
-extern int _test_and_change_bit_le(int nr, volatile unsigned long * p);
 extern int _find_first_zero_bit_le(const void * p, unsigned size);
 extern int _find_first_zero_bit_le(const void * p, unsigned size);
 extern int _find_next_zero_bit_le(const void * p, int size, int offset);
 extern int _find_next_zero_bit_le(const void * p, int size, int offset);
 extern int _find_first_bit_le(const unsigned long *p, unsigned size);
 extern int _find_first_bit_le(const unsigned long *p, unsigned size);
@@ -165,12 +169,6 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
 /*
 /*
  * Big endian assembly bitops.  nr = 0 -> byte 3 bit 0.
  * Big endian assembly bitops.  nr = 0 -> byte 3 bit 0.
  */
  */
-extern void _set_bit_be(int nr, volatile unsigned long * p);
-extern void _clear_bit_be(int nr, volatile unsigned long * p);
-extern void _change_bit_be(int nr, volatile unsigned long * p);
-extern int _test_and_set_bit_be(int nr, volatile unsigned long * p);
-extern int _test_and_clear_bit_be(int nr, volatile unsigned long * p);
-extern int _test_and_change_bit_be(int nr, volatile unsigned long * p);
 extern int _find_first_zero_bit_be(const void * p, unsigned size);
 extern int _find_first_zero_bit_be(const void * p, unsigned size);
 extern int _find_next_zero_bit_be(const void * p, int size, int offset);
 extern int _find_next_zero_bit_be(const void * p, int size, int offset);
 extern int _find_first_bit_be(const unsigned long *p, unsigned size);
 extern int _find_first_bit_be(const unsigned long *p, unsigned size);
@@ -180,33 +178,26 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 /*
 /*
  * The __* form of bitops are non-atomic and may be reordered.
  * The __* form of bitops are non-atomic and may be reordered.
  */
  */
-#define	ATOMIC_BITOP_LE(name,nr,p)		\
-	(__builtin_constant_p(nr) ?		\
-	 ____atomic_##name(nr, p) :		\
-	 _##name##_le(nr,p))
-
-#define	ATOMIC_BITOP_BE(name,nr,p)		\
-	(__builtin_constant_p(nr) ?		\
-	 ____atomic_##name(nr, p) :		\
-	 _##name##_be(nr,p))
+#define ATOMIC_BITOP(name,nr,p)			\
+	(__builtin_constant_p(nr) ? ____atomic_##name(nr, p) : _##name(nr,p))
 #else
-#define ATOMIC_BITOP_LE(name,nr,p)	_##name##_le(nr,p)
-#define ATOMIC_BITOP_BE(name,nr,p)	_##name##_be(nr,p)
+#define ATOMIC_BITOP(name,nr,p)		_##name(nr,p)
 #endif
 
-#define NONATOMIC_BITOP(name,nr,p)		\
-	(____nonatomic_##name(nr, p))
+/*
+ * Native endian atomic definitions.
+ */
+#define set_bit(nr,p)			ATOMIC_BITOP(set_bit,nr,p)
+#define clear_bit(nr,p)			ATOMIC_BITOP(clear_bit,nr,p)
+#define change_bit(nr,p)		ATOMIC_BITOP(change_bit,nr,p)
+#define test_and_set_bit(nr,p)		ATOMIC_BITOP(test_and_set_bit,nr,p)
+#define test_and_clear_bit(nr,p)	ATOMIC_BITOP(test_and_clear_bit,nr,p)
+#define test_and_change_bit(nr,p)	ATOMIC_BITOP(test_and_change_bit,nr,p)
 
 #ifndef __ARMEB__
 /*
  * These are the little endian, atomic definitions.
  */
-#define set_bit(nr,p)			ATOMIC_BITOP_LE(set_bit,nr,p)
-#define clear_bit(nr,p)			ATOMIC_BITOP_LE(clear_bit,nr,p)
-#define change_bit(nr,p)		ATOMIC_BITOP_LE(change_bit,nr,p)
-#define test_and_set_bit(nr,p)		ATOMIC_BITOP_LE(test_and_set_bit,nr,p)
-#define test_and_clear_bit(nr,p)	ATOMIC_BITOP_LE(test_and_clear_bit,nr,p)
-#define test_and_change_bit(nr,p)	ATOMIC_BITOP_LE(test_and_change_bit,nr,p)
 #define find_first_zero_bit(p,sz)	_find_first_zero_bit_le(p,sz)
 #define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_le(p,sz,off)
 #define find_first_bit(p,sz)		_find_first_bit_le(p,sz)
@@ -215,16 +206,9 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 #define WORD_BITOFF_TO_LE(x)		((x))
 
 #else
-
 /*
  * These are the big endian, atomic definitions.
  */
-#define set_bit(nr,p)			ATOMIC_BITOP_BE(set_bit,nr,p)
-#define clear_bit(nr,p)			ATOMIC_BITOP_BE(clear_bit,nr,p)
-#define change_bit(nr,p)		ATOMIC_BITOP_BE(change_bit,nr,p)
-#define test_and_set_bit(nr,p)		ATOMIC_BITOP_BE(test_and_set_bit,nr,p)
-#define test_and_clear_bit(nr,p)	ATOMIC_BITOP_BE(test_and_clear_bit,nr,p)
-#define test_and_change_bit(nr,p)	ATOMIC_BITOP_BE(test_and_change_bit,nr,p)
 #define find_first_zero_bit(p,sz)	_find_first_zero_bit_be(p,sz)
 #define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_be(p,sz,off)
 #define find_first_bit(p,sz)		_find_first_bit_be(p,sz)

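The bitops hunk above folds the duplicated _le/_be assembly entry points into single native-endian ones; word-indexed bitops behave the same on both endiannesses, so only the byte-indexed find_* helpers still need per-endian variants. A minimal sketch of the resulting dispatch, using the names from the hunk (flags_word and do_bitops are illustrative, not from the patch):

/* Sketch only: how the unified ATOMIC_BITOP() resolves its callers. */
static unsigned long flags_word;		/* hypothetical shared bitmap word */

static void do_bitops(int runtime_nr)
{
	set_bit(5, &flags_word);		/* constant nr: inline ____atomic_set_bit() */
	set_bit(runtime_nr, &flags_word);	/* variable nr: out-of-line _set_bit() */
}
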
+ 4 - 132
arch/arm/include/asm/cacheflush.h

@@ -12,130 +12,13 @@
 
 #include <linux/mm.h>
 
-#include <asm/glue.h>
+#include <asm/glue-cache.h>
 #include <asm/shmparam.h>
 #include <asm/cachetype.h>
 #include <asm/outercache.h>
 
 #define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
 
-/*
- *	Cache Model
- *	===========
- */
-#undef _CACHE
-#undef MULTI_CACHE
-
-#if defined(CONFIG_CPU_CACHE_V3)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE v3
-# endif
-#endif
-
-#if defined(CONFIG_CPU_CACHE_V4)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE v4
-# endif
-#endif
-
-#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
-    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
-    defined(CONFIG_CPU_ARM1026)
-# define MULTI_CACHE 1
-#endif
-
-#if defined(CONFIG_CPU_FA526)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE fa
-# endif
-#endif
-
-#if defined(CONFIG_CPU_ARM926T)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE arm926
-# endif
-#endif
-
-#if defined(CONFIG_CPU_ARM940T)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE arm940
-# endif
-#endif
-
-#if defined(CONFIG_CPU_ARM946E)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE arm946
-# endif
-#endif
-
-#if defined(CONFIG_CPU_CACHE_V4WB)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE v4wb
-# endif
-#endif
-
-#if defined(CONFIG_CPU_XSCALE)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE xscale
-# endif
-#endif
-
-#if defined(CONFIG_CPU_XSC3)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE xsc3
-# endif
-#endif
-
-#if defined(CONFIG_CPU_MOHAWK)
-# ifdef _CACHE
-#  define MULTI_CACHE 1
-# else
-#  define _CACHE mohawk
-# endif
-#endif
-
-#if defined(CONFIG_CPU_FEROCEON)
-# define MULTI_CACHE 1
-#endif
-
-#if defined(CONFIG_CPU_V6)
-//# ifdef _CACHE
-#  define MULTI_CACHE 1
-//# else
-//#  define _CACHE v6
-//# endif
-#endif
-
-#if defined(CONFIG_CPU_V7)
-//# ifdef _CACHE
-#  define MULTI_CACHE 1
-//# else
-//#  define _CACHE v7
-//# endif
-#endif
-
-#if !defined(_CACHE) && !defined(MULTI_CACHE)
-#error Unknown cache maintainence model
-#endif
-
 /*
  * This flag is used to indicate that the page pointed to by a pte is clean
  * and does not require cleaning before returning it to the user.
@@ -249,19 +132,11 @@ extern struct cpu_cache_fns cpu_cache;
  * visible to the CPU.
  */
 #define dmac_map_area			cpu_cache.dma_map_area
-#define dmac_unmap_area		cpu_cache.dma_unmap_area
+#define dmac_unmap_area			cpu_cache.dma_unmap_area
 #define dmac_flush_range		cpu_cache.dma_flush_range
 
 #else
 
-#define __cpuc_flush_icache_all		__glue(_CACHE,_flush_icache_all)
-#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
-#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
-#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
-#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
-#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
-#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
-
 extern void __cpuc_flush_icache_all(void);
 extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
@@ -276,10 +151,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_map_area			__glue(_CACHE,_dma_map_area)
-#define dmac_unmap_area		__glue(_CACHE,_dma_unmap_area)
-#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
-
 extern void dmac_map_area(const void *, size_t, int);
 extern void dmac_unmap_area(const void *, size_t, int);
 extern void dmac_flush_range(const void *, const void *);
@@ -316,7 +187,8 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
  * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
  * will fall through to use __flush_icache_all_generic.
  */
-#if (defined(CONFIG_CPU_V7) && defined(CONFIG_CPU_V6)) ||		\
+#if (defined(CONFIG_CPU_V7) && \
+     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
 	defined(CONFIG_SMP_ON_UP)
 #define __flush_icache_preferred	__cpuc_flush_icache_all
 #elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)

+ 0 - 69
arch/arm/include/asm/cpu-multi32.h

@@ -1,69 +0,0 @@
-/*
- *  arch/arm/include/asm/cpu-multi32.h
- *
- *  Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <asm/page.h>
-
-struct mm_struct;
-
-/*
- * Don't change this structure - ASM code
- * relies on it.
- */
-extern struct processor {
-	/* MISC
-	 * get data abort address/flags
-	 */
-	void (*_data_abort)(unsigned long pc);
-	/*
-	 * Retrieve prefetch fault address
-	 */
-	unsigned long (*_prefetch_abort)(unsigned long lr);
-	/*
-	 * Set up any processor specifics
-	 */
-	void (*_proc_init)(void);
-	/*
-	 * Disable any processor specifics
-	 */
-	void (*_proc_fin)(void);
-	/*
-	 * Special stuff for a reset
-	 */
-	void (*reset)(unsigned long addr) __attribute__((noreturn));
-	/*
-	 * Idle the processor
-	 */
-	int (*_do_idle)(void);
-	/*
-	 * Processor architecture specific
-	 */
-	/*
-	 * clean a virtual address range from the
-	 * D-cache without flushing the cache.
-	 */
-	void (*dcache_clean_area)(void *addr, int size);
-
-	/*
-	 * Set the page table
-	 */
-	void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
-	/*
-	 * Set a possibly extended PTE.  Non-extended PTEs should
-	 * ignore 'ext'.
-	 */
-	void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
-} processor;
-
-#define cpu_proc_init()			processor._proc_init()
-#define cpu_proc_fin()			processor._proc_fin()
-#define cpu_reset(addr)			processor.reset(addr)
-#define cpu_do_idle()			processor._do_idle()
-#define cpu_dcache_clean_area(addr,sz)	processor.dcache_clean_area(addr,sz)
-#define cpu_set_pte_ext(ptep,pte,ext)	processor.set_pte_ext(ptep,pte,ext)
-#define cpu_do_switch_mm(pgd,mm)	processor.switch_mm(pgd,mm)

+ 0 - 44
arch/arm/include/asm/cpu-single.h

@@ -1,44 +0,0 @@
-/*
- *  arch/arm/include/asm/cpu-single.h
- *
- *  Copyright (C) 2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-/*
- * Single CPU
- */
-#ifdef __STDC__
-#define __catify_fn(name,x)	name##x
-#else
-#define __catify_fn(name,x)	name/**/x
-#endif
-#define __cpu_fn(name,x)	__catify_fn(name,x)
-
-/*
- * If we are supporting multiple CPUs, then we must use a table of
- * function pointers for this lot.  Otherwise, we can optimise the
- * table away.
- */
-#define cpu_proc_init			__cpu_fn(CPU_NAME,_proc_init)
-#define cpu_proc_fin			__cpu_fn(CPU_NAME,_proc_fin)
-#define cpu_reset			__cpu_fn(CPU_NAME,_reset)
-#define cpu_do_idle			__cpu_fn(CPU_NAME,_do_idle)
-#define cpu_dcache_clean_area		__cpu_fn(CPU_NAME,_dcache_clean_area)
-#define cpu_do_switch_mm		__cpu_fn(CPU_NAME,_switch_mm)
-#define cpu_set_pte_ext			__cpu_fn(CPU_NAME,_set_pte_ext)
-
-#include <asm/page.h>
-
-struct mm_struct;
-
-/* declare all the functions as extern */
-extern void cpu_proc_init(void);
-extern void cpu_proc_fin(void);
-extern int cpu_do_idle(void);
-extern void cpu_dcache_clean_area(void *, int);
-extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
-extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
-extern void cpu_reset(unsigned long addr) __attribute__((noreturn));

+ 2 - 1
arch/arm/include/asm/cputype.h

@@ -23,6 +23,8 @@
 #define CPUID_EXT_ISAR4	"c2, 4"
 #define CPUID_EXT_ISAR5	"c2, 5"
 
+extern unsigned int processor_id;
+
 #ifdef CONFIG_CPU_CP15
 #define read_cpuid(reg)							\
 	({								\
@@ -43,7 +45,6 @@
 		__val;							\
 	})
 #else
-extern unsigned int processor_id;
 #define read_cpuid(reg) (processor_id)
 #define read_cpuid_ext(reg) 0
 #endif

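Hoisting the processor_id declaration out of the #else branch makes it visible to CP15 kernels as well. For illustration, a hedged sketch of reading the main ID register through the accessor above (report_midr is hypothetical; CPUID_ID is the existing MIDR selector defined elsewhere in this header):

static void report_midr(void)
{
	/* MIDR via CP15 on CP15 parts, or the cached processor_id otherwise */
	unsigned int midr = read_cpuid(CPUID_ID);

	printk(KERN_INFO "CPU MIDR: 0x%08x\n", midr);
}
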
+ 94 - 0
arch/arm/include/asm/fncpy.h

@@ -0,0 +1,94 @@
+/*
+ * arch/arm/include/asm/fncpy.h - helper macros for function body copying
+ *
+ * Copyright (C) 2011 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * These macros are intended for use when there is a need to copy a low-level
+ * function body into special memory.
+ *
+ * For example, when reconfiguring the SDRAM controller, the code doing the
+ * reconfiguration may need to run from SRAM.
+ *
+ * NOTE: the copied function body must be entirely self-contained and
+ * position-independent in order for this to work properly.
+ *
+ * NOTE: in order for embedded literals and data to get referenced correctly,
+ * the alignment of functions must be preserved when copying.  To ensure this,
+ * the source and destination addresses for fncpy() must be aligned to a
+ * multiple of 8 bytes: you will get a BUG() if this condition is not met.
+ * You will typically need a ".align 3" directive in the assembler where the
+ * function to be copied is defined, and ensure that your allocator for the
+ * destination buffer returns 8-byte-aligned pointers.
+ *
+ * Typical usage example:
+ *
+ * extern int f(args);
+ * extern uint32_t size_of_f;
+ * int (*copied_f)(args);
+ * void *sram_buffer;
+ *
+ * copied_f = fncpy(sram_buffer, &f, size_of_f);
+ *
+ * ... later, call the function: ...
+ *
+ * copied_f(args);
+ *
+ * The size of the function to be copied can't be determined from C:
+ * this must be determined by other means, such as adding assembler directives
+ * in the file where f is defined.
+ */
+
+#ifndef __ASM_FNCPY_H
+#define __ASM_FNCPY_H
+
+#include <linux/types.h>
+#include <linux/string.h>
+
+#include <asm/bug.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Minimum alignment requirement for the source and destination addresses
+ * for function copying.
+ */
+#define FNCPY_ALIGN 8
+
+#define fncpy(dest_buf, funcp, size) ({					\
+	uintptr_t __funcp_address;					\
+	typeof(funcp) __result;						\
+									\
+	asm("" : "=r" (__funcp_address) : "0" (funcp));			\
+									\
+	/*								\
+	 * Ensure alignment of source and destination addresses,	\
+	 * disregarding the function's Thumb bit:			\
+	 */								\
+	BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) ||		\
+		(__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1)));	\
+									\
+	memcpy(dest_buf, (void const *)(__funcp_address & ~1), size);	\
+	flush_icache_range((unsigned long)(dest_buf),			\
+		(unsigned long)(dest_buf) + (size));			\
+									\
+	asm("" : "=r" (__result)					\
+		: "0" ((uintptr_t)(dest_buf) | (__funcp_address & 1)));	\
+									\
+	__result;							\
+})
+
+#endif /* !__ASM_FNCPY_H */

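A minimal usage sketch for the new fncpy() helper, following the rules stated in the header comment; sram_alloc(), sdram_reconfig and sdram_reconfig_sz are hypothetical platform symbols, not part of this patch:

#include <asm/fncpy.h>

extern void sdram_reconfig(u32 timings);	/* position-independent, 8-byte aligned */
extern const u32 sdram_reconfig_sz;		/* size exported from the .S file */

static void (*sram_reconfig)(u32);

static int run_from_sram(u32 timings)
{
	/* hypothetical allocator returning FNCPY_ALIGN-aligned SRAM */
	void *buf = sram_alloc(sdram_reconfig_sz, FNCPY_ALIGN);

	if (!buf)
		return -ENOMEM;

	sram_reconfig = fncpy(buf, &sdram_reconfig, sdram_reconfig_sz);
	sram_reconfig(timings);		/* runs from SRAM; Thumb bit preserved */
	return 0;
}
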
+ 146 - 0
arch/arm/include/asm/glue-cache.h

@@ -0,0 +1,146 @@
+/*
+ *  arch/arm/include/asm/glue-cache.h
+ *
+ *  Copyright (C) 1999-2002 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ASM_GLUE_CACHE_H
+#define ASM_GLUE_CACHE_H
+
+#include <asm/glue.h>
+
+/*
+ *	Cache Model
+ *	===========
+ */
+#undef _CACHE
+#undef MULTI_CACHE
+
+#if defined(CONFIG_CPU_CACHE_V3)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE v3
+# endif
+#endif
+
+#if defined(CONFIG_CPU_CACHE_V4)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE v4
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
+    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
+    defined(CONFIG_CPU_ARM1026)
+# define MULTI_CACHE 1
+#endif
+
+#if defined(CONFIG_CPU_FA526)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE fa
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM926T)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE arm926
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM940T)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE arm940
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM946E)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE arm946
+# endif
+#endif
+
+#if defined(CONFIG_CPU_CACHE_V4WB)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE v4wb
+# endif
+#endif
+
+#if defined(CONFIG_CPU_XSCALE)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE xscale
+# endif
+#endif
+
+#if defined(CONFIG_CPU_XSC3)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE xsc3
+# endif
+#endif
+
+#if defined(CONFIG_CPU_MOHAWK)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE mohawk
+# endif
+#endif
+
+#if defined(CONFIG_CPU_FEROCEON)
+# define MULTI_CACHE 1
+#endif
+
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
+//# ifdef _CACHE
+#  define MULTI_CACHE 1
+//# else
+//#  define _CACHE v6
+//# endif
+#endif
+
+#if defined(CONFIG_CPU_V7)
+//# ifdef _CACHE
+#  define MULTI_CACHE 1
+//# else
+//#  define _CACHE v7
+//# endif
+#endif
+
+#if !defined(_CACHE) && !defined(MULTI_CACHE)
+#error Unknown cache maintenance model
+#endif
+
+#ifndef MULTI_CACHE
+#define __cpuc_flush_icache_all		__glue(_CACHE,_flush_icache_all)
+#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
+#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
+#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
+#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
+#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
+#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
+
+#define dmac_map_area			__glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area			__glue(_CACHE,_dma_unmap_area)
+#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
+#endif
+
+#endif

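For readers new to the glue scheme, a worked expansion (a sketch, not part of the patch): on a kernel configured only for, say, ARM926EJ-S, _CACHE is arm926 and MULTI_CACHE stays undefined, so the macros above bind callers directly to the assembly routines at compile time:

/*
 * __cpuc_flush_kern_all
 *   -> __glue(arm926, _flush_kern_cache_all)
 *   -> arm926_flush_kern_cache_all
 *
 * With MULTI_CACHE defined instead, the same call is routed through the
 * cpu_cache function-pointer table, as in the cacheflush.h hunk above.
 */
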
+ 110 - 0
arch/arm/include/asm/glue-df.h

@@ -0,0 +1,110 @@
+/*
+ *  arch/arm/include/asm/glue-df.h
+ *
+ *  Copyright (C) 1997-1999 Russell King
+ *  Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ASM_GLUE_DF_H
+#define ASM_GLUE_DF_H
+
+#include <asm/glue.h>
+
+/*
+ *	Data Abort Model
+ *	================
+ *
+ *	We have the following to choose from:
+ *	  arm6          - ARM6 style
+ *	  arm7		- ARM7 style
+ *	  v4_early	- ARMv4 without Thumb early abort handler
+ *	  v4t_late	- ARMv4 with Thumb late abort handler
+ *	  v4t_early	- ARMv4 with Thumb early abort handler
+ *	  v5tej_early	- ARMv5 with Thumb and Java early abort handler
+ *	  xscale	- ARMv5 with Thumb with Xscale extensions
+ *	  v6_early	- ARMv6 generic early abort handler
+ *	  v7_early	- ARMv7 generic early abort handler
+ */
+#undef CPU_DABORT_HANDLER
+#undef MULTI_DABORT
+
+#if defined(CONFIG_CPU_ARM610)
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER cpu_arm6_data_abort
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM710)
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER cpu_arm7_data_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_LV4T
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER v4t_late_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV4
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER v4_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV4T
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER v4t_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV5TJ
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER v5tj_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV5T
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER v5t_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV6
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER v6_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV7
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER v7_early_abort
+# endif
+#endif
+
+#ifndef CPU_DABORT_HANDLER
+#error Unknown data abort handler type
+#endif
+
+#endif

+ 57 - 0
arch/arm/include/asm/glue-pf.h

@@ -0,0 +1,57 @@
+/*
+ *  arch/arm/include/asm/glue-pf.h
+ *
+ *  Copyright (C) 1997-1999 Russell King
+ *  Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ASM_GLUE_PF_H
+#define ASM_GLUE_PF_H
+
+#include <asm/glue.h>
+
+/*
+ *	Prefetch Abort Model
+ *	================
+ *
+ *	We have the following to choose from:
+ *	  legacy	- no IFSR, no IFAR
+ *	  v6		- ARMv6: IFSR, no IFAR
+ *	  v7		- ARMv7: IFSR and IFAR
+ */
+
+#undef CPU_PABORT_HANDLER
+#undef MULTI_PABORT
+
+#ifdef CONFIG_CPU_PABRT_LEGACY
+# ifdef CPU_PABORT_HANDLER
+#  define MULTI_PABORT 1
+# else
+#  define CPU_PABORT_HANDLER legacy_pabort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_PABRT_V6
+# ifdef CPU_PABORT_HANDLER
+#  define MULTI_PABORT 1
+# else
+#  define CPU_PABORT_HANDLER v6_pabort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_PABRT_V7
+# ifdef CPU_PABORT_HANDLER
+#  define MULTI_PABORT 1
+# else
+#  define CPU_PABORT_HANDLER v7_pabort
+# endif
+#endif
+
+#ifndef CPU_PABORT_HANDLER
+#error Unknown prefetch abort handler type
+#endif
+
+#endif

+ 264 - 0
arch/arm/include/asm/glue-proc.h

@@ -0,0 +1,264 @@
+/*
+ *  arch/arm/include/asm/glue-proc.h
+ *
+ *  Copyright (C) 1997-1999 Russell King
+ *  Copyright (C) 2000 Deep Blue Solutions Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ASM_GLUE_PROC_H
+#define ASM_GLUE_PROC_H
+
+#include <asm/glue.h>
+
+/*
+ * Work out if we need multiple CPU support
+ */
+#undef MULTI_CPU
+#undef CPU_NAME
+
+/*
+ * CPU_NAME - the prefix for CPU related functions
+ */
+
+#ifdef CONFIG_CPU_ARM610
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm6
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM7TDMI
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm7tdmi
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM710
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm7
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM720T
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm720
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM740T
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm740
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM9TDMI
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm9tdmi
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM920T
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm920
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM922T
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm922
+# endif
+#endif
+
+#ifdef CONFIG_CPU_FA526
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_fa526
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM925T
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm925
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM926T
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm926
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM940T
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm940
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM946E
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm946
+# endif
+#endif
+
+#ifdef CONFIG_CPU_SA110
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_sa110
+# endif
+#endif
+
+#ifdef CONFIG_CPU_SA1100
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_sa1100
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM1020
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm1020
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM1020E
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm1020e
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM1022
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm1022
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ARM1026
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_arm1026
+# endif
+#endif
+
+#ifdef CONFIG_CPU_XSCALE
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_xscale
+# endif
+#endif
+
+#ifdef CONFIG_CPU_XSC3
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_xsc3
+# endif
+#endif
+
+#ifdef CONFIG_CPU_MOHAWK
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_mohawk
+# endif
+#endif
+
+#ifdef CONFIG_CPU_FEROCEON
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_feroceon
+# endif
+#endif
+
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_v6
+# endif
+#endif
+
+#ifdef CONFIG_CPU_V7
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_v7
+# endif
+#endif
+
+#ifndef MULTI_CPU
+#define cpu_proc_init			__glue(CPU_NAME,_proc_init)
+#define cpu_proc_fin			__glue(CPU_NAME,_proc_fin)
+#define cpu_reset			__glue(CPU_NAME,_reset)
+#define cpu_do_idle			__glue(CPU_NAME,_do_idle)
+#define cpu_dcache_clean_area		__glue(CPU_NAME,_dcache_clean_area)
+#define cpu_do_switch_mm		__glue(CPU_NAME,_switch_mm)
+#define cpu_set_pte_ext			__glue(CPU_NAME,_set_pte_ext)
+#define cpu_suspend_size		__glue(CPU_NAME,_suspend_size)
+#define cpu_do_suspend			__glue(CPU_NAME,_do_suspend)
+#define cpu_do_resume			__glue(CPU_NAME,_do_resume)
+#endif
+
+#endif

+ 0 - 138
arch/arm/include/asm/glue.h

@@ -15,7 +15,6 @@
  */
 #ifdef __KERNEL__
 
-
 #ifdef __STDC__
 #define ____glue(name,fn)	name##fn
 #else
@@ -23,141 +22,4 @@
 #endif
 #define __glue(name,fn)		____glue(name,fn)
 
-
-
-/*
- *	Data Abort Model
- *	================
- *
- *	We have the following to choose from:
- *	  arm6          - ARM6 style
- *	  arm7		- ARM7 style
- *	  v4_early	- ARMv4 without Thumb early abort handler
- *	  v4t_late	- ARMv4 with Thumb late abort handler
- *	  v4t_early	- ARMv4 with Thumb early abort handler
- *	  v5tej_early	- ARMv5 with Thumb and Java early abort handler
- *	  xscale	- ARMv5 with Thumb with Xscale extensions
- *	  v6_early	- ARMv6 generic early abort handler
- *	  v7_early	- ARMv7 generic early abort handler
- */
-#undef CPU_DABORT_HANDLER
-#undef MULTI_DABORT
-
-#if defined(CONFIG_CPU_ARM610)
-# ifdef CPU_DABORT_HANDLER
-#  define MULTI_DABORT 1
-# else
-#  define CPU_DABORT_HANDLER cpu_arm6_data_abort
-# endif
-#endif
-
-#if defined(CONFIG_CPU_ARM710)
-# ifdef CPU_DABORT_HANDLER
-#  define MULTI_DABORT 1
-# else
-#  define CPU_DABORT_HANDLER cpu_arm7_data_abort
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ABRT_LV4T
-# ifdef CPU_DABORT_HANDLER
-#  define MULTI_DABORT 1
-# else
-#  define CPU_DABORT_HANDLER v4t_late_abort
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ABRT_EV4
-# ifdef CPU_DABORT_HANDLER
-#  define MULTI_DABORT 1
-# else
-#  define CPU_DABORT_HANDLER v4_early_abort
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ABRT_EV4T
-# ifdef CPU_DABORT_HANDLER
-#  define MULTI_DABORT 1
-# else
-#  define CPU_DABORT_HANDLER v4t_early_abort
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ABRT_EV5TJ
-# ifdef CPU_DABORT_HANDLER
-#  define MULTI_DABORT 1
-# else
-#  define CPU_DABORT_HANDLER v5tj_early_abort
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ABRT_EV5T
-# ifdef CPU_DABORT_HANDLER
-#  define MULTI_DABORT 1
-# else
-#  define CPU_DABORT_HANDLER v5t_early_abort
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ABRT_EV6
-# ifdef CPU_DABORT_HANDLER
-#  define MULTI_DABORT 1
-# else
-#  define CPU_DABORT_HANDLER v6_early_abort
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ABRT_EV7
-# ifdef CPU_DABORT_HANDLER
-#  define MULTI_DABORT 1
-# else
-#  define CPU_DABORT_HANDLER v7_early_abort
-# endif
-#endif
-
-#ifndef CPU_DABORT_HANDLER
-#error Unknown data abort handler type
-#endif
-
-/*
- *	Prefetch Abort Model
- *	================
- *
- *	We have the following to choose from:
- *	  legacy	- no IFSR, no IFAR
- *	  v6		- ARMv6: IFSR, no IFAR
- *	  v7		- ARMv7: IFSR and IFAR
- */
-
-#undef CPU_PABORT_HANDLER
-#undef MULTI_PABORT
-
-#ifdef CONFIG_CPU_PABRT_LEGACY
-# ifdef CPU_PABORT_HANDLER
-#  define MULTI_PABORT 1
-# else
-#  define CPU_PABORT_HANDLER legacy_pabort
-# endif
-#endif
-
-#ifdef CONFIG_CPU_PABRT_V6
-# ifdef CPU_PABORT_HANDLER
-#  define MULTI_PABORT 1
-# else
-#  define CPU_PABORT_HANDLER v6_pabort
-# endif
-#endif
-
-#ifdef CONFIG_CPU_PABRT_V7
-# ifdef CPU_PABORT_HANDLER
-#  define MULTI_PABORT 1
-# else
-#  define CPU_PABORT_HANDLER v7_pabort
-# endif
-#endif
-
-#ifndef CPU_PABORT_HANDLER
-#error Unknown prefetch abort handler type
-#endif
-
 #endif

+ 1 - 0
arch/arm/include/asm/hardware/gic.h

@@ -34,6 +34,7 @@
 
 #ifndef __ASSEMBLY__
 extern void __iomem *gic_cpu_base_addr;
+extern struct irq_chip gic_arch_extn;
 
 void gic_init(unsigned int, unsigned int, void __iomem *, void __iomem *);
 void gic_secondary_init(unsigned int);

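gic_arch_extn lets a platform piggyback extra callbacks onto the GIC's irq_chip. A hedged sketch of how a board file might use it (the set_wake handler and the register bases are hypothetical, not from this patch):

static int board_gic_set_wake(struct irq_data *d, unsigned int on)
{
	/* record d->irq as a wake-up source in platform PM logic */
	return 0;
}

static void __init board_init_irq(void)
{
	gic_arch_extn.irq_set_wake = board_gic_set_wake;
	gic_init(0, 29, dist_base, cpu_base);	/* bases are platform-specific */
}
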
+ 27 - 2
arch/arm/include/asm/highmem.h

@@ -19,11 +19,36 @@
 
 extern pte_t *pkmap_page_table;
 
+extern void *kmap_high(struct page *page);
+extern void kunmap_high(struct page *page);
+
+/*
+ * The reason for kmap_high_get() is to ensure that the currently kmap'd
+ * page usage count does not decrease to zero while we're using its
+ * existing virtual mapping in an atomic context.  With a VIVT cache this
+ * is essential to do, but with a VIPT cache this is only an optimization
+ * so not to pay the price of establishing a second mapping if an existing
+ * one can be used.  However, on platforms without hardware TLB maintenance
+ * broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since
+ * the locking involved must also disable IRQs which is incompatible with
+ * the IPI mechanism used by global TLB operations.
+ */
 #define ARCH_NEEDS_KMAP_HIGH_GET
+#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6)
+#undef ARCH_NEEDS_KMAP_HIGH_GET
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT)
+#error "The sum of features in your kernel config cannot be supported together"
+#endif
+#endif
 
-extern void *kmap_high(struct page *page);
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
 extern void *kmap_high_get(struct page *page);
-extern void kunmap_high(struct page *page);
+#else
+static inline void *kmap_high_get(struct page *page)
+{
+	return NULL;
+}
+#endif
 
 /*
  * The following functions are already defined by <linux/highmem.h>

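With the NULL fallback in place, callers no longer need their own #ifdefs around kmap_high_get(). A sketch modelled on VIVT cache maintenance (illustrative only, not code from this patch):

static void flush_kmap_alias(struct page *page)
{
	void *addr = kmap_high_get(page);	/* pins an existing kmap, or NULL */

	if (addr) {
		__cpuc_flush_dcache_area(addr, PAGE_SIZE);
		kunmap_high(page);		/* drop the pin taken above */
	}
}
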
+ 31 - 0
arch/arm/include/asm/mach/irq.h

@@ -34,4 +34,35 @@ do {							\
 	raw_spin_unlock(&desc->lock);			\
 } while(0)
 
+#ifndef __ASSEMBLY__
+/*
+ * Entry/exit functions for chained handlers where the primary IRQ chip
+ * may implement either fasteoi or level-trigger flow control.
+ */
+static inline void chained_irq_enter(struct irq_chip *chip,
+				     struct irq_desc *desc)
+{
+	/* FastEOI controllers require no action on entry. */
+	if (chip->irq_eoi)
+		return;
+
+	if (chip->irq_mask_ack) {
+		chip->irq_mask_ack(&desc->irq_data);
+	} else {
+		chip->irq_mask(&desc->irq_data);
+		if (chip->irq_ack)
+			chip->irq_ack(&desc->irq_data);
+	}
+}
+
+static inline void chained_irq_exit(struct irq_chip *chip,
+				    struct irq_desc *desc)
+{
+	if (chip->irq_eoi)
+		chip->irq_eoi(&desc->irq_data);
+	else
+		chip->irq_unmask(&desc->irq_data);
+}
+#endif
+
 #endif

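A minimal chained-handler sketch using the new helpers; the demux step and find_pending_gpio_irq() are illustrative assumptions, not part of the patch:

static void board_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);		/* mask+ack, or nothing for fasteoi */
	generic_handle_irq(find_pending_gpio_irq());
	chained_irq_exit(chip, desc);		/* eoi or unmask the parent IRQ */
}
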
+ 61 - 14
arch/arm/include/asm/memory.h

@@ -15,6 +15,7 @@
 
 #include <linux/compiler.h>
 #include <linux/const.h>
+#include <linux/types.h>
 #include <mach/memory.h>
 #include <asm/sizes.h>
 
@@ -132,21 +133,11 @@
 #define DTCM_OFFSET	UL(0xfffe8000)
 #endif
 
-/*
- * Physical vs virtual RAM address space conversion.  These are
- * private definitions which should NOT be used outside memory.h
- * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
- */
-#ifndef __virt_to_phys
-#define __virt_to_phys(x)	((x) - PAGE_OFFSET + PHYS_OFFSET)
-#define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
-#endif
-
 /*
  * Convert a physical address to a Page Frame Number and back
  */
-#define	__phys_to_pfn(paddr)	((paddr) >> PAGE_SHIFT)
-#define	__pfn_to_phys(pfn)	((pfn) << PAGE_SHIFT)
+#define	__phys_to_pfn(paddr)	((unsigned long)((paddr) >> PAGE_SHIFT))
+#define	__pfn_to_phys(pfn)	((phys_addr_t)(pfn) << PAGE_SHIFT)
 
 /*
  * Convert a page to/from a physical address
@@ -156,6 +147,62 @@
 
 #ifndef __ASSEMBLY__
 
+/*
+ * Physical vs virtual RAM address space conversion.  These are
+ * private definitions which should NOT be used outside memory.h
+ * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ */
+#ifndef __virt_to_phys
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+
+/*
+ * Constants used to force the right instruction encodings and shifts
+ * so that all we need to do is modify the 8-bit constant field.
+ */
+#define __PV_BITS_31_24	0x81000000
+#define __PV_BITS_23_16	0x00810000
+
+extern unsigned long __pv_phys_offset;
+#define PHYS_OFFSET __pv_phys_offset
+
+#define __pv_stub(from,to,instr,type)			\
+	__asm__("@ __pv_stub\n"				\
+	"1:	" instr "	%0, %1, %2\n"		\
+	"	.pushsection .pv_table,\"a\"\n"		\
+	"	.long	1b\n"				\
+	"	.popsection\n"				\
+	: "=r" (to)					\
+	: "r" (from), "I" (type))
+
+static inline unsigned long __virt_to_phys(unsigned long x)
+{
+	unsigned long t;
+	__pv_stub(x, t, "add", __PV_BITS_31_24);
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+	__pv_stub(t, t, "add", __PV_BITS_23_16);
+#endif
+	return t;
+}
+
+static inline unsigned long __phys_to_virt(unsigned long x)
+{
+	unsigned long t;
+	__pv_stub(x, t, "sub", __PV_BITS_31_24);
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+	__pv_stub(t, t, "sub", __PV_BITS_23_16);
+#endif
+	return t;
+}
+#else
+#define __virt_to_phys(x)	((x) - PAGE_OFFSET + PHYS_OFFSET)
+#define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
+#endif
+#endif
+
+#ifndef PHYS_OFFSET
+#define PHYS_OFFSET	PLAT_PHYS_OFFSET
+#endif
+
 /*
  * The DMA mask corresponding to the maximum bus address allocatable
  * using GFP_DMA.  The default here places no restriction on DMA
@@ -188,12 +235,12 @@
  * translation for translating DMA addresses.  Use the driver
  * DMA support - see dma-mapping.h.
  */
-static inline unsigned long virt_to_phys(const volatile void *x)
+static inline phys_addr_t virt_to_phys(const volatile void *x)
 {
 	return __virt_to_phys((unsigned long)(x));
 }
 
-static inline void *phys_to_virt(unsigned long x)
+static inline void *phys_to_virt(phys_addr_t x)
 {
 	return (void *)(__phys_to_virt((unsigned long)(x)));
 }

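What the __pv_stub() machinery amounts to, as a sketch under the definitions above (not code from this hunk):

/*
 * The compiler emits, e.g.
 *
 *	add	r0, r1, #0x81000000	@ __PV_BITS_31_24 placeholder
 *
 * and the instruction's address is recorded in the .pv_table section.
 * Early boot rewrites the 8-bit immediate of every recorded instruction
 * to the real PHYS_OFFSET - PAGE_OFFSET delta, so __virt_to_phys() costs
 * a single add and no memory access, wherever the kernel was loaded.
 */
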
+ 25 - 2
arch/arm/include/asm/module.h

@@ -25,8 +25,31 @@ struct mod_arch_specific {
 };
 
 /*
- * Include the ARM architecture version.
+ * Add the ARM architecture version to the version magic string
  */
-#define MODULE_ARCH_VERMAGIC	"ARMv" __stringify(__LINUX_ARM_ARCH__) " "
+#define MODULE_ARCH_VERMAGIC_ARMVSN "ARMv" __stringify(__LINUX_ARM_ARCH__) " "
+
+/* Add __virt_to_phys patching state as well */
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+#define MODULE_ARCH_VERMAGIC_P2V "p2v16 "
+#else
+#define MODULE_ARCH_VERMAGIC_P2V "p2v8 "
+#endif
+#else
+#define MODULE_ARCH_VERMAGIC_P2V ""
+#endif
+
+/* Add instruction set architecture tag to distinguish ARM/Thumb kernels */
+#ifdef CONFIG_THUMB2_KERNEL
+#define MODULE_ARCH_VERMAGIC_ARMTHUMB "thumb2 "
+#else
+#define MODULE_ARCH_VERMAGIC_ARMTHUMB ""
+#endif
+
+#define MODULE_ARCH_VERMAGIC \
+	MODULE_ARCH_VERMAGIC_ARMVSN \
+	MODULE_ARCH_VERMAGIC_ARMTHUMB \
+	MODULE_ARCH_VERMAGIC_P2V
 
 #endif /* _ASM_ARM_MODULE_H */

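A worked example of the assembled string (not in the patch itself):

/*
 * An ARMv7, Thumb-2 kernel with 8-bit phys/virt patching enabled yields
 *
 *	MODULE_ARCH_VERMAGIC == "ARMv7 thumb2 p2v8 "
 *
 * so a module built with mismatched CONFIG_THUMB2_KERNEL or P2V settings
 * is rejected at load time rather than failing unpredictably later.
 */
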
+ 1 - 0
arch/arm/include/asm/outercache.h

@@ -31,6 +31,7 @@ struct outer_cache_fns {
 #ifdef CONFIG_OUTER_CACHE_SYNC
 	void (*sync)(void);
 #endif
+	void (*set_debug)(unsigned long);
 };
 
 #ifdef CONFIG_OUTER_CACHE

+ 71 - 235
arch/arm/include/asm/proc-fns.h

@@ -13,250 +13,86 @@
 
 #ifdef __KERNEL__
 
+#include <asm/glue-proc.h>
+#include <asm/page.h>
 
-/*
- * Work out if we need multiple CPU support
- */
-#undef MULTI_CPU
-#undef CPU_NAME
+#ifndef __ASSEMBLY__
+
+struct mm_struct;
 
 /*
- * CPU_NAME - the prefix for CPU related functions
+ * Don't change this structure - ASM code relies on it.
  */
-
-#ifdef CONFIG_CPU_ARM610
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm6
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM7TDMI
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm7tdmi
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM710
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm7
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM720T
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm720
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM740T
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm740
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM9TDMI
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm9tdmi
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM920T
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm920
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM922T
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm922
-# endif
-#endif
-
-#ifdef CONFIG_CPU_FA526
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_fa526
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM925T
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm925
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM926T
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm926
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM940T
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm940
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM946E
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm946
-# endif
-#endif
-
-#ifdef CONFIG_CPU_SA110
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_sa110
-# endif
-#endif
-
-#ifdef CONFIG_CPU_SA1100
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_sa1100
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM1020
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm1020
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM1020E
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm1020e
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM1022
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm1022
-# endif
-#endif
-
-#ifdef CONFIG_CPU_ARM1026
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_arm1026
-# endif
-#endif
-
-#ifdef CONFIG_CPU_XSCALE
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_xscale
-# endif
-#endif
-
-#ifdef CONFIG_CPU_XSC3
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_xsc3
-# endif
-#endif
-
-#ifdef CONFIG_CPU_MOHAWK
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_mohawk
-# endif
-#endif
-
-#ifdef CONFIG_CPU_FEROCEON
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_feroceon
-# endif
-#endif
-
-#ifdef CONFIG_CPU_V6
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_v6
-# endif
-#endif
-
-#ifdef CONFIG_CPU_V7
-# ifdef CPU_NAME
-#  undef  MULTI_CPU
-#  define MULTI_CPU
-# else
-#  define CPU_NAME cpu_v7
-# endif
-#endif
-
-#ifndef __ASSEMBLY__
+extern struct processor {
+	/* MISC
+	 * get data abort address/flags
+	 */
+	void (*_data_abort)(unsigned long pc);
+	/*
+	 * Retrieve prefetch fault address
+	 */
+	unsigned long (*_prefetch_abort)(unsigned long lr);
+	/*
+	 * Set up any processor specifics
+	 */
+	void (*_proc_init)(void);
+	/*
+	 * Disable any processor specifics
+	 */
+	void (*_proc_fin)(void);
+	/*
+	 * Special stuff for a reset
+	 */
+	void (*reset)(unsigned long addr) __attribute__((noreturn));
+	/*
+	 * Idle the processor
+	 */
+	int (*_do_idle)(void);
+	/*
+	 * Processor architecture specific
+	 */
+	/*
+	 * clean a virtual address range from the
+	 * D-cache without flushing the cache.
+	 */
+	void (*dcache_clean_area)(void *addr, int size);
+
+	/*
+	 * Set the page table
+	 */
+	void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
+	/*
+	 * Set a possibly extended PTE.  Non-extended PTEs should
+	 * ignore 'ext'.
+	 */
+	void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
+
+	/* Suspend/resume */
+	unsigned int suspend_size;
+	void (*do_suspend)(void *);
+	void (*do_resume)(void *);
+} processor;
 
 #ifndef MULTI_CPU
-#include <asm/cpu-single.h>
+extern void cpu_proc_init(void);
+extern void cpu_proc_fin(void);
+extern int cpu_do_idle(void);
+extern void cpu_dcache_clean_area(void *, int);
+extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
+extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
+extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
 #else
-#include <asm/cpu-multi32.h>
+#define cpu_proc_init()			processor._proc_init()
+#define cpu_proc_fin()			processor._proc_fin()
+#define cpu_reset(addr)			processor.reset(addr)
+#define cpu_do_idle()			processor._do_idle()
+#define cpu_dcache_clean_area(addr,sz)	processor.dcache_clean_area(addr,sz)
+#define cpu_set_pte_ext(ptep,pte,ext)	processor.set_pte_ext(ptep,pte,ext)
+#define cpu_do_switch_mm(pgd,mm)	processor.switch_mm(pgd,mm)
 #endif
 
+extern void cpu_resume(void);
+
 #include <asm/memory.h>
 
 #ifdef CONFIG_MMU

+ 1 - 13
arch/arm/include/asm/processor.h

@@ -29,19 +29,7 @@
 #define STACK_TOP_MAX	TASK_SIZE
 #endif
 
-union debug_insn {
-	u32	arm;
-	u16	thumb;
-};
-
-struct debug_entry {
-	u32			address;
-	union debug_insn	insn;
-};
-
 struct debug_info {
-	int			nsaved;
-	struct debug_entry	bp[2];
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
 	struct perf_event	*hbp[ARM_MAX_HBP_SLOTS];
 #endif
@@ -95,7 +83,7 @@ extern void release_thread(struct task_struct *);
 
 unsigned long get_wchan(struct task_struct *p);
 
-#if __LINUX_ARM_ARCH__ == 6
+#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
 #define cpu_relax()			smp_mb()
 #else
 #define cpu_relax()			barrier()

+ 0 - 2
arch/arm/include/asm/ptrace.h

@@ -130,8 +130,6 @@ struct pt_regs {
 
 #ifdef __KERNEL__
 
-#define arch_has_single_step()	(1)
-
 #define user_mode(regs)	\
 	(((regs)->ARM_cpsr & 0xf) == 0)
 

+ 7 - 0
arch/arm/include/asm/smp_scu.h

@@ -1,7 +1,14 @@
 #ifndef __ASMARM_ARCH_SCU_H
 #define __ASMARM_ARCH_SCU_H
 
+#define SCU_PM_NORMAL	0
+#define SCU_PM_DORMANT	2
+#define SCU_PM_POWEROFF	3
+
+#ifndef __ASSEMBLER__
 unsigned int scu_get_core_count(void __iomem *);
 void scu_enable(void __iomem *);
+int scu_power_mode(void __iomem *, unsigned int);
+#endif
 
 #endif

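A hedged sketch of the new scu_power_mode() call in a platform's CPU-hotplug path; scu_base_addr() is a hypothetical platform helper, not part of this patch:

static inline void platform_enter_lowpower(void)
{
	/* request dormant mode for this CPU before executing WFI */
	scu_power_mode(scu_base_addr(), SCU_PM_DORMANT);

	/* ... clean caches and issue WFI here ... */
}
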
+ 41 - 12
arch/arm/include/asm/spinlock.h

@@ -5,17 +5,52 @@
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
+/*
+ * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
+ * extensions, so when running on UP, we have to patch these instructions away.
+ */
+#define ALT_SMP(smp, up)					\
+	"9998:	" smp "\n"					\
+	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
+	"	.long	9998b\n"				\
+	"	" up "\n"					\
+	"	.popsection\n"
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define SEV		ALT_SMP("sev.w", "nop.w")
+/*
+ * For Thumb-2, special care is needed to ensure that the conditional WFE
+ * instruction really does assemble to exactly 4 bytes (as required by
+ * the SMP_ON_UP fixup code).   By itself "wfene" might cause the
+ * assembler to insert an extra (16-bit) IT instruction, depending on the
+ * presence or absence of neighbouring conditional instructions.
+ *
+ * To avoid this unpredictability, an appropriate IT is inserted explicitly:
+ * the assembler won't change IT instructions which are explicitly present
+ * in the input.
+ */
+#define WFE(cond)	ALT_SMP(		\
+	"it " cond "\n\t"			\
+	"wfe" cond ".n",			\
+						\
+	"nop.w"					\
+)
+#else
+#define SEV		ALT_SMP("sev", "nop")
+#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
+#endif
+
 static inline void dsb_sev(void)
 {
 #if __LINUX_ARM_ARCH__ >= 7
 	__asm__ __volatile__ (
 		"dsb\n"
-		"sev"
+		SEV
 	);
-#elif defined(CONFIG_CPU_32v6K)
+#else
 	__asm__ __volatile__ (
 		"mcr p15, 0, %0, c7, c10, 4\n"
-		"sev"
+		SEV
 		: : "r" (0)
 	);
 #endif
@@ -46,9 +81,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
-#ifdef CONFIG_CPU_32v6K
-"	wfene\n"
-#endif
+	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
@@ -107,9 +140,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
-#ifdef CONFIG_CPU_32v6K
-"	wfene\n"
-#endif
+	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
@@ -176,9 +207,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
-#ifdef CONFIG_CPU_32v6K
-"	wfemi\n"
-#endif
+	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
 	: "=&r" (tmp), "=&r" (tmp2)

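How the new macros play out, as an expansion sketch (not part of the patch):

/*
 * On an SMP kernel the lock loop now assembles roughly as
 *
 *	1:	ldrex	r0, [r1]
 *		teq	r0, #0
 *		wfene			@ sleep until another CPU issues SEV
 *		strexeq	r0, r2, [r1]
 *		teqeq	r0, #0
 *		bne	1b
 *
 * while the .alt.smp.init records let the SMP_ON_UP fixup rewrite the
 * wfe/sev sites to nops on uniprocessor ARMv6 parts that lack the K
 * extensions.
 */
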
+ 9 - 8
arch/arm/include/asm/system.h

@@ -347,6 +347,7 @@ void cpu_idle_wait(void);
 #include <asm-generic/cmpxchg-local.h>
 
 #if __LINUX_ARM_ARCH__ < 6
+/* min ARCH < ARMv6 */
 
 #ifdef CONFIG_SMP
 #error "SMP is not supported on this platform"
@@ -365,7 +366,7 @@ void cpu_idle_wait(void);
 #include <asm-generic/cmpxchg.h>
 #endif
 
-#else	/* __LINUX_ARM_ARCH__ >= 6 */
+#else	/* min ARCH >= ARMv6 */
 
 extern void __bad_cmpxchg(volatile void *ptr, int size);
 
@@ -379,7 +380,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	unsigned long oldval, res;
 
 	switch (size) {
-#ifdef CONFIG_CPU_32v6K
+#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
 	case 1:
 		do {
 			asm volatile("@ __cmpxchg1\n"
@@ -404,7 +405,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 				: "memory", "cc");
 		} while (res);
 		break;
-#endif /* CONFIG_CPU_32v6K */
+#endif
 	case 4:
 		do {
 			asm volatile("@ __cmpxchg4\n"
@@ -450,12 +451,12 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 	unsigned long ret;
 
 	switch (size) {
-#ifndef CONFIG_CPU_32v6K
+#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
 	case 1:
 	case 2:
 		ret = __cmpxchg_local_generic(ptr, old, new, size);
 		break;
-#endif	/* !CONFIG_CPU_32v6K */
+#endif
 	default:
 		ret = __cmpxchg(ptr, old, new, size);
 	}
@@ -469,7 +470,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 				       (unsigned long)(n),		\
 				       sizeof(*(ptr))))
 
-#ifdef CONFIG_CPU_32v6K
+#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
 
 /*
  * Note : ARMv7-M (currently unsupported by Linux) does not support
@@ -524,11 +525,11 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
 					 (unsigned long long)(o),	\
 					 (unsigned long long)(n)))
 
-#else	/* !CONFIG_CPU_32v6K */
+#else /* min ARCH = ARMv6 */
 
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
 
-#endif	/* CONFIG_CPU_32v6K */
+#endif
 
 #endif	/* __LINUX_ARM_ARCH__ >= 6 */
 

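An illustrative caller (a sketch, not from the patch): with the 1- and 2-byte cases enabled for ARMv6K and later, a lock-free byte-sized state machine can use cmpxchg() directly, while V6-only builds fall back to the generic local variant:

static u8 state;	/* hypothetical shared state byte */

static int try_advance(u8 from, u8 to)
{
	/* nonzero when the transition from -> to won the race */
	return cmpxchg(&state, from, to) == from;
}
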
+ 5 - 6
arch/arm/include/asm/tls.h

@@ -28,15 +28,14 @@
 #define tls_emu		1
 #define has_tls_reg		1
 #define set_tls		set_tls_none
-#elif __LINUX_ARM_ARCH__ >= 7 ||					\
-	(__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
-#define tls_emu		0
-#define has_tls_reg		1
-#define set_tls		set_tls_v6k
-#elif __LINUX_ARM_ARCH__ == 6
+#elif defined(CONFIG_CPU_V6)
 #define tls_emu		0
 #define has_tls_reg		(elf_hwcap & HWCAP_TLS)
 #define set_tls		set_tls_v6
+#elif defined(CONFIG_CPU_32v6K)
+#define tls_emu		0
+#define has_tls_reg		1
+#define set_tls		set_tls_v6k
 #else
 #define tls_emu		0
 #define has_tls_reg		0

+ 1 - 0
arch/arm/include/asm/traps.h

@@ -45,6 +45,7 @@ static inline int in_exception_text(unsigned long ptr)
 
 extern void __init early_trap_init(void);
 extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
+extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs);
 
 extern void *vectors_page;
 

+ 1 - 1
arch/arm/include/asm/user.h

@@ -71,7 +71,7 @@ struct user{
 				/* the registers. */
   unsigned long magic;		/* To uniquely identify a core file */
   char u_comm[32];		/* User command that was responsible */
-  int u_debugreg[8];
+  int u_debugreg[8];		/* No longer used */
   struct user_fp u_fp;		/* FP state */
   struct user_fp_struct * u_fp0;/* Used by gdb to help find the values for */
   				/* the FP registers. */

+ 1 - 0
arch/arm/kernel/Makefile

@@ -29,6 +29,7 @@ obj-$(CONFIG_MODULES)		+= armksyms.o module.o
 obj-$(CONFIG_ARTHUR)		+= arthur.o
 obj-$(CONFIG_ISA_DMA)		+= dma-isa.o
 obj-$(CONFIG_PCI)		+= bios32.o isa.o
+obj-$(CONFIG_PM)		+= sleep.o
 obj-$(CONFIG_HAVE_SCHED_CLOCK)	+= sched_clock.o
 obj-$(CONFIG_SMP)		+= smp.o smp_tlb.o
 obj-$(CONFIG_HAVE_ARM_SCU)	+= smp_scu.o

+ 10 - 12
arch/arm/kernel/armksyms.c

@@ -140,24 +140,18 @@ EXPORT_SYMBOL(__aeabi_ulcmp);
 #endif
 
 	/* bitops */
-EXPORT_SYMBOL(_set_bit_le);
-EXPORT_SYMBOL(_test_and_set_bit_le);
-EXPORT_SYMBOL(_clear_bit_le);
-EXPORT_SYMBOL(_test_and_clear_bit_le);
-EXPORT_SYMBOL(_change_bit_le);
-EXPORT_SYMBOL(_test_and_change_bit_le);
+EXPORT_SYMBOL(_set_bit);
+EXPORT_SYMBOL(_test_and_set_bit);
+EXPORT_SYMBOL(_clear_bit);
+EXPORT_SYMBOL(_test_and_clear_bit);
+EXPORT_SYMBOL(_change_bit);
+EXPORT_SYMBOL(_test_and_change_bit);
 EXPORT_SYMBOL(_find_first_zero_bit_le);
 EXPORT_SYMBOL(_find_next_zero_bit_le);
 EXPORT_SYMBOL(_find_first_bit_le);
 EXPORT_SYMBOL(_find_next_bit_le);
 
 #ifdef __ARMEB__
-EXPORT_SYMBOL(_set_bit_be);
-EXPORT_SYMBOL(_test_and_set_bit_be);
-EXPORT_SYMBOL(_clear_bit_be);
-EXPORT_SYMBOL(_test_and_clear_bit_be);
-EXPORT_SYMBOL(_change_bit_be);
-EXPORT_SYMBOL(_test_and_change_bit_be);
 EXPORT_SYMBOL(_find_first_zero_bit_be);
 EXPORT_SYMBOL(_find_next_zero_bit_be);
 EXPORT_SYMBOL(_find_first_bit_be);
@@ -170,3 +164,7 @@ EXPORT_SYMBOL(mcount);
 #endif
 EXPORT_SYMBOL(__gnu_mcount_nc);
 #endif
+
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+EXPORT_SYMBOL(__pv_phys_offset);
+#endif

+ 11 - 0
arch/arm/kernel/asm-offsets.c

@@ -13,6 +13,9 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <asm/glue-df.h>
+#include <asm/glue-pf.h>
 #include <asm/mach/arch.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
@@ -113,6 +116,14 @@ int main(void)
 #endif
 #ifdef MULTI_PABORT
   DEFINE(PROCESSOR_PABT_FUNC,	offsetof(struct processor, _prefetch_abort));
+#endif
+#ifdef MULTI_CPU
+  DEFINE(CPU_SLEEP_SIZE,	offsetof(struct processor, suspend_size));
+  DEFINE(CPU_DO_SUSPEND,	offsetof(struct processor, do_suspend));
+  DEFINE(CPU_DO_RESUME,		offsetof(struct processor, do_resume));
+#endif
+#ifdef MULTI_CACHE
+  DEFINE(CACHE_FLUSH_KERN_ALL,	offsetof(struct cpu_cache_fns, flush_kern_all));
 #endif
   BLANK();
   DEFINE(DMA_BIDIRECTIONAL,	DMA_BIDIRECTIONAL);
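Background for the hunk above: asm-offsets.c is never linked into the kernel; it exists only so the build can turn C structure offsets into assembler-visible constants that sleep.S and the entry code can use. A minimal sketch of the mechanism — the real macro lives in include/linux/kbuild.h, and the struct here is an illustrative stand-in, not the kernel's struct processor:

#include <stddef.h>

/* Emit "->SYM <value>" markers into the generated assembly; a Kbuild sed
 * script later rewrites each marker into a #define that .S files include. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct processor_like {			/* illustrative stand-in only */
	unsigned long suspend_size;
	void (*do_suspend)(void *);
	void (*do_resume)(void *);
};

int main(void)
{
	DEFINE(CPU_SLEEP_SIZE, offsetof(struct processor_like, suspend_size));
	DEFINE(CPU_DO_SUSPEND, offsetof(struct processor_like, do_suspend));
	DEFINE(CPU_DO_RESUME,  offsetof(struct processor_like, do_resume));
	return 0;
}

The new sleep.S added later in this series consumes exactly these constants, e.g. "ldr r0, [r10, #CPU_SLEEP_SIZE]".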

+ 5 - 0
arch/arm/kernel/bios32.c

@@ -583,6 +583,11 @@ void __init pci_common_init(struct hw_pci *hw)
 			 * Assign resources.
 			 */
 			pci_bus_assign_resources(bus);
+
+			/*
+			 * Enable bridges
+			 */
+			pci_enable_bridges(bus);
 		}

 		/*

+ 1 - 1
arch/arm/kernel/debug.S

@@ -25,7 +25,7 @@
 		.macro	addruart, rp, rv
 		.endm

-#if defined(CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)

 		.macro	senduart, rd, rx
 		mcr	p14, 0, \rd, c0, c5, 0

+ 2 - 1
arch/arm/kernel/entry-armv.S

@@ -16,7 +16,8 @@
  */

 #include <asm/memory.h>
-#include <asm/glue.h>
+#include <asm/glue-df.h>
+#include <asm/glue-pf.h>
 #include <asm/vfpmacros.h>
 #include <mach/entry-macro.S>
 #include <asm/thread_notify.h>

+ 7 - 7
arch/arm/kernel/entry-header.S

@@ -76,13 +76,13 @@
 #ifndef CONFIG_THUMB2_KERNEL
 	.macro	svc_exit, rpsr
 	msr	spsr_cxsf, \rpsr
-#if defined(CONFIG_CPU_32v6K)
-	clrex					@ clear the exclusive monitor
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
-#elif defined (CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6)
 	ldr	r0, [sp]
 	strex	r1, r2, [sp]			@ clear the exclusive monitor
 	ldmib	sp, {r1 - pc}^			@ load r1 - pc, cpsr
+#elif defined(CONFIG_CPU_32v6K)
+	clrex					@ clear the exclusive monitor
+	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
 #else
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
 #endif
@@ -92,10 +92,10 @@
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
 	msr	spsr_cxsf, r1			@ save in spsr_svc
-#if defined(CONFIG_CPU_32v6K)
-	clrex					@ clear the exclusive monitor
-#elif defined (CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6)
 	strex	r1, r2, [sp]			@ clear the exclusive monitor
+#elif defined(CONFIG_CPU_32v6K)
+	clrex					@ clear the exclusive monitor
 #endif
 	.if	\fast
 	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr

+ 0 - 90
arch/arm/kernel/head-common.S

@@ -25,83 +25,6 @@
 * machine ID for example).
 */
 	__HEAD
-__error_a:
-#ifdef CONFIG_DEBUG_LL
-	mov	r4, r1				@ preserve machine ID
-	adr	r0, str_a1
-	bl	printascii
-	mov	r0, r4
-	bl	printhex8
-	adr	r0, str_a2
-	bl	printascii
-	adr	r3, __lookup_machine_type_data
-	ldmia	r3, {r4, r5, r6}		@ get machine desc list
-	sub	r4, r3, r4			@ get offset between virt&phys
-	add	r5, r5, r4			@ convert virt addresses to
-	add	r6, r6, r4			@ physical address space
-1:	ldr	r0, [r5, #MACHINFO_TYPE]	@ get machine type
-	bl	printhex8
-	mov	r0, #'\t'
-	bl	printch
-	ldr     r0, [r5, #MACHINFO_NAME]	@ get machine name
-	add	r0, r0, r4
-	bl	printascii
-	mov	r0, #'\n'
-	bl	printch
-	add	r5, r5, #SIZEOF_MACHINE_DESC	@ next machine_desc
-	cmp	r5, r6
-	blo	1b
-	adr	r0, str_a3
-	bl	printascii
-	b	__error
-ENDPROC(__error_a)
-
-str_a1:	.asciz	"\nError: unrecognized/unsupported machine ID (r1 = 0x"
-str_a2:	.asciz	").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
-str_a3:	.asciz	"\nPlease check your kernel config and/or bootloader.\n"
-	.align
-#else
-	b	__error
-#endif
-
-/*
- * Lookup machine architecture in the linker-build list of architectures.
- * Note that we can't use the absolute addresses for the __arch_info
- * lists since we aren't running with the MMU on (and therefore, we are
- * not in the correct address space).  We have to calculate the offset.
- *
- *  r1 = machine architecture number
- * Returns:
- *  r3, r4, r6 corrupted
- *  r5 = mach_info pointer in physical address space
- */
-__lookup_machine_type:
-	adr	r3, __lookup_machine_type_data
-	ldmia	r3, {r4, r5, r6}
-	sub	r3, r3, r4			@ get offset between virt&phys
-	add	r5, r5, r3			@ convert virt addresses to
-	add	r6, r6, r3			@ physical address space
-1:	ldr	r3, [r5, #MACHINFO_TYPE]	@ get machine type
-	teq	r3, r1				@ matches loader number?
-	beq	2f				@ found
-	add	r5, r5, #SIZEOF_MACHINE_DESC	@ next machine_desc
-	cmp	r5, r6
-	blo	1b
-	mov	r5, #0				@ unknown machine
-2:	mov	pc, lr
-ENDPROC(__lookup_machine_type)
-
-/*
- * Look in arch/arm/kernel/arch.[ch] for information about the
- * __arch_info structures.
- */
-	.align	2
-	.type	__lookup_machine_type_data, %object
-__lookup_machine_type_data:
-	.long	.
-	.long	__arch_info_begin
-	.long	__arch_info_end
-	.size	__lookup_machine_type_data, . - __lookup_machine_type_data
 
 
 /* Determine validity of the r2 atags pointer.  The heuristic requires
  * that the pointer be aligned, in the first 16k of physical RAM and
@@ -109,8 +32,6 @@ __lookup_machine_type_data:
 * of this function may be more lenient with the physical address and
 * may also be able to move the ATAGS block if necessary.
 *
- * r8  = machinfo
- *
 * Returns:
 *  r2 either valid atags pointer, or zero
 *  r5, r6 corrupted
@@ -184,17 +105,6 @@ __mmap_switched_data:
 	.long	init_thread_union + THREAD_START_SP @ sp
 	.size	__mmap_switched_data, . - __mmap_switched_data

-/*
- * This provides a C-API version of __lookup_machine_type
- */
-ENTRY(lookup_machine_type)
-	stmfd	sp!, {r4 - r6, lr}
-	mov	r1, r0
-	bl	__lookup_machine_type
-	mov	r0, r5
-	ldmfd	sp!, {r4 - r6, pc}
-ENDPROC(lookup_machine_type)
-
 /*
  * This provides a C-API version of __lookup_processor_type
  */

+ 0 - 3
arch/arm/kernel/head-nommu.S

@@ -44,9 +44,6 @@ ENTRY(stext)
 	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
 	movs	r10, r5				@ invalid processor (r5=0)?
 	beq	__error_p				@ yes, error 'p'
-	bl	__lookup_machine_type		@ r5=machinfo
-	movs	r8, r5				@ invalid machine (r5=0)?
-	beq	__error_a			@ yes, error 'a'

 	adr	lr, BSYM(__after_proc_init)	@ return (PIC) address
 ARM(	add	pc, r10, #PROCINFO_INITFUNC	)

+ 160 - 31
arch/arm/kernel/head.S

@@ -26,14 +26,6 @@
 #include <mach/debug-macro.S>
 #endif

-#if (PHYS_OFFSET & 0x001fffff)
-#error "PHYS_OFFSET must be at an even 2MiB boundary!"
-#endif
-
-#define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)
-#define KERNEL_RAM_PADDR	(PHYS_OFFSET + TEXT_OFFSET)
-
-
 /*
  * swapper_pg_dir is the virtual address of the initial page table.
  * We place the page tables 16K below KERNEL_RAM_VADDR.  Therefore, we must
@@ -41,6 +33,7 @@
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
 */
+#define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)
 #if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
 #error KERNEL_RAM_VADDR must start at 0xXXXX8000
 #endif
@@ -48,8 +41,8 @@
 	.globl	swapper_pg_dir
 	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - 0x4000

-	.macro	pgtbl, rd
-	ldr	\rd, =(KERNEL_RAM_PADDR - 0x4000)
+	.macro	pgtbl, rd, phys
+	add	\rd, \phys, #TEXT_OFFSET - 0x4000
 	.endm

 #ifdef CONFIG_XIP_KERNEL
@@ -87,25 +80,33 @@ ENTRY(stext)
 	movs	r10, r5				@ invalid processor (r5=0)?
 THUMB( it	eq )		@ force fixup-able long branch encoding
 	beq	__error_p			@ yes, error 'p'
-	bl	__lookup_machine_type		@ r5=machinfo
-	movs	r8, r5				@ invalid machine (r5=0)?
- THUMB( it	eq )		@ force fixup-able long branch encoding
-	beq	__error_a			@ yes, error 'a'
+
+#ifndef CONFIG_XIP_KERNEL
+	adr	r3, 2f
+	ldmia	r3, {r4, r8}
+	sub	r4, r3, r4			@ (PHYS_OFFSET - PAGE_OFFSET)
+	add	r8, r8, r4			@ PHYS_OFFSET
+#else
+	ldr	r8, =PLAT_PHYS_OFFSET
+#endif
 
 
 	/*
 	 * r1 = machine no, r2 = atags,
-	 * r8 = machinfo, r9 = cpuid, r10 = procinfo
+	 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
 	 */
 	bl	__vet_atags
 #ifdef CONFIG_SMP_ON_UP
 	bl	__fixup_smp
+#endif
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+	bl	__fixup_pv_table
 #endif
 	bl	__create_page_tables

 	/*
 	 * The following calls CPU specific code in a position independent
 	 * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
-	 * xxx_proc_info structure selected by __lookup_machine_type
+	 * xxx_proc_info structure selected by __lookup_processor_type
 	 * above.  On return, the CPU will be ready for the MMU to be
 	 * turned on, and r0 will hold the CPU control register value.
 	 */
@@ -118,22 +119,24 @@ ENTRY(stext)
 1:	b	__enable_mmu
 ENDPROC(stext)
 	.ltorg
+#ifndef CONFIG_XIP_KERNEL
+2:	.long	.
+	.long	PAGE_OFFSET
+#endif
 
 
 /*
  * Setup the initial page tables.  We only setup the barest
  * amount which are required to get the kernel running, which
  * generally means mapping in the kernel code.
  *
- * r8  = machinfo
- * r9  = cpuid
- * r10 = procinfo
+ * r8 = phys_offset, r9 = cpuid, r10 = procinfo
  *
  * Returns:
  *  r0, r3, r5-r7 corrupted
  *  r4 = physical page table address
  */
 __create_page_tables:
-	pgtbl	r4				@ page table address
+	pgtbl	r4, r8				@ page table address
 
 
 	/*
 	 * Clear the 16K level 1 swapper page table
@@ -189,10 +192,8 @@ __create_page_tables:
 	/*
 	 * Map some ram to cover our .data and .bss areas.
 	 */
-	orr	r3, r7, #(KERNEL_RAM_PADDR & 0xff000000)
-	.if	(KERNEL_RAM_PADDR & 0x00f00000)
-	orr	r3, r3, #(KERNEL_RAM_PADDR & 0x00f00000)
-	.endif
+	add	r3, r8, #TEXT_OFFSET
+	orr	r3, r3, r7
 	add	r0, r4,  #(KERNEL_RAM_VADDR & 0xff000000) >> 18
 	str	r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> 18]!
 	ldr	r6, =(_end - 1)
@@ -205,14 +206,17 @@ __create_page_tables:
 #endif

 	/*
-	 * Then map first 1MB of ram in case it contains our boot params.
+	 * Then map boot params address in r2 or
+	 * the first 1MB of ram if boot params address is not specified.
 	 */
-	add	r0, r4, #PAGE_OFFSET >> 18
-	orr	r6, r7, #(PHYS_OFFSET & 0xff000000)
-	.if	(PHYS_OFFSET & 0x00f00000)
-	orr	r6, r6, #(PHYS_OFFSET & 0x00f00000)
-	.endif
-	str	r6, [r0]
+	mov	r0, r2, lsr #20
+	movs	r0, r0, lsl #20
+	moveq	r0, r8
+	sub	r3, r0, r8
+	add	r3, r3, #PAGE_OFFSET
+	add	r3, r4, r3, lsr #18
+	orr	r6, r7, r0
+	str	r6, [r3]
 
 
 #ifdef CONFIG_DEBUG_LL
 #ifndef CONFIG_DEBUG_ICEDCC
@@ -457,4 +461,129 @@ ENTRY(fixup_smp)
 	ldmfd	sp!, {r4 - r6, pc}
 ENDPROC(fixup_smp)

+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+
+/* __fixup_pv_table - patch the stub instructions with the delta between
+ * PHYS_OFFSET and PAGE_OFFSET, which is assumed to be 16MiB aligned and
+ * can be expressed by an immediate shifter operand. The stub instruction
+ * has a form of '(add|sub) rd, rn, #imm'.
+ */
+	__HEAD
+__fixup_pv_table:
+	adr	r0, 1f
+	ldmia	r0, {r3-r5, r7}
+	sub	r3, r0, r3	@ PHYS_OFFSET - PAGE_OFFSET
+	add	r4, r4, r3	@ adjust table start address
+	add	r5, r5, r3	@ adjust table end address
+	add	r7, r7, r3	@ adjust __pv_phys_offset address
+	str	r8, [r7]	@ save computed PHYS_OFFSET to __pv_phys_offset
+#ifndef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+	mov	r6, r3, lsr #24	@ constant for add/sub instructions
+	teq	r3, r6, lsl #24 @ must be 16MiB aligned
+#else
+	mov	r6, r3, lsr #16	@ constant for add/sub instructions
+	teq	r3, r6, lsl #16	@ must be 64kiB aligned
+#endif
+THUMB(	it	ne		@ cross section branch )
+	bne	__error
+	str	r6, [r7, #4]	@ save to __pv_offset
+	b	__fixup_a_pv_table
+ENDPROC(__fixup_pv_table)
+
+	.align
+1:	.long	.
+	.long	__pv_table_begin
+	.long	__pv_table_end
+2:	.long	__pv_phys_offset
+
+	.text
+__fixup_a_pv_table:
+#ifdef CONFIG_THUMB2_KERNEL
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+	lsls	r0, r6, #24
+	lsr	r6, #8
+	beq	1f
+	clz	r7, r0
+	lsr	r0, #24
+	lsl	r0, r7
+	bic	r0, 0x0080
+	lsrs	r7, #1
+	orrcs   r0, #0x0080
+	orr	r0, r0, r7, lsl #12
+#endif
+1:	lsls	r6, #24
+	beq	4f
+	clz	r7, r6
+	lsr	r6, #24
+	lsl	r6, r7
+	bic	r6, #0x0080
+	lsrs	r7, #1
+	orrcs	r6, #0x0080
+	orr	r6, r6, r7, lsl #12
+	orr	r6, #0x4000
+	b	4f
+2:	@ at this point the C flag is always clear
+	add     r7, r3
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+	ldrh	ip, [r7]
+	tst	ip, 0x0400	@ the i bit tells us LS or MS byte
+	beq	3f
+	cmp	r0, #0		@ set C flag, and ...
+	biceq	ip, 0x0400	@ immediate zero value has a special encoding
+	streqh	ip, [r7]	@ that requires the i bit cleared
+#endif
+3:	ldrh	ip, [r7, #2]
+	and	ip, 0x8f00
+	orrcc	ip, r6	@ mask in offset bits 31-24
+	orrcs	ip, r0	@ mask in offset bits 23-16
+	strh	ip, [r7, #2]
+4:	cmp	r4, r5
+	ldrcc	r7, [r4], #4	@ use branch for delay slot
+	bcc	2b
+	bx	lr
+#else
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
+	and	r0, r6, #255	@ offset bits 23-16
+	mov	r6, r6, lsr #8	@ offset bits 31-24
+#else
+	mov	r0, #0		@ just in case...
+#endif
+	b	3f
+2:	ldr	ip, [r7, r3]
+	bic	ip, ip, #0x000000ff
+	tst	ip, #0x400	@ rotate shift tells us LS or MS byte
+	orrne	ip, ip, r6	@ mask in offset bits 31-24
+	orreq	ip, ip, r0	@ mask in offset bits 23-16
+	str	ip, [r7, r3]
+3:	cmp	r4, r5
+	ldrcc	r7, [r4], #4	@ use branch for delay slot
+	bcc	2b
+	mov	pc, lr
+#endif
+ENDPROC(__fixup_a_pv_table)
+
+ENTRY(fixup_pv_table)
+	stmfd	sp!, {r4 - r7, lr}
+	ldr	r2, 2f			@ get address of __pv_phys_offset
+	mov	r3, #0			@ no offset
+	mov	r4, r0			@ r0 = table start
+	add	r5, r0, r1		@ r1 = table size
+	ldr	r6, [r2, #4]		@ get __pv_offset
+	bl	__fixup_a_pv_table
+	ldmfd	sp!, {r4 - r7, pc}
+ENDPROC(fixup_pv_table)
+
+	.align
+2:	.long	__pv_phys_offset
+
+	.data
+	.globl	__pv_phys_offset
+	.type	__pv_phys_offset, %object
+__pv_phys_offset:
+	.long	0
+	.size	__pv_phys_offset, . - __pv_phys_offset
+__pv_offset:
+	.long	0
+#endif
+
 #include "head-common.S"
 #include "head-common.S"

+ 31 - 19
arch/arm/kernel/irq.c

@@ -179,14 +179,21 @@ int __init arch_probe_nr_irqs(void)
 
 
 #ifdef CONFIG_HOTPLUG_CPU

-static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
+static bool migrate_one_irq(struct irq_data *d)
 {
-	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->irq_data.node, cpu);
+	unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask);
+	bool ret = false;
 
 
-	raw_spin_lock_irq(&desc->lock);
-	desc->irq_data.chip->irq_set_affinity(&desc->irq_data,
-					      cpumask_of(cpu), false);
-	raw_spin_unlock_irq(&desc->lock);
+	if (cpu >= nr_cpu_ids) {
+		cpu = cpumask_any(cpu_online_mask);
+		ret = true;
+	}
+
+	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu);
+
+	d->chip->irq_set_affinity(d, cpumask_of(cpu), true);
+
+	return ret;
 }

 /*
@@ -198,25 +205,30 @@ void migrate_irqs(void)
 {
 	unsigned int i, cpu = smp_processor_id();
 	struct irq_desc *desc;
+	unsigned long flags;
+
+	local_irq_save(flags);
 
 
 	for_each_irq_desc(i, desc) {
 		struct irq_data *d = &desc->irq_data;
+		bool affinity_broken = false;
 
 
-		if (d->node == cpu) {
-			unsigned int newcpu = cpumask_any_and(d->affinity,
-							      cpu_online_mask);
-			if (newcpu >= nr_cpu_ids) {
-				if (printk_ratelimit())
-					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
-					       i, cpu);
+		raw_spin_lock(&desc->lock);
+		do {
+			if (desc->action == NULL)
+				break;

-				cpumask_setall(d->affinity);
-				newcpu = cpumask_any_and(d->affinity,
-							 cpu_online_mask);
-			}
+			if (d->node != cpu)
+				break;

-			route_irq(desc, i, newcpu);
-		}
+			affinity_broken = migrate_one_irq(d);
+		} while (0);
+		raw_spin_unlock(&desc->lock);
+
+		if (affinity_broken && printk_ratelimit())
+			pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu);
 	}
+
+	local_irq_restore(flags);
 }
 #endif /* CONFIG_HOTPLUG_CPU */
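The rewritten migrate_irqs() above boils down to one decision per IRQ, now made under desc->lock with interrupts disabled: keep the IRQ inside its affinity mask if any of those CPUs is still online, otherwise break affinity and fall back to any online CPU. A runnable model of just that decision, with plain bitmaps standing in for struct cpumask (the kernel uses cpumask_any_and()/cpumask_any() and nr_cpu_ids for the same steps):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static unsigned int pick_target_cpu(unsigned long affinity,
				    unsigned long online, bool *broken)
{
	/* First choice: any CPU that is both affine and online. */
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		if ((affinity & online) & (1ul << cpu))
			return cpu;

	/* Affinity cannot be honoured: fall back to any online CPU. */
	*broken = true;
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		if (online & (1ul << cpu))
			return cpu;
	return 0;	/* unreachable while at least one CPU is online */
}

int main(void)
{
	bool broken = false;
	/* IRQ affine only to CPU3 while CPU3 goes down; CPUs 0-2 online */
	unsigned int cpu = pick_target_cpu(0x8, 0x7, &broken);
	printf("moved to cpu%u%s\n", cpu, broken ? " (affinity broken)" : "");
	return 0;
}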

+ 21 - 14
arch/arm/kernel/module.c

@@ -76,6 +76,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 	for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) {
 		unsigned long loc;
 		Elf32_Sym *sym;
+		const char *symname;
 		s32 offset;
 #ifdef CONFIG_THUMB2_KERNEL
 		u32 upper, lower, sign, j1, j2;
@@ -83,18 +84,18 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 
 
 		offset = ELF32_R_SYM(rel->r_info);
 		if (offset < 0 || offset > (symsec->sh_size / sizeof(Elf32_Sym))) {
-			printk(KERN_ERR "%s: bad relocation, section %d reloc %d\n",
+			pr_err("%s: section %u reloc %u: bad relocation sym offset\n",
 				module->name, relindex, i);
 			return -ENOEXEC;
 		}

 		sym = ((Elf32_Sym *)symsec->sh_addr) + offset;
+		symname = strtab + sym->st_name;
 
 
 		if (rel->r_offset < 0 || rel->r_offset > dstsec->sh_size - sizeof(u32)) {
-			printk(KERN_ERR "%s: out of bounds relocation, "
-				"section %d reloc %d offset %d size %d\n",
-				module->name, relindex, i, rel->r_offset,
-				dstsec->sh_size);
+			pr_err("%s: section %u reloc %u sym '%s': out of bounds relocation, offset %d size %u\n",
+			       module->name, relindex, i, symname,
+			       rel->r_offset, dstsec->sh_size);
 			return -ENOEXEC;
 		}

@@ -120,10 +121,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			if (offset & 3 ||
 			    offset <= (s32)0xfe000000 ||
 			    offset >= (s32)0x02000000) {
-				printk(KERN_ERR
-				       "%s: relocation out of range, section "
-				       "%d reloc %d sym '%s'\n", module->name,
-				       relindex, i, strtab + sym->st_name);
+				pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
+				       module->name, relindex, i, symname,
+				       ELF32_R_TYPE(rel->r_info), loc,
+				       sym->st_value);
 				return -ENOEXEC;
 			}

@@ -196,10 +197,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			if (!(offset & 1) ||
 			    offset <= (s32)0xff000000 ||
 			    offset >= (s32)0x01000000) {
-				printk(KERN_ERR
-				       "%s: relocation out of range, section "
-				       "%d reloc %d sym '%s'\n", module->name,
-				       relindex, i, strtab + sym->st_name);
+				pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
+				       module->name, relindex, i, symname,
+				       ELF32_R_TYPE(rel->r_info), loc,
+				       sym->st_value);
 				return -ENOEXEC;
 			}

@@ -282,12 +283,13 @@ static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
 	return NULL;
 }

+extern void fixup_pv_table(const void *, unsigned long);
 extern void fixup_smp(const void *, unsigned long);

 int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
 		    struct module *mod)
 {
-	const Elf_Shdr * __maybe_unused s = NULL;
+	const Elf_Shdr *s = NULL;
 #ifdef CONFIG_ARM_UNWIND
 	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 	const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
@@ -331,6 +333,11 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
 					         maps[i].unw_sec->sh_size,
 					         maps[i].txt_sec->sh_addr,
 					         maps[i].txt_sec->sh_size);
+#endif
+#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
+	s = find_mod_section(hdr, sechdrs, ".pv_table");
+	if (s)
+		fixup_pv_table((void *)s->sh_addr, s->sh_size);
 #endif
 	s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
 	if (s && !is_smp())
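The range checks that these pr_err() calls now report on come straight from the instruction encoding: ARM B/BL encodes a signed 24-bit word offset (the Thumb-2 hunk uses the analogous +/-16MiB bounds), so a module relocation must land strictly within about +/-32MiB of the call site. The test in isolation, as a runnable worked example:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* In-range check for R_ARM_PC24/R_ARM_CALL/R_ARM_JUMP24: word-aligned
 * and strictly between -32MiB and +32MiB. */
static bool arm_branch_in_range(int32_t offset)
{
	return !(offset & 3) &&
	       offset > (int32_t)0xfe000000 &&	/* > -32 MiB */
	       offset < (int32_t)0x02000000;	/* < +32 MiB */
}

int main(void)
{
	printf("%d\n", arm_branch_in_range(0x01fffffc));	/* 1: just in reach */
	printf("%d\n", arm_branch_in_range(0x02000000));	/* 0: too far */
	return 0;
}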

+ 2 - 2
arch/arm/kernel/perf_event_v6.c

@@ -30,7 +30,7 @@
 * enable the interrupt.
 */

-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 enum armv6_perf_types {
 	ARMV6_PERFCTR_ICACHE_MISS	    = 0x0,
 	ARMV6_PERFCTR_IBUF_STALL	    = 0x1,
@@ -669,4 +669,4 @@ static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
 {
 	return NULL;
 }
-#endif	/* CONFIG_CPU_V6 */
+#endif	/* CONFIG_CPU_V6 || CONFIG_CPU_V6K */

+ 1 - 382
arch/arm/kernel/ptrace.c

@@ -26,8 +26,6 @@
 #include <asm/system.h>
 #include <asm/traps.h>

-#include "ptrace.h"
-
 #define REG_PC	15
 #define REG_PSR	16
 /*
@@ -184,389 +182,12 @@ put_user_reg(struct task_struct *task, int offset, long data)
 	return ret;
 }

-static inline int
-read_u32(struct task_struct *task, unsigned long addr, u32 *res)
-{
-	int ret;
-
-	ret = access_process_vm(task, addr, res, sizeof(*res), 0);
-
-	return ret == sizeof(*res) ? 0 : -EIO;
-}
-
-static inline int
-read_instr(struct task_struct *task, unsigned long addr, u32 *res)
-{
-	int ret;
-
-	if (addr & 1) {
-		u16 val;
-		ret = access_process_vm(task, addr & ~1, &val, sizeof(val), 0);
-		ret = ret == sizeof(val) ? 0 : -EIO;
-		*res = val;
-	} else {
-		u32 val;
-		ret = access_process_vm(task, addr & ~3, &val, sizeof(val), 0);
-		ret = ret == sizeof(val) ? 0 : -EIO;
-		*res = val;
-	}
-	return ret;
-}
-
-/*
- * Get value of register `rn' (in the instruction)
- */
-static unsigned long
-ptrace_getrn(struct task_struct *child, unsigned long insn)
-{
-	unsigned int reg = (insn >> 16) & 15;
-	unsigned long val;
-
-	val = get_user_reg(child, reg);
-	if (reg == 15)
-		val += 8;
-
-	return val;
-}
-
-/*
- * Get value of operand 2 (in an ALU instruction)
- */
-static unsigned long
-ptrace_getaluop2(struct task_struct *child, unsigned long insn)
-{
-	unsigned long val;
-	int shift;
-	int type;
-
-	if (insn & 1 << 25) {
-		val = insn & 255;
-		shift = (insn >> 8) & 15;
-		type = 3;
-	} else {
-		val = get_user_reg (child, insn & 15);
-
-		if (insn & (1 << 4))
-			shift = (int)get_user_reg (child, (insn >> 8) & 15);
-		else
-			shift = (insn >> 7) & 31;
-
-		type = (insn >> 5) & 3;
-	}
-
-	switch (type) {
-	case 0:	val <<= shift;	break;
-	case 1:	val >>= shift;	break;
-	case 2:
-		val = (((signed long)val) >> shift);
-		break;
-	case 3:
- 		val = (val >> shift) | (val << (32 - shift));
-		break;
-	}
-	return val;
-}
-
-/*
- * Get value of operand 2 (in a LDR instruction)
- */
-static unsigned long
-ptrace_getldrop2(struct task_struct *child, unsigned long insn)
-{
-	unsigned long val;
-	int shift;
-	int type;
-
-	val = get_user_reg(child, insn & 15);
-	shift = (insn >> 7) & 31;
-	type = (insn >> 5) & 3;
-
-	switch (type) {
-	case 0:	val <<= shift;	break;
-	case 1:	val >>= shift;	break;
-	case 2:
-		val = (((signed long)val) >> shift);
-		break;
-	case 3:
- 		val = (val >> shift) | (val << (32 - shift));
-		break;
-	}
-	return val;
-}
-
-#define OP_MASK	0x01e00000
-#define OP_AND	0x00000000
-#define OP_EOR	0x00200000
-#define OP_SUB	0x00400000
-#define OP_RSB	0x00600000
-#define OP_ADD	0x00800000
-#define OP_ADC	0x00a00000
-#define OP_SBC	0x00c00000
-#define OP_RSC	0x00e00000
-#define OP_ORR	0x01800000
-#define OP_MOV	0x01a00000
-#define OP_BIC	0x01c00000
-#define OP_MVN	0x01e00000
-
-static unsigned long
-get_branch_address(struct task_struct *child, unsigned long pc, unsigned long insn)
-{
-	u32 alt = 0;
-
-	switch (insn & 0x0e000000) {
-	case 0x00000000:
-	case 0x02000000: {
-		/*
-		 * data processing
-		 */
-		long aluop1, aluop2, ccbit;
-
-	        if ((insn & 0x0fffffd0) == 0x012fff10) {
-		        /*
-			 * bx or blx
-			 */
-			alt = get_user_reg(child, insn & 15);
-			break;
-		}
-
-
-		if ((insn & 0xf000) != 0xf000)
-			break;
-
-		aluop1 = ptrace_getrn(child, insn);
-		aluop2 = ptrace_getaluop2(child, insn);
-		ccbit  = get_user_reg(child, REG_PSR) & PSR_C_BIT ? 1 : 0;
-
-		switch (insn & OP_MASK) {
-		case OP_AND: alt = aluop1 & aluop2;		break;
-		case OP_EOR: alt = aluop1 ^ aluop2;		break;
-		case OP_SUB: alt = aluop1 - aluop2;		break;
-		case OP_RSB: alt = aluop2 - aluop1;		break;
-		case OP_ADD: alt = aluop1 + aluop2;		break;
-		case OP_ADC: alt = aluop1 + aluop2 + ccbit;	break;
-		case OP_SBC: alt = aluop1 - aluop2 + ccbit;	break;
-		case OP_RSC: alt = aluop2 - aluop1 + ccbit;	break;
-		case OP_ORR: alt = aluop1 | aluop2;		break;
-		case OP_MOV: alt = aluop2;			break;
-		case OP_BIC: alt = aluop1 & ~aluop2;		break;
-		case OP_MVN: alt = ~aluop2;			break;
-		}
-		break;
-	}
-
-	case 0x04000000:
-	case 0x06000000:
-		/*
-		 * ldr
-		 */
-		if ((insn & 0x0010f000) == 0x0010f000) {
-			unsigned long base;
-
-			base = ptrace_getrn(child, insn);
-			if (insn & 1 << 24) {
-				long aluop2;
-
-				if (insn & 0x02000000)
-					aluop2 = ptrace_getldrop2(child, insn);
-				else
-					aluop2 = insn & 0xfff;
-
-				if (insn & 1 << 23)
-					base += aluop2;
-				else
-					base -= aluop2;
-			}
-			read_u32(child, base, &alt);
-		}
-		break;
-
-	case 0x08000000:
-		/*
-		 * ldm
-		 */
-		if ((insn & 0x00108000) == 0x00108000) {
-			unsigned long base;
-			unsigned int nr_regs;
-
-			if (insn & (1 << 23)) {
-				nr_regs = hweight16(insn & 65535) << 2;
-
-				if (!(insn & (1 << 24)))
-					nr_regs -= 4;
-			} else {
-				if (insn & (1 << 24))
-					nr_regs = -4;
-				else
-					nr_regs = 0;
-			}
-
-			base = ptrace_getrn(child, insn);
-
-			read_u32(child, base + nr_regs, &alt);
-			break;
-		}
-		break;
-
-	case 0x0a000000: {
-		/*
-		 * bl or b
-		 */
-		signed long displ;
-		/* It's a branch/branch link: instead of trying to
-		 * figure out whether the branch will be taken or not,
-		 * we'll put a breakpoint at both locations.  This is
-		 * simpler, more reliable, and probably not a whole lot
-		 * slower than the alternative approach of emulating the
-		 * branch.
-		 */
-		displ = (insn & 0x00ffffff) << 8;
-		displ = (displ >> 6) + 8;
-		if (displ != 0 && displ != 4)
-			alt = pc + displ;
-	    }
-	    break;
-	}
-
-	return alt;
-}
-
-static int
-swap_insn(struct task_struct *task, unsigned long addr,
-	  void *old_insn, void *new_insn, int size)
-{
-	int ret;
-
-	ret = access_process_vm(task, addr, old_insn, size, 0);
-	if (ret == size)
-		ret = access_process_vm(task, addr, new_insn, size, 1);
-	return ret;
-}
-
-static void
-add_breakpoint(struct task_struct *task, struct debug_info *dbg, unsigned long addr)
-{
-	int nr = dbg->nsaved;
-
-	if (nr < 2) {
-		u32 new_insn = BREAKINST_ARM;
-		int res;
-
-		res = swap_insn(task, addr, &dbg->bp[nr].insn, &new_insn, 4);
-
-		if (res == 4) {
-			dbg->bp[nr].address = addr;
-			dbg->nsaved += 1;
-		}
-	} else
-		printk(KERN_ERR "ptrace: too many breakpoints\n");
-}
-
-/*
- * Clear one breakpoint in the user program.  We copy what the hardware
- * does and use bit 0 of the address to indicate whether this is a Thumb
- * breakpoint or an ARM breakpoint.
- */
-static void clear_breakpoint(struct task_struct *task, struct debug_entry *bp)
-{
-	unsigned long addr = bp->address;
-	union debug_insn old_insn;
-	int ret;
-
-	if (addr & 1) {
-		ret = swap_insn(task, addr & ~1, &old_insn.thumb,
-				&bp->insn.thumb, 2);
-
-		if (ret != 2 || old_insn.thumb != BREAKINST_THUMB)
-			printk(KERN_ERR "%s:%d: corrupted Thumb breakpoint at "
-				"0x%08lx (0x%04x)\n", task->comm,
-				task_pid_nr(task), addr, old_insn.thumb);
-	} else {
-		ret = swap_insn(task, addr & ~3, &old_insn.arm,
-				&bp->insn.arm, 4);
-
-		if (ret != 4 || old_insn.arm != BREAKINST_ARM)
-			printk(KERN_ERR "%s:%d: corrupted ARM breakpoint at "
-				"0x%08lx (0x%08x)\n", task->comm,
-				task_pid_nr(task), addr, old_insn.arm);
-	}
-}
-
-void ptrace_set_bpt(struct task_struct *child)
-{
-	struct pt_regs *regs;
-	unsigned long pc;
-	u32 insn;
-	int res;
-
-	regs = task_pt_regs(child);
-	pc = instruction_pointer(regs);
-
-	if (thumb_mode(regs)) {
-		printk(KERN_WARNING "ptrace: can't handle thumb mode\n");
-		return;
-	}
-
-	res = read_instr(child, pc, &insn);
-	if (!res) {
-		struct debug_info *dbg = &child->thread.debug;
-		unsigned long alt;
-
-		dbg->nsaved = 0;
-
-		alt = get_branch_address(child, pc, insn);
-		if (alt)
-			add_breakpoint(child, dbg, alt);
-
-		/*
-		 * Note that we ignore the result of setting the above
-		 * breakpoint since it may fail.  When it does, this is
-		 * not so much an error, but a forewarning that we may
-		 * be receiving a prefetch abort shortly.
-		 *
-		 * If we don't set this breakpoint here, then we can
-		 * lose control of the thread during single stepping.
-		 */
-		if (!alt || predicate(insn) != PREDICATE_ALWAYS)
-			add_breakpoint(child, dbg, pc + 4);
-	}
-}
-
-/*
- * Ensure no single-step breakpoint is pending.  Returns non-zero
- * value if child was being single-stepped.
- */
-void ptrace_cancel_bpt(struct task_struct *child)
-{
-	int i, nsaved = child->thread.debug.nsaved;
-
-	child->thread.debug.nsaved = 0;
-
-	if (nsaved > 2) {
-		printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved);
-		nsaved = 2;
-	}
-
-	for (i = 0; i < nsaved; i++)
-		clear_breakpoint(child, &child->thread.debug.bp[i]);
-}
-
-void user_disable_single_step(struct task_struct *task)
-{
-	task->ptrace &= ~PT_SINGLESTEP;
-	ptrace_cancel_bpt(task);
-}
-
-void user_enable_single_step(struct task_struct *task)
-{
-	task->ptrace |= PT_SINGLESTEP;
-}
-
 /*
  * Called by kernel/ptrace.c when detaching..
  */
 void ptrace_disable(struct task_struct *child)
 {
-	user_disable_single_step(child);
+	/* Nothing to do. */
 }

 /*
@@ -576,8 +197,6 @@ void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
 {
 	siginfo_t info;

-	ptrace_cancel_bpt(tsk);
-
 	info.si_signo = SIGTRAP;
 	info.si_errno = 0;
 	info.si_code  = TRAP_BRKPT;

+ 0 - 37
arch/arm/kernel/ptrace.h

@@ -1,37 +0,0 @@
-/*
- *  linux/arch/arm/kernel/ptrace.h
- *
- *  Copyright (C) 2000-2003 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/ptrace.h>
-
-extern void ptrace_cancel_bpt(struct task_struct *);
-extern void ptrace_set_bpt(struct task_struct *);
-extern void ptrace_break(struct task_struct *, struct pt_regs *);
-
-/*
- * Send SIGTRAP if we're single-stepping
- */
-static inline void single_step_trap(struct task_struct *task)
-{
-	if (task->ptrace & PT_SINGLESTEP) {
-		ptrace_cancel_bpt(task);
-		send_sig(SIGTRAP, task, 1);
-	}
-}
-
-static inline void single_step_clear(struct task_struct *task)
-{
-	if (task->ptrace & PT_SINGLESTEP)
-		ptrace_cancel_bpt(task);
-}
-
-static inline void single_step_set(struct task_struct *task)
-{
-	if (task->ptrace & PT_SINGLESTEP)
-		ptrace_set_bpt(task);
-}

+ 1 - 0
arch/arm/kernel/return_address.c

@@ -9,6 +9,7 @@
 * the Free Software Foundation.
 */
 #include <linux/module.h>
+#include <linux/ftrace.h>
 
 
 #if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
 #include <linux/sched.h>

+ 55 - 13
arch/arm/kernel/setup.c

@@ -308,7 +308,22 @@ static void __init cacheid_init(void)
 * already provide the required functionality.
 */
 extern struct proc_info_list *lookup_processor_type(unsigned int);
-extern struct machine_desc *lookup_machine_type(unsigned int);
+
+static void __init early_print(const char *str, ...)
+{
+	extern void printascii(const char *);
+	char buf[256];
+	va_list ap;
+
+	va_start(ap, str);
+	vsnprintf(buf, sizeof(buf), str, ap);
+	va_end(ap);
+
+#ifdef CONFIG_DEBUG_LL
+	printascii(buf);
+#endif
+	printk("%s", buf);
+}
 
 
 static void __init feat_v6_fixup(void)
 {
@@ -426,21 +441,29 @@ void cpu_init(void)
 
 
 static struct machine_desc * __init setup_machine(unsigned int nr)
 {
-	struct machine_desc *list;
+	extern struct machine_desc __arch_info_begin[], __arch_info_end[];
+	struct machine_desc *p;
 
 
 	/*
 	 * locate machine in the list of supported machines.
 	 */
-	list = lookup_machine_type(nr);
-	if (!list) {
-		printk("Machine configuration botched (nr %d), unable "
-		       "to continue.\n", nr);
-		while (1);
-	}
+	for (p = __arch_info_begin; p < __arch_info_end; p++)
+		if (nr == p->nr) {
+			printk("Machine: %s\n", p->name);
+			return p;
+		}
 
 
-	printk("Machine: %s\n", list->name);
+	early_print("\n"
+		"Error: unrecognized/unsupported machine ID (r1 = 0x%08x).\n\n"
+		"Available machine support:\n\nID (hex)\tNAME\n", nr);
 
 
-	return list;
+	for (p = __arch_info_begin; p < __arch_info_end; p++)
+		early_print("%08x\t%s\n", p->nr, p->name);
+
+	early_print("\nPlease check your kernel config and/or bootloader.\n");
+
+	while (true)
+		/* can't use cpu_relax() here as it may require MMU setup */;
 }

 static int __init arm_add_memory(unsigned long start, unsigned long size)
@@ -703,7 +726,7 @@ static struct init_tags {
 	{ tag_size(tag_core), ATAG_CORE },
 	{ 1, PAGE_SIZE, 0xff },
 	{ tag_size(tag_mem32), ATAG_MEM },
-	{ MEM_SIZE, PHYS_OFFSET },
+	{ MEM_SIZE },
 	{ 0, ATAG_NONE }
 };

@@ -802,6 +825,8 @@ void __init setup_arch(char **cmdline_p)
 	struct machine_desc *mdesc;
 	char *from = default_command_line;

+	init_tags.mem.start = PHYS_OFFSET;
+
 	unwind_init();

 	setup_processor();
@@ -814,8 +839,25 @@ void __init setup_arch(char **cmdline_p)
 
 
 	if (__atags_pointer)
 		tags = phys_to_virt(__atags_pointer);
-	else if (mdesc->boot_params)
-		tags = phys_to_virt(mdesc->boot_params);
+	else if (mdesc->boot_params) {
+#ifdef CONFIG_MMU
+		/*
+		 * We still are executing with a minimal MMU mapping created
+		 * with the presumption that the machine default for this
+		 * is located in the first MB of RAM.  Anything else will
+		 * fault and silently hang the kernel at this point.
+		 */
+		if (mdesc->boot_params < PHYS_OFFSET ||
+		    mdesc->boot_params >= PHYS_OFFSET + SZ_1M) {
+			printk(KERN_WARNING
+			       "Default boot params at physical 0x%08lx out of reach\n",
+			       mdesc->boot_params);
+		} else
+#endif
+		{
+			tags = phys_to_virt(mdesc->boot_params);
+		}
+	}
 
 
 #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
 	/*
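With __lookup_machine_type gone from head-common.S, the machine-ID lookup above is plain C over a linker-bounded array: MACHINE_START entries are emitted into one section, the linker script brackets it with __arch_info_begin/__arch_info_end, and setup_machine() simply walks the range. A runnable model — a static array stands in for the linker section, and the machine IDs are purely illustrative:

#include <stdio.h>

struct machine_desc_like { unsigned int nr; const char *name; };

static struct machine_desc_like arch_info[] = {
	{ 0x0456, "Example board A" },	/* illustrative IDs, not real ones */
	{ 0x20e4, "Example board B" },
};
#define arch_info_end (arch_info + sizeof(arch_info) / sizeof(arch_info[0]))

int main(void)
{
	unsigned int nr = 0x20e4;	/* machine ID handed over in r1 */

	for (struct machine_desc_like *p = arch_info; p < arch_info_end; p++)
		if (p->nr == nr) {
			printf("Machine: %s\n", p->name);
			return 0;
		}

	/* Unknown ID: list every supported machine, then give up. */
	printf("Error: unrecognized machine ID 0x%08x\n", nr);
	for (struct machine_desc_like *p = arch_info; p < arch_info_end; p++)
		printf("%08x\t%s\n", p->nr, p->name);
	return 1;
}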

+ 0 - 9
arch/arm/kernel/signal.c

@@ -20,7 +20,6 @@
 #include <asm/unistd.h>
 #include <asm/vfp.h>

-#include "ptrace.h"
 #include "signal.h"
 #include "signal.h"
 
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
@@ -348,8 +347,6 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs)
 	if (restore_sigframe(regs, frame))
 		goto badframe;

-	single_step_trap(current);
-
 	return regs->ARM_r0;

 badframe:
@@ -383,8 +380,6 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
 	if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
 		goto badframe;

-	single_step_trap(current);
-
 	return regs->ARM_r0;

 badframe:
@@ -706,8 +701,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
 	if (try_to_freeze())
 		goto no_signal;

-	single_step_clear(current);
-
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
 		sigset_t *oldset;
@@ -726,7 +719,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
 			if (test_thread_flag(TIF_RESTORE_SIGMASK))
 				clear_thread_flag(TIF_RESTORE_SIGMASK);
 		}
-		single_step_set(current);
 		return;
 	}

@@ -772,7 +764,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
 			sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
 		}
 	}
-	single_step_set(current);
 }

 asmlinkage void

+ 134 - 0
arch/arm/kernel/sleep.S

@@ -0,0 +1,134 @@
+#include <linux/linkage.h>
+#include <linux/threads.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+#include <asm/glue-cache.h>
+#include <asm/glue-proc.h>
+#include <asm/system.h>
+	.text
+
+/*
+ * Save CPU state for a suspend
+ *  r1 = v:p offset
+ *  r3 = virtual return function
+ * Note: sp is decremented to allocate space for CPU state on stack
+ * r0-r3,r9,r10,lr corrupted
+ */
+ENTRY(cpu_suspend)
+	mov	r9, lr
+#ifdef MULTI_CPU
+	ldr	r10, =processor
+	mov	r2, sp			@ current virtual SP
+	ldr	r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
+	ldr	ip, [r10, #CPU_DO_RESUME] @ virtual resume function
+	sub	sp, sp, r0		@ allocate CPU state on stack
+	mov	r0, sp			@ save pointer
+	add	ip, ip, r1		@ convert resume fn to phys
+	stmfd	sp!, {r1, r2, r3, ip}	@ save v:p, virt SP, retfn, phys resume fn
+	ldr	r3, =sleep_save_sp
+	add	r2, sp, r1		@ convert SP to phys
+#ifdef CONFIG_SMP
+	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
+	ALT_UP(mov lr, #0)
+	and	lr, lr, #15
+	str	r2, [r3, lr, lsl #2]	@ save phys SP
+#else
+	str	r2, [r3]		@ save phys SP
+#endif
+	mov	lr, pc
+	ldr	pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
+#else
+	mov	r2, sp			@ current virtual SP
+	ldr	r0, =cpu_suspend_size
+	sub	sp, sp, r0		@ allocate CPU state on stack
+	mov	r0, sp			@ save pointer
+	stmfd	sp!, {r1, r2, r3}	@ save v:p, virt SP, return fn
+	ldr	r3, =sleep_save_sp
+	add	r2, sp, r1		@ convert SP to phys
+#ifdef CONFIG_SMP
+	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
+	ALT_UP(mov lr, #0)
+	and	lr, lr, #15
+	str	r2, [r3, lr, lsl #2]	@ save phys SP
+#else
+	str	r2, [r3]		@ save phys SP
+#endif
+	bl	cpu_do_suspend
+#endif
+
+	@ flush data cache
+#ifdef MULTI_CACHE
+	ldr	r10, =cpu_cache
+	mov	lr, r9
+	ldr	pc, [r10, #CACHE_FLUSH_KERN_ALL]
+#else
+	mov	lr, r9
+	b	__cpuc_flush_kern_all
+#endif
+ENDPROC(cpu_suspend)
+	.ltorg
+
+/*
+ * r0 = control register value
+ * r1 = v:p offset (preserved by cpu_do_resume)
+ * r2 = phys page table base
+ * r3 = L1 section flags
+ */
+ENTRY(cpu_resume_mmu)
+	adr	r4, cpu_resume_turn_mmu_on
+	mov	r4, r4, lsr #20
+	orr	r3, r3, r4, lsl #20
+	ldr	r5, [r2, r4, lsl #2]	@ save old mapping
+	str	r3, [r2, r4, lsl #2]	@ setup 1:1 mapping for mmu code
+	sub	r2, r2, r1
+	ldr	r3, =cpu_resume_after_mmu
+	bic	r1, r0, #CR_C		@ ensure D-cache is disabled
+	b	cpu_resume_turn_mmu_on
+ENDPROC(cpu_resume_mmu)
+	.ltorg
+	.align	5
+cpu_resume_turn_mmu_on:
+	mcr	p15, 0, r1, c1, c0, 0	@ turn on MMU, I-cache, etc
+	mrc	p15, 0, r1, c0, c0, 0	@ read id reg
+	mov	r1, r1
+	mov	r1, r1
+	mov	pc, r3			@ jump to virtual address
+ENDPROC(cpu_resume_turn_mmu_on)
+cpu_resume_after_mmu:
+	str	r5, [r2, r4, lsl #2]	@ restore old mapping
+	mcr	p15, 0, r0, c1, c0, 0	@ turn on D-cache
+	mov	pc, lr
+ENDPROC(cpu_resume_after_mmu)
+
+/*
+ * Note: Yes, part of the following code is located into the .data section.
+ *       This is to allow sleep_save_sp to be accessed with a relative load
+ *       while we can't rely on any MMU translation.  We could have put
+ *       sleep_save_sp in the .text section as well, but some setups might
+ *       insist on it to be truly read-only.
+ */
+	.data
+	.align
+ENTRY(cpu_resume)
+#ifdef CONFIG_SMP
+	adr	r0, sleep_save_sp
+	ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
+	ALT_UP(mov r1, #0)
+	and	r1, r1, #15
+	ldr	r0, [r0, r1, lsl #2]	@ stack phys addr
+#else
+	ldr	r0, sleep_save_sp	@ stack phys addr
+#endif
+	msr	cpsr_c, #PSR_I_BIT | PSR_F_BIT | SVC_MODE @ set SVC, irqs off
+#ifdef MULTI_CPU
+	ldmia	r0!, {r1, sp, lr, pc}	@ load v:p, stack, return fn, resume fn
+#else
+	ldmia	r0!, {r1, sp, lr}	@ load v:p, stack, return fn
+	b	cpu_do_resume
+#endif
+ENDPROC(cpu_resume)
+
+sleep_save_sp:
+	.rept	CONFIG_NR_CPUS
+	.long	0				@ preserve stack phys ptr here
+	.endr
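A C-eye view of the frame cpu_suspend() builds above (MULTI_CPU case) may help: the four words pushed by the stmfd are laid out so that cpu_resume's single "ldmia r0!, {r1, sp, lr, pc}" restores the v:p offset, the stack, and the return function, and jumps to the physical resume routine in one instruction. The field names below are ours — the kernel only ever touches this memory through those stm/ldm pairs:

/* Illustrative layout only; not a kernel structure. */
struct suspend_frame {
	unsigned long v2p_offset;	/* -> r1: virt-to-phys delta */
	unsigned long virt_sp;		/* -> sp: virtual SP to restore */
	unsigned long ret_fn;		/* -> lr: virtual return function */
	unsigned long phys_resume;	/* -> pc: physical cpu_do_resume */
	/* ...followed by CPU_SLEEP_SIZE bytes of CPU-specific state */
};

The per-CPU physical address of this frame is parked in sleep_save_sp, which is why that symbol must live where it can be read with a PC-relative load before the MMU is on.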

+ 23 - 0
arch/arm/kernel/smp_scu.c

@@ -50,3 +50,26 @@ void __init scu_enable(void __iomem *scu_base)
 	 */
 	flush_cache_all();
 }
+
+/*
+ * Set the executing CPUs power mode as defined.  This will be in
+ * preparation for it executing a WFI instruction.
+ *
+ * This function must be called with preemption disabled, and as it
+ * has the side effect of disabling coherency, caches must have been
+ * flushed.  Interrupts must also have been disabled.
+ */
+int scu_power_mode(void __iomem *scu_base, unsigned int mode)
+{
+	unsigned int val;
+	int cpu = smp_processor_id();
+
+	if (mode > 3 || mode == 1 || cpu > 3)
+		return -EINVAL;
+
+	val = __raw_readb(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
+	val |= mode;
+	__raw_writeb(val, scu_base + SCU_CPU_STATUS + cpu);
+
+	return 0;
+}

+ 1 - 1
arch/arm/kernel/tcm.c

@@ -15,7 +15,7 @@
 #include <linux/string.h> /* memcpy */
 #include <asm/cputype.h>
 #include <asm/mach/map.h>
-#include <mach/memory.h>
+#include <asm/memory.h>
 #include "tcm.h"
 #include "tcm.h"
 
 
 static struct gen_pool *tcm_pool;
 static struct gen_pool *tcm_pool;

+ 2 - 2
arch/arm/kernel/traps.c

@@ -23,6 +23,7 @@
 #include <linux/kexec.h>
 #include <linux/delay.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 
 
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
@@ -32,7 +33,6 @@
 #include <asm/unwind.h>
 #include <asm/tls.h>

-#include "ptrace.h"
 #include "signal.h"
 #include "signal.h"
 
 
 static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
 static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
@@ -256,7 +256,7 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
 	return ret;
 }

-DEFINE_SPINLOCK(die_lock);
+static DEFINE_SPINLOCK(die_lock);
 
 
 /*
  * This function is protected against re-entrancy.

+ 4 - 0
arch/arm/kernel/vmlinux.lds.S

@@ -64,6 +64,10 @@ SECTIONS
 		__smpalt_end = .;
 #endif

+		__pv_table_begin = .;
+			*(.pv_table)
+		__pv_table_end = .;
+
 		INIT_SETUP(16)

 		INIT_CALLS

+ 30 - 20
arch/arm/lib/bitops.h

@@ -1,44 +1,52 @@
-
-#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_32v6K)
+#if __LINUX_ARM_ARCH__ >= 6
 	.macro	bitop, instr
+	ands	ip, r1, #3
+	strneb	r1, [ip]		@ assert word-aligned
 	mov	r2, #1
-	and	r3, r0, #7		@ Get bit offset
-	add	r1, r1, r0, lsr #3	@ Get byte offset
+	and	r3, r0, #31		@ Get bit offset
+	mov	r0, r0, lsr #5
+	add	r1, r1, r0, lsl #2	@ Get word offset
 	mov	r3, r2, lsl r3
-1:	ldrexb	r2, [r1]
+1:	ldrex	r2, [r1]
 	\instr	r2, r2, r3
-	strexb	r0, r2, [r1]
+	strex	r0, r2, [r1]
 	cmp	r0, #0
 	bne	1b
-	mov	pc, lr
+	bx	lr
 	.endm

 	.macro	testop, instr, store
-	and	r3, r0, #7		@ Get bit offset
+	ands	ip, r1, #3
+	strneb	r1, [ip]		@ assert word-aligned
 	mov	r2, #1
-	add	r1, r1, r0, lsr #3	@ Get byte offset
+	and	r3, r0, #31		@ Get bit offset
+	mov	r0, r0, lsr #5
+	add	r1, r1, r0, lsl #2	@ Get word offset
 	mov	r3, r2, lsl r3		@ create mask
 	smp_dmb
-1:	ldrexb	r2, [r1]
+1:	ldrex	r2, [r1]
 	ands	r0, r2, r3		@ save old value of bit
-	\instr	r2, r2, r3			@ toggle bit
-	strexb	ip, r2, [r1]
+	\instr	r2, r2, r3		@ toggle bit
+	strex	ip, r2, [r1]
 	cmp	ip, #0
 	bne	1b
 	smp_dmb
 	cmp	r0, #0
 	movne	r0, #1
-2:	mov	pc, lr
+2:	bx	lr
 	.endm
 #else
 	.macro	bitop, instr
-	and	r2, r0, #7
+	ands	ip, r1, #3
+	strneb	r1, [ip]		@ assert word-aligned
+	and	r2, r0, #31
+	mov	r0, r0, lsr #5
 	mov	r3, #1
 	mov	r3, r3, lsl r2
 	save_and_disable_irqs ip
-	ldrb	r2, [r1, r0, lsr #3]
+	ldr	r2, [r1, r0, lsl #2]
 	\instr	r2, r2, r3
-	strb	r2, [r1, r0, lsr #3]
+	str	r2, [r1, r0, lsl #2]
 	restore_irqs ip
 	mov	pc, lr
 	.endm
@@ -52,11 +60,13 @@
 * to avoid dirtying the data cache.
 */
 	.macro	testop, instr, store
-	add	r1, r1, r0, lsr #3
-	and	r3, r0, #7
-	mov	r0, #1
+	ands	ip, r1, #3
+	strneb	r1, [ip]		@ assert word-aligned
+	and	r3, r0, #31
+	mov	r0, r0, lsr #5
 	save_and_disable_irqs ip
-	ldrb	r2, [r1]
+	ldr	r2, [r1, r0, lsl #2]!
+	mov	r0, #1
 	tst	r2, r0, lsl r3
 	\instr	r2, r2, r0, lsl r3
 	\store	r2, [r1]
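The substance of this rewrite: the bit operations move from byte-wide to word-wide accesses, indexing bit N as word N/32, bit N%32 ("mov r0, r0, lsr #5" / "and r3, r0, #31" above). Because a whole aligned 32-bit word is read and written, the layout is identical on little- and big-endian kernels, which is what lets the _le/_be entry points collapse into single endian-neutral functions in the files that follow; the new "ands ip, r1, #3; strneb r1, [ip]" prologue deliberately faults on unaligned pointers. A runnable model of the indexing (the kernel versions additionally use ldrex/strex, or disable IRQs pre-v6, to make the read-modify-write atomic):

#include <stdint.h>
#include <stdio.h>

static void set_bit_word(unsigned int bit, uint32_t *addr)
{
	addr[bit >> 5] |= 1u << (bit & 31);	/* word N/32, bit N%32 */
}

int main(void)
{
	uint32_t map[2] = { 0, 0 };
	set_bit_word(37, map);			/* word 1, bit 5 */
	printf("%08x %08x\n", map[0], map[1]);	/* 00000000 00000020 */
	return 0;
}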

+ 2 - 8
arch/arm/lib/changebit.S

@@ -12,12 +12,6 @@
 #include "bitops.h"
 #include "bitops.h"
                 .text
                 .text
 
 
-/* Purpose  : Function to change a bit
- * Prototype: int change_bit(int bit, void *addr)
- */
-ENTRY(_change_bit_be)
-		eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_change_bit_le)
+ENTRY(_change_bit)
 	bitop	eor
-ENDPROC(_change_bit_be)
-ENDPROC(_change_bit_le)
+ENDPROC(_change_bit)

+ 2 - 9
arch/arm/lib/clearbit.S

@@ -12,13 +12,6 @@
 #include "bitops.h"
 #include "bitops.h"
                 .text
                 .text
 
 
-/*
- * Purpose  : Function to clear a bit
- * Prototype: int clear_bit(int bit, void *addr)
- */
-ENTRY(_clear_bit_be)
-		eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_clear_bit_le)
+ENTRY(_clear_bit)
 	bitop	bic
-ENDPROC(_clear_bit_be)
-ENDPROC(_clear_bit_le)
+ENDPROC(_clear_bit)

+ 2 - 9
arch/arm/lib/setbit.S

@@ -12,13 +12,6 @@
 #include "bitops.h"
 #include "bitops.h"
 		.text
 		.text
 
 
-/*
- * Purpose  : Function to set a bit
- * Prototype: int set_bit(int bit, void *addr)
- */
-ENTRY(_set_bit_be)
-		eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_set_bit_le)
+ENTRY(_set_bit)
 	bitop	orr
-ENDPROC(_set_bit_be)
-ENDPROC(_set_bit_le)
+ENDPROC(_set_bit)

+ 3 - 6
arch/arm/lib/testchangebit.S

@@ -12,9 +12,6 @@
 #include "bitops.h"
 #include "bitops.h"
                 .text
                 .text
 
 
-ENTRY(_test_and_change_bit_be)
-		eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_test_and_change_bit_le)
-	testop	eor, strb
-ENDPROC(_test_and_change_bit_be)
-ENDPROC(_test_and_change_bit_le)
+ENTRY(_test_and_change_bit)
+	testop	eor, str
+ENDPROC(_test_and_change_bit)

+ 3 - 6
arch/arm/lib/testclearbit.S

@@ -12,9 +12,6 @@
 #include "bitops.h"
 #include "bitops.h"
                 .text
                 .text
 
 
-ENTRY(_test_and_clear_bit_be)
-		eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_test_and_clear_bit_le)
-	testop	bicne, strneb
-ENDPROC(_test_and_clear_bit_be)
-ENDPROC(_test_and_clear_bit_le)
+ENTRY(_test_and_clear_bit)
+	testop	bicne, strne
+ENDPROC(_test_and_clear_bit)

+ 3 - 6
arch/arm/lib/testsetbit.S

@@ -12,9 +12,6 @@
 #include "bitops.h"
 #include "bitops.h"
                 .text
                 .text
 
 
-ENTRY(_test_and_set_bit_be)
-		eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_test_and_set_bit_le)
-	testop	orreq, streqb
-ENDPROC(_test_and_set_bit_be)
-ENDPROC(_test_and_set_bit_le)
+ENTRY(_test_and_set_bit)
+	testop	orreq, streq
+ENDPROC(_test_and_set_bit)

+ 1 - 1
arch/arm/mach-aaec2000/include/mach/memory.h

@@ -12,6 +12,6 @@
 #define __ASM_ARCH_MEMORY_H


-#define PHYS_OFFSET	UL(0xf0000000)
+#define PLAT_PHYS_OFFSET	UL(0xf0000000)
 
 
 #endif /* __ASM_ARCH_MEMORY_H */

+ 1 - 0
arch/arm/mach-at91/board-snapper9260.c

@@ -153,6 +153,7 @@ static struct i2c_board_info __initdata snapper9260_i2c_devices[] = {
 	{
 		/* RTC */
 		I2C_BOARD_INFO("isl1208", 0x6f),
+		.irq = gpio_to_irq(AT91_PIN_PA31),
 	},
 };


+ 2 - 9
arch/arm/mach-at91/include/mach/gpio.h

@@ -220,15 +220,8 @@ extern void at91_gpio_resume(void);
 #define gpio_set_value	__gpio_set_value
 #define gpio_cansleep	__gpio_cansleep

-static inline int gpio_to_irq(unsigned gpio)
-{
-	return gpio;
-}
-
-static inline int irq_to_gpio(unsigned irq)
-{
-	return irq;
-}
+#define gpio_to_irq(gpio) (gpio)
+#define irq_to_gpio(irq)  (irq)
 
 
 #endif	/* __ASSEMBLY__ */


+ 1 - 1
arch/arm/mach-at91/include/mach/memory.h

@@ -23,6 +23,6 @@
 
 
 #include <mach/hardware.h>

-#define PHYS_OFFSET	(AT91_SDRAM_BASE)
+#define PLAT_PHYS_OFFSET	(AT91_SDRAM_BASE)
 
 
 #endif

+ 1 - 1
arch/arm/mach-bcmring/include/mach/hardware.h

@@ -31,7 +31,7 @@
 *  *_SIZE  is the size of the region
 *  *_BASE  is the virtual address
 */
-#define RAM_START               PHYS_OFFSET
+#define RAM_START               PLAT_PHYS_OFFSET
 
 
 #define RAM_SIZE                (CFG_GLOBAL_RAM_SIZE-CFG_GLOBAL_RAM_SIZE_RESERVED)
 #define RAM_BASE                PAGE_OFFSET

+ 1 - 1
arch/arm/mach-bcmring/include/mach/memory.h

@@ -23,7 +23,7 @@
 * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
 */

-#define PHYS_OFFSET CFG_GLOBAL_RAM_BASE
+#define PLAT_PHYS_OFFSET CFG_GLOBAL_RAM_BASE
 
 
 /*
  * Maximum DMA memory allowed is 14M

+ 1 - 1
arch/arm/mach-clps711x/include/mach/memory.h

@@ -23,7 +23,7 @@
 /*
  * Physical DRAM offset.
  */
-#define PHYS_OFFSET	UL(0xc0000000)
+#define PLAT_PHYS_OFFSET	UL(0xc0000000)
 
 
 #if !defined(CONFIG_ARCH_CDB89712) && !defined (CONFIG_ARCH_AUTCPU12)


+ 1 - 1
arch/arm/mach-cns3xxx/include/mach/memory.h

@@ -13,7 +13,7 @@
 /*
  * Physical DRAM offset.
  */
-#define PHYS_OFFSET		UL(0x00000000)
+#define PLAT_PHYS_OFFSET		UL(0x00000000)
 
 
 #define __phys_to_bus(x)	((x) + PHYS_OFFSET)
 #define __bus_to_phys(x)	((x) - PHYS_OFFSET)

+ 2 - 2
arch/arm/mach-davinci/include/mach/memory.h

@@ -26,9 +26,9 @@
 #if defined(CONFIG_ARCH_DAVINCI_DA8XX) && defined(CONFIG_ARCH_DAVINCI_DMx)
 #error Cannot enable DaVinci and DA8XX platforms concurrently
 #elif defined(CONFIG_ARCH_DAVINCI_DA8XX)
-#define PHYS_OFFSET DA8XX_DDR_BASE
+#define PLAT_PHYS_OFFSET DA8XX_DDR_BASE
 #else
 #else
-#define PHYS_OFFSET DAVINCI_DDR_BASE
+#define PLAT_PHYS_OFFSET DAVINCI_DDR_BASE
 #endif
 #endif
 
 
 #define DDR2_SDRCR_OFFSET	0xc
 #define DDR2_SDRCR_OFFSET	0xc

+ 1 - 1
arch/arm/mach-dove/Kconfig

@@ -9,7 +9,7 @@ config MACH_DOVE_DB
 	  Say 'Y' here if you want your kernel to support the
 	  Marvell DB-MV88AP510 Development Board.
 
- config MACH_CM_A510
+config MACH_CM_A510
 	bool "CompuLab CM-A510 Board"
 	help
 	  Say 'Y' here if you want your kernel to support the

+ 1 - 1
arch/arm/mach-dove/include/mach/memory.h

@@ -5,6 +5,6 @@
 #ifndef __ASM_ARCH_MEMORY_H
 #define __ASM_ARCH_MEMORY_H
 
-#define PHYS_OFFSET		UL(0x00000000)
+#define PLAT_PHYS_OFFSET		UL(0x00000000)
 
 #endif

+ 1 - 1
arch/arm/mach-ebsa110/include/mach/memory.h

@@ -19,7 +19,7 @@
 /*
  * Physical DRAM offset.
  */
-#define PHYS_OFFSET	UL(0x00000000)
+#define PLAT_PHYS_OFFSET	UL(0x00000000)
 
 /*
  * Cache flushing area - SRAM

+ 116 - 0
arch/arm/mach-ep93xx/edb93xx.c

@@ -30,8 +30,13 @@
 #include <linux/gpio.h>
 #include <linux/i2c.h>
 #include <linux/i2c-gpio.h>
+#include <linux/spi/spi.h>
+
+#include <sound/cs4271.h>
 
 #include <mach/hardware.h>
+#include <mach/fb.h>
+#include <mach/ep93xx_spi.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -92,6 +97,83 @@ static void __init edb93xx_register_i2c(void)
 }
 
 
+/*************************************************************************
+ * EDB93xx SPI peripheral handling
+ *************************************************************************/
+static struct cs4271_platform_data edb93xx_cs4271_data = {
+	.gpio_nreset	= -EINVAL,	/* filled in later */
+};
+
+static int edb93xx_cs4271_hw_setup(struct spi_device *spi)
+{
+	return gpio_request_one(EP93XX_GPIO_LINE_EGPIO6,
+				GPIOF_OUT_INIT_HIGH, spi->modalias);
+}
+
+static void edb93xx_cs4271_hw_cleanup(struct spi_device *spi)
+{
+	gpio_free(EP93XX_GPIO_LINE_EGPIO6);
+}
+
+static void edb93xx_cs4271_hw_cs_control(struct spi_device *spi, int value)
+{
+	gpio_set_value(EP93XX_GPIO_LINE_EGPIO6, value);
+}
+
+static struct ep93xx_spi_chip_ops edb93xx_cs4271_hw = {
+	.setup		= edb93xx_cs4271_hw_setup,
+	.cleanup	= edb93xx_cs4271_hw_cleanup,
+	.cs_control	= edb93xx_cs4271_hw_cs_control,
+};
+
+static struct spi_board_info edb93xx_spi_board_info[] __initdata = {
+	{
+		.modalias		= "cs4271",
+		.platform_data		= &edb93xx_cs4271_data,
+		.controller_data	= &edb93xx_cs4271_hw,
+		.max_speed_hz		= 6000000,
+		.bus_num		= 0,
+		.chip_select		= 0,
+		.mode			= SPI_MODE_3,
+	},
+};
+
+static struct ep93xx_spi_info edb93xx_spi_info __initdata = {
+	.num_chipselect	= ARRAY_SIZE(edb93xx_spi_board_info),
+};
+
+static void __init edb93xx_register_spi(void)
+{
+	if (machine_is_edb9301() || machine_is_edb9302())
+		edb93xx_cs4271_data.gpio_nreset = EP93XX_GPIO_LINE_EGPIO1;
+	else if (machine_is_edb9302a() || machine_is_edb9307a())
+		edb93xx_cs4271_data.gpio_nreset = EP93XX_GPIO_LINE_H(2);
+	else if (machine_is_edb9315a())
+		edb93xx_cs4271_data.gpio_nreset = EP93XX_GPIO_LINE_EGPIO14;
+
+	ep93xx_register_spi(&edb93xx_spi_info, edb93xx_spi_board_info,
+			    ARRAY_SIZE(edb93xx_spi_board_info));
+}
+
+
+/*************************************************************************
+ * EDB93xx I2S
+ *************************************************************************/
+static int __init edb93xx_has_audio(void)
+{
+	return (machine_is_edb9301() || machine_is_edb9302() ||
+		machine_is_edb9302a() || machine_is_edb9307a() ||
+		machine_is_edb9315a());
+}
+
+static void __init edb93xx_register_i2s(void)
+{
+	if (edb93xx_has_audio())
+		ep93xx_register_i2s();
+}
+
+
 /*************************************************************************
  * EDB93xx pwm
  *************************************************************************/
@@ -111,13 +193,47 @@ static void __init edb93xx_register_pwm(void)
 }
 
 
+/*************************************************************************
+ * EDB93xx framebuffer
+ *************************************************************************/
+static struct ep93xxfb_mach_info __initdata edb93xxfb_info = {
+	.num_modes	= EP93XXFB_USE_MODEDB,
+	.bpp		= 16,
+	.flags		= 0,
+};
+
+static int __init edb93xx_has_fb(void)
+{
+	/* These platforms have an ep93xx with video capability */
+	return machine_is_edb9307() || machine_is_edb9307a() ||
+	       machine_is_edb9312() || machine_is_edb9315() ||
+	       machine_is_edb9315a();
+}
+
+static void __init edb93xx_register_fb(void)
+{
+	if (!edb93xx_has_fb())
+		return;
+
+	if (machine_is_edb9307a() || machine_is_edb9315a())
+		edb93xxfb_info.flags |= EP93XXFB_USE_SDCSN0;
+	else
+		edb93xxfb_info.flags |= EP93XXFB_USE_SDCSN3;
+
+	ep93xx_register_fb(&edb93xxfb_info);
+}
+
+
 static void __init edb93xx_init_machine(void)
 {
 	ep93xx_init_devices();
 	edb93xx_register_flash();
 	ep93xx_register_eth(&edb93xx_eth_data, 1);
 	edb93xx_register_i2c();
+	edb93xx_register_spi();
+	edb93xx_register_i2s();
 	edb93xx_register_pwm();
+	edb93xx_register_fb();
 }
 
 

+ 29 - 4
arch/arm/mach-ep93xx/gpio.c

@@ -61,7 +61,7 @@ static inline void ep93xx_gpio_int_mask(unsigned line)
 	gpio_int_unmasked[line >> 3] &= ~(1 << (line & 7));
 }
 
-void ep93xx_gpio_int_debounce(unsigned int irq, int enable)
+static void ep93xx_gpio_int_debounce(unsigned int irq, bool enable)
 {
 	int line = irq_to_gpio(irq);
 	int port = line >> 3;
@@ -75,7 +75,6 @@ void ep93xx_gpio_int_debounce(unsigned int irq, int enable)
 	__raw_writeb(gpio_int_debounce[port],
 		EP93XX_GPIO_REG(int_debounce_register_offset[port]));
 }
-EXPORT_SYMBOL(ep93xx_gpio_int_debounce);
 
 static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
@@ -335,6 +334,20 @@ static void ep93xx_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
 	local_irq_restore(flags);
 }
 
+static int ep93xx_gpio_set_debounce(struct gpio_chip *chip,
+				    unsigned offset, unsigned debounce)
+{
+	int gpio = chip->base + offset;
+	int irq = gpio_to_irq(gpio);
+
+	if (irq < 0)
+		return -EINVAL;
+
+	ep93xx_gpio_int_debounce(irq, debounce ? true : false);
+
+	return 0;
+}
+
 static void ep93xx_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
 {
 	struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip);
@@ -434,6 +447,18 @@ void __init ep93xx_gpio_init(void)
 				 EP93XX_SYSCON_DEVCFG_GONIDE |
 				 EP93XX_SYSCON_DEVCFG_HONIDE);
 
-	for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++)
-		gpiochip_add(&ep93xx_gpio_banks[i].chip);
+	for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) {
+		struct gpio_chip *chip = &ep93xx_gpio_banks[i].chip;
+
+		/*
+		 * Ports A, B, and F support input debouncing when
+		 * used as interrupts.
+		 */
+		if (!strcmp(chip->label, "A") ||
+		    !strcmp(chip->label, "B") ||
+		    !strcmp(chip->label, "F"))
+			chip->set_debounce = ep93xx_gpio_set_debounce;
+
+		gpiochip_add(chip);
+	}
 }

+ 0 - 2
arch/arm/mach-ep93xx/include/mach/gpio.h

@@ -99,8 +99,6 @@
 /* maximum value for irq capable line identifiers */
 #define EP93XX_GPIO_LINE_MAX_IRQ	EP93XX_GPIO_LINE_F(7)
 
-extern void ep93xx_gpio_int_debounce(unsigned int irq, int enable);
-
 /* new generic GPIO API - see Documentation/gpio.txt */
 
 #include <asm-generic/gpio.h>

+ 5 - 5
arch/arm/mach-ep93xx/include/mach/memory.h

@@ -6,15 +6,15 @@
 #define __ASM_ARCH_MEMORY_H
 
 #if defined(CONFIG_EP93XX_SDCE3_SYNC_PHYS_OFFSET)
-#define PHYS_OFFSET		UL(0x00000000)
+#define PLAT_PHYS_OFFSET		UL(0x00000000)
 #elif defined(CONFIG_EP93XX_SDCE0_PHYS_OFFSET)
-#define PHYS_OFFSET		UL(0xc0000000)
+#define PLAT_PHYS_OFFSET		UL(0xc0000000)
 #elif defined(CONFIG_EP93XX_SDCE1_PHYS_OFFSET)
-#define PHYS_OFFSET		UL(0xd0000000)
+#define PLAT_PHYS_OFFSET		UL(0xd0000000)
 #elif defined(CONFIG_EP93XX_SDCE2_PHYS_OFFSET)
-#define PHYS_OFFSET		UL(0xe0000000)
+#define PLAT_PHYS_OFFSET		UL(0xe0000000)
 #elif defined(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET)
-#define PHYS_OFFSET		UL(0xf0000000)
+#define PLAT_PHYS_OFFSET		UL(0xf0000000)
 #else
 #error "Kconfig bug: No EP93xx PHYS_OFFSET set"
 #endif

+ 66 - 18
arch/arm/mach-footbridge/dc21285-timer.c

@@ -4,10 +4,11 @@
  *  Copyright (C) 1998 Russell King.
  *  Copyright (C) 1998 Phil Blundell
  */
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
-#include <linux/spinlock.h>
 
 #include <asm/irq.h>
 
@@ -16,32 +17,76 @@
 
 #include "common.h"
 
-/*
- * Footbridge timer 1 support.
- */
-static unsigned long timer1_latch;
+static cycle_t cksrc_dc21285_read(struct clocksource *cs)
+{
+	return cs->mask - *CSR_TIMER2_VALUE;
+}
 
-static unsigned long timer1_gettimeoffset (void)
+static int cksrc_dc21285_enable(struct clocksource *cs)
 {
-	unsigned long value = timer1_latch - *CSR_TIMER1_VALUE;
+	*CSR_TIMER2_LOAD = cs->mask;
+	*CSR_TIMER2_CLR = 0;
+	*CSR_TIMER2_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_DIV16;
+	return 0;
+}
 
-	return ((tick_nsec / 1000) * value) / timer1_latch;
+static void cksrc_dc21285_disable(struct clocksource *cs)
+{
+	*CSR_TIMER2_CNTL = 0;
 }
 
-static irqreturn_t
-timer1_interrupt(int irq, void *dev_id)
+static struct clocksource cksrc_dc21285 = {
+	.name		= "dc21285_timer2",
+	.rating		= 200,
+	.read		= cksrc_dc21285_read,
+	.enable		= cksrc_dc21285_enable,
+	.disable	= cksrc_dc21285_disable,
+	.mask		= CLOCKSOURCE_MASK(24),
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void ckevt_dc21285_set_mode(enum clock_event_mode mode,
+	struct clock_event_device *c)
 {
+	switch (mode) {
+	case CLOCK_EVT_MODE_RESUME:
+	case CLOCK_EVT_MODE_PERIODIC:
+		*CSR_TIMER1_CLR = 0;
+		*CSR_TIMER1_LOAD = (mem_fclk_21285 + 8 * HZ) / (16 * HZ);
+		*CSR_TIMER1_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_AUTORELOAD |
+				   TIMER_CNTL_DIV16;
+		break;
+
+	default:
+		*CSR_TIMER1_CNTL = 0;
+		break;
+	}
+}
+
+static struct clock_event_device ckevt_dc21285 = {
+	.name		= "dc21285_timer1",
+	.features	= CLOCK_EVT_FEAT_PERIODIC,
+	.rating		= 200,
+	.irq		= IRQ_TIMER1,
+	.set_mode	= ckevt_dc21285_set_mode,
+};
+
+static irqreturn_t timer1_interrupt(int irq, void *dev_id)
+{
+	struct clock_event_device *ce = dev_id;
+
 	*CSR_TIMER1_CLR = 0;
 
-	timer_tick();
+	ce->event_handler(ce);
 
 	return IRQ_HANDLED;
 }
 
 static struct irqaction footbridge_timer_irq = {
-	.name		= "Timer1 timer tick",
+	.name		= "dc21285_timer1",
 	.handler	= timer1_interrupt,
 	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+	.dev_id		= &ckevt_dc21285,
 };
 
 /*
@@ -49,16 +94,19 @@ static struct irqaction footbridge_timer_irq = {
  */
 static void __init footbridge_timer_init(void)
 {
-	timer1_latch = (mem_fclk_21285 + 8 * HZ) / (16 * HZ);
+	struct clock_event_device *ce = &ckevt_dc21285;
+
+	clocksource_register_hz(&cksrc_dc21285, (mem_fclk_21285 + 8) / 16);
+
+	setup_irq(ce->irq, &footbridge_timer_irq);
 
-	*CSR_TIMER1_CLR  = 0;
-	*CSR_TIMER1_LOAD = timer1_latch;
-	*CSR_TIMER1_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_AUTORELOAD | TIMER_CNTL_DIV16;
+	clockevents_calc_mult_shift(ce, mem_fclk_21285, 5);
+	ce->max_delta_ns = clockevent_delta2ns(0xffffff, ce);
+	ce->min_delta_ns = clockevent_delta2ns(0x000004, ce);
 
-	setup_irq(IRQ_TIMER1, &footbridge_timer_irq);
+	clockevents_register_device(ce);
 }
 
 struct sys_timer footbridge_timer = {
 	.init		= footbridge_timer_init,
-	.offset		= timer1_gettimeoffset,
 };

+ 14 - 7
arch/arm/mach-footbridge/include/mach/hardware.h

@@ -23,26 +23,33 @@
  * 0xf9000000	0x50000000	1MB	Cache flush
  * 0xf0000000	0x80000000	16MB	ISA memory
  */
+
+#ifdef CONFIG_MMU
+#define MMU_IO(a, b)	(a)
+#else
+#define MMU_IO(a, b)	(b)
+#endif
+
 #define XBUS_SIZE		0x00100000
-#define XBUS_BASE		0xff800000
+#define XBUS_BASE		MMU_IO(0xff800000, 0x40000000)
 
 #define ARMCSR_SIZE		0x00100000
-#define ARMCSR_BASE		0xfe000000
+#define ARMCSR_BASE		MMU_IO(0xfe000000, 0x42000000)
 
 #define WFLUSH_SIZE		0x00100000
-#define WFLUSH_BASE		0xfd000000
+#define WFLUSH_BASE		MMU_IO(0xfd000000, 0x78000000)
 
 #define PCIIACK_SIZE		0x00100000
-#define PCIIACK_BASE		0xfc000000
+#define PCIIACK_BASE		MMU_IO(0xfc000000, 0x79000000)
 
 #define PCICFG1_SIZE		0x01000000
-#define PCICFG1_BASE		0xfb000000
+#define PCICFG1_BASE		MMU_IO(0xfb000000, 0x7a000000)
 
 #define PCICFG0_SIZE		0x01000000
-#define PCICFG0_BASE		0xfa000000
+#define PCICFG0_BASE		MMU_IO(0xfa000000, 0x7b000000)
 
 #define PCIMEM_SIZE		0x01000000
-#define PCIMEM_BASE		0xf0000000
+#define PCIMEM_BASE		MMU_IO(0xf0000000, 0x80000000)
 
 #define XBUS_LEDS		((volatile unsigned char *)(XBUS_BASE + 0x12000))
 #define XBUS_LED_AMBER		(1 << 0)

+ 8 - 2
arch/arm/mach-footbridge/include/mach/io.h

@@ -14,8 +14,14 @@
 #ifndef __ASM_ARM_ARCH_IO_H
 #define __ASM_ARM_ARCH_IO_H
 
-#define PCIO_SIZE		0x00100000
-#define PCIO_BASE		0xff000000
+#ifdef CONFIG_MMU
+#define MMU_IO(a, b)	(a)
+#else
+#define MMU_IO(a, b)	(b)
+#endif
+
+#define PCIO_SIZE       0x00100000
+#define PCIO_BASE       MMU_IO(0xff000000, 0x7c000000)
 
 #define IO_SPACE_LIMIT 0xffff
 

+ 1 - 1
arch/arm/mach-footbridge/include/mach/memory.h

@@ -62,7 +62,7 @@ extern unsigned long __bus_to_pfn(unsigned long);
 /*
  * Physical DRAM offset.
  */
-#define PHYS_OFFSET		UL(0x00000000)
+#define PLAT_PHYS_OFFSET		UL(0x00000000)
 
 #define FLUSH_BASE_PHYS		0x50000000
 

+ 85 - 44
arch/arm/mach-footbridge/isa-timer.c

@@ -4,10 +4,13 @@
  *  Copyright (C) 1998 Russell King.
  *  Copyright (C) 1998 Phil Blundell
  */
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/io.h>
+#include <linux/timex.h>
 
 #include <asm/irq.h>
 
@@ -15,77 +18,115 @@
 
 #include "common.h"
 
-/*
- * ISA timer tick support
- */
-#define mSEC_10_from_14 ((14318180 + 100) / 200)
+#define PIT_MODE	0x43
+#define PIT_CH0		0x40
+
+#define PIT_LATCH	((PIT_TICK_RATE + HZ / 2) / HZ)
 
-static unsigned long isa_gettimeoffset(void)
+static cycle_t pit_read(struct clocksource *cs)
 {
+	unsigned long flags;
+	static int old_count;
+	static u32 old_jifs;
 	int count;
+	u32 jifs;
 
-	static int count_p = (mSEC_10_from_14/6);    /* for the first call after boot */
-	static unsigned long jiffies_p = 0;
+	raw_local_irq_save(flags);
 
-	/*
-	 * cache volatile jiffies temporarily; we have IRQs turned off. 
-	 */
-	unsigned long jiffies_t;
+	jifs = jiffies;
+	outb_p(0x00, PIT_MODE);		/* latch the count */
+	count = inb_p(PIT_CH0);		/* read the latched count */
+	count |= inb_p(PIT_CH0) << 8;
 
-	/* timer count may underflow right here */
-	outb_p(0x00, 0x43);	/* latch the count ASAP */
+	if (count > old_count && jifs == old_jifs)
+		count = old_count;
 
-	count = inb_p(0x40);	/* read the latched count */
+	old_count = count;
+	old_jifs = jifs;
 
-	/*
-	 * We do this guaranteed double memory access instead of a _p 
-	 * postfix in the previous port access. Wheee, hackady hack
-	 */
- 	jiffies_t = jiffies;
+	raw_local_irq_restore(flags);
 
-	count |= inb_p(0x40) << 8;
+	count = (PIT_LATCH - 1) - count;
 
-	/* Detect timer underflows.  If we haven't had a timer tick since 
-	   the last time we were called, and time is apparently going
-	   backwards, the counter must have wrapped during this routine. */
-	if ((jiffies_t == jiffies_p) && (count > count_p))
-		count -= (mSEC_10_from_14/6);
-	else
-		jiffies_p = jiffies_t;
+	return (cycle_t)(jifs * PIT_LATCH) + count;
+}
 
-	count_p = count;
+static struct clocksource pit_cs = {
+	.name		= "pit",
+	.rating		= 110,
+	.read		= pit_read,
+	.mask		= CLOCKSOURCE_MASK(32),
+};
 
-	count = (((mSEC_10_from_14/6)-1) - count) * (tick_nsec / 1000);
-	count = (count + (mSEC_10_from_14/6)/2) / (mSEC_10_from_14/6);
+static void pit_set_mode(enum clock_event_mode mode,
+	struct clock_event_device *evt)
+{
+	unsigned long flags;
+
+	raw_local_irq_save(flags);
+
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		outb_p(0x34, PIT_MODE);
+		outb_p(PIT_LATCH & 0xff, PIT_CH0);
+		outb_p(PIT_LATCH >> 8, PIT_CH0);
+		break;
+
+	case CLOCK_EVT_MODE_SHUTDOWN:
+	case CLOCK_EVT_MODE_UNUSED:
+		outb_p(0x30, PIT_MODE);
+		outb_p(0, PIT_CH0);
+		outb_p(0, PIT_CH0);
+		break;
+
+	case CLOCK_EVT_MODE_ONESHOT:
+	case CLOCK_EVT_MODE_RESUME:
+		break;
+	}
+	local_irq_restore(flags);
+}
 
-	return count;
+static int pit_set_next_event(unsigned long delta,
+	struct clock_event_device *evt)
+{
+	return 0;
 }
 
-static irqreturn_t
-isa_timer_interrupt(int irq, void *dev_id)
+static struct clock_event_device pit_ce = {
+	.name		= "pit",
+	.features	= CLOCK_EVT_FEAT_PERIODIC,
+	.set_mode	= pit_set_mode,
+	.set_next_event	= pit_set_next_event,
+	.shift		= 32,
+};
+
+static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
 {
-	timer_tick();
+	struct clock_event_device *ce = dev_id;
+	ce->event_handler(ce);
 	return IRQ_HANDLED;
 }
 
-static struct irqaction isa_timer_irq = {
-	.name		= "ISA timer tick",
-	.handler	= isa_timer_interrupt,
+static struct irqaction pit_timer_irq = {
+	.name		= "pit",
+	.handler	= pit_timer_interrupt,
 	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+	.dev_id		= &pit_ce,
 };
 
 static void __init isa_timer_init(void)
 {
-	/* enable PIT timer */
-	/* set for periodic (4) and LSB/MSB write (0x30) */
-	outb(0x34, 0x43);
-	outb((mSEC_10_from_14/6) & 0xFF, 0x40);
-	outb((mSEC_10_from_14/6) >> 8, 0x40);
+	pit_ce.cpumask = cpumask_of(smp_processor_id());
+	pit_ce.mult = div_sc(PIT_TICK_RATE, NSEC_PER_SEC, pit_ce.shift);
+	pit_ce.max_delta_ns = clockevent_delta2ns(0x7fff, &pit_ce);
+	pit_ce.min_delta_ns = clockevent_delta2ns(0x000f, &pit_ce);
+
+	clocksource_register_hz(&pit_cs, PIT_TICK_RATE);
 
-	setup_irq(IRQ_ISA_TIMER, &isa_timer_irq);
+	setup_irq(pit_ce.irq, &pit_timer_irq);
+	clockevents_register_device(&pit_ce);
 }
 
 struct sys_timer isa_timer = {
 	.init		= isa_timer_init,
-	.offset		= isa_gettimeoffset,
 };
 };

+ 1 - 0
arch/arm/mach-gemini/board-nas4220b.c

@@ -98,6 +98,7 @@ static void __init ib4220b_init(void)
 	platform_register_pflash(SZ_16M, NULL, 0);
 	platform_device_register(&ib4220b_led_device);
 	platform_device_register(&ib4220b_key_device);
+	platform_register_rtc();
 }
 
 MACHINE_START(NAS4220B, "Raidsonic NAS IB-4220-B")

+ 1 - 0
arch/arm/mach-gemini/board-rut1xx.c

@@ -82,6 +82,7 @@ static void __init rut1xx_init(void)
 	platform_register_pflash(SZ_8M, NULL, 0);
 	platform_device_register(&rut1xx_leds);
 	platform_device_register(&rut1xx_keys_device);
+	platform_register_rtc();
 }
 
 MACHINE_START(RUT100, "Teltonika RUT100")

+ 1 - 0
arch/arm/mach-gemini/board-wbd111.c

@@ -130,6 +130,7 @@ static void __init wbd111_init(void)
 				 wbd111_num_partitions);
 	platform_device_register(&wbd111_leds_device);
 	platform_device_register(&wbd111_keys_device);
+	platform_register_rtc();
 }
 
 MACHINE_START(WBD111, "Wiliboard WBD-111")

+ 1 - 0
arch/arm/mach-gemini/board-wbd222.c

@@ -130,6 +130,7 @@ static void __init wbd222_init(void)
 		wbd222_num_partitions);
 	platform_device_register(&wbd222_leds_device);
 	platform_device_register(&wbd222_keys_device);
+	platform_register_rtc();
 }
 
 MACHINE_START(WBD222, "Wiliboard WBD-222")

+ 1 - 0
arch/arm/mach-gemini/common.h

@@ -18,6 +18,7 @@ extern void gemini_map_io(void);
 extern void gemini_init_irq(void);
 extern void gemini_timer_init(void);
 extern void gemini_gpio_init(void);
+extern int platform_register_rtc(void);
 
 /* Common platform devices registration functions */
 extern int platform_register_uart(void);

+ 26 - 0
arch/arm/mach-gemini/devices.c

@@ -90,3 +90,29 @@ int platform_register_pflash(unsigned int size, struct mtd_partition *parts,
 
 	return platform_device_register(&pflash_device);
 }
+
+static struct resource gemini_rtc_resources[] = {
+	[0] = {
+		.start  = GEMINI_RTC_BASE,
+		.end    = GEMINI_RTC_BASE + 0x24,
+		.flags  = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start  = IRQ_RTC,
+		.end    = IRQ_RTC,
+		.flags  = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device gemini_rtc_device = {
+	.name		= "rtc-gemini",
+	.id		= 0,
+	.num_resources	= ARRAY_SIZE(gemini_rtc_resources),
+	.resource	= gemini_rtc_resources,
+};
+
+int __init platform_register_rtc(void)
+{
+	return platform_device_register(&gemini_rtc_device);
+}
+

+ 2 - 2
arch/arm/mach-gemini/include/mach/memory.h

@@ -11,9 +11,9 @@
 #define __MACH_MEMORY_H
 
 #ifdef CONFIG_GEMINI_MEM_SWAP
-# define PHYS_OFFSET	UL(0x00000000)
+# define PLAT_PHYS_OFFSET	UL(0x00000000)
 #else
-# define PHYS_OFFSET	UL(0x10000000)
+# define PLAT_PHYS_OFFSET	UL(0x10000000)
 #endif
 
 #endif /* __MACH_MEMORY_H */

+ 1 - 1
arch/arm/mach-h720x/include/mach/memory.h

@@ -7,7 +7,7 @@
 #ifndef __ASM_ARCH_MEMORY_H
 #define __ASM_ARCH_MEMORY_H
 
-#define PHYS_OFFSET	UL(0x40000000)
+#define PLAT_PHYS_OFFSET	UL(0x40000000)
 /*
  * This is the maximum DMA address that can be DMAd to.
  * There should not be more than (0xd0000000 - 0xc0000000)

Too many files were changed in this diff, so some files are not shown.