
Merge git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-mn10300

* git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-mn10300: (44 commits)
  MN10300: Save frame pointer in thread_info struct rather than global var
  MN10300: Change "Matsushita" to "Panasonic".
  MN10300: Create a defconfig for the ASB2364 board
  MN10300: Update the ASB2303 defconfig
  MN10300: ASB2364: Add support for SMSC911X and SMC911X
  MN10300: ASB2364: Handle the IRQ multiplexer in the FPGA
  MN10300: Generic time support
  MN10300: Specify an ELF HWCAP flag for MN10300 Atomic Operations Unit support
  MN10300: Map userspace atomic op regs as a vmalloc page
  MN10300: Add Panasonic AM34 subarch and implement SMP
  MN10300: Delete idle_timestamp from irq_cpustat_t
  MN10300: Make various interrupt priority settings configurable
  MN10300: Optimise do_csum()
  MN10300: Implement atomic ops using atomic ops unit
  MN10300: Make the FPU operate in non-lazy mode under SMP
  MN10300: SMP TLB flushing
  MN10300: Use the [ID]PTEL2 registers rather than [ID]PTEL for TLB control
  MN10300: Make the use of PIDR to mark TLB entries controllable
  MN10300: Rename __flush_tlb*() to local_flush_tlb*()
  MN10300: AM34 erratum requires MMUCTR read and write on exception entry
  ...
Linus Torvalds, 14 years ago
Commit bdab225015
100 changed files, 7256 insertions(+), 1789 deletions(-)
  1. Kbuild (+1 -1)
  2. MAINTAINERS (+1 -1)
  3. arch/mn10300/Kconfig (+219 -60)
  4. arch/mn10300/Makefile (+6 -0)
  5. arch/mn10300/boot/compressed/head.S (+67 -2)
  6. arch/mn10300/configs/asb2303_defconfig (+2 -0)
  7. arch/mn10300/configs/asb2364_defconfig (+98 -0)
  8. arch/mn10300/include/asm/atomic.h (+350 -0)
  9. arch/mn10300/include/asm/bitops.h (+7 -7)
  10. arch/mn10300/include/asm/cache.h (+9 -5)
  11. arch/mn10300/include/asm/cacheflush.h (+109 -55)
  12. arch/mn10300/include/asm/cpu-regs.h (+77 -14)
  13. arch/mn10300/include/asm/dmactl-regs.h (+1 -86)
  14. arch/mn10300/include/asm/elf.h (+10 -2)
  15. arch/mn10300/include/asm/exceptions.h (+4 -4)
  16. arch/mn10300/include/asm/fpu.h (+104 -53)
  17. arch/mn10300/include/asm/frame.inc (+13 -7)
  18. arch/mn10300/include/asm/gdb-stub.h (+1 -1)
  19. arch/mn10300/include/asm/hardirq.h (+2 -1)
  20. arch/mn10300/include/asm/highmem.h (+2 -2)
  21. arch/mn10300/include/asm/intctl-regs.h (+20 -17)
  22. arch/mn10300/include/asm/io.h (+13 -0)
  23. arch/mn10300/include/asm/irq.h (+10 -2)
  24. arch/mn10300/include/asm/irq_regs.h (+5 -1)
  25. arch/mn10300/include/asm/irqflags.h (+102 -9)
  26. arch/mn10300/include/asm/mmu_context.h (+48 -25)
  27. arch/mn10300/include/asm/pgalloc.h (+0 -1)
  28. arch/mn10300/include/asm/pgtable.h (+52 -36)
  29. arch/mn10300/include/asm/processor.h (+39 -27)
  30. arch/mn10300/include/asm/ptrace.h (+2 -11)
  31. arch/mn10300/include/asm/reset-regs.h (+1 -1)
  32. arch/mn10300/include/asm/rtc.h (+0 -11)
  33. arch/mn10300/include/asm/rwlock.h (+125 -0)
  34. arch/mn10300/include/asm/serial-regs.h (+41 -10)
  35. arch/mn10300/include/asm/serial.h (+4 -4)
  36. arch/mn10300/include/asm/smp.h (+89 -2)
  37. arch/mn10300/include/asm/smsc911x.h (+1 -0)
  38. arch/mn10300/include/asm/spinlock.h (+178 -1)
  39. arch/mn10300/include/asm/spinlock_types.h (+20 -0)
  40. arch/mn10300/include/asm/system.h (+22 -51)
  41. arch/mn10300/include/asm/thread_info.h (+14 -4)
  42. arch/mn10300/include/asm/timer-regs.h (+175 -16)
  43. arch/mn10300/include/asm/timex.h (+16 -4)
  44. arch/mn10300/include/asm/tlbflush.h (+124 -50)
  45. arch/mn10300/include/asm/uaccess.h (+2 -4)
  46. arch/mn10300/kernel/Makefile (+10 -5)
  47. arch/mn10300/kernel/asm-offsets.c (+10 -1)
  48. arch/mn10300/kernel/cevt-mn10300.c (+131 -0)
  49. arch/mn10300/kernel/csrc-mn10300.c (+35 -0)
  50. arch/mn10300/kernel/entry.S (+98 -48)
  51. arch/mn10300/kernel/fpu-low.S (+163 -102)
  52. arch/mn10300/kernel/fpu-nofpu-low.S (+39 -0)
  53. arch/mn10300/kernel/fpu-nofpu.c (+30 -0)
  54. arch/mn10300/kernel/fpu.c (+56 -85)
  55. arch/mn10300/kernel/gdb-io-serial-low.S (+3 -2)
  56. arch/mn10300/kernel/gdb-io-serial.c (+27 -10)
  57. arch/mn10300/kernel/gdb-io-ttysm.c (+13 -11)
  58. arch/mn10300/kernel/gdb-stub.c (+6 -11)
  59. arch/mn10300/kernel/head.S (+198 -4)
  60. arch/mn10300/kernel/internal.h (+25 -0)
  61. arch/mn10300/kernel/irq.c (+246 -30)
  62. arch/mn10300/kernel/kprobes.c (+4 -0)
  63. arch/mn10300/kernel/mn10300-serial-low.S (+3 -3)
  64. arch/mn10300/kernel/mn10300-serial.c (+181 -29)
  65. arch/mn10300/kernel/mn10300-watchdog-low.S (+8 -1)
  66. arch/mn10300/kernel/mn10300-watchdog.c (+61 -39)
  67. arch/mn10300/kernel/process.c (+47 -14)
  68. arch/mn10300/kernel/profile.c (+1 -1)
  69. arch/mn10300/kernel/rtc.c (+10 -31)
  70. arch/mn10300/kernel/setup.c (+43 -36)
  71. arch/mn10300/kernel/signal.c (+11 -9)
  72. arch/mn10300/kernel/smp-low.S (+97 -0)
  73. arch/mn10300/kernel/smp.c (+1152 -0)
  74. arch/mn10300/kernel/switch_to.S (+3 -4)
  75. arch/mn10300/kernel/time.c (+85 -27)
  76. arch/mn10300/kernel/traps.c (+13 -29)
  77. arch/mn10300/lib/bitops.c (+2 -2)
  78. arch/mn10300/lib/delay.c (+4 -4)
  79. arch/mn10300/lib/do_csum.S (+22 -27)
  80. arch/mn10300/mm/Kconfig.cache (+101 -0)
  81. arch/mn10300/mm/Makefile (+12 -2)
  82. arch/mn10300/mm/cache-flush-by-reg.S (+308 -0)
  83. arch/mn10300/mm/cache-flush-by-tag.S (+251 -0)
  84. arch/mn10300/mm/cache-flush-icache.c (+155 -0)
  85. arch/mn10300/mm/cache-flush-mn10300.S (+0 -192)
  86. arch/mn10300/mm/cache-inv-by-reg.S (+356 -0)
  87. arch/mn10300/mm/cache-inv-by-tag.S (+348 -0)
  88. arch/mn10300/mm/cache-inv-icache.c (+129 -0)
  89. arch/mn10300/mm/cache-mn10300.S (+0 -289)
  90. arch/mn10300/mm/cache-smp-flush.c (+156 -0)
  91. arch/mn10300/mm/cache-smp-inv.c (+153 -0)
  92. arch/mn10300/mm/cache-smp.c (+105 -0)
  93. arch/mn10300/mm/cache-smp.h (+69 -0)
  94. arch/mn10300/mm/cache.c (+5 -90)
  95. arch/mn10300/mm/fault.c (+4 -13)
  96. arch/mn10300/mm/init.c (+23 -3)
  97. arch/mn10300/mm/misalignment.c (+1 -2)
  98. arch/mn10300/mm/mmu-context.c (+11 -30)
  99. arch/mn10300/mm/pgtable.c (+1 -1)
  100. arch/mn10300/mm/tlb-mn10300.S (+45 -14)

+ 1 - 1
Kbuild

@@ -53,7 +53,7 @@ targets += arch/$(SRCARCH)/kernel/asm-offsets.s
 # Default sed regexp - multiline due to syntax constraints
 define sed-y
 	"/^->/{s:->#\(.*\):/* \1 */:; \
-	s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 (\2) /* \3 */:; \
+	s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
 	s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
 	s:->::; p;}"
 endef

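The Kbuild hunk drops the parentheses the sed script used to wrap around each generated asm-offsets constant. As a rough before/after sketch (the THREAD_SP name is illustrative only, not taken from this merge):

	->THREAD_SP $8 offsetof(struct thread_struct, sp)	/* asm-offsets.s input */
	#define THREAD_SP (8) /* offsetof(struct thread_struct, sp) */	/* old output */
	#define THREAD_SP 8 /* offsetof(struct thread_struct, sp) */	/* new output */

The unparenthesised form matters to assembly consumers of these constants: on MN10300, parentheses around an operand denote register-indirect addressing, so "(8)" is not interchangeable with "8" in .S files. (That motivation is an inference from the diff, not quoted from the commit message.)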
+ 1 - 1
MAINTAINERS

@@ -4448,7 +4448,7 @@ L:	platform-driver-x86@vger.kernel.org
 S:	Maintained
 F:	drivers/platform/x86/panasonic-laptop.c
 
-PANASONIC MN10300/AM33 PORT
+PANASONIC MN10300/AM33/AM34 PORT
 M:	David Howells <dhowells@redhat.com>
 M:	Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
 L:	linux-am33-list@redhat.com (moderated for non-subscribers)

+ 219 - 60
arch/mn10300/Kconfig

@@ -9,8 +9,19 @@ config MN10300
 	def_bool y
 	select HAVE_OPROFILE
 
-config AM33
-	def_bool y
+config AM33_2
+	def_bool n
+
+config AM33_3
+	def_bool n
+
+config AM34_2
+	def_bool n
+	select MN10300_HAS_ATOMIC_OPS_UNIT
+	select MN10300_HAS_CACHE_SNOOP
+
+config ERRATUM_NEED_TO_RELOAD_MMUCTR
+	def_bool y if AM33_3 || AM34_2
 
 config MMU
 	def_bool y
@@ -37,7 +48,7 @@ config GENERIC_CALIBRATE_DELAY
 	def_bool y
 
 config GENERIC_CMOS_UPDATE
-        def_bool y
+        def_bool n
 
 config GENERIC_FIND_NEXT_BIT
 	def_bool y
@@ -45,6 +56,27 @@ config GENERIC_FIND_NEXT_BIT
 config GENERIC_HWEIGHT
 	def_bool y
 
+config GENERIC_TIME
+	def_bool y
+
+config GENERIC_CLOCKEVENTS
+	def_bool y
+
+config GENERIC_CLOCKEVENTS_BUILD
+	def_bool y
+	depends on GENERIC_CLOCKEVENTS
+
+config GENERIC_CLOCKEVENTS_BROADCAST
+	bool
+
+config CEVT_MN10300
+       def_bool y
+       depends on GENERIC_CLOCKEVENTS
+
+config CSRC_MN10300
+       def_bool y
+       depends on GENERIC_TIME
+
 config GENERIC_BUG
 	def_bool y
 
 
@@ -61,18 +93,14 @@ config GENERIC_HARDIRQS
 config HOTPLUG_CPU
 	def_bool n
 
-config HZ
-	int
-	default 1000
-
-mainmenu "Matsushita MN10300/AM33 Kernel Configuration"
+mainmenu "Panasonic MN10300/AM33 Kernel Configuration"
 
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
 
 
-menu "Matsushita MN10300 system setup"
+menu "Panasonic MN10300 system setup"
 
 choice
 	prompt "Unit type"
@@ -87,6 +115,10 @@ config MN10300_UNIT_ASB2303
 config MN10300_UNIT_ASB2305
 	bool "ASB2305"
 
+config MN10300_UNIT_ASB2364
+	bool "ASB2364"
+	select SMSC911X_ARCH_HOOKS if SMSC911X
+
 endchoice
 
 choice
@@ -99,57 +131,51 @@ choice
 config MN10300_PROC_MN103E010
 	bool "MN103E010"
 	depends on MN10300_UNIT_ASB2303 || MN10300_UNIT_ASB2305
+	select AM33_2
+	select MN10300_PROC_HAS_TTYSM0
+	select MN10300_PROC_HAS_TTYSM1
+	select MN10300_PROC_HAS_TTYSM2
+
+config MN10300_PROC_MN2WS0050
+	bool "MN2WS0050"
+	depends on MN10300_UNIT_ASB2364
+	select AM34_2
 	select MN10300_PROC_HAS_TTYSM0
 	select MN10300_PROC_HAS_TTYSM1
 	select MN10300_PROC_HAS_TTYSM2
 
 endchoice
 
-choice
-	prompt "Processor core support"
-	default MN10300_CPU_AM33V2
+config MN10300_HAS_ATOMIC_OPS_UNIT
+	def_bool n
 	help
-	  This option specifies the processor core for which the kernel will be
-	  compiled. It affects the instruction set used.
-
-config MN10300_CPU_AM33V2
-	bool "AM33v2"
-
-endchoice
+	  This should be enabled if the processor has an atomic ops unit
+	  capable of doing LL/SC equivalent operations.
 
 config FPU
 	bool "FPU present"
 	default y
-	depends on MN10300_PROC_MN103E010
+	depends on MN10300_PROC_MN103E010 || MN10300_PROC_MN2WS0050
 
-choice
-	prompt "CPU Caching mode"
-	default MN10300_CACHE_WBACK
+config LAZY_SAVE_FPU
+	bool "Save FPU state lazily"
+	default y
+	depends on FPU && !SMP
 	help
-	  This option determines the caching mode for the kernel.
-
-	  Write-Back caching mode involves the all reads and writes causing
-	  the affected cacheline to be read into the cache first before being
-	  operated upon. Memory is not then updated by a write until the cache
-	  is filled and a cacheline needs to be displaced from the cache to
-	  make room. Only at that point is it written back.
-
-	  Write-Through caching only fetches cachelines from memory on a
-	  read. Writes always get written directly to memory. If the affected
-	  cacheline is also in cache, it will be updated too.
-
-	  The final option is to turn of caching entirely.
+	  Enable this to be lazy in the saving of the FPU state to the owning
+	  task's thread struct.  This is useful if most tasks on the system
+	  don't use the FPU as only those tasks that use it will pass it
+	  between them, and the state needn't be saved for a task that isn't
+	  using it.
 
-config MN10300_CACHE_WBACK
-	bool "Write-Back"
+	  This can't be so easily used on SMP as the process that owns the FPU
+	  state on a CPU may be currently running on another CPU, so for the
+	  moment, it is disabled.
 
-config MN10300_CACHE_WTHRU
-	bool "Write-Through"
+source "arch/mn10300/mm/Kconfig.cache"
 
-config MN10300_CACHE_DISABLED
-	bool "Disabled"
-
-endchoice
+config MN10300_TLB_USE_PIDR
+	def_bool y
 
 menu "Memory layout options"
 
@@ -170,24 +196,55 @@ config KERNEL_TEXT_ADDRESS
 
 config KERNEL_ZIMAGE_BASE_ADDRESS
 	hex "Base address of compressed vmlinux image"
-	default "0x90700000"
+	default "0x50700000"
 
+config BOOT_STACK_OFFSET
+	hex
+	default	"0xF00"	if SMP
+	default	"0xFF0" if !SMP
+
+config BOOT_STACK_SIZE
+	hex
+	depends on SMP
+	default	"0x100"
 endmenu
 
-config PREEMPT
-	bool "Preemptible Kernel"
-	help
-	  This option reduces the latency of the kernel when reacting to
-	  real-time or interactive events by allowing a low priority process to
-	  be preempted even if it is in kernel mode executing a system call.
-	  This allows applications to run more reliably even when the system is
-	  under load.
+config SMP
+	bool "Symmetric multi-processing support"
+	default y
+	depends on MN10300_PROC_MN2WS0038 || MN10300_PROC_MN2WS0050
+	---help---
+	  This enables support for systems with more than one CPU. If you have
+	  a system with only one CPU, like most personal computers, say N. If
+	  you have a system with more than one CPU, say Y.
+
+	  If you say N here, the kernel will run on single and multiprocessor
+	  machines, but will use only one CPU of a multiprocessor machine. If
+	  you say Y here, the kernel will run on many, but not all,
+	  singleprocessor machines. On a singleprocessor machine, the kernel
+	  will run faster if you say N here.
 
-	  Say Y here if you are building a kernel for a desktop, embedded
-	  or real-time system.  Say N if you are unsure.
+	  See also <file:Documentation/i386/IO-APIC.txt>,
+	  <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  If you don't know what to do here, say N.
+
+config NR_CPUS
+	int
+	depends on SMP
+	default "2"
+
+config USE_GENERIC_SMP_HELPERS
+	bool
+	depends on SMP
+	default y
+
+source "kernel/Kconfig.preempt"
 
 config MN10300_CURRENT_IN_E2
 	bool "Hold current task address in E2 register"
+	depends on !SMP
 	default y
 	help
 	  This option removes the E2/R2 register from the set available to gcc
@@ -209,12 +266,15 @@ config MN10300_USING_JTAG
 	  suppresses the use of certain hardware debugging features, such as
 	  single-stepping, which are taken over completely by the JTAG unit.
 
+source "kernel/Kconfig.hz"
+source "kernel/time/Kconfig"
+
 config MN10300_RTC
 	bool "Using MN10300 RTC"
-	depends on MN10300_PROC_MN103E010
+	depends on MN10300_PROC_MN103E010 || MN10300_PROC_MN2WS0050
+	select GENERIC_CMOS_UPDATE
 	default n
 	help
-
 	  This option enables support for the RTC, thus enabling time to be
 	  tracked, even when system is powered down. This is available on-chip
 	  on the MN103E010.
@@ -306,14 +366,23 @@ config MN10300_TTYSM1
 
 choice
 	prompt "Select the timer to supply the clock for SIF1"
-	default MN10300_TTYSM0_TIMER9
+	default MN10300_TTYSM1_TIMER12 \
+		if !(AM33_2 || AM33_3)
+	default MN10300_TTYSM1_TIMER9 \
+		if AM33_2 || AM33_3
 	depends on MN10300_TTYSM1
 
+config MN10300_TTYSM1_TIMER12
+	bool "Use timer 12 (16-bit)"
+	depends on !(AM33_2 || AM33_3)
+
 config MN10300_TTYSM1_TIMER9
 	bool "Use timer 9 (16-bit)"
+	depends on AM33_2 || AM33_3
 
 config MN10300_TTYSM1_TIMER3
 	bool "Use timer 3 (8-bit)"
+	depends on AM33_2 || AM33_3
 
 endchoice
 
@@ -328,17 +397,107 @@ config MN10300_TTYSM2
 
 choice
 	prompt "Select the timer to supply the clock for SIF2"
-	default MN10300_TTYSM0_TIMER10
+	default MN10300_TTYSM2_TIMER3 \
+		if !(AM33_2 || AM33_3)
+	default MN10300_TTYSM2_TIMER10 \
+		if AM33_2 || AM33_3
 	depends on MN10300_TTYSM2
 
+config MN10300_TTYSM2_TIMER9
+	bool "Use timer 9 (16-bit)"
+	depends on !(AM33_2 || AM33_3)
+
+config MN10300_TTYSM2_TIMER1
+	bool "Use timer 1 (8-bit)"
+	depends on !(AM33_2 || AM33_3)
+
+config MN10300_TTYSM2_TIMER3
+	bool "Use timer 3 (8-bit)"
+	depends on !(AM33_2 || AM33_3)
+
 config MN10300_TTYSM2_TIMER10
 	bool "Use timer 10 (16-bit)"
+	depends on AM33_2 || AM33_3
 
 endchoice
 
 config MN10300_TTYSM2_CTS
 	bool "Enable the use of the CTS line /dev/ttySM2"
-	depends on MN10300_TTYSM2
+	depends on MN10300_TTYSM2 && AM33_2
+
+endmenu
+
+menu "Interrupt request priority options"
+
+comment "[!] NOTE: A lower number/level indicates a higher priority (0 is highest, 6 is lowest)"
+
+comment "____Non-maskable interrupt levels____"
+comment "The following must be set to a higher priority than local_irq_disable() and on-chip serial"
+
+config GDBSTUB_IRQ_LEVEL
+	int "GDBSTUB interrupt priority"
+	depends on GDBSTUB
+	range 0 1 if LINUX_CLI_LEVEL = 2
+	range 0 2 if LINUX_CLI_LEVEL = 3
+	range 0 3 if LINUX_CLI_LEVEL = 4
+	range 0 4 if LINUX_CLI_LEVEL = 5
+	range 0 5 if LINUX_CLI_LEVEL = 6
+	default 0
+
+comment "The following must be set to a higher priority than local_irq_disable()"
+
+config MN10300_SERIAL_IRQ_LEVEL
+	int "MN10300 on-chip serial interrupt priority"
+	depends on MN10300_TTYSM
+	range 1 1 if LINUX_CLI_LEVEL = 2
+	range 1 2 if LINUX_CLI_LEVEL = 3
+	range 1 3 if LINUX_CLI_LEVEL = 4
+	range 1 4 if LINUX_CLI_LEVEL = 5
+	range 1 5 if LINUX_CLI_LEVEL = 6
+	default 1
+
+comment "-"
+comment "____Maskable interrupt levels____"
+
+config LINUX_CLI_LEVEL
+	int "The highest interrupt priority excluded by local_irq_disable() (2-6)"
+	range 2 6
+	default 2
+	help
+	  local_irq_disable() doesn't actually disable maskable interrupts -
+	  what it does is restrict the levels of interrupt which are permitted
+	  (a lower level indicates a higher priority) by lowering the value in
+	  EPSW.IM from 7.  Any interrupt is permitted for which the level is
+	  lower than EPSW.IM.
+
+	  Certain interrupts, such as GDBSTUB and virtual MN10300 on-chip
+	  serial DMA interrupts are allowed to interrupt normal disabled
+	  sections.
+
+comment "The following must be set to a equal to or lower priority than LINUX_CLI_LEVEL"
+
+config TIMER_IRQ_LEVEL
+	int "Kernel timer interrupt priority"
+	range LINUX_CLI_LEVEL 6
+	default 4
+
+config PCI_IRQ_LEVEL
+	int "PCI interrupt priority"
+	depends on PCI
+	range LINUX_CLI_LEVEL 6
+	default 5
+
+config ETHERNET_IRQ_LEVEL
+	int "Ethernet interrupt priority"
+	depends on SMC91X || SMC911X || SMSC911X
+	range LINUX_CLI_LEVEL 6
+	default 6
+
+config EXT_SERIAL_IRQ_LEVEL
+	int "External serial port interrupt priority"
+	depends on SERIAL_8250
+	range LINUX_CLI_LEVEL 6
+	default 6
 
 endmenu
 

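All of the new interrupt-priority options hang off the LINUX_CLI_LEVEL semantics described in the help text above: local_irq_disable() lowers EPSW.IM rather than masking everything. A minimal C sketch of that behaviour, assuming hypothetical read_epsw()/write_epsw() accessors (NUM2EPSW_IM comes from the cpu-regs.h additions later in this merge, and EPSW_IM is the interrupt-mask field defined elsewhere in that header):

	/* Anything with level < EPSW.IM is still delivered, which is how the
	 * GDB stub and on-chip serial can interrupt "disabled" sections. */
	static inline void sketch_local_irq_disable(void)
	{
		unsigned long epsw = read_epsw();		/* hypothetical accessor */

		epsw &= ~EPSW_IM;				/* clear the IM field */
		epsw |= NUM2EPSW_IM(CONFIG_LINUX_CLI_LEVEL);	/* permit levels below it */
		write_epsw(epsw);				/* hypothetical accessor */
	}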
+ 6 - 0
arch/mn10300/Makefile

@@ -36,6 +36,9 @@ endif
 ifeq ($(CONFIG_MN10300_PROC_MN103E010),y)
 PROCESSOR	:= mn103e010
 endif
+ifeq ($(CONFIG_MN10300_PROC_MN2WS0050),y)
+PROCESSOR	:= mn2ws0050
+endif
 
 ifeq ($(CONFIG_MN10300_UNIT_ASB2303),y)
 UNIT		:= asb2303
@@ -43,6 +46,9 @@ endif
 ifeq ($(CONFIG_MN10300_UNIT_ASB2305),y)
 UNIT		:= asb2305
 endif
+ifeq ($(CONFIG_MN10300_UNIT_ASB2364),y)
+UNIT		:= asb2364
+endif
 
 
 head-y		:= arch/mn10300/kernel/head.o arch/mn10300/kernel/init_task.o

+ 67 - 2
arch/mn10300/boot/compressed/head.S

@@ -14,10 +14,29 @@
 
 #include <linux/linkage.h>
 #include <asm/cpu-regs.h>
+#include <asm/cache.h>
+#ifdef CONFIG_SMP
+#include <proc/smp-regs.h>
+#endif
 
 	.globl startup_32
 startup_32:
-	# first save off parameters from bootloader
+#ifdef CONFIG_SMP
+	#
+	# Secondary CPUs jump directly to the kernel entry point
+	#
+	# Must save primary CPU's D0-D2 registers as they hold boot parameters
+	#
+	mov	(CPUID), d3
+	and	CPUID_MASK,d3
+	beq	startup_primary
+	mov	CONFIG_KERNEL_TEXT_ADDRESS,a0
+	jmp	(a0)
+
+startup_primary:
+#endif /* CONFIG_SMP */
+
+	# first save parameters from bootloader
 	mov	param_save_area,a0
 	mov	d0,(a0)
 	mov	d1,(4,a0)
@@ -37,8 +56,15 @@ startup_32:
 	mov	(a0),d0
 	btst	CHCTR_ICBUSY|CHCTR_DCBUSY,d0		# wait till not busy
 	lne
-	mov	CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD,d0	# writethru dcache
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+#ifdef CONFIG_MN10300_CACHE_WBACK
+	mov	CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0
+#else
+	mov	CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0
+#endif /* WBACK */
 	movhu	d0,(a0)					# enable
+#endif /* !ENABLED */
 
 	# clear the BSS area
 	mov	__bss_start,a0
@@ -54,6 +80,9 @@ bssclear_end:
 
 	# decompress the kernel
 	call	decompress_kernel[],0
+#ifdef CONFIG_MN10300_CACHE_WBACK
+	call	mn10300_dcache_flush_inv[],0
+#endif
 
 	# disable caches again
 	mov	CHCTR,a0
@@ -69,10 +98,46 @@ bssclear_end:
 	mov	(4,a0),d1
 	mov	(8,a0),d2
 
+	# jump to the kernel proper entry point
 	mov	a3,sp
 	mov	CONFIG_KERNEL_TEXT_ADDRESS,a0
 	jmp	(a0)
 
+
+###############################################################################
+#
+# Cache flush routines
+#
+###############################################################################
+#ifdef CONFIG_MN10300_CACHE_WBACK
+mn10300_dcache_flush_inv:
+	movhu	(CHCTR),d0
+	btst	CHCTR_DCEN,d0
+	beq	mn10300_dcache_flush_inv_end
+
+	mov	L1_CACHE_NENTRIES,d1
+	clr	a1
+
+mn10300_dcache_flush_inv_loop:
+	mov	(DCACHE_PURGE_WAY0(0),a1),d0	# unconditional purge
+	mov	(DCACHE_PURGE_WAY1(0),a1),d0	# unconditional purge
+	mov	(DCACHE_PURGE_WAY2(0),a1),d0	# unconditional purge
+	mov	(DCACHE_PURGE_WAY3(0),a1),d0	# unconditional purge
+
+	add	L1_CACHE_BYTES,a1
+	add	-1,d1
+	bne	mn10300_dcache_flush_inv_loop
+
+mn10300_dcache_flush_inv_end:
+	ret	[],0
+#endif /* CONFIG_MN10300_CACHE_WBACK */
+
+
+###############################################################################
+#
+# Data areas
+#
+###############################################################################
	.data
 	.align		4
 param_save_area:

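The new mn10300_dcache_flush_inv routine walks every line of each of the four cache ways, reading from the purge region so the hardware writes the line back and invalidates it. In C terms it is roughly the following sketch (assumption: DCACHE_PURGE_WAYn_BASE stands for the constant address the real DCACHE_PURGE_WAYn(0) macros expand to):

	static void sketch_dcache_flush_inv(void)
	{
		unsigned long off;

		for (off = 0; off < L1_CACHE_NENTRIES * L1_CACHE_BYTES;
		     off += L1_CACHE_BYTES) {
			/* each read purges (writes back and invalidates) one line */
			(void)*(volatile u32 *)(DCACHE_PURGE_WAY0_BASE + off);
			(void)*(volatile u32 *)(DCACHE_PURGE_WAY1_BASE + off);
			(void)*(volatile u32 *)(DCACHE_PURGE_WAY2_BASE + off);
			(void)*(volatile u32 *)(DCACHE_PURGE_WAY3_BASE + off);
		}
	}

The flush is only compiled in for write-back configurations: after decompression the kernel image may still be sitting in dirty dcache lines, and the jump to CONFIG_KERNEL_TEXT_ADDRESS must fetch the real instructions from RAM.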
+ 2 - 0
arch/mn10300/configs/asb2303_defconfig

@@ -12,6 +12,8 @@ CONFIG_SLAB=y
 CONFIG_PROFILING=y
 # CONFIG_BLOCK is not set
 CONFIG_PREEMPT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_MN10300_RTC=y
 CONFIG_MN10300_TTYSM_CONSOLE=y
 CONFIG_MN10300_TTYSM0=y

+ 98 - 0
arch/mn10300/configs/asb2364_defconfig

@@ -0,0 +1,98 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_NS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_RELAY=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_EMBEDDED=y
+# CONFIG_KALLSYMS is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLOCK is not set
+CONFIG_MN10300_UNIT_ASB2364=y
+CONFIG_PREEMPT=y
+# CONFIG_MN10300_USING_JTAG is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_MN10300_TTYSM_CONSOLE=y
+CONFIG_MN10300_TTYSM0=y
+CONFIG_MN10300_TTYSM0_TIMER2=y
+CONFIG_MN10300_TTYSM1=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_IPV6=y
+# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET6_XFRM_MODE_BEET is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_DEBUG=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_REDBOOT_PARTS=y
+CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_GEOMETRY=y
+CONFIG_MTD_CFI_I4=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_NETDEVICES=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMSC911X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_PROC_KCORE=y
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_ROOT_NFS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set

+ 350 - 0
arch/mn10300/include/asm/atomic.h

@@ -1 +1,351 @@
+/* MN10300 Atomic counter operations
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _ASM_ATOMIC_H
+#define _ASM_ATOMIC_H
+
+#include <asm/irqflags.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+	unsigned long status;
+	unsigned long oldval;
+
+	asm volatile(
+		"1:	mov	%4,(_AAR,%3)	\n"
+		"	mov	(_ADR,%3),%1	\n"
+		"	mov	%5,(_ADR,%3)	\n"
+		"	mov	(_ADR,%3),%0	\n"	/* flush */
+		"	mov	(_ASR,%3),%0	\n"
+		"	or	%0,%0		\n"
+		"	bne	1b		\n"
+		: "=&r"(status), "=&r"(oldval), "=m"(*m)
+		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(m), "r"(val)
+		: "memory", "cc");
+
+	return oldval;
+}
+
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+				      unsigned long old, unsigned long new)
+{
+	unsigned long status;
+	unsigned long oldval;
+
+	asm volatile(
+		"1:	mov	%4,(_AAR,%3)	\n"
+		"	mov	(_ADR,%3),%1	\n"
+		"	cmp	%5,%1		\n"
+		"	bne	2f		\n"
+		"	mov	%6,(_ADR,%3)	\n"
+		"2:	mov	(_ADR,%3),%0	\n"	/* flush */
+		"	mov	(_ASR,%3),%0	\n"
+		"	or	%0,%0		\n"
+		"	bne	1b		\n"
+		: "=&r"(status), "=&r"(oldval), "=m"(*m)
+		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(m),
+		  "r"(old), "r"(new)
+		: "memory", "cc");
+
+	return oldval;
+}
+#else  /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+#error "No SMP atomic operation support!"
+#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+
+#else  /* CONFIG_SMP */
+
+/*
+ * Emulate xchg for non-SMP MN10300
+ */
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+	unsigned long oldval;
+	unsigned long flags;
+
+	flags = arch_local_cli_save();
+	oldval = *m;
+	*m = val;
+	arch_local_irq_restore(flags);
+	return oldval;
+}
+
+/*
+ * Emulate cmpxchg for non-SMP MN10300
+ */
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+				      unsigned long old, unsigned long new)
+{
+	unsigned long oldval;
+	unsigned long flags;
+
+	flags = arch_local_cli_save();
+	oldval = *m;
+	if (oldval == old)
+		*m = new;
+	arch_local_irq_restore(flags);
+	return oldval;
+}
+
+#endif /* CONFIG_SMP */
+
+#define xchg(ptr, v)						\
+	((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr),	\
+				     (unsigned long)(v)))
+
+#define cmpxchg(ptr, o, n)					\
+	((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
+					(unsigned long)(o),	\
+					(unsigned long)(n)))
+
+#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
+#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef CONFIG_SMP
 #include <asm-generic/atomic.h>
+#else
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc..
+ */
+
+#define ATOMIC_INIT(i)	{ (i) }
+
+#ifdef __KERNEL__
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.  Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_read(v)	((v)->counter)
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.  Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+/**
+ * atomic_add_return - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns the result
+ * Note that the guaranteed useful range of an atomic_t is only 24 bits.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	int retval;
+#ifdef CONFIG_SMP
+	int status;
+
+	asm volatile(
+		"1:	mov	%4,(_AAR,%3)	\n"
+		"	mov	(_ADR,%3),%1	\n"
+		"	add	%5,%1		\n"
+		"	mov	%1,(_ADR,%3)	\n"
+		"	mov	(_ADR,%3),%0	\n"	/* flush */
+		"	mov	(_ASR,%3),%0	\n"
+		"	or	%0,%0		\n"
+		"	bne	1b		\n"
+		: "=&r"(status), "=&r"(retval), "=m"(v->counter)
+		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
+		: "memory", "cc");
+
+#else
+	unsigned long flags;
+
+	flags = arch_local_cli_save();
+	retval = v->counter;
+	retval += i;
+	v->counter = retval;
+	arch_local_irq_restore(flags);
+#endif
+	return retval;
+}
+
+/**
+ * atomic_sub_return - subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns the result
+ * Note that the guaranteed useful range of an atomic_t is only 24 bits.
+ */
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	int retval;
+#ifdef CONFIG_SMP
+	int status;
+
+	asm volatile(
+		"1:	mov	%4,(_AAR,%3)	\n"
+		"	mov	(_ADR,%3),%1	\n"
+		"	sub	%5,%1		\n"
+		"	mov	%1,(_ADR,%3)	\n"
+		"	mov	(_ADR,%3),%0	\n"	/* flush */
+		"	mov	(_ASR,%3),%0	\n"
+		"	or	%0,%0		\n"
+		"	bne	1b		\n"
+		: "=&r"(status), "=&r"(retval), "=m"(v->counter)
+		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
+		: "memory", "cc");
+
+#else
+	unsigned long flags;
+	flags = arch_local_cli_save();
+	retval = v->counter;
+	retval -= i;
+	v->counter = retval;
+	arch_local_irq_restore(flags);
+#endif
+	return retval;
+}
+
+static inline int atomic_add_negative(int i, atomic_t *v)
+{
+	return atomic_add_return(i, v) < 0;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+	atomic_add_return(i, v);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	atomic_sub_return(i, v);
+}
+
+static inline void atomic_inc(atomic_t *v)
+{
+	atomic_add_return(1, v);
+}
+
+static inline void atomic_dec(atomic_t *v)
+{
+	atomic_sub_return(1, v);
+}
+
+#define atomic_dec_return(v)		atomic_sub_return(1, (v))
+#define atomic_inc_return(v)		atomic_add_return(1, (v))
+
+#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
+
+#define atomic_add_unless(v, a, u)				\
+({								\
+	int c, old;						\
+	c = atomic_read(v);					\
+	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+		c = old;					\
+	c != (u);						\
+})
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+/**
+ * atomic_clear_mask - Atomically clear bits in memory
+ * @mask: Mask of the bits to be cleared
+ * @v: pointer to word in memory
+ *
+ * Atomically clears the bits set in mask from the memory word specified.
+ */
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+#ifdef CONFIG_SMP
+	int status;
+
+	asm volatile(
+		"1:	mov	%3,(_AAR,%2)	\n"
+		"	mov	(_ADR,%2),%0	\n"
+		"	and	%4,%0		\n"
+		"	mov	%0,(_ADR,%2)	\n"
+		"	mov	(_ADR,%2),%0	\n"	/* flush */
+		"	mov	(_ASR,%2),%0	\n"
+		"	or	%0,%0		\n"
+		"	bne	1b		\n"
+		: "=&r"(status), "=m"(*addr)
+		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
+		: "memory", "cc");
+#else
+	unsigned long flags;
+
+	mask = ~mask;
+	flags = arch_local_cli_save();
+	*addr &= mask;
+	arch_local_irq_restore(flags);
+#endif
+}
+
+/**
+ * atomic_set_mask - Atomically set bits in memory
+ * @mask: Mask of the bits to be set
+ * @v: pointer to word in memory
+ *
+ * Atomically sets the bits set in mask from the memory word specified.
+ */
+static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
+{
+#ifdef CONFIG_SMP
+	int status;
+
+	asm volatile(
+		"1:	mov	%3,(_AAR,%2)	\n"
+		"	mov	(_ADR,%2),%0	\n"
+		"	or	%4,%0		\n"
+		"	mov	%0,(_ADR,%2)	\n"
+		"	mov	(_ADR,%2),%0	\n"	/* flush */
+		"	mov	(_ASR,%2),%0	\n"
+		"	or	%0,%0		\n"
+		"	bne	1b		\n"
+		: "=&r"(status), "=m"(*addr)
+		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
+		: "memory", "cc");
+#else
+	unsigned long flags;
+
+	flags = arch_local_cli_save();
+	*addr |= mask;
+	arch_local_irq_restore(flags);
+#endif
+}
+
+/* Atomic operations are already serializing on MN10300??? */
+#define smp_mb__before_atomic_dec()	barrier()
+#define smp_mb__after_atomic_dec()	barrier()
+#define smp_mb__before_atomic_inc()	barrier()
+#define smp_mb__after_atomic_inc()	barrier()
+
+#include <asm-generic/atomic-long.h>
+
+#endif /* __KERNEL__ */
+#endif /* CONFIG_SMP */
+#endif /* _ASM_ATOMIC_H */

+ 7 - 7
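All of the SMP asm blocks in the new atomic.h follow the same atomic-ops-unit protocol: write the target address to AAR to begin a locked sequence, read the old value through ADR, write the new value back through ADR, then read ASR and retry if the unit reports the sequence was disturbed. A C-level rendering of atomic_add_return (a sketch only: AAR/ADR/ASR are the __SYSREG lvalues added to cpu-regs.h later in this merge, and treating the sequence as plain MMIO accesses glosses over the hardware locking):

	static inline int sketch_atomic_add_return(int i, atomic_t *v)
	{
		int status, val;

		do {
			AAR = (u32)&v->counter;	/* latch address, begin locked access */
			val = ADR;		/* load current value via the unit */
			val += i;
			ADR = val;		/* attempt the store */
			(void)ADR;		/* read back to flush the write */
			status = ASR;		/* non-zero: sequence broken, retry */
		} while (status);

		return val;
	}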
arch/mn10300/include/asm/bitops.h

@@ -57,7 +57,7 @@
 #define clear_bit(nr, addr) ___clear_bit((nr), (addr))
 
 
-static inline void __clear_bit(int nr, volatile void *addr)
+static inline void __clear_bit(unsigned long nr, volatile void *addr)
 {
 	unsigned int *a = (unsigned int *) addr;
 	int mask;
@@ -70,15 +70,15 @@ static inline void __clear_bit(int nr, volatile void *addr)
 /*
  * test bit
  */
-static inline int test_bit(int nr, const volatile void *addr)
+static inline int test_bit(unsigned long nr, const volatile void *addr)
 {
-	return 1UL & (((const unsigned int *) addr)[nr >> 5] >> (nr & 31));
+	return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
 }
 
 /*
  * change bit
  */
-static inline void __change_bit(int nr, volatile void *addr)
+static inline void __change_bit(unsigned long nr, volatile void *addr)
 {
 	int	mask;
 	unsigned int *a = (unsigned int *) addr;
@@ -88,7 +88,7 @@ static inline void __change_bit(int nr, volatile void *addr)
 	*a ^= mask;
 }
 
-extern void change_bit(int nr, volatile void *addr);
+extern void change_bit(unsigned long nr, volatile void *addr);
 
 /*
  * test and set bit
@@ -135,7 +135,7 @@ extern void change_bit(int nr, volatile void *addr);
 /*
  * test and change bit
 */
-static inline int __test_and_change_bit(int nr, volatile void *addr)
+static inline int __test_and_change_bit(unsigned long nr, volatile void *addr)
 {
 	int	mask, retval;
 	unsigned int *a = (unsigned int *)addr;
@@ -148,7 +148,7 @@ static inline int __test_and_change_bit(int nr, volatile void *addr)
 	return retval;
 }
 
-extern int test_and_change_bit(int nr, volatile void *addr);
+extern int test_and_change_bit(unsigned long nr, volatile void *addr);
 
 #include <asm-generic/bitops/lock.h>
 

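The bitops.h changes are a signedness fix: nr widens from int to unsigned long, matching the generic cross-arch bitops prototypes. With a signed nr, a bit number with the top bit set would make nr >> 5 a negative word index. An illustrative example (values hypothetical):

	unsigned long nr = 0x80000010UL;
	/* as int:           nr >> 5 sign-extends to a negative index (bogus)  */
	/* as unsigned long: nr >> 5 == 0x04000000, the correct bitmap word    */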
+ 9 - 5
arch/mn10300/include/asm/cache.h

@@ -43,14 +43,18 @@
 
 /* instruction cache access registers */
 #define ICACHE_DATA(WAY, ENTRY, OFF) \
-	__SYSREG(0xc8000000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
+	__SYSREG(0xc8000000 + (WAY) * L1_CACHE_WAYDISP + \
+		(ENTRY) * L1_CACHE_BYTES + (OFF) * 4, u32)
 #define ICACHE_TAG(WAY, ENTRY)	 \
-	__SYSREG(0xc8100000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)
+	__SYSREG(0xc8100000 + (WAY) * L1_CACHE_WAYDISP + \
+		(ENTRY) * L1_CACHE_BYTES, u32)
 
-/* instruction cache access registers */
+/* data cache access registers */
 #define DCACHE_DATA(WAY, ENTRY, OFF) \
-	__SYSREG(0xc8200000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
+	__SYSREG(0xc8200000 + (WAY) * L1_CACHE_WAYDISP + \
+		(ENTRY) * L1_CACHE_BYTES + (OFF) * 4, u32)
 #define DCACHE_TAG(WAY, ENTRY)	 \
-	__SYSREG(0xc8300000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)
+	__SYSREG(0xc8300000 + (WAY) * L1_CACHE_WAYDISP + \
+		(ENTRY) * L1_CACHE_BYTES, u32)
 
 #endif /* _ASM_CACHE_H */

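The cache.h fix replaces the hard-coded 0x10 entry stride with L1_CACHE_BYTES. The two are identical while cache lines are 16 bytes, so this is a no-op there; the point is correctness on any subarch whose line size differs. A worked example, assuming a hypothetical 32-byte line:

	/* old: ICACHE_TAG(0, 1) -> 0xc8100000 + 1 * 0x10 = 0xc8100010 (mid-line)  */
	/* new: ICACHE_TAG(0, 1) -> 0xc8100000 + 1 * 32   = 0xc8100020 (entry 1)   */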
+ 109 - 55
arch/mn10300/include/asm/cacheflush.h

@@ -17,66 +17,55 @@
 #include <linux/mm.h>
 
 /*
- * virtually-indexed cache management (our cache is physically indexed)
+ * Primitive routines
  */
-#define flush_cache_all()			do {} while (0)
-#define flush_cache_mm(mm)			do {} while (0)
-#define flush_cache_dup_mm(mm)			do {} while (0)
-#define flush_cache_range(mm, start, end)	do {} while (0)
-#define flush_cache_page(vma, vmaddr, pfn)	do {} while (0)
-#define flush_cache_vmap(start, end)		do {} while (0)
-#define flush_cache_vunmap(start, end)		do {} while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)			do {} while (0)
-#define flush_dcache_mmap_lock(mapping)		do {} while (0)
-#define flush_dcache_mmap_unlock(mapping)	do {} while (0)
-
-/*
- * physically-indexed cache management
- */
-#ifndef CONFIG_MN10300_CACHE_DISABLED
-
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);
-
-#else
-
-#define flush_icache_range(start, end)		do {} while (0)
-#define flush_icache_page(vma, pg)		do {} while (0)
-
-#endif
-
-#define flush_icache_user_range(vma, pg, adr, len) \
-	flush_icache_range(adr, adr + len)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	do {					\
-		memcpy(dst, src, len);		\
-		flush_icache_page(vma, page);	\
-	} while (0)
-
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
-
-/*
- * primitive routines
- */
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+extern void mn10300_local_icache_inv(void);
+extern void mn10300_local_icache_inv_page(unsigned long start);
+extern void mn10300_local_icache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_icache_inv_range2(unsigned long start, unsigned long size);
+extern void mn10300_local_dcache_inv(void);
+extern void mn10300_local_dcache_inv_page(unsigned long start);
+extern void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size);
 extern void mn10300_icache_inv(void);
+extern void mn10300_icache_inv_page(unsigned long start);
+extern void mn10300_icache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_icache_inv_range2(unsigned long start, unsigned long size);
 extern void mn10300_dcache_inv(void);
-extern void mn10300_dcache_inv_page(unsigned start);
-extern void mn10300_dcache_inv_range(unsigned start, unsigned end);
-extern void mn10300_dcache_inv_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_inv_page(unsigned long start);
+extern void mn10300_dcache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_inv_range2(unsigned long start, unsigned long size);
 #ifdef CONFIG_MN10300_CACHE_WBACK
+extern void mn10300_local_dcache_flush(void);
+extern void mn10300_local_dcache_flush_page(unsigned long start);
+extern void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size);
+extern void mn10300_local_dcache_flush_inv(void);
+extern void mn10300_local_dcache_flush_inv_page(unsigned long start);
+extern void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size);
 extern void mn10300_dcache_flush(void);
-extern void mn10300_dcache_flush_page(unsigned start);
-extern void mn10300_dcache_flush_range(unsigned start, unsigned end);
-extern void mn10300_dcache_flush_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_flush_page(unsigned long start);
+extern void mn10300_dcache_flush_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_flush_range2(unsigned long start, unsigned long size);
 extern void mn10300_dcache_flush_inv(void);
-extern void mn10300_dcache_flush_inv_page(unsigned start);
-extern void mn10300_dcache_flush_inv_range(unsigned start, unsigned end);
-extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_flush_inv_page(unsigned long start);
+extern void mn10300_dcache_flush_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_flush_inv_range2(unsigned long start, unsigned long size);
 #else
+#define mn10300_local_dcache_flush()			do {} while (0)
+#define mn10300_local_dcache_flush_page(start)		do {} while (0)
+#define mn10300_local_dcache_flush_range(start, end)	do {} while (0)
+#define mn10300_local_dcache_flush_range2(start, size)	do {} while (0)
+#define mn10300_local_dcache_flush_inv() \
+		mn10300_local_dcache_inv()
+#define mn10300_local_dcache_flush_inv_page(start) \
+		mn10300_local_dcache_inv_page(start)
+#define mn10300_local_dcache_flush_inv_range(start, end) \
+		mn10300_local_dcache_inv_range(start, end)
+#define mn10300_local_dcache_flush_inv_range2(start, size) \
+		mn10300_local_dcache_inv_range2(start, size)
 #define mn10300_dcache_flush()				do {} while (0)
 #define mn10300_dcache_flush_page(start)		do {} while (0)
 #define mn10300_dcache_flush_range(start, end)		do {} while (0)
@@ -90,7 +79,26 @@ extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
 	mn10300_dcache_inv_range2((start), (size))
 #endif /* CONFIG_MN10300_CACHE_WBACK */
 #else
+#define mn10300_local_icache_inv()			do {} while (0)
+#define mn10300_local_icache_inv_page(start)		do {} while (0)
+#define mn10300_local_icache_inv_range(start, end)	do {} while (0)
+#define mn10300_local_icache_inv_range2(start, size)	do {} while (0)
+#define mn10300_local_dcache_inv()			do {} while (0)
+#define mn10300_local_dcache_inv_page(start)		do {} while (0)
+#define mn10300_local_dcache_inv_range(start, end)	do {} while (0)
+#define mn10300_local_dcache_inv_range2(start, size)	do {} while (0)
+#define mn10300_local_dcache_flush()			do {} while (0)
+#define mn10300_local_dcache_flush_inv_page(start)	do {} while (0)
+#define mn10300_local_dcache_flush_inv()		do {} while (0)
+#define mn10300_local_dcache_flush_inv_range(start, end)do {} while (0)
+#define mn10300_local_dcache_flush_inv_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_flush_page(start)		do {} while (0)
+#define mn10300_local_dcache_flush_range(start, end)	do {} while (0)
+#define mn10300_local_dcache_flush_range2(start, size)	do {} while (0)
 #define mn10300_icache_inv()				do {} while (0)
+#define mn10300_icache_inv_page(start)			do {} while (0)
+#define mn10300_icache_inv_range(start, end)		do {} while (0)
+#define mn10300_icache_inv_range2(start, size)		do {} while (0)
 #define mn10300_dcache_inv()				do {} while (0)
 #define mn10300_dcache_inv_page(start)			do {} while (0)
 #define mn10300_dcache_inv_range(start, end)		do {} while (0)
@@ -103,10 +111,56 @@ extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
 #define mn10300_dcache_flush_page(start)		do {} while (0)
 #define mn10300_dcache_flush_range(start, end)		do {} while (0)
 #define mn10300_dcache_flush_range2(start, size)	do {} while (0)
-#endif /* CONFIG_MN10300_CACHE_DISABLED */
+#endif /* CONFIG_MN10300_CACHE_ENABLED */
+
+/*
+ * Virtually-indexed cache management (our cache is physically indexed)
+ */
+#define flush_cache_all()			do {} while (0)
+#define flush_cache_mm(mm)			do {} while (0)
+#define flush_cache_dup_mm(mm)			do {} while (0)
+#define flush_cache_range(mm, start, end)	do {} while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do {} while (0)
+#define flush_cache_vmap(start, end)		do {} while (0)
+#define flush_cache_vunmap(start, end)		do {} while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page)			do {} while (0)
+#define flush_dcache_mmap_lock(mapping)		do {} while (0)
+#define flush_dcache_mmap_unlock(mapping)	do {} while (0)
+
+/*
+ * Physically-indexed cache management
+ */
+#if defined(CONFIG_MN10300_CACHE_FLUSH_ICACHE)
+extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#elif defined(CONFIG_MN10300_CACHE_INV_ICACHE)
+static inline void flush_icache_page(struct vm_area_struct *vma,
+				     struct page *page)
+{
+	mn10300_icache_inv_page(page_to_phys(page));
+}
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#else
+#define flush_icache_range(start, end)		do {} while (0)
+#define flush_icache_page(vma, pg)		do {} while (0)
+#endif
+
+
+#define flush_icache_user_range(vma, pg, adr, len) \
+	flush_icache_range(adr, adr + len)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+	do {					\
+		memcpy(dst, src, len);		\
+		flush_icache_page(vma, page);	\
+	} while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+	memcpy(dst, src, len)
 
 /*
- * internal debugging function
+ * Internal debugging function
 */
 #ifdef CONFIG_DEBUG_PAGEALLOC
 extern void kernel_map_pages(struct page *page, int numpages, int enable);

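The reorganised cacheflush.h pairs each global cache op with a mn10300_local_* variant that acts only on the calling CPU; the unprefixed forms are the safe choice when other CPUs may hold the affected lines (the broadcast versions are supplied by the new cache-smp-*.c files in this merge). A typical usage sketch for code patching on a write-back configuration:

	/* push the new instructions out to RAM, then drop stale icache lines */
	mn10300_dcache_flush_range(addr, addr + len);
	mn10300_icache_inv_range(addr, addr + len);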
+ 77 - 14
arch/mn10300/include/asm/cpu-regs.h

@@ -15,7 +15,6 @@
 #include <linux/types.h>
 #include <linux/types.h>
 #endif
 #endif
 
 
-#ifdef CONFIG_MN10300_CPU_AM33V2
 /* we tell the compiler to pretend to be AM33 so that it doesn't try and use
 /* we tell the compiler to pretend to be AM33 so that it doesn't try and use
  * the FP regs, but tell the assembler that we're actually allowed AM33v2
  * the FP regs, but tell the assembler that we're actually allowed AM33v2
  * instructions */
  * instructions */
@@ -24,7 +23,6 @@ asm(" .am33_2\n");
 #else
 #else
 .am33_2
 .am33_2
 #endif
 #endif
-#endif
 
 
 #ifdef __KERNEL__
 #ifdef __KERNEL__
 
 
@@ -58,6 +56,9 @@ asm(" .am33_2\n");
 #define EPSW_nAR		0x00040000	/* register bank control */
 #define EPSW_nAR		0x00040000	/* register bank control */
 #define EPSW_ML			0x00080000	/* monitor level */
 #define EPSW_ML			0x00080000	/* monitor level */
 #define EPSW_FE			0x00100000	/* FPU enable */
 #define EPSW_FE			0x00100000	/* FPU enable */
+#define EPSW_IM_SHIFT		8		/* EPSW_IM_SHIFT determines the interrupt mode */
+
+#define NUM2EPSW_IM(num)	((num) << EPSW_IM_SHIFT)
 
 
 /* FPU registers */
 /* FPU registers */
 #define FPCR_EF_I		0x00000001	/* inexact result FPU exception flag */
 #define FPCR_EF_I		0x00000001	/* inexact result FPU exception flag */
@@ -99,9 +100,11 @@ asm(" .am33_2\n");
 #define CPUREV			__SYSREGC(0xc0000050, u32)	/* CPU revision register */
 #define CPUREV			__SYSREGC(0xc0000050, u32)	/* CPU revision register */
 #define CPUREV_TYPE		0x0000000f	/* CPU type */
 #define CPUREV_TYPE		0x0000000f	/* CPU type */
 #define CPUREV_TYPE_S		0
 #define CPUREV_TYPE_S		0
-#define CPUREV_TYPE_AM33V1	0x00000000	/* - AM33 V1 core, AM33/1.00 arch */
-#define CPUREV_TYPE_AM33V2	0x00000001	/* - AM33 V2 core, AM33/2.00 arch */
-#define CPUREV_TYPE_AM34V1	0x00000002	/* - AM34 V1 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM33_1	0x00000000	/* - AM33-1 core, AM33/1.00 arch */
+#define CPUREV_TYPE_AM33_2	0x00000001	/* - AM33-2 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM34_1	0x00000002	/* - AM34-1 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM33_3	0x00000003	/* - AM33-3 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM34_2	0x00000004	/* - AM34-2 core, AM33/3.00 arch */
 #define CPUREV_REVISION		0x000000f0	/* CPU revision */
 #define CPUREV_REVISION		0x000000f0	/* CPU revision */
 #define CPUREV_REVISION_S	4
 #define CPUREV_REVISION_S	4
 #define CPUREV_ICWAY		0x00000f00	/* number of instruction cache ways */
 #define CPUREV_ICWAY		0x00000f00	/* number of instruction cache ways */
@@ -180,6 +183,21 @@ asm(" .am33_2\n");
 #define CHCTR_ICWMD		0x0f00		/* instruction cache way mode */
 #define CHCTR_ICWMD		0x0f00		/* instruction cache way mode */
 #define CHCTR_DCWMD		0xf000		/* data cache way mode */
 #define CHCTR_DCWMD		0xf000		/* data cache way mode */
 
 
+#ifdef CONFIG_AM34_2
+#define ICIVCR			__SYSREG(0xc0000c00, u32)	/* icache area invalidate control */
+#define ICIVCR_ICIVBSY		0x00000008			/* icache area invalidate busy */
+#define ICIVCR_ICI		0x00000001			/* icache area invalidate */
+
+#define ICIVMR			__SYSREG(0xc0000c04, u32)	/* icache area invalidate mask */
+
+#define	DCPGCR			__SYSREG(0xc0000c10, u32)	/* data cache area purge control */
+#define	DCPGCR_DCPGBSY		0x00000008			/* data cache area purge busy */
+#define	DCPGCR_DCP		0x00000002			/* data cache area purge */
+#define	DCPGCR_DCI		0x00000001			/* data cache area invalidate */
+
+#define	DCPGMR			__SYSREG(0xc0000c14, u32)	/* data cache area purge mask */
+#endif /* CONFIG_AM34_2 */
+
 /* MMU control registers */
 /* MMU control registers */
 #define MMUCTR			__SYSREG(0xc0000090, u32)	/* MMU control register */
 #define MMUCTR			__SYSREG(0xc0000090, u32)	/* MMU control register */
 #define MMUCTR_IRP		0x0000003f	/* instruction TLB replace pointer */
 #define MMUCTR_IRP		0x0000003f	/* instruction TLB replace pointer */
@@ -203,6 +221,9 @@ asm(" .am33_2\n");
 #define MMUCTR_DTL_LOCK0_3	0x03000000	/* - entry 0-3 locked */
 #define MMUCTR_DTL_LOCK0_3	0x03000000	/* - entry 0-3 locked */
 #define MMUCTR_DTL_LOCK0_7	0x04000000	/* - entry 0-7 locked */
 #define MMUCTR_DTL_LOCK0_7	0x04000000	/* - entry 0-7 locked */
 #define MMUCTR_DTL_LOCK0_15	0x05000000	/* - entry 0-15 locked */
 #define MMUCTR_DTL_LOCK0_15	0x05000000	/* - entry 0-15 locked */
+#ifdef CONFIG_AM34_2
+#define MMUCTR_WTE		0x80000000	/* write-through cache TLB entry bit enable */
+#endif
 
 
 #define PIDR			__SYSREG(0xc0000094, u16)	/* PID register */
 #define PIDR			__SYSREG(0xc0000094, u16)	/* PID register */
 #define PIDR_PID		0x00ff		/* process identifier */
 #define PIDR_PID		0x00ff		/* process identifier */
@@ -231,14 +252,6 @@ asm(" .am33_2\n");
 #define xPTEL_PS_4Mb		0x00000c00	/* - 4Mb page */
 #define xPTEL_PS_4Mb		0x00000c00	/* - 4Mb page */
 #define xPTEL_PPN		0xfffff006	/* physical page number */
 #define xPTEL_PPN		0xfffff006	/* physical page number */
 
 
-#define xPTEL_V_BIT		0	/* bit numbers corresponding to above masks */
-#define xPTEL_UNUSED1_BIT	1
-#define xPTEL_UNUSED2_BIT	2
-#define xPTEL_C_BIT		3
-#define xPTEL_PV_BIT		4
-#define xPTEL_D_BIT		5
-#define xPTEL_G_BIT		9
-
 #define IPTEU			__SYSREG(0xc00000a4, u32)	/* instruction TLB virtual addr */
 #define IPTEU			__SYSREG(0xc00000a4, u32)	/* instruction TLB virtual addr */
 #define DPTEU			__SYSREG(0xc00000b4, u32)	/* data TLB virtual addr */
 #define DPTEU			__SYSREG(0xc00000b4, u32)	/* data TLB virtual addr */
 #define xPTEU_VPN		0xfffffc00	/* virtual page number */
 #define xPTEU_VPN		0xfffffc00	/* virtual page number */
@@ -262,7 +275,16 @@ asm(" .am33_2\n");
 #define xPTEL2_PS_128Kb		0x00000100	/* - 128Kb page */
 #define xPTEL2_PS_128Kb		0x00000100	/* - 128Kb page */
 #define xPTEL2_PS_1Kb		0x00000200	/* - 1Kb page */
 #define xPTEL2_PS_1Kb		0x00000200	/* - 1Kb page */
 #define xPTEL2_PS_4Mb		0x00000300	/* - 4Mb page */
 #define xPTEL2_PS_4Mb		0x00000300	/* - 4Mb page */
-#define xPTEL2_PPN		0xfffffc00	/* physical page number */
+#define xPTEL2_CWT		0x00000400	/* cacheable write-through */
+#define xPTEL2_UNUSED1		0x00000800	/* unused bit (broadcast mask) */
+#define xPTEL2_PPN		0xfffff000	/* physical page number */
+
+#define xPTEL2_V_BIT		0	/* bit numbers corresponding to above masks */
+#define xPTEL2_C_BIT		1
+#define xPTEL2_PV_BIT		2
+#define xPTEL2_D_BIT		3
+#define xPTEL2_G_BIT		7
+#define xPTEL2_UNUSED1_BIT	11
 
 
 #define MMUFCR			__SYSREGC(0xc000009c, u32)	/* MMU exception cause */
 #define MMUFCR			__SYSREGC(0xc000009c, u32)	/* MMU exception cause */
 #define MMUFCR_IFC		__SYSREGC(0xc000009c, u16)	/* MMU instruction excep cause */
 #define MMUFCR_IFC		__SYSREGC(0xc000009c, u16)	/* MMU instruction excep cause */
@@ -285,6 +307,47 @@ asm(" .am33_2\n");
 #define MMUFCR_xFC_PR_RWK_RWU	0x01c0		/* - R/W kernel and R/W user */
 #define MMUFCR_xFC_PR_RWK_RWU	0x01c0		/* - R/W kernel and R/W user */
 #define MMUFCR_xFC_ILLADDR	0x0200		/* illegal address excep flag */
 #define MMUFCR_xFC_ILLADDR	0x0200		/* illegal address excep flag */
 
 
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+/* atomic operation registers */
+#define AAR		__SYSREG(0xc0000a00, u32)	/* cacheable address */
+#define AAR2		__SYSREG(0xc0000a04, u32)	/* uncacheable address */
+#define ADR		__SYSREG(0xc0000a08, u32)	/* data */
+#define ASR		__SYSREG(0xc0000a0c, u32)	/* status */
+#define AARU		__SYSREG(0xd400aa00, u32)	/* user address */
+#define ADRU		__SYSREG(0xd400aa08, u32)	/* user data */
+#define ASRU		__SYSREG(0xd400aa0c, u32)	/* user status */
+
+#define ASR_RW		0x00000008	/* read */
+#define ASR_BW		0x00000004	/* bus error */
+#define ASR_IW		0x00000002	/* interrupt */
+#define ASR_LW		0x00000001	/* bus lock */
+
+#define ASRU_RW		ASR_RW		/* read */
+#define ASRU_BW		ASR_BW		/* bus error */
+#define ASRU_IW		ASR_IW		/* interrupt */
+#define ASRU_LW		ASR_LW		/* bus lock */
+
+/* in inline asm, we stick the base pointer into a register and use offsets
+ * from it */
+#define ATOMIC_OPS_BASE_ADDR 0xc0000a00
+#ifndef __ASSEMBLY__
+asm(
+	"_AAR	= 0\n"
+	"_AAR2	= 4\n"
+	"_ADR	= 8\n"
+	"_ASR	= 12\n");
+#else
+#define _AAR		0
+#define _AAR2		4
+#define _ADR		8
+#define _ASR		12
+#endif
+
+/* physical page address for userspace atomic operations registers */
+#define USER_ATOMIC_OPS_PAGE_ADDR  0xd400a000
+
+#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+
 #endif /* __KERNEL__ */

 #endif /* _ASM_CPU_REGS_H */
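
The AAR/ADR/ASR block above is easiest to follow with a worked loop. The
sketch below is not part of the patch; it is a minimal illustration assuming
the AM34 semantics implied by these definitions (writing AAR latches the
target address and begins a locked access, writing ADR performs the store,
and a non-zero ASR value means the operation was disturbed and must retry):

	static inline unsigned long example_atomic_xchg(volatile unsigned long *m,
							unsigned long val)
	{
		unsigned long status, oldval;

		do {
			asm volatile(
				"mov	%4,(_AAR,%3)	\n"	/* latch address, begin locked access */
				"mov	(_ADR,%3),%1	\n"	/* read the current value */
				"mov	%5,(_ADR,%3)	\n"	/* store the new value */
				"mov	(_ASR,%3),%0	\n"	/* read status, ending the access */
				: "=&r"(status), "=&r"(oldval), "=m"(*m)
				: "a"(ATOMIC_OPS_BASE_ADDR), "r"(m), "r"(val)
				: "memory", "cc");
		} while (status);	/* non-zero ASR => interrupted, retry */

		return oldval;
	}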

+ 1 - 86
arch/mn10300/include/asm/dmactl-regs.h

@@ -11,91 +11,6 @@
 #ifndef _ASM_DMACTL_REGS_H
 #define _ASM_DMACTL_REGS_H

-#include <asm/cpu-regs.h>
-
-#ifdef __KERNEL__
-
-/* DMA registers */
-#define	DMxCTR(N)		__SYSREG(0xd2000000 + ((N) * 0x100), u32)	/* control reg */
-#define	DMxCTR_BG		0x0000001f	/* transfer request source */
-#define	DMxCTR_BG_SOFT		0x00000000	/* - software source */
-#define	DMxCTR_BG_SC0TX		0x00000002	/* - serial port 0 transmission */
-#define	DMxCTR_BG_SC0RX		0x00000003	/* - serial port 0 reception */
-#define	DMxCTR_BG_SC1TX		0x00000004	/* - serial port 1 transmission */
-#define	DMxCTR_BG_SC1RX		0x00000005	/* - serial port 1 reception */
-#define	DMxCTR_BG_SC2TX		0x00000006	/* - serial port 2 transmission */
-#define	DMxCTR_BG_SC2RX		0x00000007	/* - serial port 2 reception */
-#define	DMxCTR_BG_TM0UFLOW	0x00000008	/* - timer 0 underflow */
-#define	DMxCTR_BG_TM1UFLOW	0x00000009	/* - timer 1 underflow */
-#define	DMxCTR_BG_TM2UFLOW	0x0000000a	/* - timer 2 underflow */
-#define	DMxCTR_BG_TM3UFLOW	0x0000000b	/* - timer 3 underflow */
-#define	DMxCTR_BG_TM6ACMPCAP	0x0000000c	/* - timer 6A compare/capture */
-#define	DMxCTR_BG_AFE		0x0000000d	/* - analogue front-end interrupt source */
-#define	DMxCTR_BG_ADC		0x0000000e	/* - A/D conversion end interrupt source */
-#define	DMxCTR_BG_IRDA		0x0000000f	/* - IrDA interrupt source */
-#define	DMxCTR_BG_RTC		0x00000010	/* - RTC interrupt source */
-#define	DMxCTR_BG_XIRQ0		0x00000011	/* - XIRQ0 pin interrupt source */
-#define	DMxCTR_BG_XIRQ1		0x00000012	/* - XIRQ1 pin interrupt source */
-#define	DMxCTR_BG_XDMR0		0x00000013	/* - external request 0 source (XDMR0 pin) */
-#define	DMxCTR_BG_XDMR1		0x00000014	/* - external request 1 source (XDMR1 pin) */
-#define	DMxCTR_SAM		0x000000e0	/* DMA transfer src addr mode */
-#define	DMxCTR_SAM_INCR		0x00000000	/* - increment */
-#define	DMxCTR_SAM_DECR		0x00000020	/* - decrement */
-#define	DMxCTR_SAM_FIXED	0x00000040	/* - fixed */
-#define	DMxCTR_DAM		0x00000000	/* DMA transfer dest addr mode */
-#define	DMxCTR_DAM_INCR		0x00000000	/* - increment */
-#define	DMxCTR_DAM_DECR		0x00000100	/* - decrement */
-#define	DMxCTR_DAM_FIXED	0x00000200	/* - fixed */
-#define	DMxCTR_TM		0x00001800	/* DMA transfer mode */
-#define	DMxCTR_TM_BATCH		0x00000000	/* - batch transfer */
-#define	DMxCTR_TM_INTERM	0x00001000	/* - intermittent transfer */
-#define	DMxCTR_UT		0x00006000	/* DMA transfer unit */
-#define	DMxCTR_UT_1		0x00000000	/* - 1 byte */
-#define	DMxCTR_UT_2		0x00002000	/* - 2 byte */
-#define	DMxCTR_UT_4		0x00004000	/* - 4 byte */
-#define	DMxCTR_UT_16		0x00006000	/* - 16 byte */
-#define	DMxCTR_TEN		0x00010000	/* DMA channel transfer enable */
-#define	DMxCTR_RQM		0x00060000	/* external request input source mode */
-#define	DMxCTR_RQM_FALLEDGE	0x00000000	/* - falling edge */
-#define	DMxCTR_RQM_RISEEDGE	0x00020000	/* - rising edge */
-#define	DMxCTR_RQM_LOLEVEL	0x00040000	/* - low level */
-#define	DMxCTR_RQM_HILEVEL	0x00060000	/* - high level */
-#define	DMxCTR_RQF		0x01000000	/* DMA transfer request flag */
-#define	DMxCTR_XEND		0x80000000	/* DMA transfer end flag */
-
-#define	DMxSRC(N)		__SYSREG(0xd2000004 + ((N) * 0x100), u32)	/* control reg */
-
-#define	DMxDST(N)		__SYSREG(0xd2000008 + ((N) * 0x100), u32)	/* src addr reg */
-
-#define	DMxSIZ(N)		__SYSREG(0xd200000c + ((N) * 0x100), u32)	/* dest addr reg */
-#define DMxSIZ_CT		0x000fffff	/* number of bytes to transfer */
-
-#define	DMxCYC(N)		__SYSREG(0xd2000010 + ((N) * 0x100), u32)	/* intermittent
-										 * size reg */
-#define DMxCYC_CYC		0x000000ff	/* number of interrmittent transfers -1 */
-
-#define DM0IRQ			16		/* DMA channel 0 complete IRQ */
-#define DM1IRQ			17		/* DMA channel 1 complete IRQ */
-#define DM2IRQ			18		/* DMA channel 2 complete IRQ */
-#define DM3IRQ			19		/* DMA channel 3 complete IRQ */
-
-#define	DM0ICR			GxICR(DM0IRQ)	/* DMA channel 0 complete intr ctrl reg */
-#define	DM1ICR			GxICR(DM0IR1)	/* DMA channel 1 complete intr ctrl reg */
-#define	DM2ICR			GxICR(DM0IR2)	/* DMA channel 2 complete intr ctrl reg */
-#define	DM3ICR			GxICR(DM0IR3)	/* DMA channel 3 complete intr ctrl reg */
-
-#ifndef __ASSEMBLY__
-
-struct mn10300_dmactl_regs {
-	u32		ctr;
-	const void	*src;
-	void		*dst;
-	u32		siz;
-	u32		cyc;
-} __attribute__((aligned(0x100)));
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
+#include <proc/dmactl-regs.h>

 #endif /* _ASM_DMACTL_REGS_H */

+ 10 - 2
arch/mn10300/include/asm/elf.h

@@ -31,6 +31,12 @@
 #define R_MN10300_SYM_DIFF	33	/* Adjustment when relaxing. */
 #define R_MN10300_ALIGN 	34	/* Alignment requirement. */

+/*
+ * AM33/AM34 HW Capabilities
+ */
+#define HWCAP_MN10300_ATOMIC_OP_UNIT	1	/* Has AM34 Atomic Operations */
+
+
 /*
  * ELF register definitions..
  */
@@ -47,8 +53,6 @@ typedef struct {
 	u_int32_t	fpcr;
 } elf_fpregset_t;

-extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
-
 /*
  * This is used to ensure we don't load something for the wrong architecture
  */
@@ -130,7 +134,11 @@ do {						\
  * instruction set this CPU supports.  This could be done in user space,
  * but it's not easy, and we've already done it here.
  */
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+#define ELF_HWCAP	(HWCAP_MN10300_ATOMIC_OP_UNIT)
+#else
 #define ELF_HWCAP	(0)
+#endif
+#endif

 /*
  * This yields a string that ld.so will use to load implementation
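
Userspace can test this capability bit through the ELF auxiliary vector. A
minimal sketch (illustrative only; it assumes glibc's getauxval(), available
since glibc 2.16):

	#include <sys/auxv.h>
	#include <stdio.h>

	#define HWCAP_MN10300_ATOMIC_OP_UNIT	1	/* as defined above */

	int main(void)
	{
		unsigned long hwcap = getauxval(AT_HWCAP);

		if (hwcap & HWCAP_MN10300_ATOMIC_OP_UNIT)
			printf("AM34 atomic operations unit present\n");
		return 0;
	}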

+ 4 - 4
arch/mn10300/include/asm/exceptions.h

@@ -15,8 +15,8 @@

 /*
  * define the breakpoint instruction opcode to use
- * - note that the JTAG unit steals 0xFF, so we want to avoid that if we can
- *   (can use 0xF7)
+ * - note that the JTAG unit steals 0xFF, so you can't use JTAG and GDBSTUB at
+ *   the same time.
  */
 #define GDBSTUB_BKPT		0xFF

@@ -90,7 +90,6 @@ enum exception_code {

 extern void __set_intr_stub(enum exception_code code, void *handler);
 extern void set_intr_stub(enum exception_code code, void *handler);
-extern void set_jtag_stub(enum exception_code code, void *handler);

 struct pt_regs;

@@ -102,7 +101,6 @@ extern asmlinkage void dtlb_aerror(void);
 extern asmlinkage void raw_bus_error(void);
 extern asmlinkage void double_fault(void);
 extern asmlinkage int  system_call(struct pt_regs *);
-extern asmlinkage void fpu_exception(struct pt_regs *, enum exception_code);
 extern asmlinkage void nmi(struct pt_regs *, enum exception_code);
 extern asmlinkage void uninitialised_exception(struct pt_regs *,
 					       enum exception_code);
@@ -116,6 +114,8 @@ extern void die(const char *, struct pt_regs *, enum exception_code)

 extern int die_if_no_fixup(const char *, struct pt_regs *, enum exception_code);

+#define NUM2EXCEP_IRQ_LEVEL(num)	(EXCEP_IRQ_LEVEL0 + (num) * 8)
+
 #endif /* __ASSEMBLY__ */

 #endif /* _ASM_EXCEPTIONS_H */

+ 104 - 53
arch/mn10300/include/asm/fpu.h

@@ -12,74 +12,125 @@
 #ifndef _ASM_FPU_H
 #define _ASM_FPU_H

-#include <asm/processor.h>
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+#include <asm/exceptions.h>
 #include <asm/sigcontext.h>
-#include <asm/user.h>

 #ifdef __KERNEL__

-/* the task that owns the FPU state */
+extern asmlinkage void fpu_disabled(void);
+
+#ifdef CONFIG_FPU
+
+#ifdef CONFIG_LAZY_SAVE_FPU
+/* the task that currently owns the FPU state */
 extern struct task_struct *fpu_state_owner;
+#endif

-#define set_using_fpu(tsk)				\
-do {							\
-	(tsk)->thread.fpu_flags |= THREAD_USING_FPU;	\
-} while (0)
+#if (THREAD_USING_FPU & ~0xff)
+#error THREAD_USING_FPU must be smaller than 0x100.
+#endif

-#define clear_using_fpu(tsk)				\
-do {							\
-	(tsk)->thread.fpu_flags &= ~THREAD_USING_FPU;	\
-} while (0)
+static inline void set_using_fpu(struct task_struct *tsk)
+{
+	asm volatile(
+		"bset %0,(0,%1)"
+		:
+		: "i"(THREAD_USING_FPU), "a"(&tsk->thread.fpu_flags)
+		: "memory", "cc");
+}

-#define is_using_fpu(tsk) ((tsk)->thread.fpu_flags & THREAD_USING_FPU)
+static inline void clear_using_fpu(struct task_struct *tsk)
+{
+	asm volatile(
+		"bclr %0,(0,%1)"
+		:
+		: "i"(THREAD_USING_FPU), "a"(&tsk->thread.fpu_flags)
+		: "memory", "cc");
+}

-#define unlazy_fpu(tsk)					\
-do {							\
-	preempt_disable();				\
-	if (fpu_state_owner == (tsk))			\
-		fpu_save(&tsk->thread.fpu_state);	\
-	preempt_enable();				\
-} while (0)
-
-#define exit_fpu()				\
-do {						\
-	struct task_struct *__tsk = current;	\
-	preempt_disable();			\
-	if (fpu_state_owner == __tsk)		\
-		fpu_state_owner = NULL;		\
-	preempt_enable();			\
-} while (0)
-
-#define flush_fpu()					\
-do {							\
-	struct task_struct *__tsk = current;		\
-	preempt_disable();				\
-	if (fpu_state_owner == __tsk) {			\
-		fpu_state_owner = NULL;			\
-		__tsk->thread.uregs->epsw &= ~EPSW_FE;	\
-	}						\
-	preempt_enable();				\
-	clear_using_fpu(__tsk);				\
-} while (0)
+#define is_using_fpu(tsk) ((tsk)->thread.fpu_flags & THREAD_USING_FPU)

-extern asmlinkage void fpu_init_state(void);
 extern asmlinkage void fpu_kill_state(struct task_struct *);
-extern asmlinkage void fpu_disabled(struct pt_regs *, enum exception_code);
 extern asmlinkage void fpu_exception(struct pt_regs *, enum exception_code);
-
-#ifdef CONFIG_FPU
+extern asmlinkage void fpu_invalid_op(struct pt_regs *, enum exception_code);
+extern asmlinkage void fpu_init_state(void);
 extern asmlinkage void fpu_save(struct fpu_state_struct *);
-extern asmlinkage void fpu_restore(struct fpu_state_struct *);
-#else
-#define fpu_save(a)
-#define fpu_restore(a)
-#endif /* CONFIG_FPU  */
-
-/*
- * signal frame handlers
- */
 extern int fpu_setup_sigcontext(struct fpucontext *buf);
 extern int fpu_restore_sigcontext(struct fpucontext *buf);

+static inline void unlazy_fpu(struct task_struct *tsk)
+{
+	preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+	if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+		fpu_save(&tsk->thread.fpu_state);
+		tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+		tsk->thread.uregs->epsw &= ~EPSW_FE;
+	}
+#else
+	if (fpu_state_owner == tsk)
+		fpu_save(&tsk->thread.fpu_state);
+#endif
+	preempt_enable();
+}
+
+static inline void exit_fpu(void)
+{
+#ifdef CONFIG_LAZY_SAVE_FPU
+	struct task_struct *tsk = current;
+
+	preempt_disable();
+	if (fpu_state_owner == tsk)
+		fpu_state_owner = NULL;
+	preempt_enable();
+#endif
+}
+
+static inline void flush_fpu(void)
+{
+	struct task_struct *tsk = current;
+
+	preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+	if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+		tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+		tsk->thread.uregs->epsw &= ~EPSW_FE;
+	}
+#else
+	if (fpu_state_owner == tsk) {
+		fpu_state_owner = NULL;
+		tsk->thread.uregs->epsw &= ~EPSW_FE;
+	}
+#endif
+	preempt_enable();
+	clear_using_fpu(tsk);
+}
+
+#else /* CONFIG_FPU */
+
+extern asmlinkage
+void unexpected_fpu_exception(struct pt_regs *, enum exception_code);
+#define fpu_invalid_op unexpected_fpu_exception
+#define fpu_exception unexpected_fpu_exception
+
+struct task_struct;
+struct fpu_state_struct;
+static inline bool is_using_fpu(struct task_struct *tsk) { return false; }
+static inline void set_using_fpu(struct task_struct *tsk) {}
+static inline void clear_using_fpu(struct task_struct *tsk) {}
+static inline void fpu_init_state(void) {}
+static inline void fpu_save(struct fpu_state_struct *s) {}
+static inline void fpu_kill_state(struct task_struct *tsk) {}
+static inline void unlazy_fpu(struct task_struct *tsk) {}
+static inline void exit_fpu(void) {}
+static inline void flush_fpu(void) {}
+static inline int fpu_setup_sigcontext(struct fpucontext *buf) { return 0; }
+static inline int fpu_restore_sigcontext(struct fpucontext *buf) { return 0; }
+#endif /* CONFIG_FPU  */
+
 #endif /* __KERNEL__ */
+#endif /* !__ASSEMBLY__ */
 #endif /* _ASM_FPU_H */
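
A quick sketch (illustrative only, not part of the patch) of how a caller
such as a ptrace or coredump path would use the API above to force the live
FPU contents into memory before reading them; it works whether lazy or eager
saving is configured:

	static inline void example_read_fpu_state(struct task_struct *tsk,
						  struct fpu_state_struct *out)
	{
		unlazy_fpu(tsk);		/* flush live state to tsk->thread.fpu_state */
		*out = tsk->thread.fpu_state;	/* now safe to copy */
	}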

+ 13 - 7
arch/mn10300/include/asm/frame.inc

@@ -18,6 +18,7 @@
 #ifndef __ASM_OFFSETS_H__
 #include <asm/asm-offsets.h>
 #endif
+#include <asm/thread_info.h>

 #define pi break

@@ -37,11 +38,15 @@
 	movm	[d2,d3,a2,a3,exreg0,exreg1,exother],(sp)
 	mov	sp,fp				# FRAME pointer in A3
 	add	-12,sp				# allow for calls to be made
-	mov	(__frame),a1
-	mov	a1,(REG_NEXT,fp)
-	mov	fp,(__frame)

-	and	~EPSW_FE,epsw			# disable the FPU inside the kernel
+	# push the exception frame onto the front of the list
+	GET_THREAD_INFO a1
+	mov	(TI_frame,a1),a0
+	mov	a0,(REG_NEXT,fp)
+	mov	fp,(TI_frame,a1)
+
+	# disable the FPU inside the kernel
+	and	~EPSW_FE,epsw

 	# we may be holding current in E2
 #ifdef CONFIG_MN10300_CURRENT_IN_E2
@@ -57,10 +62,11 @@
 .macro RESTORE_ALL
 	# peel back the stack to the calling frame
 	# - this permits execve() to discard extra frames due to kernel syscalls
-	mov	(__frame),fp
+	GET_THREAD_INFO a0
+	mov	(TI_frame,a0),fp
 	mov	fp,sp
-	mov	(REG_NEXT,fp),d0                # userspace has regs->next == 0
-	mov	d0,(__frame)
+	mov	(REG_NEXT,fp),d0
+	mov	d0,(TI_frame,a0)                # userspace has regs->next == 0

 #ifndef CONFIG_MN10300_USING_JTAG
 	mov	(REG_EPSW,fp),d0

+ 1 - 1
arch/mn10300/include/asm/gdb-stub.h

@@ -110,7 +110,7 @@ extern asmlinkage void gdbstub_exception(struct pt_regs *, enum exception_code);
 extern asmlinkage void __gdbstub_bug_trap(void);
 extern asmlinkage void __gdbstub_pause(void);

-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
 extern asmlinkage void gdbstub_purge_cache(void);
 #else
 #define gdbstub_purge_cache()	do {} while (0)

+ 2 - 1
arch/mn10300/include/asm/hardirq.h

@@ -19,9 +19,10 @@
 /* assembly code in softirq.h is sensitive to the offsets of these fields */
 typedef struct {
 	unsigned int	__softirq_pending;
-	unsigned long	idle_timestamp;
+#ifdef CONFIG_MN10300_WD_TIMER
 	unsigned int	__nmi_count;	/* arch dependent */
 	unsigned int	__irq_count;	/* arch dependent */
+#endif
 } ____cacheline_aligned irq_cpustat_t;

 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */

+ 2 - 2
arch/mn10300/include/asm/highmem.h

@@ -87,7 +87,7 @@ static inline unsigned long __kmap_atomic(struct page *page)
 		BUG();
 #endif
 	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
-	__flush_tlb_one(vaddr);
+	local_flush_tlb_one(vaddr);

 	return vaddr;
 }
@@ -116,7 +116,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
 		 * this pte without first remap it
 		 */
 		pte_clear(kmap_pte - idx);
-		__flush_tlb_one(vaddr);
+		local_flush_tlb_one(vaddr);
 	}
 #endif


+ 20 - 17
arch/mn10300/include/asm/intctl-regs.h

@@ -15,24 +15,19 @@

 #ifdef __KERNEL__

-/* interrupt controller registers */
-#define GxICR(X)		__SYSREG(0xd4000000 + (X) * 4, u16)	/* group irq ctrl regs */
-
-#define IAGR			__SYSREG(0xd4000100, u16)	/* intr acceptance group reg */
-#define IAGR_GN			0x00fc		/* group number register
-						 * (documentation _has_ to be wrong)
-						 */
+/*
+ * Interrupt controller registers
+ * - Registers 64-191 are at addresses offset from the main array
+ */
+#define GxICR(X)						\
+	__SYSREG(0xd4000000 + (X) * 4 +				\
+		 (((X) >= 64) && ((X) < 192)) * 0xf00, u16)

-#define EXTMD			__SYSREG(0xd4000200, u16)	/* external pin intr spec reg */
-#define GET_XIRQ_TRIGGER(X) ((EXTMD >> ((X) * 2)) & 3)
+#define GxICR_u8(X)							\
+	__SYSREG(0xd4000000 + (X) * 4 +					\
+		 (((X) >= 64) && ((X) < 192)) * 0xf00, u8)

-#define SET_XIRQ_TRIGGER(X,Y)			\
-do {						\
-	u16 x = EXTMD;				\
-	x &= ~(3 << ((X) * 2));			\
-	x |= ((Y) & 3) << ((X) * 2);		\
-	EXTMD = x;				\
-} while (0)
+#include <proc/intctl-regs.h>

 #define XIRQ_TRIGGER_LOWLEVEL	0
 #define XIRQ_TRIGGER_HILEVEL	1
@@ -59,10 +54,18 @@ do {						\
 #define GxICR_LEVEL_5		0x5000		/* - level 5 */
 #define GxICR_LEVEL_6		0x6000		/* - level 6 */
 #define GxICR_LEVEL_SHIFT	12
+#define GxICR_NMI		0x8000		/* nmi request flag */
+
+#define NUM2GxICR_LEVEL(num)	((num) << GxICR_LEVEL_SHIFT)

 #ifndef __ASSEMBLY__
 extern void set_intr_level(int irq, u16 level);
-extern void set_intr_postackable(int irq);
+extern void mn10300_intc_set_level(unsigned int irq, unsigned int level);
+extern void mn10300_intc_clear(unsigned int irq);
+extern void mn10300_intc_set(unsigned int irq);
+extern void mn10300_intc_enable(unsigned int irq);
+extern void mn10300_intc_disable(unsigned int irq);
+extern void mn10300_set_lateack_irq_type(int irq);
 #endif

 /* external interrupts */
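
Worked examples (not in the patch) of the GxICR() address computation
introduced above, showing how IRQs 64-191 land in a second register bank
0xf00 beyond where a flat array would place them:

	/*
	 *   GxICR(1)   == __SYSREG(0xd4000000 + 1*4,           u16)  -> 0xd4000004
	 *   GxICR(64)  == __SYSREG(0xd4000000 + 64*4  + 0xf00, u16)  -> 0xd4001000
	 *   GxICR(192) == __SYSREG(0xd4000000 + 192*4,         u16)  -> 0xd4000300
	 */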

+ 13 - 0
arch/mn10300/include/asm/io.h

@@ -206,6 +206,19 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
 #define iowrite32_rep(p, src, count) \
 	outsl((unsigned long) (p), (src), (count))

+#define readsb(p, dst, count) \
+	insb((unsigned long) (p), (dst), (count))
+#define readsw(p, dst, count) \
+	insw((unsigned long) (p), (dst), (count))
+#define readsl(p, dst, count) \
+	insl((unsigned long) (p), (dst), (count))
+
+#define writesb(p, src, count) \
+	outsb((unsigned long) (p), (src), (count))
+#define writesw(p, src, count) \
+	outsw((unsigned long) (p), (src), (count))
+#define writesl(p, src, count) \
+	outsl((unsigned long) (p), (src), (count))

 #define IO_SPACE_LIMIT 0xffffffff


+ 10 - 2
arch/mn10300/include/asm/irq.h

@@ -21,8 +21,16 @@
 /* this number is used when no interrupt has been assigned */
 #define NO_IRQ		INT_MAX

-/* hardware irq numbers */
-#define NR_IRQS		GxICR_NUM_IRQS
+/*
+ * hardware irq numbers
+ * - the ASB2364 has an FPGA with an IRQ multiplexer on it
+ */
+#ifdef CONFIG_MN10300_UNIT_ASB2364
+#include <unit/irq.h>
+#else
+#define NR_CPU_IRQS	GxICR_NUM_IRQS
+#define NR_IRQS		NR_CPU_IRQS
+#endif

 /* external hardware irq numbers */
 #define NR_XIRQS	GxICR_NUM_XIRQS

+ 5 - 1
arch/mn10300/include/asm/irq_regs.h

@@ -18,7 +18,11 @@
 #define ARCH_HAS_OWN_IRQ_REGS

 #ifndef __ASSEMBLY__
-#define get_irq_regs() (__frame)
+static inline __attribute__((const))
+struct pt_regs *get_irq_regs(void)
+{
+	return current_frame();
+}
 #endif

 #endif /* _ASM_IRQ_REGS_H */

+ 102 - 9
arch/mn10300/include/asm/irqflags.h

@@ -13,6 +13,9 @@
 #define _ASM_IRQFLAGS_H

 #include <asm/cpu-regs.h>
+#ifndef __ASSEMBLY__
+#include <linux/smp.h>
+#endif

 /*
  * interrupt control
@@ -23,11 +26,7 @@
  *   - level 6 - timer interrupt
  * - "enabled":  run in IM7
  */
-#ifdef CONFIG_MN10300_TTYSM
-#define MN10300_CLI_LEVEL	EPSW_IM_2
-#else
-#define MN10300_CLI_LEVEL	EPSW_IM_1
-#endif
+#define MN10300_CLI_LEVEL	(CONFIG_LINUX_CLI_LEVEL << EPSW_IM_SHIFT)

 #ifndef __ASSEMBLY__

@@ -64,11 +63,12 @@ static inline unsigned long arch_local_irq_save(void)
 /*
  * we make sure arch_irq_enable() doesn't cause priority inversion
  */
-extern unsigned long __mn10300_irq_enabled_epsw;
+extern unsigned long __mn10300_irq_enabled_epsw[];

 static inline void arch_local_irq_enable(void)
 {
 	unsigned long tmp;
+	int cpu = raw_smp_processor_id();

 	asm volatile(
 		"	mov	epsw,%0		\n"
@@ -76,8 +76,8 @@ static inline void arch_local_irq_enable(void)
 		"	or	%2,%0		\n"
 		"	or	%2,%0		\n"
 		"	mov	%0,epsw		\n"
 		"	mov	%0,epsw		\n"
 		: "=&d"(tmp)
 		: "=&d"(tmp)
-		: "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw)
-		: "memory");
+		: "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw[cpu])
+		: "memory", "cc");
 }
 }
 
 
 static inline void arch_local_irq_restore(unsigned long flags)
 static inline void arch_local_irq_restore(unsigned long flags)
@@ -94,7 +94,7 @@ static inline void arch_local_irq_restore(unsigned long flags)

 static inline bool arch_irqs_disabled_flags(unsigned long flags)
 {
-	return (flags & EPSW_IM) <= MN10300_CLI_LEVEL;
+	return (flags & (EPSW_IE | EPSW_IM)) != (EPSW_IE | EPSW_IM_7);
 }

 static inline bool arch_irqs_disabled(void)
@@ -109,6 +109,9 @@ static inline bool arch_irqs_disabled(void)
  */
 static inline void arch_safe_halt(void)
 {
+#ifdef CONFIG_SMP
+	arch_local_irq_enable();
+#else
 	asm volatile(
 		"	or	%0,epsw	\n"
 		"	nop		\n"
@@ -117,7 +120,97 @@ static inline void arch_safe_halt(void)
 		:
 		: "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP)
 		: "cc");
+#endif
 }

+#define __sleep_cpu()				\
+do {						\
+	asm volatile(				\
+		"	bset	%1,(%0)\n"	\
+		"1:	btst	%1,(%0)\n"	\
+		"	bne	1b\n"		\
+		:				\
+		: "i"(&CPUM), "i"(CPUM_SLEEP)	\
+		: "cc"				\
+		);				\
+} while (0)
+
+static inline void arch_local_cli(void)
+{
+	asm volatile(
+		"	and	%0,epsw		\n"
+		"	nop			\n"
+		"	nop			\n"
+		"	nop			\n"
+		:
+		: "i"(~EPSW_IE)
+		: "memory"
+		);
+}
+
+static inline unsigned long arch_local_cli_save(void)
+{
+	unsigned long flags = arch_local_save_flags();
+	arch_local_cli();
+	return flags;
+}
+
+static inline void arch_local_sti(void)
+{
+	asm volatile(
+		"	or	%0,epsw		\n"
+		:
+		: "i"(EPSW_IE)
+		: "memory");
+}
+
+static inline void arch_local_change_intr_mask_level(unsigned long level)
+{
+	asm volatile(
+		"	and	%0,epsw		\n"
+		"	or	%1,epsw		\n"
+		:
+		: "i"(~EPSW_IM), "i"(EPSW_IE | level)
+		: "cc", "memory");
+}
+
+#else /* !__ASSEMBLY__ */
+
+#define LOCAL_SAVE_FLAGS(reg)			\
+	mov	epsw,reg
+
+#define LOCAL_IRQ_DISABLE				\
+	and	~EPSW_IM,epsw;				\
+	or	EPSW_IE|MN10300_CLI_LEVEL,epsw;		\
+	nop;						\
+	nop;						\
+	nop
+
+#define LOCAL_IRQ_ENABLE		\
+	or	EPSW_IE|EPSW_IM_7,epsw
+
+#define LOCAL_IRQ_RESTORE(reg)	\
+	mov	reg,epsw
+
+#define LOCAL_CLI_SAVE(reg)	\
+	mov	epsw,reg;	\
+	and	~EPSW_IE,epsw;	\
+	nop;			\
+	nop;			\
+	nop
+
+#define LOCAL_CLI		\
+	and	~EPSW_IE,epsw;	\
+	nop;			\
+	nop;			\
+	nop
+
+#define LOCAL_STI		\
+	or	EPSW_IE,epsw
+
+#define LOCAL_CHANGE_INTR_MASK_LEVEL(level)	\
+	and	~EPSW_IM,epsw;			\
+	or	EPSW_IE|(level),epsw
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_IRQFLAGS_H */
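
A short usage sketch (illustrative only) of the new cli/sti helpers above;
arch_local_cli_save() masks only EPSW.IE, so pairing it with
arch_local_irq_restore() puts back exactly the saved IE/IM state:

	static inline void example_critical_section(void)
	{
		unsigned long flags;

		flags = arch_local_cli_save();	/* clear IE, leave IM alone */
		/* ... touch state shared with the IRQ entry path ... */
		arch_local_irq_restore(flags);	/* restore saved EPSW bits */
	}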

+ 48 - 25
arch/mn10300/include/asm/mmu_context.h

@@ -27,28 +27,38 @@
 #include <asm/tlbflush.h>
 #include <asm-generic/mm_hooks.h>

+#define MMU_CONTEXT_TLBPID_NR		256
 #define MMU_CONTEXT_TLBPID_MASK		0x000000ffUL
 #define MMU_CONTEXT_VERSION_MASK	0xffffff00UL
 #define MMU_CONTEXT_FIRST_VERSION	0x00000100UL
 #define MMU_NO_CONTEXT			0x00000000UL
-
-extern unsigned long mmu_context_cache[NR_CPUS];
-#define mm_context(mm)	(mm->context.tlbpid[smp_processor_id()])
+#define MMU_CONTEXT_TLBPID_LOCK_NR	0

 #define enter_lazy_tlb(mm, tsk)	do {} while (0)

+static inline void cpu_ran_vm(int cpu, struct mm_struct *mm)
+{
 #ifdef CONFIG_SMP
-#define cpu_ran_vm(cpu, mm) \
-	cpumask_set_cpu((cpu), mm_cpumask(mm))
-#define cpu_maybe_ran_vm(cpu, mm) \
-	cpumask_test_and_set_cpu((cpu), mm_cpumask(mm))
+	cpumask_set_cpu(cpu, mm_cpumask(mm));
+#endif
+}
+
+static inline bool cpu_maybe_ran_vm(int cpu, struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+	return cpumask_test_and_set_cpu(cpu, mm_cpumask(mm));
 #else
-#define cpu_ran_vm(cpu, mm)		do {} while (0)
-#define cpu_maybe_ran_vm(cpu, mm)	true
-#endif /* CONFIG_SMP */
+	return true;
+#endif
+}

-/*
- * allocate an MMU context
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+extern unsigned long mmu_context_cache[NR_CPUS];
+#define mm_context(mm)	(mm->context.tlbpid[smp_processor_id()])
+
+/**
+ * allocate_mmu_context - Allocate storage for the arch-specific MMU data
+ * @mm: The userspace VM context being set up
  */
 static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
 {
@@ -58,7 +68,7 @@ static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
 	if (!(mc & MMU_CONTEXT_TLBPID_MASK)) {
 		/* we exhausted the TLB PIDs of this version on this CPU, so we
 		 * flush this CPU's TLB in its entirety and start new cycle */
-		flush_tlb_all();
+		local_flush_tlb_all();

 		/* fix the TLB version if needed (we avoid version #0 so as to
 		 * distinguish MMU_NO_CONTEXT) */
@@ -100,23 +110,35 @@ static inline int init_new_context(struct task_struct *tsk,
 	return 0;
 }

-/*
- * destroy context related info for an mm_struct that is about to be put to
- * rest
- */
-#define destroy_context(mm)	do { } while (0)
-
 /*
  * after we have set current->mm to a new value, this activates the context for
  * the new mm so we see the new mappings.
  */
-static inline void activate_context(struct mm_struct *mm, int cpu)
+static inline void activate_context(struct mm_struct *mm)
 {
 	PIDR = get_mmu_context(mm) & MMU_CONTEXT_TLBPID_MASK;
 }
+#else  /* CONFIG_MN10300_TLB_USE_PIDR */

-/*
- * change between virtual memory sets
+#define init_new_context(tsk, mm)	(0)
+#define activate_context(mm)		local_flush_tlb()
+
+#endif /* CONFIG_MN10300_TLB_USE_PIDR */
+
+/**
+ * destroy_context - Destroy mm context information
+ * @mm: The MM being destroyed.
+ *
+ * Destroy context related info for an mm_struct that is about to be put to
+ * rest
+ */
+#define destroy_context(mm)	do {} while (0)
+
+/**
+ * switch_mm - Change between userspace virtual memory contexts
+ * @prev: The outgoing MM context.
+ * @next: The incoming MM context.
+ * @tsk: The incoming task.
  */
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
@@ -124,11 +146,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	int cpu = smp_processor_id();

 	if (prev != next) {
+#ifdef CONFIG_SMP
+		per_cpu(cpu_tlbstate, cpu).active_mm = next;
+#endif
 		cpu_ran_vm(cpu, next);
-		activate_context(next, cpu);
 		PTBR = (unsigned long) next->pgd;
-	} else if (!cpu_maybe_ran_vm(cpu, next)) {
-		activate_context(next, cpu);
+		activate_context(next);
 	}
 }

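
An illustration (not part of the patch) of how the context numbers above
decompose; each CPU keeps a 32-bit counter whose low byte is the hardware
TLB PID and whose upper 24 bits are a generation number:

	/*
	 *   mc = 0x00000304
	 *        0x000003__  generation (MMU_CONTEXT_VERSION_MASK)
	 *        0x______04  TLB PID    (MMU_CONTEXT_TLBPID_MASK)
	 *
	 * When the low byte wraps, allocate_mmu_context() calls
	 * local_flush_tlb_all() and starts a new generation; skipping
	 * version 0 keeps MMU_NO_CONTEXT (0) from ever matching a live
	 * context.
	 */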

+ 0 - 1
arch/mn10300/include/asm/pgalloc.h

@@ -11,7 +11,6 @@
 #ifndef _ASM_PGALLOC_H
 #define _ASM_PGALLOC_H

-#include <asm/processor.h>
 #include <asm/page.h>
 #include <linux/threads.h>
 #include <linux/mm.h>		/* for struct page */

+ 52 - 36
arch/mn10300/include/asm/pgtable.h

@@ -90,46 +90,58 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
  * The vmalloc() routines also leave a hole of 4kB between each vmalloced
  * area to catch addressing errors.
  */
+#ifndef __ASSEMBLY__
+#define VMALLOC_OFFSET	(8UL * 1024 * 1024)
+#define VMALLOC_START	(0x70000000UL)
+#define VMALLOC_END	(0x7C000000UL)
+#else
 #define VMALLOC_OFFSET	(8 * 1024 * 1024)
 #define VMALLOC_START	(0x70000000)
 #define VMALLOC_END	(0x7C000000)
+#endif

 #ifndef __ASSEMBLY__
 extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
 #endif

-/* IPTEL/DPTEL bit assignments */
-#define _PAGE_BIT_VALID		xPTEL_V_BIT
-#define _PAGE_BIT_ACCESSED	xPTEL_UNUSED1_BIT	/* mustn't be loaded into IPTEL/DPTEL */
-#define _PAGE_BIT_NX		xPTEL_UNUSED2_BIT	/* mustn't be loaded into IPTEL/DPTEL */
-#define _PAGE_BIT_CACHE		xPTEL_C_BIT
-#define _PAGE_BIT_PRESENT	xPTEL_PV_BIT
-#define _PAGE_BIT_DIRTY		xPTEL_D_BIT
-#define _PAGE_BIT_GLOBAL	xPTEL_G_BIT
-
-#define _PAGE_VALID		xPTEL_V
-#define _PAGE_ACCESSED		xPTEL_UNUSED1
-#define _PAGE_NX		xPTEL_UNUSED2		/* no-execute bit */
-#define _PAGE_CACHE		xPTEL_C
-#define _PAGE_PRESENT		xPTEL_PV
-#define _PAGE_DIRTY		xPTEL_D
-#define _PAGE_PROT		xPTEL_PR
-#define _PAGE_PROT_RKNU		xPTEL_PR_ROK
-#define _PAGE_PROT_WKNU		xPTEL_PR_RWK
-#define _PAGE_PROT_RKRU		xPTEL_PR_ROK_ROU
-#define _PAGE_PROT_WKRU		xPTEL_PR_RWK_ROU
-#define _PAGE_PROT_WKWU		xPTEL_PR_RWK_RWU
-#define _PAGE_GLOBAL		xPTEL_G
-#define _PAGE_PSE		xPTEL_PS_4Mb		/* 4MB page */
-
-#define _PAGE_FILE		xPTEL_UNUSED1_BIT	/* set:pagecache unset:swap */
-
-#define __PAGE_PROT_UWAUX	0x040
-#define __PAGE_PROT_USER	0x080
-#define __PAGE_PROT_WRITE	0x100
+/* IPTEL2/DPTEL2 bit assignments */
+#define _PAGE_BIT_VALID		xPTEL2_V_BIT
+#define _PAGE_BIT_CACHE		xPTEL2_C_BIT
+#define _PAGE_BIT_PRESENT	xPTEL2_PV_BIT
+#define _PAGE_BIT_DIRTY		xPTEL2_D_BIT
+#define _PAGE_BIT_GLOBAL	xPTEL2_G_BIT
+#define _PAGE_BIT_ACCESSED	xPTEL2_UNUSED1_BIT	/* mustn't be loaded into IPTEL2/DPTEL2 */
+
+#define _PAGE_VALID		xPTEL2_V
+#define _PAGE_CACHE		xPTEL2_C
+#define _PAGE_PRESENT		xPTEL2_PV
+#define _PAGE_DIRTY		xPTEL2_D
+#define _PAGE_PROT		xPTEL2_PR
+#define _PAGE_PROT_RKNU		xPTEL2_PR_ROK
+#define _PAGE_PROT_WKNU		xPTEL2_PR_RWK
+#define _PAGE_PROT_RKRU		xPTEL2_PR_ROK_ROU
+#define _PAGE_PROT_WKRU		xPTEL2_PR_RWK_ROU
+#define _PAGE_PROT_WKWU		xPTEL2_PR_RWK_RWU
+#define _PAGE_GLOBAL		xPTEL2_G
+#define _PAGE_PS_MASK		xPTEL2_PS
+#define _PAGE_PS_4Kb		xPTEL2_PS_4Kb
+#define _PAGE_PS_128Kb		xPTEL2_PS_128Kb
+#define _PAGE_PS_1Kb		xPTEL2_PS_1Kb
+#define _PAGE_PS_4Mb		xPTEL2_PS_4Mb
+#define _PAGE_PSE		xPTEL2_PS_4Mb		/* 4MB page */
+#define _PAGE_CACHE_WT		xPTEL2_CWT
+#define _PAGE_ACCESSED		xPTEL2_UNUSED1
+#define _PAGE_NX		0			/* no-execute bit */
+
+/* If _PAGE_VALID is clear, we use these: */
+#define _PAGE_FILE		xPTEL2_C	/* set:pagecache unset:swap */
+#define _PAGE_PROTNONE		0x000		/* If not present */
+
+#define __PAGE_PROT_UWAUX	0x010
+#define __PAGE_PROT_USER	0x020
+#define __PAGE_PROT_WRITE	0x040

 #define _PAGE_PRESENTV		(_PAGE_PRESENT|_PAGE_VALID)
-#define _PAGE_PROTNONE		0x000	/* If not present */

 #ifndef __ASSEMBLY__

@@ -170,6 +182,9 @@ extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
 #define PAGE_KERNEL_LARGE	__pgprot(__PAGE_KERNEL_LARGE)
 #define PAGE_KERNEL_LARGE_EXEC	__pgprot(__PAGE_KERNEL_LARGE_EXEC)

+#define __PAGE_USERIO		(__PAGE_KERNEL_BASE | _PAGE_PROT_WKWU | _PAGE_NX)
+#define PAGE_USERIO		__pgprot(__PAGE_USERIO)
+
 /*
  * Whilst the MN10300 can do page protection for execute (given separate data
  * and insn TLBs), we are not supporting it at the moment. Write permission,
@@ -323,11 +338,7 @@ static inline int pte_exec_kernel(pte_t pte)
 	return 1;
 }

-/*
- * Bits 0 and 1 are taken, split up the 29 bits of offset
- * into this range:
- */
-#define PTE_FILE_MAX_BITS	29
+#define PTE_FILE_MAX_BITS	30

 #define pte_to_pgoff(pte)	(pte_val(pte) >> 2)
 #define pgoff_to_pte(off)	__pte((off) << 2 | _PAGE_FILE)
@@ -373,8 +384,13 @@ static inline void ptep_mkdirty(pte_t *ptep)
  * Macro to mark a page protection value as "uncacheable".  On processors which
  * do not support it, this is a no-op.
  */
-#define pgprot_noncached(prot)	__pgprot(pgprot_val(prot) | _PAGE_CACHE)
+#define pgprot_noncached(prot)	__pgprot(pgprot_val(prot) & ~_PAGE_CACHE)

+/*
+ * Macro to mark a page protection value as "Write-Through".
+ * On processors which do not support it, this is a no-op.
+ */
+#define pgprot_through(prot)	__pgprot(pgprot_val(prot) | _PAGE_CACHE_WT)

 /*
  * Conversion functions: convert a page and protection to a page entry,
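
A worked example (illustrative only) of the non-linear file-PTE encoding
above: the page offset is shifted past bits 0-1, which hold the valid bit
and _PAGE_FILE respectively, leaving the advertised PTE_FILE_MAX_BITS == 30
bits of offset:

	/*
	 *   pgoff_to_pte(0x12345) == __pte((0x12345 << 2) | _PAGE_FILE)
	 *   pte_to_pgoff(pte)     == pte_val(pte) >> 2 == 0x12345
	 */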

+ 39 - 27
arch/mn10300/include/asm/processor.h

@@ -13,10 +13,13 @@
 #ifndef _ASM_PROCESSOR_H
 #define _ASM_PROCESSOR_H

+#include <linux/threads.h>
+#include <linux/thread_info.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
 #include <asm/cpu-regs.h>
-#include <linux/threads.h>
+#include <asm/uaccess.h>
+#include <asm/current.h>

 /* Forward declaration, a strange C thing */
 struct task_struct;
@@ -33,6 +36,8 @@ struct mm_struct;
 	__pc;					\
 })

+extern void get_mem_info(unsigned long *mem_base, unsigned long *mem_size);
+
 extern void show_registers(struct pt_regs *regs);

 /*
@@ -43,17 +48,22 @@ extern void show_registers(struct pt_regs *regs);

 struct mn10300_cpuinfo {
 	int		type;
-	unsigned long	loops_per_sec;
+	unsigned long	loops_per_jiffy;
 	char		hard_math;
-	unsigned long	*pgd_quick;
-	unsigned long	*pte_quick;
-	unsigned long	pgtable_cache_sz;
 };

 extern struct mn10300_cpuinfo boot_cpu_data;

+#ifdef CONFIG_SMP
+#if CONFIG_NR_CPUS < 2 || CONFIG_NR_CPUS > 8
+# error Sorry, NR_CPUS should be 2 to 8
+#endif
+extern struct mn10300_cpuinfo cpu_data[];
+#define current_cpu_data cpu_data[smp_processor_id()]
+#else  /* CONFIG_SMP */
 #define cpu_data &boot_cpu_data
 #define current_cpu_data boot_cpu_data
+#endif /* CONFIG_SMP */

 extern void identify_cpu(struct mn10300_cpuinfo *);
 extern void print_cpu_info(struct mn10300_cpuinfo *);
@@ -76,10 +86,6 @@ extern void dodgy_tsc(void);
  */
 #define TASK_UNMAPPED_BASE	0x30000000

-typedef struct {
-	unsigned long	seg;
-} mm_segment_t;
-
 struct fpu_state_struct {
 	unsigned long	fs[32];		/* fpu registers */
 	unsigned long	fpcr;		/* fpu control register */
@@ -92,20 +98,19 @@ struct thread_struct {
 	unsigned long		a3;		/* kernel FP */
 	unsigned long		wchan;
 	unsigned long		usp;
-	struct pt_regs		*__frame;
 	unsigned long		fpu_flags;
 #define THREAD_USING_FPU	0x00000001	/* T if this task is using the FPU */
+#define THREAD_HAS_FPU		0x00000002	/* T if this task owns the FPU right now */
 	struct fpu_state_struct	fpu_state;
 };

-#define INIT_THREAD				\
-{						\
-	.uregs		= init_uregs,		\
-	.pc		= 0,			\
-	.sp		= 0,			\
-	.a3		= 0,			\
-	.wchan		= 0,			\
-	.__frame	= NULL,			\
+#define INIT_THREAD		\
+{				\
+	.uregs	= init_uregs,	\
+	.pc	= 0,		\
+	.sp	= 0,		\
+	.a3	= 0,		\
+	.wchan	= 0,		\
 }

 #define INIT_MMAP \
@@ -117,13 +122,20 @@ struct thread_struct {
  * - need to discard the frame stacked by the kernel thread invoking the execve
  *   syscall (see RESTORE_ALL macro)
  */
-#define start_thread(regs, new_pc, new_sp) do {		\
-	set_fs(USER_DS);				\
-	__frame = current->thread.uregs;		\
-	__frame->epsw = EPSW_nSL | EPSW_IE | EPSW_IM;	\
-	__frame->pc = new_pc;				\
-	__frame->sp = new_sp;				\
-} while (0)
+static inline void start_thread(struct pt_regs *regs,
+				unsigned long new_pc, unsigned long new_sp)
+{
+	struct thread_info *ti = current_thread_info();
+	struct pt_regs *frame0;
+	set_fs(USER_DS);
+
+	frame0 = thread_info_to_uregs(ti);
+	frame0->epsw = EPSW_nSL | EPSW_IE | EPSW_IM;
+	frame0->pc = new_pc;
+	frame0->sp = new_sp;
+	ti->frame = frame0;
+}
+

 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
@@ -157,7 +169,7 @@ unsigned long get_wchan(struct task_struct *p);

 static inline void prefetch(const void *x)
 {
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
 #ifdef CONFIG_MN10300_PROC_MN103E010
 	asm volatile ("nop; nop; dcpf (%0)" : : "r"(x));
 #else
@@ -168,7 +180,7 @@ static inline void prefetch(const void *x)

 static inline void prefetchw(const void *x)
 {
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
 #ifdef CONFIG_MN10300_PROC_MN103E010
 	asm volatile ("nop; nop; dcpf (%0)" : : "r"(x));
 #else

+ 2 - 11
arch/mn10300/include/asm/ptrace.h

@@ -40,7 +40,6 @@
 #define	PT_PC		26
 #define NR_PTREGS	27

-#ifndef __ASSEMBLY__
 /*
  * This defines the way registers are stored in the event of an exception
  * - the strange order is due to the MOVM instruction
@@ -75,7 +74,6 @@ struct pt_regs {
 	unsigned long		epsw;
 	unsigned long		pc;
 };
-#endif

 /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
 #define PTRACE_GETREGS            12
@@ -86,12 +84,7 @@ struct pt_regs {
 /* options set using PTRACE_SETOPTIONS */
 #define PTRACE_O_TRACESYSGOOD     0x00000001

-#if defined(__KERNEL__)
-
-extern struct pt_regs *__frame;		/* current frame pointer */
-
-#if !defined(__ASSEMBLY__)
-struct task_struct;
+#ifdef __KERNEL__

 #define user_mode(regs)			(((regs)->epsw & EPSW_nSL) == EPSW_nSL)
 #define instruction_pointer(regs)	((regs)->pc)
@@ -100,9 +93,7 @@ extern void show_regs(struct pt_regs *);

 #define arch_has_single_step()	(1)

-#endif  /*  !__ASSEMBLY  */
-
 #define profile_pc(regs) ((regs)->pc)

-#endif  /*  __KERNEL__  */
+#endif /* __KERNEL__  */
 #endif /* _ASM_PTRACE_H */

+ 1 - 1
arch/mn10300/include/asm/reset-regs.h

@@ -50,7 +50,7 @@ static inline void mn10300_proc_hard_reset(void)
 	RSTCTR |= RSTCTR_CHIPRST;
 }

-extern unsigned int watchdog_alert_counter;
+extern unsigned int watchdog_alert_counter[];

 extern void watchdog_go(void);
 extern asmlinkage void watchdog_handler(void);

+ 0 - 11
arch/mn10300/include/asm/rtc.h

@@ -15,25 +15,14 @@

 #include <linux/init.h>

-extern void check_rtc_time(void);
 extern void __init calibrate_clock(void);
-extern unsigned long __init get_initial_rtc_time(void);

 #else /* !CONFIG_MN10300_RTC */

-static inline void check_rtc_time(void)
-{
-}
-
 static inline void calibrate_clock(void)
 {
 }

-static inline unsigned long get_initial_rtc_time(void)
-{
-	return 0;
-}
-
 #endif /* !CONFIG_MN10300_RTC */

 #include <asm-generic/rtc.h>

+ 125 - 0
arch/mn10300/include/asm/rwlock.h

@@ -0,0 +1,125 @@
+/*
+ * Helpers used by both rw spinlocks and rw semaphores.
+ *
+ * Based in part on code from semaphore.h and
+ * spinlock.h Copyright 1996 Linus Torvalds.
+ *
+ * Copyright 1999 Red Hat, Inc.
+ *
+ * Written by Benjamin LaHaise.
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 13-Nov-2006 MEI Temporarily delete lock functions for SMP support.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+#ifndef _ASM_RWLOCK_H
+#define _ASM_RWLOCK_H
+
+#define RW_LOCK_BIAS		 0x01000000
+
+#ifndef CONFIG_SMP
+
+typedef struct { unsigned long a[100]; } __dummy_lock_t;
+#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
+
+#define RW_LOCK_BIAS_STR	"0x01000000"
+
+#define __build_read_lock_ptr(rw, helper)				\
+	do {								\
+		asm volatile(						\
+			"	mov	(%0),d3			\n"	\
+			"	sub	1,d3			\n"	\
+			"	mov	d3,(%0)			\n"	\
+			"	blt	1f			\n"	\
+			"	bra	2f			\n"	\
+			"1:	jmp	3f			\n"	\
+			"2:					\n"	\
+			"	.section .text.lock,\"ax\"	\n"	\
+			"3:	call	"helper"[],0		\n"	\
+			"	jmp	2b			\n"	\
+			"	.previous"				\
+			:						\
+			: "d" (rw)					\
+			: "memory", "d3", "cc");			\
+	} while (0)
+
+#define __build_read_lock_const(rw, helper)				\
+	do {								\
+		asm volatile(						\
+			"	mov	(%0),d3			\n"	\
+			"	sub	1,d3			\n"	\
+			"	mov	d3,(%0)			\n"	\
+			"	blt	1f			\n"	\
+			"	bra	2f			\n"	\
+			"1:	jmp	3f			\n"	\
+			"2:					\n"	\
+			"	.section .text.lock,\"ax\"	\n"	\
+			"3:	call	"helper"[],0		\n"	\
+			"	jmp	2b			\n"	\
+			"	.previous"				\
+			:						\
+			: "d" (rw)					\
+			: "memory", "d3", "cc");			\
+	} while (0)
+
+#define __build_read_lock(rw, helper) \
+	do {								\
+		if (__builtin_constant_p(rw))				\
+			__build_read_lock_const(rw, helper);		\
+		else							\
+			__build_read_lock_ptr(rw, helper);		\
+	} while (0)
+
+#define __build_write_lock_ptr(rw, helper)				\
+	do {								\
+		asm volatile(						\
+			"	mov	(%0),d3			\n"	\
+			"	sub	1,d3			\n"	\
+			"	mov	d3,(%0)			\n"	\
+			"	blt	1f			\n"	\
+			"	bra	2f			\n"	\
+			"1:	jmp	3f			\n"	\
+			"2:					\n"	\
+			"	.section .text.lock,\"ax\"	\n"	\
+			"3:	call	"helper"[],0		\n"	\
+			"	jmp	2b			\n"	\
+			"	.previous"				\
+			:						\
+			: "d" (rw)					\
+			: "memory", "d3", "cc");			\
+	} while (0)
+
+#define __build_write_lock_const(rw, helper)				\
+	do {								\
+		asm volatile(						\
+			"	mov	(%0),d3			\n"	\
+			"	sub	1,d3			\n"	\
+			"	mov	d3,(%0)			\n"	\
+			"	blt	1f			\n"	\
+			"	bra	2f			\n"	\
+			"1:	jmp	3f			\n"	\
+			"2:					\n"	\
+			"	.section .text.lock,\"ax\"	\n"	\
+			"3:	call	"helper"[],0		\n"	\
+			"	jmp	2b			\n"	\
+			"	.previous"				\
+			:						\
+			: "d" (rw)					\
+			: "memory", "d3", "cc");			\
+	} while (0)
+
+#define __build_write_lock(rw, helper)					\
+	do {								\
+		if (__builtin_constant_p(rw))				\
+			__build_write_lock_const(rw, helper);		\
+		else							\
+			__build_write_lock_ptr(rw, helper);		\
+	} while (0)
+
+#endif /* CONFIG_SMP */
+#endif /* _ASM_RWLOCK_H */
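
A brief usage sketch (illustrative only) of the lock-building macros above.
"__read_lock_failed" is a hypothetical out-of-line helper: the macros
decrement the lock word inline and only call the helper, via the .text.lock
section, when the count goes negative:

	static inline void example_read_lock(unsigned int *lock_word)
	{
		__build_read_lock(lock_word, "__read_lock_failed");
	}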

+ 41 - 10
arch/mn10300/include/asm/serial-regs.h

@@ -20,18 +20,25 @@
 /* serial port 0 */
 #define	SC0CTR			__SYSREG(0xd4002000, u16)	/* control reg */
 #define	SC01CTR_CK		0x0007	/* clock source select */
-#define	SC0CTR_CK_TM8UFLOW_8	0x0000	/* - 1/8 timer 8 underflow (serial port 0 only) */
-#define	SC1CTR_CK_TM9UFLOW_8	0x0000	/* - 1/8 timer 9 underflow (serial port 1 only) */
 #define	SC01CTR_CK_IOCLK_8	0x0001	/* - 1/8 IOCLK */
 #define	SC01CTR_CK_IOCLK_32	0x0002	/* - 1/32 IOCLK */
+#define	SC01CTR_CK_EXTERN_8	0x0006	/* - 1/8 external clock */
+#define	SC01CTR_CK_EXTERN	0x0007	/* - external clock */
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+#define	SC0CTR_CK_TM8UFLOW_8	0x0000	/* - 1/8 timer 8 underflow (serial port 0 only) */
 #define	SC0CTR_CK_TM2UFLOW_2	0x0003	/* - 1/2 timer 2 underflow (serial port 0 only) */
-#define	SC1CTR_CK_TM3UFLOW_2	0x0003	/* - 1/2 timer 3 underflow (serial port 1 only) */
-#define	SC0CTR_CK_TM0UFLOW_8	0x0004	/* - 1/8 timer 1 underflow (serial port 0 only) */
-#define	SC1CTR_CK_TM1UFLOW_8	0x0004	/* - 1/8 timer 2 underflow (serial port 1 only) */
+#define	SC0CTR_CK_TM0UFLOW_8	0x0004	/* - 1/8 timer 0 underflow (serial port 0 only) */
 #define	SC0CTR_CK_TM2UFLOW_8	0x0005	/* - 1/8 timer 2 underflow (serial port 0 only) */
+#define	SC1CTR_CK_TM9UFLOW_8	0x0000	/* - 1/8 timer 9 underflow (serial port 1 only) */
+#define	SC1CTR_CK_TM3UFLOW_2	0x0003	/* - 1/2 timer 3 underflow (serial port 1 only) */
+#define	SC1CTR_CK_TM1UFLOW_8	0x0004	/* - 1/8 timer 1 underflow (serial port 1 only) */
 #define	SC1CTR_CK_TM3UFLOW_8	0x0005	/* - 1/8 timer 3 underflow (serial port 1 only) */
-#define	SC01CTR_CK_EXTERN_8	0x0006	/* - 1/8 external closk */
-#define	SC01CTR_CK_EXTERN	0x0007	/* - external closk */
+#else  /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+#define	SC0CTR_CK_TM8UFLOW_8	0x0000	/* - 1/8 timer 8 underflow (serial port 0 only) */
+#define	SC0CTR_CK_TM0UFLOW_8	0x0004	/* - 1/8 timer 0 underflow (serial port 0 only) */
+#define	SC0CTR_CK_TM2UFLOW_8	0x0005	/* - 1/8 timer 2 underflow (serial port 0 only) */
+#define	SC1CTR_CK_TM12UFLOW_8	0x0000	/* - 1/8 timer 12 underflow (serial port 1 only) */
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
 #define	SC01CTR_STB		0x0008	/* stop bit select */
 #define	SC01CTR_STB_1BIT	0x0000	/* - 1 stop bit */
 #define	SC01CTR_STB_2BIT	0x0008	/* - 2 stop bits */
@@ -100,11 +107,23 @@

 /* serial port 2 */
 #define	SC2CTR			__SYSREG(0xd4002020, u16)	/* control reg */
+#ifdef CONFIG_AM33_2
 #define	SC2CTR_CK		0x0003	/* clock source select */
 #define	SC2CTR_CK_TM10UFLOW	0x0000	/* - timer 10 underflow */
 #define	SC2CTR_CK_TM2UFLOW	0x0001	/* - timer 2 underflow */
 #define	SC2CTR_CK_EXTERN	0x0002	/* - external clock */
 #define	SC2CTR_CK_TM3UFLOW	0x0003	/* - timer 3 underflow */
+#else  /* CONFIG_AM33_2 */
+#define	SC2CTR_CK		0x0007	/* clock source select */
+#define	SC2CTR_CK_TM9UFLOW_8	0x0000	/* - 1/8 timer 9 underflow */
+#define	SC2CTR_CK_IOCLK_8	0x0001	/* - 1/8 IOCLK */
+#define	SC2CTR_CK_IOCLK_32	0x0002	/* - 1/32 IOCLK */
+#define	SC2CTR_CK_TM3UFLOW_2	0x0003	/* - 1/2 timer 3 underflow */
+#define	SC2CTR_CK_TM1UFLOW_8	0x0004	/* - 1/8 timer 1 underflow */
+#define	SC2CTR_CK_TM3UFLOW_8	0x0005	/* - 1/8 timer 3 underflow */
+#define	SC2CTR_CK_EXTERN_8	0x0006	/* - 1/8 external clock */
+#define	SC2CTR_CK_EXTERN	0x0007	/* - external clock */
+#endif /* CONFIG_AM33_2 */
 #define	SC2CTR_STB		0x0008	/* stop bit select */
 #define	SC2CTR_STB_1BIT		0x0000	/* - 1 stop bit */
 #define	SC2CTR_STB_2BIT		0x0008	/* - 2 stop bits */
@@ -134,9 +153,14 @@
 #define SC2ICR_RES		0x04	/* receive error select */
 #define SC2ICR_RI		0x01	/* receive interrupt cause */

-#define	SC2TXB			__SYSREG(0xd4002018, u8)	/* transmit buffer reg */
-#define	SC2RXB			__SYSREG(0xd4002019, u8)	/* receive buffer reg */
-#define	SC2STR			__SYSREG(0xd400201c, u8)	/* status reg */
+#define	SC2TXB			__SYSREG(0xd4002028, u8)	/* transmit buffer reg */
+#define	SC2RXB			__SYSREG(0xd4002029, u8)	/* receive buffer reg */
+
+#ifdef CONFIG_AM33_2
+#define	SC2STR			__SYSREG(0xd400202c, u8)	/* status reg */
+#else  /* CONFIG_AM33_2 */
+#define	SC2STR			__SYSREG(0xd400202c, u16)	/* status reg */
+#endif /* CONFIG_AM33_2 */
 #define SC2STR_OEF		0x0001	/* overrun error found */
 #define SC2STR_PEF		0x0002	/* parity error found */
 #define SC2STR_FEF		0x0004	/* framing error found */
@@ -146,10 +170,17 @@
 #define SC2STR_RXF		0x0040	/* receive status */
 #define SC2STR_TXF		0x0080	/* transmit status */

+#ifdef CONFIG_AM33_2
 #define	SC2TIM			__SYSREG(0xd400202d, u8)	/* status reg */
+#endif

+#ifdef CONFIG_AM33_2
 #define SC2RXIRQ		24	/* serial 2 Receive IRQ */
 #define SC2TXIRQ		25	/* serial 2 Transmit IRQ */
+#else  /* CONFIG_AM33_2 */
+#define SC2RXIRQ		68	/* serial 2 Receive IRQ */
+#define SC2TXIRQ		69	/* serial 2 Transmit IRQ */
+#endif /* CONFIG_AM33_2 */

 #define	SC2RXICR		GxICR(SC2RXIRQ)	/* serial 2 receive intr ctrl reg */
 #define	SC2TXICR		GxICR(SC2TXIRQ)	/* serial 2 transmit intr ctrl reg */

+ 4 - 4
arch/mn10300/include/asm/serial.h

@@ -9,10 +9,8 @@
  * 2 of the Licence, or (at your option) any later version.
  */

-/*
- * The ASB2305 has an 18.432 MHz clock the UART
- */
-#define BASE_BAUD	(18432000 / 16)
+#ifndef _ASM_SERIAL_H
+#define _ASM_SERIAL_H

 /* Standard COM flags (except for COM4, because of the 8514 problem) */
 #ifdef CONFIG_SERIAL_DETECT_IRQ
@@ -34,3 +32,5 @@
 #endif

 #include <unit/serial.h>
+
+#endif /* _ASM_SERIAL_H */

+ 89 - 2
arch/mn10300/include/asm/smp.h

@@ -3,6 +3,16 @@
  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ *  13-Nov-2006 MEI Define IPI-IRQ number and add inline/macro function
+ *                  for SMP support.
+ *  22-Jan-2007 MEI Add the define related to SMP_BOOT_IRQ.
+ *  23-Feb-2007 MEI Add the define related to SMP icache invalidate.
+ *  23-Jun-2008 MEI Delete INTC_IPI.
+ *  22-Jul-2008 MEI Add smp_nmi_call_function and related defines.
+ *  04-Aug-2008 MEI Delete USE_DOIRQ_CACHE_IPI.
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public Licence
  * as published by the Free Software Foundation; either version
@@ -11,8 +21,85 @@
 #ifndef _ASM_SMP_H
 #define _ASM_SMP_H
 
-#ifdef CONFIG_SMP
-#error SMP not yet supported for MN10300
+#ifndef __ASSEMBLY__
+#include <linux/threads.h>
+#include <linux/cpumask.h>
 #endif
 
+#ifdef CONFIG_SMP
+#include <proc/smp-regs.h>
+
+#define RESCHEDULE_IPI		63
+#define CALL_FUNC_SINGLE_IPI	192
+#define LOCAL_TIMER_IPI		193
+#define FLUSH_CACHE_IPI		194
+#define CALL_FUNCTION_NMI_IPI	195
+#define GDB_NMI_IPI		196
+
+#define SMP_BOOT_IRQ		195
+
+#define RESCHEDULE_GxICR_LV	GxICR_LEVEL_6
+#define CALL_FUNCTION_GxICR_LV	GxICR_LEVEL_4
+#define LOCAL_TIMER_GxICR_LV	GxICR_LEVEL_4
+#define FLUSH_CACHE_GxICR_LV	GxICR_LEVEL_0
+#define SMP_BOOT_GxICR_LV	GxICR_LEVEL_0
+
+#define TIME_OUT_COUNT_BOOT_IPI	100
+#define DELAY_TIME_BOOT_IPI	75000
+
+
+#ifndef __ASSEMBLY__
+
+/**
+ * raw_smp_processor_id - Determine the raw CPU ID of the CPU running it
+ *
+ * What we really want to do is to use the CPUID hardware CPU register to get
+ * this information, but accesses to that aren't cached, and run at system bus
+ * speed, not CPU speed.  A copy of this value is, however, stored in the
+ * thread_info struct, and that can be cached.
+ *
+ * An alternate way of dealing with this could be to use the EPSW.S bits to
+ * cache this information for systems with up to four CPUs.
+ */
+#if 0
+#define raw_smp_processor_id()	(CPUID)
+#else
+#define raw_smp_processor_id()	(current_thread_info()->cpu)
 #endif
+
+static inline int cpu_logical_map(int cpu)
+{
+	return cpu;
+}
+
+static inline int cpu_number_map(int cpu)
+{
+	return cpu;
+}
+
+
+extern cpumask_t cpu_boot_map;
+
+extern void smp_init_cpus(void);
+extern void smp_cache_interrupt(void);
+extern void send_IPI_allbutself(int irq);
+extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait);
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#endif /* __ASSEMBLY__ */
+#else /* CONFIG_SMP */
+#ifndef __ASSEMBLY__
+
+static inline void smp_init_cpus(void) {}
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_SMP_H */

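The raw_smp_processor_id() comment above carries the design note: the CPUID register is uncached and runs at system-bus speed, so the kernel reads the cached copy in thread_info instead. A hedged illustration of the consumer side (my_counters is hypothetical, not part of this patch):

/* Illustration only: indexing hypothetical per-CPU state with the cached ID. */
static unsigned long my_counters[NR_CPUS];

static inline void count_event_on_this_cpu(void)
{
	/* current_thread_info()->cpu avoids an uncached CPUID register read */
	my_counters[raw_smp_processor_id()]++;
}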
+ 1 - 0
arch/mn10300/include/asm/smsc911x.h

@@ -0,0 +1 @@
+#include <unit/smsc911x.h>

+ 178 - 1
arch/mn10300/include/asm/spinlock.h

@@ -11,6 +11,183 @@
 #ifndef _ASM_SPINLOCK_H
 #define _ASM_SPINLOCK_H
 
-#error SMP spinlocks not implemented for MN10300
+#include <asm/atomic.h>
+#include <asm/rwlock.h>
+#include <asm/page.h>
 
+/*
+ * Simple spin lock operations.  There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+
+#define arch_spin_is_locked(x)	(*(volatile signed char *)(&(x)->slock) != 0)
+#define arch_spin_unlock_wait(x) do { barrier(); } while (arch_spin_is_locked(x))
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+	asm volatile(
+		"	bclr	1,(0,%0)	\n"
+		:
+		: "a"(&lock->slock)
+		: "memory", "cc");
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+	int ret;
+
+	asm volatile(
+		"	mov	1,%0		\n"
+		"	bset	%0,(%1)		\n"
+		"	bne	1f		\n"
+		"	clr	%0		\n"
+		"1:	xor	1,%0		\n"
+		: "=d"(ret)
+		: "a"(&lock->slock)
+		: "memory", "cc");
+
+	return ret;
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+	asm volatile(
+		"1:	bset	1,(0,%0)	\n"
+		"	bne	1b		\n"
+		:
+		: "a"(&lock->slock)
+		: "memory", "cc");
+}
+
+static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
+					 unsigned long flags)
+{
+	int temp;
+
+	asm volatile(
+		"1:	bset	1,(0,%2)	\n"
+		"	beq	3f		\n"
+		"	mov	%1,epsw		\n"
+		"2:	mov	(0,%2),%0	\n"
+		"	or	%0,%0		\n"
+		"	bne	2b		\n"
+		"	mov	%3,%0		\n"
+		"	mov	%0,epsw		\n"
+		"	nop			\n"
+		"	nop			\n"
+		"	bra	1b\n"
+		"3:				\n"
+		: "=&d" (temp)
+		: "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL)
+		: "memory", "cc");
+}
+
+#ifdef __KERNEL__
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+
+/**
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_read_can_lock(x) ((int)(x)->lock > 0)
+
+/**
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+
+/*
+ * On mn10300, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "contended" bit.
+ */
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+	__build_read_lock(rw, "__read_lock_failed");
+#else
+	{
+		atomic_t *count = (atomic_t *)rw;
+		while (atomic_dec_return(count) < 0)
+			atomic_inc(count);
+	}
+#endif
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+	__build_write_lock(rw, "__write_lock_failed");
+#else
+	{
+		atomic_t *count = (atomic_t *)rw;
+		while (!atomic_sub_and_test(RW_LOCK_BIAS, count))
+			atomic_add(RW_LOCK_BIAS, count);
+	}
+#endif
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+	__build_read_unlock(rw);
+#else
+	{
+		atomic_t *count = (atomic_t *)rw;
+		atomic_inc(count);
+	}
+#endif
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+	__build_write_unlock(rw);
+#else
+	{
+		atomic_t *count = (atomic_t *)rw;
+		atomic_add(RW_LOCK_BIAS, count);
+	}
+#endif
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *lock)
+{
+	atomic_t *count = (atomic_t *)lock;
+	atomic_dec(count);
+	if (atomic_read(count) >= 0)
+		return 1;
+	atomic_inc(count);
+	return 0;
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *lock)
+{
+	atomic_t *count = (atomic_t *)lock;
+	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
+		return 1;
+	atomic_add(RW_LOCK_BIAS, count);
+	return 0;
+}
+
+#define arch_read_lock_flags(lock, flags)  arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
+
+#endif /* __KERNEL__ */
 #endif /* _ASM_SPINLOCK_H */

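The read-write locks above encode "one writer or N readers" in a single signed counter: readers subtract 1, a writer subtracts the whole bias. A user-space model of that scheme, written with C11 atomics standing in for atomic_t; the 0x01000000 bias value is an assumption taken from the usual RW_LOCK_BIAS definition:

#include <stdatomic.h>

#define MODEL_BIAS 0x01000000

static atomic_int model_count = MODEL_BIAS;

static int model_read_trylock(void)
{
	if (atomic_fetch_sub(&model_count, 1) - 1 >= 0)
		return 1;			/* no writer holds the bias */
	atomic_fetch_add(&model_count, 1);	/* back out and fail */
	return 0;
}

static int model_write_trylock(void)
{
	if (atomic_fetch_sub(&model_count, MODEL_BIAS) == MODEL_BIAS)
		return 1;			/* counter was exactly BIAS: lock idle */
	atomic_fetch_add(&model_count, MODEL_BIAS);
	return 0;
}

The sign bit doubles as the "contended" flag: any reader arriving while a writer holds the bias drives the counter negative and backs out, exactly as arch_read_trylock() does above.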
+ 20 - 0
arch/mn10300/include/asm/spinlock_types.h

@@ -0,0 +1,20 @@
+#ifndef _ASM_SPINLOCK_TYPES_H
+#define _ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct arch_spinlock {
+	unsigned int slock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
+
+typedef struct {
+	unsigned int lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED		{ RW_LOCK_BIAS }
+
+#endif /* _ASM_SPINLOCK_TYPES_H */

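A two-line usage note for the new types: static definitions should use the unlocked initializers so an arch_rwlock_t actually starts at RW_LOCK_BIAS (demo_lock/demo_rwlock are hypothetical names):

static arch_spinlock_t demo_lock   = __ARCH_SPIN_LOCK_UNLOCKED;
static arch_rwlock_t   demo_rwlock = __ARCH_RW_LOCK_UNLOCKED;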
+ 22 - 51
arch/mn10300/include/asm/system.h

@@ -12,12 +12,29 @@
 #define _ASM_SYSTEM_H
 
 #include <asm/cpu-regs.h>
+#include <asm/intctl-regs.h>
 
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 
 #include <linux/kernel.h>
 #include <linux/irqflags.h>
+#include <asm/atomic.h>
+
+#if !defined(CONFIG_LAZY_SAVE_FPU)
+struct fpu_state_struct;
+extern asmlinkage void fpu_save(struct fpu_state_struct *);
+#define switch_fpu(prev, next)						\
+	do {								\
+		if ((prev)->thread.fpu_flags & THREAD_HAS_FPU) {	\
+			(prev)->thread.fpu_flags &= ~THREAD_HAS_FPU;	\
+			(prev)->thread.uregs->epsw &= ~EPSW_FE;		\
+			fpu_save(&(prev)->thread.fpu_state);		\
+		}							\
+	} while (0)
+#else
+#define switch_fpu(prev, next) do {} while (0)
+#endif
 
 struct task_struct;
 struct thread_struct;
@@ -30,6 +47,7 @@ struct task_struct *__switch_to(struct thread_struct *prev,
 /* context switching is now performed out-of-line in switch_to.S */
 #define switch_to(prev, next, last)					\
 do {									\
+	switch_fpu(prev, next);						\
 	current->thread.wchan = (u_long) __builtin_return_address(0);	\
 	(last) = __switch_to(&(prev)->thread, &(next)->thread, (prev));	\
 	mb();								\
@@ -40,8 +58,6 @@ do {									\
 
 #define nop() asm volatile ("nop")
 
-#endif /* !__ASSEMBLY__ */
-
 /*
  * Force strict CPU ordering.
  * And yes, this is required on UP too when we're talking
@@ -68,64 +84,19 @@ do {									\
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
 #define smp_wmb()	wmb()
-#else
+#define set_mb(var, value)  do { xchg(&var, value); } while (0)
+#else  /* CONFIG_SMP */
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
-#endif
-
 #define set_mb(var, value)  do { var = value;  mb(); } while (0)
+#endif /* CONFIG_SMP */
+
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 
 #define read_barrier_depends()		do {} while (0)
 #define smp_read_barrier_depends()	do {} while (0)
 
-/*****************************************************************************/
-/*
- * MN10300 doesn't actually have an exchange instruction
- */
-#ifndef __ASSEMBLY__
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
-static inline
-unsigned long __xchg(volatile unsigned long *m, unsigned long val)
-{
-	unsigned long retval;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	retval = *m;
-	*m = val;
-	local_irq_restore(flags);
-	return retval;
-}
-
-#define xchg(ptr, v)						\
-	((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr),	\
-				     (unsigned long)(v)))
-
-static inline unsigned long __cmpxchg(volatile unsigned long *m,
-				      unsigned long old, unsigned long new)
-{
-	unsigned long retval;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	retval = *m;
-	if (retval == old)
-		*m = new;
-	local_irq_restore(flags);
-	return retval;
-}
-
-#define cmpxchg(ptr, o, n)					\
-	((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
-					(unsigned long)(o),	\
-					(unsigned long)(n)))
-
 #endif /* !__ASSEMBLY__ */
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_SYSTEM_H */

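The deleted __xchg()/__cmpxchg() pair shows the old UP-only protocol: mask interrupts, do a plain read-modify-write, unmask (the real helpers are presumably supplied via the newly included <asm/atomic.h> after this series). A user-space model of that protocol, with a mutex standing in for local_irq_save()/restore():

#include <pthread.h>

static pthread_mutex_t fake_irq_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned long model_cmpxchg(volatile unsigned long *m,
				   unsigned long old, unsigned long new)
{
	unsigned long retval;

	pthread_mutex_lock(&fake_irq_lock);	/* ~ local_irq_save() */
	retval = *m;
	if (retval == old)
		*m = new;
	pthread_mutex_unlock(&fake_irq_lock);	/* ~ local_irq_restore() */
	return retval;
}

On a uniprocessor this is atomic because nothing else can run between the masked load and store; it is exactly the property that stops holding under SMP, which is why these helpers leave this header.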
+ 14 - 4
arch/mn10300/include/asm/thread_info.h

@@ -16,10 +16,6 @@
 
 #include <asm/page.h>
 
-#ifndef __ASSEMBLY__
-#include <asm/processor.h>
-#endif
-
 #define PREEMPT_ACTIVE		0x10000000
 
 #ifdef CONFIG_4KSTACKS
@@ -38,10 +34,14 @@
  *   must also be changed
  */
 #ifndef __ASSEMBLY__
+typedef struct {
+	unsigned long	seg;
+} mm_segment_t;
 
 struct thread_info {
 	struct task_struct	*task;		/* main task structure */
 	struct exec_domain	*exec_domain;	/* execution domain */
+	struct pt_regs		*frame;		/* current exception frame */
 	unsigned long		flags;		/* low level flags */
 	__u32			cpu;		/* current CPU */
 	__s32			preempt_count;	/* 0 => preemptable, <0 => BUG */
@@ -55,6 +55,10 @@ struct thread_info {
 	__u8			supervisor_stack[0];
 };
 
+#define thread_info_to_uregs(ti)					\
+	((struct pt_regs *)						\
+	 ((unsigned long)ti + THREAD_SIZE - sizeof(struct pt_regs)))
+
 #else /* !__ASSEMBLY__ */
 
 #ifndef __ASM_OFFSETS_H__
@@ -102,6 +106,12 @@ struct thread_info *current_thread_info(void)
 	return ti;
 }
 
+static inline __attribute__((const))
+struct pt_regs *current_frame(void)
+{
+	return current_thread_info()->frame;
+}
+
 /* how to get the current stack pointer from C */
 static inline unsigned long current_stack_pointer(void)
 {

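thread_info_to_uregs() relies on the exception frame being pushed at the very top of the kernel stack. A worked sketch of the arithmetic — the example addresses and the pt_regs size are illustrative only:

/* uregs = ti + THREAD_SIZE - sizeof(struct pt_regs)
 *
 * e.g. with ti at 0xd0004000, THREAD_SIZE = 0x1000 and a hypothetical
 * 0x50-byte pt_regs, the frame occupies 0xd0004fb0..0xd0004fff.
 */
static inline struct pt_regs *uregs_of(struct thread_info *ti)
{
	return (struct pt_regs *)
		((unsigned long)ti + THREAD_SIZE - sizeof(struct pt_regs));
}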
+ 175 - 16
arch/mn10300/include/asm/timer-regs.h

@@ -17,21 +17,27 @@
 
 #ifdef __KERNEL__
 
-/* timer prescalar control */
+/*
+ * Timer prescaler control
+ */
 #define	TMPSCNT			__SYSREG(0xd4003071, u8) /* timer prescaler control */
 #define	TMPSCNT_ENABLE		0x80	/* timer prescaler enable */
 #define	TMPSCNT_DISABLE		0x00	/* timer prescaler disable */
 
-/* 8 bit timers */
+/*
+ * 8-bit timers
+ */
 #define	TM0MD			__SYSREG(0xd4003000, u8) /* timer 0 mode register */
 #define	TM0MD_SRC		0x07	/* timer source */
 #define	TM0MD_SRC_IOCLK		0x00	/* - IOCLK */
 #define	TM0MD_SRC_IOCLK_8	0x01	/* - 1/8 IOCLK */
 #define	TM0MD_SRC_IOCLK_32	0x02	/* - 1/32 IOCLK */
-#define	TM0MD_SRC_TM2IO		0x03	/* - TM2IO pin input */
 #define	TM0MD_SRC_TM1UFLOW	0x05	/* - timer 1 underflow */
 #define	TM0MD_SRC_TM2UFLOW	0x06	/* - timer 2 underflow */
+#if	defined(CONFIG_AM33_2)
+#define	TM0MD_SRC_TM2IO		0x03	/* - TM2IO pin input */
 #define	TM0MD_SRC_TM0IO		0x07	/* - TM0IO pin input */
+#endif /* CONFIG_AM33_2 */
 #define	TM0MD_INIT_COUNTER	0x40	/* initialize TMnBC = TMnBR */
 #define	TM0MD_COUNT_ENABLE	0x80	/* timer count enable */
 
@@ -43,7 +49,9 @@
 #define	TM1MD_SRC_TM0CASCADE	0x03	/* - cascade with timer 0 */
 #define	TM1MD_SRC_TM0UFLOW	0x04	/* - timer 0 underflow */
 #define	TM1MD_SRC_TM2UFLOW	0x06	/* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define	TM1MD_SRC_TM1IO		0x07	/* - TM1IO pin input */
+#endif	/* CONFIG_AM33_2 */
 #define	TM1MD_INIT_COUNTER	0x40	/* initialize TMnBC = TMnBR */
 #define	TM1MD_COUNT_ENABLE	0x80	/* timer count enable */
 
@@ -55,7 +63,9 @@
 #define	TM2MD_SRC_TM1CASCADE	0x03	/* - cascade with timer 1 */
 #define	TM2MD_SRC_TM0UFLOW	0x04	/* - timer 0 underflow */
 #define	TM2MD_SRC_TM1UFLOW	0x05	/* - timer 1 underflow */
+#if defined(CONFIG_AM33_2)
 #define	TM2MD_SRC_TM2IO		0x07	/* - TM2IO pin input */
+#endif	/* CONFIG_AM33_2 */
 #define	TM2MD_INIT_COUNTER	0x40	/* initialize TMnBC = TMnBR */
 #define	TM2MD_COUNT_ENABLE	0x80	/* timer count enable */
 
@@ -64,11 +74,13 @@
 #define	TM3MD_SRC_IOCLK		0x00	/* - IOCLK */
 #define	TM3MD_SRC_IOCLK_8	0x01	/* - 1/8 IOCLK */
 #define	TM3MD_SRC_IOCLK_32	0x02	/* - 1/32 IOCLK */
-#define	TM3MD_SRC_TM1CASCADE	0x03	/* - cascade with timer 2 */
+#define	TM3MD_SRC_TM2CASCADE	0x03	/* - cascade with timer 2 */
 #define	TM3MD_SRC_TM0UFLOW	0x04	/* - timer 0 underflow */
 #define	TM3MD_SRC_TM1UFLOW	0x05	/* - timer 1 underflow */
 #define	TM3MD_SRC_TM2UFLOW	0x06	/* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define	TM3MD_SRC_TM3IO		0x07	/* - TM3IO pin input */
+#endif	/* CONFIG_AM33_2 */
 #define	TM3MD_INIT_COUNTER	0x40	/* initialize TMnBC = TMnBR */
 #define	TM3MD_COUNT_ENABLE	0x80	/* timer count enable */
 
@@ -96,7 +108,9 @@
 #define	TM2ICR			GxICR(TM2IRQ)	/* timer 2 uflow intr ctrl reg */
 #define	TM3ICR			GxICR(TM3IRQ)	/* timer 3 uflow intr ctrl reg */
 
-/* 16-bit timers 4,5 & 7-11 */
+/*
+ * 16-bit timers 4,5 & 7-15
+ */
 #define	TM4MD			__SYSREG(0xd4003080, u8)   /* timer 4 mode register */
 #define	TM4MD_SRC		0x07	/* timer source */
 #define	TM4MD_SRC_IOCLK		0x00	/* - IOCLK */
@@ -105,7 +119,9 @@
 #define	TM4MD_SRC_TM0UFLOW	0x04	/* - timer 0 underflow */
 #define	TM4MD_SRC_TM1UFLOW	0x05	/* - timer 1 underflow */
 #define	TM4MD_SRC_TM2UFLOW	0x06	/* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define	TM4MD_SRC_TM4IO		0x07	/* - TM4IO pin input */
+#endif	/* CONFIG_AM33_2 */
 #define	TM4MD_INIT_COUNTER	0x40	/* initialize TMnBC = TMnBR */
 #define	TM4MD_COUNT_ENABLE	0x80	/* timer count enable */
 
@@ -118,7 +134,11 @@
 #define	TM5MD_SRC_TM0UFLOW	0x04	/* - timer 0 underflow */
 #define	TM5MD_SRC_TM1UFLOW	0x05	/* - timer 1 underflow */
 #define	TM5MD_SRC_TM2UFLOW	0x06	/* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define	TM5MD_SRC_TM5IO		0x07	/* - TM5IO pin input */
+#else	/* !CONFIG_AM33_2 */
+#define	TM5MD_SRC_TM7UFLOW	0x07	/* - timer 7 underflow */
+#endif	/* CONFIG_AM33_2 */
 #define	TM5MD_INIT_COUNTER	0x40	/* initialize TMnBC = TMnBR */
 #define	TM5MD_COUNT_ENABLE	0x80	/* timer count enable */
 
@@ -130,7 +150,9 @@
 #define	TM7MD_SRC_TM0UFLOW	0x04	/* - timer 0 underflow */
 #define	TM7MD_SRC_TM1UFLOW	0x05	/* - timer 1 underflow */
 #define	TM7MD_SRC_TM2UFLOW	0x06	/* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define	TM7MD_SRC_TM7IO		0x07	/* - TM7IO pin input */
+#endif	/* CONFIG_AM33_2 */
 #define	TM7MD_INIT_COUNTER	0x40	/* initialize TMnBC = TMnBR */
 #define	TM7MD_COUNT_ENABLE	0x80	/* timer count enable */
 
@@ -143,7 +165,11 @@
 #define	TM8MD_SRC_TM0UFLOW	0x04	/* - timer 0 underflow */
 #define	TM8MD_SRC_TM1UFLOW	0x05	/* - timer 1 underflow */
 #define	TM8MD_SRC_TM2UFLOW	0x06	/* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define	TM8MD_SRC_TM8IO		0x07	/* - TM8IO pin input */
+#else	/* !CONFIG_AM33_2 */
+#define	TM8MD_SRC_TM7UFLOW	0x07	/* - timer 7 underflow */
+#endif	/* CONFIG_AM33_2 */
 #define	TM8MD_INIT_COUNTER	0x40	/* initialize TMnBC = TMnBR */
 #define	TM8MD_COUNT_ENABLE	0x80	/* timer count enable */
 
@@ -156,7 +182,11 @@
 #define	TM9MD_SRC_TM0UFLOW	0x04	/* - timer 0 underflow */
 #define	TM9MD_SRC_TM1UFLOW	0x05	/* - timer 1 underflow */
 #define	TM9MD_SRC_TM2UFLOW	0x06	/* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define	TM9MD_SRC_TM9IO		0x07	/* - TM9IO pin input */
+#else	/* !CONFIG_AM33_2 */
+#define	TM9MD_SRC_TM7UFLOW	0x07	/* - timer 7 underflow */
+#endif	/* CONFIG_AM33_2 */
 #define	TM9MD_INIT_COUNTER	0x40	/* initialize TMnBC = TMnBR */
 #define	TM9MD_COUNT_ENABLE	0x80	/* timer count enable */
 
@@ -169,7 +199,11 @@
 #define	TM10MD_SRC_TM0UFLOW	0x04	/* - timer 0 underflow */
 #define	TM10MD_SRC_TM1UFLOW	0x05	/* - timer 1 underflow */
 #define	TM10MD_SRC_TM2UFLOW	0x06	/* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define	TM10MD_SRC_TM10IO	0x07	/* - TM10IO pin input */
+#else	/* !CONFIG_AM33_2 */
+#define	TM10MD_SRC_TM7UFLOW	0x07	/* - timer 7 underflow */
+#endif	/* CONFIG_AM33_2 */
 #define	TM10MD_INIT_COUNTER	0x40	/* initialize TMnBC = TMnBR */
 #define	TM10MD_COUNT_ENABLE	0x80	/* timer count enable */
 
@@ -178,32 +212,101 @@
 #define	TM11MD_SRC_IOCLK	0x00	/* - IOCLK */
 #define	TM11MD_SRC_IOCLK_8	0x01	/* - 1/8 IOCLK */
 #define	TM11MD_SRC_IOCLK_32	0x02	/* - 1/32 IOCLK */
-#define	TM11MD_SRC_TM7CASCADE	0x03	/* - cascade with timer 7 */
 #define	TM11MD_SRC_TM0UFLOW	0x04	/* - timer 0 underflow */
 #define	TM11MD_SRC_TM1UFLOW	0x05	/* - timer 1 underflow */
 #define	TM11MD_SRC_TM2UFLOW	0x06	/* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define	TM11MD_SRC_TM11IO	0x07	/* - TM11IO pin input */
+#else	/* !CONFIG_AM33_2 */
+#define	TM11MD_SRC_TM7UFLOW	0x07	/* - timer 7 underflow */
+#endif	/* CONFIG_AM33_2 */
 #define	TM11MD_INIT_COUNTER	0x40	/* initialize TMnBC = TMnBR */
 #define	TM11MD_COUNT_ENABLE	0x80	/* timer count enable */
 
+#if defined(CONFIG_AM34_2)
+#define	TM12MD			__SYSREG(0xd4003180, u8)   /* timer 12 mode register */
+#define	TM12MD_SRC		0x07	/* timer source */
+#define	TM12MD_SRC_IOCLK	0x00	/* - IOCLK */
+#define	TM12MD_SRC_IOCLK_8	0x01	/* - 1/8 IOCLK */
+#define	TM12MD_SRC_IOCLK_32	0x02	/* - 1/32 IOCLK */
+#define	TM12MD_SRC_TM0UFLOW	0x04	/* - timer 0 underflow */
+#define	TM12MD_SRC_TM1UFLOW	0x05	/* - timer 1 underflow */
+#define	TM12MD_SRC_TM2UFLOW	0x06	/* - timer 2 underflow */
+#define	TM12MD_SRC_TM7UFLOW	0x07	/* - timer 7 underflow */
+#define	TM12MD_INIT_COUNTER	0x40	/* initialize TMnBC = TMnBR */
+#define	TM12MD_COUNT_ENABLE	0x80	/* timer count enable */
+
+#define	TM13MD			__SYSREG(0xd4003182, u8)   /* timer 13 mode register */
+#define	TM13MD_SRC		0x07	/* timer source */
+#define	TM13MD_SRC_IOCLK	0x00	/* - IOCLK */
+#define	TM13MD_SRC_IOCLK_8	0x01	/* - 1/8 IOCLK */
+#define	TM13MD_SRC_IOCLK_32	0x02	/* - 1/32 IOCLK */
+#define	TM13MD_SRC_TM12CASCADE	0x03	/* - cascade with timer 12 */
+#define	TM13MD_SRC_TM0UFLOW	0x04	/* - timer 0 underflow */
+#define	TM13MD_SRC_TM1UFLOW	0x05	/* - timer 1 underflow */
+#define	TM13MD_SRC_TM2UFLOW	0x06	/* - timer 2 underflow */
+#define	TM13MD_SRC_TM7UFLOW	0x07	/* - timer 7 underflow */
+#define	TM13MD_INIT_COUNTER	0x40	/* initialize TMnBC = TMnBR */
+#define	TM13MD_COUNT_ENABLE	0x80	/* timer count enable */
+
+#define	TM14MD			__SYSREG(0xd4003184, u8)   /* timer 14 mode register */
+#define	TM14MD_SRC		0x07	/* timer source */
+#define	TM14MD_SRC_IOCLK	0x00	/* - IOCLK */
+#define	TM14MD_SRC_IOCLK_8	0x01	/* - 1/8 IOCLK */
+#define	TM14MD_SRC_IOCLK_32	0x02	/* - 1/32 IOCLK */
+#define	TM14MD_SRC_TM13CASCADE	0x03	/* - cascade with timer 13 */
+#define	TM14MD_SRC_TM0UFLOW	0x04	/* - timer 0 underflow */
+#define	TM14MD_SRC_TM1UFLOW	0x05	/* - timer 1 underflow */
+#define	TM14MD_SRC_TM2UFLOW	0x06	/* - timer 2 underflow */
+#define	TM14MD_SRC_TM7UFLOW	0x07	/* - timer 7 underflow */
+#define	TM14MD_INIT_COUNTER	0x40	/* initialize TMnBC = TMnBR */
+#define	TM14MD_COUNT_ENABLE	0x80	/* timer count enable */
+
+#define	TM15MD			__SYSREG(0xd4003186, u8)   /* timer 15 mode register */
+#define	TM15MD_SRC		0x07	/* timer source */
+#define	TM15MD_SRC_IOCLK	0x00	/* - IOCLK */
+#define	TM15MD_SRC_IOCLK_8	0x01	/* - 1/8 IOCLK */
+#define	TM15MD_SRC_IOCLK_32	0x02	/* - 1/32 IOCLK */
+#define	TM15MD_SRC_TM0UFLOW	0x04	/* - timer 0 underflow */
+#define	TM15MD_SRC_TM1UFLOW	0x05	/* - timer 1 underflow */
+#define	TM15MD_SRC_TM2UFLOW	0x06	/* - timer 2 underflow */
+#define	TM15MD_SRC_TM7UFLOW	0x07	/* - timer 7 underflow */
+#define	TM15MD_INIT_COUNTER	0x40	/* initialize TMnBC = TMnBR */
+#define	TM15MD_COUNT_ENABLE	0x80	/* timer count enable */
+#endif	/* CONFIG_AM34_2 */
+
+
 #define	TM4BR			__SYSREG(0xd4003090, u16)  /* timer 4 base register */
 #define	TM5BR			__SYSREG(0xd4003092, u16)  /* timer 5 base register */
+#define	TM45BR			__SYSREG(0xd4003090, u32)  /* timer 4:5 base register */
 #define	TM7BR			__SYSREG(0xd4003096, u16)  /* timer 7 base register */
 #define	TM8BR			__SYSREG(0xd4003098, u16)  /* timer 8 base register */
 #define	TM9BR			__SYSREG(0xd400309a, u16)  /* timer 9 base register */
+#define	TM89BR			__SYSREG(0xd4003098, u32)  /* timer 8:9 base register */
 #define	TM10BR			__SYSREG(0xd400309c, u16)  /* timer 10 base register */
 #define	TM11BR			__SYSREG(0xd400309e, u16)  /* timer 11 base register */
-#define	TM45BR			__SYSREG(0xd4003090, u32)  /* timer 4:5 base register */
+#if defined(CONFIG_AM34_2)
+#define	TM12BR			__SYSREG(0xd4003190, u16)  /* timer 12 base register */
+#define	TM13BR			__SYSREG(0xd4003192, u16)  /* timer 13 base register */
+#define	TM14BR			__SYSREG(0xd4003194, u16)  /* timer 14 base register */
+#define	TM15BR			__SYSREG(0xd4003196, u16)  /* timer 15 base register */
+#endif	/* CONFIG_AM34_2 */
 
 #define	TM4BC			__SYSREG(0xd40030a0, u16)  /* timer 4 binary counter */
 #define	TM5BC			__SYSREG(0xd40030a2, u16)  /* timer 5 binary counter */
 #define	TM45BC			__SYSREG(0xd40030a0, u32)  /* timer 4:5 binary counter */
-
 #define	TM7BC			__SYSREG(0xd40030a6, u16)  /* timer 7 binary counter */
 #define	TM8BC			__SYSREG(0xd40030a8, u16)  /* timer 8 binary counter */
 #define	TM9BC			__SYSREG(0xd40030aa, u16)  /* timer 9 binary counter */
+#define	TM89BC			__SYSREG(0xd40030a8, u32)  /* timer 8:9 binary counter */
 #define	TM10BC			__SYSREG(0xd40030ac, u16)  /* timer 10 binary counter */
 #define	TM11BC			__SYSREG(0xd40030ae, u16)  /* timer 11 binary counter */
+#if defined(CONFIG_AM34_2)
+#define	TM12BC			__SYSREG(0xd40031a0, u16)  /* timer 12 binary counter */
+#define	TM13BC			__SYSREG(0xd40031a2, u16)  /* timer 13 binary counter */
+#define	TM14BC			__SYSREG(0xd40031a4, u16)  /* timer 14 binary counter */
+#define	TM15BC			__SYSREG(0xd40031a6, u16)  /* timer 15 binary counter */
+#endif	/* CONFIG_AM34_2 */
 
 #define TM4IRQ			6	/* timer 4 IRQ */
 #define TM5IRQ			7	/* timer 5 IRQ */
@@ -212,6 +315,12 @@
 #define TM9IRQ			13	/* timer 9 IRQ */
 #define TM10IRQ			14	/* timer 10 IRQ */
 #define TM11IRQ			15	/* timer 11 IRQ */
+#if defined(CONFIG_AM34_2)
+#define TM12IRQ			64	/* timer 12 IRQ */
+#define TM13IRQ			65	/* timer 13 IRQ */
+#define TM14IRQ			66	/* timer 14 IRQ */
+#define TM15IRQ			67	/* timer 15 IRQ */
+#endif	/* CONFIG_AM34_2 */
 
 #define	TM4ICR			GxICR(TM4IRQ)	/* timer 4 uflow intr ctrl reg */
 #define	TM5ICR			GxICR(TM5IRQ)	/* timer 5 uflow intr ctrl reg */
@@ -220,8 +329,16 @@
 #define	TM9ICR			GxICR(TM9IRQ)	/* timer 9 uflow intr ctrl reg */
 #define	TM10ICR			GxICR(TM10IRQ)	/* timer 10 uflow intr ctrl reg */
 #define	TM11ICR			GxICR(TM11IRQ)	/* timer 11 uflow intr ctrl reg */
-
-/* 16-bit timer 6 */
+#if defined(CONFIG_AM34_2)
+#define	TM12ICR			GxICR(TM12IRQ)	/* timer 12 uflow intr ctrl reg */
+#define	TM13ICR			GxICR(TM13IRQ)	/* timer 13 uflow intr ctrl reg */
+#define	TM14ICR			GxICR(TM14IRQ)	/* timer 14 uflow intr ctrl reg */
+#define	TM15ICR			GxICR(TM15IRQ)	/* timer 15 uflow intr ctrl reg */
+#endif	/* CONFIG_AM34_2 */
+
+/*
+ * 16-bit timer 6
+ */
 #define	TM6MD			__SYSREG(0xd4003084, u16)  /* timer6 mode register */
 #define	TM6MD_SRC		0x0007	/* timer source */
 #define	TM6MD_SRC_IOCLK		0x0000	/* - IOCLK */
@@ -229,10 +346,14 @@
 #define	TM6MD_SRC_IOCLK_32	0x0002	/* - 1/32 IOCLK */
 #define	TM6MD_SRC_TM0UFLOW	0x0004	/* - timer 0 underflow */
 #define	TM6MD_SRC_TM1UFLOW	0x0005	/* - timer 1 underflow */
-#define	TM6MD_SRC_TM6IOB_BOTH	0x0006	/* - TM6IOB pin input (both edges) */
+#define	TM6MD_SRC_TM2UFLOW	0x0006	/* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
+/* #define	TM6MD_SRC_TM6IOB_BOTH	0x0006 */	/* - TM6IOB pin input (both edges) */
 #define	TM6MD_SRC_TM6IOB_SINGLE	0x0007	/* - TM6IOB pin input (single edge) */
-#define	TM6MD_CLR_ENABLE	0x0010	/* clear count enable */
+#endif	/* CONFIG_AM33_2 */
 #define	TM6MD_ONESHOT_ENABLE	0x0040	/* oneshot count */
+#define	TM6MD_CLR_ENABLE	0x0010	/* clear count enable */
+#if	defined(CONFIG_AM33_2)
 #define	TM6MD_TRIG_ENABLE	0x0080	/* TM6IOB pin trigger enable */
 #define TM6MD_PWM		0x3800	/* PWM output mode */
 #define TM6MD_PWM_DIS		0x0000	/* - disabled */
@@ -240,10 +361,15 @@
 #define	TM6MD_PWM_11BIT		0x1800	/* - 11 bits mode */
 #define	TM6MD_PWM_12BIT		0x3000	/* - 12 bits mode */
 #define	TM6MD_PWM_14BIT		0x3800	/* - 14 bits mode */
+#endif	/* CONFIG_AM33_2 */
+
 #define	TM6MD_INIT_COUNTER	0x4000	/* initialize TMnBC to zero */
 #define	TM6MD_COUNT_ENABLE	0x8000	/* timer count enable */
 
 #define	TM6MDA			__SYSREG(0xd40030b4, u8)   /* timer6 cmp/cap A mode reg */
+#define	TM6MDA_MODE_CMP_SINGLE	0x00	/* - compare, single buffer mode */
+#define	TM6MDA_MODE_CMP_DOUBLE	0x40	/* - compare, double buffer mode */
+#if	defined(CONFIG_AM33_2)
 #define TM6MDA_OUT		0x07	/* output select */
 #define	TM6MDA_OUT_SETA_RESETB	0x00	/* - set at match A, reset at match B */
 #define	TM6MDA_OUT_SETA_RESETOV	0x01	/* - set at match A, reset at overflow */
@@ -251,30 +377,35 @@
 #define	TM6MDA_OUT_RESETA	0x03	/* - reset at match A */
 #define	TM6MDA_OUT_TOGGLE	0x04	/* - toggle on match A */
 #define TM6MDA_MODE		0xc0	/* compare A register mode */
-#define	TM6MDA_MODE_CMP_SINGLE	0x00	/* - compare, single buffer mode */
-#define	TM6MDA_MODE_CMP_DOUBLE	0x40	/* - compare, double buffer mode */
 #define	TM6MDA_MODE_CAP_S_EDGE	0x80	/* - capture, single edge mode */
 #define	TM6MDA_MODE_CAP_D_EDGE	0xc0	/* - capture, double edge mode */
 #define TM6MDA_EDGE		0x20	/* compare A edge select */
 #define	TM6MDA_EDGE_FALLING	0x00	/* capture on falling edge */
 #define	TM6MDA_EDGE_RISING	0x20	/* capture on rising edge */
 #define	TM6MDA_CAPTURE_ENABLE	0x10	/* capture enable */
+#else	/* !CONFIG_AM33_2 */
+#define	TM6MDA_MODE		0x40	/* compare A register mode */
+#endif	/* CONFIG_AM33_2 */
 
 #define	TM6MDB			__SYSREG(0xd40030b5, u8)   /* timer6 cmp/cap B mode reg */
+#define	TM6MDB_MODE_CMP_SINGLE	0x00	/* - compare, single buffer mode */
+#define	TM6MDB_MODE_CMP_DOUBLE	0x40	/* - compare, double buffer mode */
+#if defined(CONFIG_AM33_2)
 #define TM6MDB_OUT		0x07	/* output select */
 #define	TM6MDB_OUT_SETB_RESETA	0x00	/* - set at match B, reset at match A */
 #define	TM6MDB_OUT_SETB_RESETOV	0x01	/* - set at match B */
 #define	TM6MDB_OUT_RESETB	0x03	/* - reset at match B */
 #define	TM6MDB_OUT_TOGGLE	0x04	/* - toggle on match B */
 #define TM6MDB_MODE		0xc0	/* compare B register mode */
-#define	TM6MDB_MODE_CMP_SINGLE	0x00	/* - compare, single buffer mode */
-#define	TM6MDB_MODE_CMP_DOUBLE	0x40	/* - compare, double buffer mode */
 #define	TM6MDB_MODE_CAP_S_EDGE	0x80	/* - capture, single edge mode */
 #define	TM6MDB_MODE_CAP_D_EDGE	0xc0	/* - capture, double edge mode */
 #define TM6MDB_EDGE		0x20	/* compare B edge select */
 #define	TM6MDB_EDGE_FALLING	0x00	/* capture on falling edge */
 #define	TM6MDB_EDGE_RISING	0x20	/* capture on rising edge */
 #define	TM6MDB_CAPTURE_ENABLE	0x10	/* capture enable */
+#else	/* !CONFIG_AM33_2 */
+#define	TM6MDB_MODE		0x40	/* compare B register mode */
+#endif	/* CONFIG_AM33_2 */
 
 #define	TM6CA			__SYSREG(0xd40030c4, u16)   /* timer6 cmp/capture reg A */
 #define	TM6CB			__SYSREG(0xd40030d4, u16)   /* timer6 cmp/capture reg B */
@@ -288,6 +419,34 @@
 #define	TM6AICR			GxICR(TM6AIRQ)	/* timer 6A intr control reg */
 #define	TM6BICR			GxICR(TM6BIRQ)	/* timer 6B intr control reg */
 
+#if defined(CONFIG_AM34_2)
+/*
+ * MTM: OS Tick-Timer
+ */
+#define	TMTMD			__SYSREG(0xd4004100, u8)	/* Tick Timer mode register */
+#define	TMTMD_TMTLDE		0x40	/* initialize TMTBC = TMTBR */
+#define	TMTMD_TMTCNE		0x80	/* timer count enable       */
+
+#define	TMTBR			__SYSREG(0xd4004110, u32)	/* Tick Timer mode reg */
+#define	TMTBC			__SYSREG(0xd4004120, u32)	/* Tick Timer mode reg */
+
+/*
+ * MTM: OS Timestamp-Timer
+ */
+#define	TMSMD			__SYSREG(0xd4004140, u8)	/* Tick Timer mode register */
+#define	TMSMD_TMSLDE		0x40		/* initialize TMSBC = TMSBR */
+#define	TMSMD_TMSCNE		0x80		/* timer count enable       */
+
+#define	TMSBR			__SYSREG(0xd4004150, u32)	/* Tick Timer mode register */
+#define	TMSBC			__SYSREG(0xd4004160, u32)	/* Tick Timer mode register */
+
+#define TMTIRQ			119		/* OS Tick timer   IRQ */
+#define TMSIRQ			120		/* Timestamp timer IRQ */
+
+#define	TMTICR			GxICR(TMTIRQ)	/* OS Tick timer   uflow intr ctrl reg */
+#define	TMSICR			GxICR(TMSIRQ)	/* Timestamp timer uflow intr ctrl reg */
+#endif	/* CONFIG_AM34_2 */
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_TIMER_REGS_H */

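These mode registers follow a common start sequence: select a clock source, pulse INIT_COUNTER to latch the base register into the counter, then set COUNT_ENABLE. A hedged sketch for 16-bit timer 4, built only from the bits defined above (the reload value and the exact ordering are assumptions, not code from this patch):

static void tm4_start(u16 reload)
{
	TM4MD = 0;					/* stop the timer */
	TM4BR = reload;					/* base (reload) value */
	TM4MD = TM4MD_SRC_IOCLK_8 | TM4MD_INIT_COUNTER;	/* latch TM4BC */
	TM4MD = TM4MD_SRC_IOCLK_8 | TM4MD_COUNT_ENABLE;	/* start counting */
}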
+ 16 - 4
arch/mn10300/include/asm/timex.h

@@ -16,18 +16,30 @@
 
 #define TICK_SIZE (tick_nsec / 1000)
 
-#define CLOCK_TICK_RATE 1193180 /* Underlying HZ - this should probably be set
-				 * to something appropriate, but what? */
-
-extern cycles_t cacheflush_time;
+#define CLOCK_TICK_RATE MN10300_JCCLK /* Underlying HZ */
 
 #ifdef __KERNEL__
 
+extern cycles_t cacheflush_time;
+
 static inline cycles_t get_cycles(void)
 {
 	return read_timestamp_counter();
 }
 
+extern int init_clockevents(void);
+extern int init_clocksource(void);
+
+static inline void setup_jiffies_interrupt(int irq,
+					   struct irqaction *action)
+{
+	u16 tmp;
+	setup_irq(irq, action);
+	set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
+	GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
+	tmp = GxICR(irq);
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_TIMEX_H */

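get_cycles() now reads the free-running timestamp counter, so short intervals can be measured as cycle deltas and scaled by the counter clock. A sketch, assuming MN10300_TSCCLK is the counter frequency in Hz (as its use in the clocksource code below suggests) and is in the MHz range:

/* Illustrative only: convert a cycle delta to microseconds. */
static unsigned long elapsed_us(cycles_t start, cycles_t end)
{
	return (unsigned long)(end - start) / (MN10300_TSCCLK / 1000000);
}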
+ 124 - 50
arch/mn10300/include/asm/tlbflush.h

@@ -11,24 +11,78 @@
 #ifndef _ASM_TLBFLUSH_H
 #define _ASM_TLBFLUSH_H
 
+#include <linux/mm.h>
 #include <asm/processor.h>
 
-#define __flush_tlb()						\
-do {								\
-	int w;							\
-	__asm__ __volatile__					\
-		("	mov %1,%0		\n"		\
-		 "	or %2,%0		\n"		\
-		 "	mov %0,%1		\n"		\
-		 : "=d"(w)					\
-		 : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)	\
-		 : "cc", "memory"				\
-		 );						\
-} while (0)
+struct tlb_state {
+	struct mm_struct	*active_mm;
+	int			state;
+};
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
-#define __flush_tlb_all() __flush_tlb()
-#define __flush_tlb_one(addr) __flush_tlb()
+/**
+ * local_flush_tlb - Flush the current MM's entries from the local CPU's TLBs
+ */
+static inline void local_flush_tlb(void)
+{
+	int w;
+	asm volatile(
+		"	mov	%1,%0		\n"
+		"	or	%2,%0		\n"
+		"	mov	%0,%1		\n"
+		: "=d"(w)
+		: "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)
+		: "cc", "memory");
+}
+
+/**
+ * local_flush_tlb_all - Flush all entries from the local CPU's TLBs
+ */
+static inline void local_flush_tlb_all(void)
+{
+	local_flush_tlb();
+}
 
+/**
+ * local_flush_tlb_one - Flush one entry from the local CPU's TLBs
+ */
+static inline void local_flush_tlb_one(unsigned long addr)
+{
+	local_flush_tlb();
+}
+
+/**
+ * local_flush_tlb_page - Flush a page's entry from the local CPU's TLBs
+ * @mm: The MM to flush for
+ * @addr: The address of the target page in RAM (not its page struct)
+ */
+static inline
+void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr)
+{
+	unsigned long pteu, flags, cnx;
+
+	addr &= PAGE_MASK;
+
+	local_irq_save(flags);
+
+	cnx = 1;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+	cnx = mm->context.tlbpid[smp_processor_id()];
+#endif
+	if (cnx) {
+		pteu = addr;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+		pteu |= cnx & xPTEU_PID;
+#endif
+		IPTEU = pteu;
+		DPTEU = pteu;
+		if (IPTEL & xPTEL_V)
+			IPTEL = 0;
+		if (DPTEL & xPTEL_V)
+			DPTEL = 0;
+	}
+	local_irq_restore(flags);
+}
 
 /*
  * TLB flushing:
@@ -40,41 +94,61 @@ do {								\
  *  - flush_tlb_range(mm, start, end) flushes a range of pages
  *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  */
-#define flush_tlb_all()				\
-do {						\
-	preempt_disable();			\
-	__flush_tlb_all();			\
-	preempt_enable();			\
-} while (0)
-
-#define flush_tlb_mm(mm)			\
-do {						\
-	preempt_disable();			\
-	__flush_tlb_all();			\
-	preempt_enable();			\
-} while (0)
-
-#define flush_tlb_range(vma, start, end)			\
-do {								\
-	unsigned long __s __attribute__((unused)) = (start);	\
-	unsigned long __e __attribute__((unused)) = (end);	\
-	preempt_disable();					\
-	__flush_tlb_all();					\
-	preempt_enable();					\
-} while (0)
-
-
-#define __flush_tlb_global()			flush_tlb_all()
-#define flush_tlb()				flush_tlb_all()
-#define flush_tlb_kernel_range(start, end)			\
-do {								\
-	unsigned long __s __attribute__((unused)) = (start);	\
-	unsigned long __e __attribute__((unused)) = (end);	\
-	flush_tlb_all();					\
-} while (0)
-
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
-
-#define flush_tlb_pgtables(mm, start, end)	do {} while (0)
+#ifdef CONFIG_SMP
+
+#include <asm/smp.h>
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_current_task(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+
+#define flush_tlb()		flush_tlb_current_task()
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
+{
+	flush_tlb_mm(vma->vm_mm);
+}
+
+#else   /* CONFIG_SMP */
+
+static inline void flush_tlb_all(void)
+{
+	preempt_disable();
+	local_flush_tlb_all();
+	preempt_enable();
+}
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+	preempt_disable();
+	local_flush_tlb_all();
+	preempt_enable();
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
+{
+	preempt_disable();
+	local_flush_tlb_all();
+	preempt_enable();
+}
+
+#define flush_tlb_page(vma, addr)	local_flush_tlb_page((vma)->vm_mm, addr)
+#define flush_tlb()			flush_tlb_all()
+
+#endif /* CONFIG_SMP */
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+					  unsigned long end)
+{
+	flush_tlb_all();
+}
+
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+				      unsigned long start, unsigned long end)
+{
+}
 
 #endif /* _ASM_TLBFLUSH_H */

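local_flush_tlb_page() is the precise tool here: it writes the page address (plus the PID tag when CONFIG_MN10300_TLB_USE_PIDR is set) into IPTEU/DPTEU and clears only a matching valid entry, rather than invalidating both TLBs wholesale. A hedged caller sketch — set_my_pte() is a hypothetical stand-in for whatever updates the PTE:

static void update_user_pte(struct mm_struct *mm, unsigned long addr)
{
	/* set_my_pte(mm, addr, ...);   -- PTE update not shown */
	local_flush_tlb_page(mm, addr);	/* drop the stale I/D-TLB entries */
}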
+ 2 - 4
arch/mn10300/include/asm/uaccess.h

@@ -14,9 +14,8 @@
 /*
  * User space memory access functions
  */
-#include <linux/sched.h>
+#include <linux/thread_info.h>
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/errno.h>
 
 #define VERIFY_READ 0
@@ -29,7 +28,6 @@
  *
  * For historical reasons, these macros are grossly misnamed.
  */
-
 #define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
 
 #define KERNEL_XDS	MAKE_MM_SEG(0xBFFFFFFF)
@@ -377,7 +375,7 @@ unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
 
 
 #if 0
-#error don't use - these macros don't increment to & from pointers
+#error "don't use - these macros don't increment to & from pointers"
 /* Optimize just a little bit when we know the size of the move. */
 #define __constant_copy_user(to, from, size)	\
 do {						\

+ 10 - 5
arch/mn10300/kernel/Makefile

@@ -3,13 +3,16 @@
 #
 extra-y := head.o init_task.o vmlinux.lds
 
-obj-y   := process.o signal.o entry.o fpu.o traps.o irq.o \
+fpu-obj-y := fpu-nofpu.o fpu-nofpu-low.o
+fpu-obj-$(CONFIG_FPU) := fpu.o fpu-low.o
+
+obj-y   := process.o signal.o entry.o traps.o irq.o \
 	   ptrace.o setup.o time.o sys_mn10300.o io.o kthread.o \
-	   switch_to.o mn10300_ksyms.o kernel_execve.o
+	   switch_to.o mn10300_ksyms.o kernel_execve.o $(fpu-obj-y)
 
-obj-$(CONFIG_MN10300_WD_TIMER) += mn10300-watchdog.o mn10300-watchdog-low.o
+obj-$(CONFIG_SMP) += smp.o smp-low.o
 
-obj-$(CONFIG_FPU) += fpu-low.o
+obj-$(CONFIG_MN10300_WD_TIMER) += mn10300-watchdog.o mn10300-watchdog-low.o
 
 obj-$(CONFIG_MN10300_TTYSM) += mn10300-serial.o mn10300-serial-low.o \
 			       mn10300-debug.o
@@ -17,7 +20,7 @@ obj-$(CONFIG_GDBSTUB) += gdb-stub.o gdb-low.o
 obj-$(CONFIG_GDBSTUB_ON_TTYSx) += gdb-io-serial.o gdb-io-serial-low.o
 obj-$(CONFIG_GDBSTUB_ON_TTYSMx) += gdb-io-ttysm.o gdb-io-ttysm-low.o
 
-ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y)
+ifeq ($(CONFIG_MN10300_CACHE_ENABLED),y)
 obj-$(CONFIG_GDBSTUB) += gdb-cache.o
 endif
 
@@ -25,3 +28,5 @@ obj-$(CONFIG_MN10300_RTC) += rtc.o
 obj-$(CONFIG_PROFILE) += profile.o profile-low.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_CSRC_MN10300) += csrc-mn10300.o
+obj-$(CONFIG_CEVT_MN10300) += cevt-mn10300.o

+ 10 - 1
arch/mn10300/kernel/asm-offsets.c

@@ -23,6 +23,7 @@ void foo(void)
 
 	OFFSET(TI_task,			thread_info, task);
 	OFFSET(TI_exec_domain,		thread_info, exec_domain);
+	OFFSET(TI_frame,		thread_info, frame);
 	OFFSET(TI_flags,		thread_info, flags);
 	OFFSET(TI_cpu,			thread_info, cpu);
 	OFFSET(TI_preempt_count,	thread_info, preempt_count);
@@ -66,7 +67,15 @@ void foo(void)
 	OFFSET(THREAD_SP,		thread_struct, sp);
 	OFFSET(THREAD_A3,		thread_struct, a3);
 	OFFSET(THREAD_USP,		thread_struct, usp);
-	OFFSET(THREAD_FRAME,		thread_struct, __frame);
+#ifdef CONFIG_FPU
+	OFFSET(THREAD_FPU_FLAGS,	thread_struct, fpu_flags);
+	OFFSET(THREAD_FPU_STATE,	thread_struct, fpu_state);
+	DEFINE(__THREAD_USING_FPU,	THREAD_USING_FPU);
+	DEFINE(__THREAD_HAS_FPU,	THREAD_HAS_FPU);
+#endif /* CONFIG_FPU */
+	BLANK();
+
+	OFFSET(TASK_THREAD,		task_struct, thread);
 	BLANK();
 
 	DEFINE(CLONE_VM_asm,		CLONE_VM);

+ 131 - 0
arch/mn10300/kernel/cevt-mn10300.c

@@ -0,0 +1,131 @@
+/* MN10300 clockevents
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by Mark Salter (msalter@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <asm/timex.h>
+#include "internal.h"
+
+#ifdef CONFIG_SMP
+#if (CONFIG_NR_CPUS > 2) && !defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+#error "This doesn't scale well! Need per-core local timers."
+#endif
+#else /* CONFIG_SMP */
+#define stop_jiffies_counter1()
+#define reload_jiffies_counter1(x)
+#define TMJC1IRQ TMJCIRQ
+#endif
+
+
+static int next_event(unsigned long delta,
+		      struct clock_event_device *evt)
+{
+	unsigned int cpu = smp_processor_id();
+
+	if (cpu == 0) {
+		stop_jiffies_counter();
+		reload_jiffies_counter(delta - 1);
+	} else {
+		stop_jiffies_counter1();
+		reload_jiffies_counter1(delta - 1);
+	}
+	return 0;
+}
+
+static void set_clock_mode(enum clock_event_mode mode,
+			   struct clock_event_device *evt)
+{
+	/* Nothing to do ...  */
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, mn10300_clockevent_device);
+static DEFINE_PER_CPU(struct irqaction, timer_irq);
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+	struct clock_event_device *cd;
+	unsigned int cpu = smp_processor_id();
+
+	if (cpu == 0)
+		stop_jiffies_counter();
+	else
+		stop_jiffies_counter1();
+
+	cd = &per_cpu(mn10300_clockevent_device, cpu);
+	cd->event_handler(cd);
+
+	return IRQ_HANDLED;
+}
+
+static void event_handler(struct clock_event_device *dev)
+{
+}
+
+int __init init_clockevents(void)
+{
+	struct clock_event_device *cd;
+	struct irqaction *iact;
+	unsigned int cpu = smp_processor_id();
+
+	cd = &per_cpu(mn10300_clockevent_device, cpu);
+
+	if (cpu == 0) {
+		stop_jiffies_counter();
+		cd->irq	= TMJCIRQ;
+	} else {
+		stop_jiffies_counter1();
+		cd->irq	= TMJC1IRQ;
+	}
+
+	cd->name		= "Timestamp";
+	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
+
+	/* Calculate the min / max delta */
+	clockevent_set_clock(cd, MN10300_JCCLK);
+
+	cd->max_delta_ns	= clockevent_delta2ns(TMJCBR_MAX, cd);
+	cd->min_delta_ns	= clockevent_delta2ns(100, cd);
+
+	cd->rating		= 200;
+	cd->cpumask		= cpumask_of(smp_processor_id());
+	cd->set_mode		= set_clock_mode;
+	cd->event_handler	= event_handler;
+	cd->set_next_event	= next_event;
+
+	iact = &per_cpu(timer_irq, cpu);
+	iact->flags = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER;
+	iact->handler = timer_interrupt;
+
+	clockevents_register_device(cd);
+
+#if defined(CONFIG_SMP) && !defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+	/* setup timer irq affinity so it only runs on this cpu */
+	{
+		struct irq_desc *desc;
+		desc = irq_to_desc(cd->irq);
+		cpumask_copy(desc->affinity, cpumask_of(cpu));
+		iact->flags |= IRQF_NOBALANCING;
+	}
+#endif
+
+	if (cpu == 0) {
+		reload_jiffies_counter(MN10300_JC_PER_HZ - 1);
+		iact->name = "CPU0 Timer";
+	} else {
+		reload_jiffies_counter1(MN10300_JC_PER_HZ - 1);
+		iact->name = "CPU1 Timer";
+	}
+
+	setup_jiffies_interrupt(cd->irq, iact);
+
+	return 0;
+}

+ 35 - 0
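init_clockevents() registers a one-shot device per CPU; the clockevents core then programs each tick by calling ->set_next_event() with a delta expressed in jiffies-counter cycles, and timer_interrupt() feeds expiry back through ->event_handler(). An illustrative helper (not from this patch) showing the unit conversion, assuming MN10300_JCCLK is the counter clock in Hz:

/* Convert a nanosecond delay into jiffies-counter cycles for
 * ->set_next_event(); illustrative only. */
static unsigned long ns_to_jc_cycles(unsigned long long ns)
{
	return (unsigned long)((ns * MN10300_JCCLK) / 1000000000ULL);
}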
arch/mn10300/kernel/csrc-mn10300.c

@@ -0,0 +1,35 @@
+/* MN10300 clocksource
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by Mark Salter (msalter@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <asm/timex.h>
+#include "internal.h"
+
+static cycle_t mn10300_read(struct clocksource *cs)
+{
+	return read_timestamp_counter();
+}
+
+static struct clocksource clocksource_mn10300 = {
+	.name	= "TSC",
+	.rating	= 200,
+	.read	= mn10300_read,
+	.mask	= CLOCKSOURCE_MASK(32),
+	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+int __init init_clocksource(void)
+{
+	startup_timestamp_counter();
+	clocksource_set_clock(&clocksource_mn10300, MN10300_TSCCLK);
+	clocksource_register(&clocksource_mn10300);
+	return 0;
+}

+ 98 - 48
arch/mn10300/kernel/entry.S

@@ -28,25 +28,17 @@
 #include <asm/asm-offsets.h>
 #include <asm/asm-offsets.h>
 #include <asm/frame.inc>
 
+#include <asm/gdb-stub.h>
+#endif /* CONFIG_SMP && CONFIG_GDBSTUB */
+
 #ifdef CONFIG_PREEMPT
 #ifdef CONFIG_PREEMPT
-#define preempt_stop		__cli
+#define preempt_stop		LOCAL_IRQ_DISABLE
 #else
 #else
 #define preempt_stop
 #define preempt_stop
 #define resume_kernel		restore_all
 #define resume_kernel		restore_all
 #endif
 #endif
 
 
-	.macro __cli
-	and	~EPSW_IM,epsw
-	or	EPSW_IE|MN10300_CLI_LEVEL,epsw
-	nop
-	nop
-	nop
-	.endm
-	.macro __sti
-	or	EPSW_IE|EPSW_IM_7,epsw
-	.endm
-
-
 	.am33_2
 	.am33_2
 
 
 ###############################################################################
 ###############################################################################
@@ -88,7 +80,7 @@ syscall_call:
 syscall_exit:
 	# make sure we don't miss an interrupt setting need_resched or
 	# sigpending between sampling and the rti
-	__cli
+	LOCAL_IRQ_DISABLE
 	mov	(TI_flags,a2),d2
 	btst	_TIF_ALLWORK_MASK,d2
 	bne	syscall_exit_work
@@ -105,7 +97,7 @@ restore_all:
 syscall_exit_work:
 	btst	_TIF_SYSCALL_TRACE,d2
 	beq	work_pending
-	__sti				# could let syscall_trace_exit() call
+	LOCAL_IRQ_ENABLE		# could let syscall_trace_exit() call
 					# schedule() instead
 	mov	fp,d0
 	call	syscall_trace_exit[],0	# do_syscall_trace(regs)
@@ -121,7 +113,7 @@ work_resched:
 
 	# make sure we don't miss an interrupt setting need_resched or
 	# sigpending between sampling and the rti
-	__cli
+	LOCAL_IRQ_DISABLE
 
 	# is there any work to be done other than syscall tracing?
 	mov	(TI_flags,a2),d2
@@ -168,7 +160,7 @@ ret_from_intr:
 ENTRY(resume_userspace)
 	# make sure we don't miss an interrupt setting need_resched or
 	# sigpending between sampling and the rti
-	__cli
+	LOCAL_IRQ_DISABLE
 
 	# is there any work to be done on int/exception return?
 	mov	(TI_flags,a2),d2
@@ -178,7 +170,7 @@ ENTRY(resume_userspace)
 
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
-	__cli
+	LOCAL_IRQ_DISABLE
 	mov	(TI_preempt_count,a2),d0	# non-zero preempt_count ?
 	cmp	0,d0
 	bne	restore_all
@@ -214,31 +206,6 @@ ENTRY(irq_handler)
 
 	jmp	ret_from_intr
 
-###############################################################################
-#
-# Monitor Signal handler entry point
-#
-###############################################################################
-ENTRY(monitor_signal)
-	movbu	(0xae000001),d1
-	cmp	1,d1
-	beq	monsignal
-	ret	[],0
-
-monsignal:
-	or	EPSW_NMID,epsw
-	mov	d0,a0
-	mov	a0,sp
-	mov	(REG_EPSW,fp),d1
-	and	~EPSW_nSL,d1
-	mov	d1,(REG_EPSW,fp)
-	movm	(sp),[d2,d3,a2,a3,exreg0,exreg1,exother]
-	mov	(sp),a1
-	mov	a1,usp
-	movm	(sp),[other]
-	add	4,sp
-here:	jmp	0x8e000008-here+0x8e000008
-
 ###############################################################################
 #
 # Double Fault handler entry point
@@ -276,6 +243,10 @@ double_fault_loop:
 ENTRY(raw_bus_error)
 	add	-4,sp
 	mov	d0,(sp)
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+	mov	(MMUCTR),d0
+	mov	d0,(MMUCTR)
+#endif
 	mov	(BCBERR),d0		# what
 	btst	BCBERR_BEMR_DMA,d0	# see if it was an external bus error
 	beq	__common_exception_aux	# it wasn't
@@ -302,11 +273,88 @@ ENTRY(nmi_handler)
 	add	-4,sp
 	mov	d0,(sp)
 	mov	(TBR),d0
+
+#ifdef CONFIG_SMP
+	add	-4,sp
+	mov	d0,(sp)			# save d0(TBR)
+	movhu	(NMIAGR),d0
+	and	NMIAGR_GN,d0
+	lsr	0x2,d0
+	cmp	CALL_FUNCTION_NMI_IPI,d0
+	bne	5f			# if not call function, jump
+
+	# function call nmi ipi
+	add	4,sp			# no need to store TBR
+	mov	GxICR_DETECT,d0		# clear NMI request
+	movbu	d0,(GxICR(CALL_FUNCTION_NMI_IPI))
+	movhu	(GxICR(CALL_FUNCTION_NMI_IPI)),d0
+	and	~EPSW_NMID,epsw		# enable NMI
+
+	mov	(sp),d0			# restore d0
+	SAVE_ALL
+	call	smp_nmi_call_function_interrupt[],0
+	RESTORE_ALL
+
+5:
+#ifdef CONFIG_GDBSTUB
+	cmp	GDB_NMI_IPI,d0
+	bne	3f			# if not gdb nmi ipi, jump
+
+	# gdb nmi ipi
+	add	4,sp			# no need to store TBR
+	mov	GxICR_DETECT,d0		# clear NMI
+	movbu	d0,(GxICR(GDB_NMI_IPI))
+	movhu	(GxICR(GDB_NMI_IPI)),d0
+	and	~EPSW_NMID,epsw		# enable NMI
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+	mov	(gdbstub_nmi_opr_type),d0
+	cmp	GDBSTUB_NMI_CACHE_PURGE,d0
+	bne	4f			# if not gdb cache purge, jump
+
+	# gdb cache purge nmi ipi
+	add	-20,sp
+	mov	d1,(4,sp)
+	mov	a0,(8,sp)
+	mov	a1,(12,sp)
+	mov	mdr,d0
+	mov	d0,(16,sp)
+	call	gdbstub_local_purge_cache[],0
+	mov	0x1,d0
+	mov	(CPUID),d1
+	asl	d1,d0
+	mov	gdbstub_nmi_cpumask,a0
+	bclr	d0,(a0)
+	mov	(4,sp),d1
+	mov	(8,sp),a0
+	mov	(12,sp),a1
+	mov	(16,sp),d0
+	mov	d0,mdr
+	add	20,sp
+	mov	(sp),d0
+	add	4,sp
+	rti
+4:
+#endif /* CONFIG_MN10300_CACHE_ENABLED */
+	# gdb wait nmi ipi
+	mov     (sp),d0
+	SAVE_ALL
+	call    gdbstub_nmi_wait[],0
+	RESTORE_ALL
+3:
+#endif /* CONFIG_GDBSTUB */
+	mov     (sp),d0                 # restore TBR to d0
+	add     4,sp
+#endif /* CONFIG_SMP */
+
 	bra	__common_exception_nonmi
 
 ENTRY(__common_exception)
 	add	-4,sp
 	mov	d0,(sp)
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+	mov	(MMUCTR),d0
+	mov	d0,(MMUCTR)
+#endif
 
 __common_exception_aux:
 	mov	(TBR),d0
@@ -331,15 +379,21 @@ __common_exception_nonmi:
 	mov	d0,(REG_ORIG_D0,fp)
 
 #ifdef CONFIG_GDBSTUB
+#ifdef CONFIG_SMP
+	call	gdbstub_busy_check[],0
+	and	d0,d0			# check return value
+	beq	2f
+#else  /* CONFIG_SMP */
 	btst	0x01,(gdbstub_busy)
 	beq	2f
+#endif /* CONFIG_SMP */
 	and	~EPSW_IE,epsw
 	mov	fp,d0
 	mov	a2,d1
 	call	gdbstub_exception[],0	# gdbstub itself caused an exception
 	bra	restore_all
 2:
-#endif
+#endif /* CONFIG_GDBSTUB */
 
 	mov	fp,d0			# arg 0: stacked register file
 	mov	a2,d1			# arg 1: exception number
@@ -374,11 +428,7 @@ ENTRY(set_excp_vector)
 	add	exception_table,d0
 	mov	d1,(d0)
 	mov	4,d1
-#if defined(CONFIG_MN10300_CACHE_WBACK)
-	jmp	mn10300_dcache_flush_inv_range2
-#else
 	ret	[],0
-#endif
 
 ###############################################################################
 #

+ 163 - 102
arch/mn10300/kernel/fpu-low.S

@@ -8,25 +8,14 @@
  * as published by the Free Software Foundation; either version
  * 2 of the Licence, or (at your option) any later version.
  */
+#include <linux/linkage.h>
 #include <asm/cpu-regs.h>
+#include <asm/smp.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/frame.inc>
 
-###############################################################################
-#
-# void fpu_init_state(void)
-# - initialise the FPU
-#
-###############################################################################
-	.globl	fpu_init_state
-	.type	fpu_init_state,@function
-fpu_init_state:
-	mov	epsw,d0
-	or	EPSW_FE,epsw
-
-#ifdef CONFIG_MN10300_PROC_MN103E010
-	nop
-	nop
-	nop
-#endif
+.macro FPU_INIT_STATE_ALL
 	fmov	0,fs0
 	fmov	fs0,fs1
 	fmov	fs0,fs2
@@ -60,7 +49,100 @@ fpu_init_state:
 	fmov	fs0,fs30
 	fmov	fs0,fs31
 	fmov	FPCR_INIT,fpcr
+.endm
+
+.macro FPU_SAVE_ALL areg,dreg
+	fmov	fs0,(\areg+)
+	fmov	fs1,(\areg+)
+	fmov	fs2,(\areg+)
+	fmov	fs3,(\areg+)
+	fmov	fs4,(\areg+)
+	fmov	fs5,(\areg+)
+	fmov	fs6,(\areg+)
+	fmov	fs7,(\areg+)
+	fmov	fs8,(\areg+)
+	fmov	fs9,(\areg+)
+	fmov	fs10,(\areg+)
+	fmov	fs11,(\areg+)
+	fmov	fs12,(\areg+)
+	fmov	fs13,(\areg+)
+	fmov	fs14,(\areg+)
+	fmov	fs15,(\areg+)
+	fmov	fs16,(\areg+)
+	fmov	fs17,(\areg+)
+	fmov	fs18,(\areg+)
+	fmov	fs19,(\areg+)
+	fmov	fs20,(\areg+)
+	fmov	fs21,(\areg+)
+	fmov	fs22,(\areg+)
+	fmov	fs23,(\areg+)
+	fmov	fs24,(\areg+)
+	fmov	fs25,(\areg+)
+	fmov	fs26,(\areg+)
+	fmov	fs27,(\areg+)
+	fmov	fs28,(\areg+)
+	fmov	fs29,(\areg+)
+	fmov	fs30,(\areg+)
+	fmov	fs31,(\areg+)
+	fmov	fpcr,\dreg
+	mov	\dreg,(\areg)
+.endm
+
+.macro FPU_RESTORE_ALL areg,dreg
+	fmov	(\areg+),fs0
+	fmov	(\areg+),fs1
+	fmov	(\areg+),fs2
+	fmov	(\areg+),fs3
+	fmov	(\areg+),fs4
+	fmov	(\areg+),fs5
+	fmov	(\areg+),fs6
+	fmov	(\areg+),fs7
+	fmov	(\areg+),fs8
+	fmov	(\areg+),fs9
+	fmov	(\areg+),fs10
+	fmov	(\areg+),fs11
+	fmov	(\areg+),fs12
+	fmov	(\areg+),fs13
+	fmov	(\areg+),fs14
+	fmov	(\areg+),fs15
+	fmov	(\areg+),fs16
+	fmov	(\areg+),fs17
+	fmov	(\areg+),fs18
+	fmov	(\areg+),fs19
+	fmov	(\areg+),fs20
+	fmov	(\areg+),fs21
+	fmov	(\areg+),fs22
+	fmov	(\areg+),fs23
+	fmov	(\areg+),fs24
+	fmov	(\areg+),fs25
+	fmov	(\areg+),fs26
+	fmov	(\areg+),fs27
+	fmov	(\areg+),fs28
+	fmov	(\areg+),fs29
+	fmov	(\areg+),fs30
+	fmov	(\areg+),fs31
+	mov	(\areg),\dreg
+	fmov	\dreg,fpcr
+.endm
 
+###############################################################################
+#
+# void fpu_init_state(void)
+# - initialise the FPU
+#
+###############################################################################
+	.globl	fpu_init_state
+	.type	fpu_init_state,@function
+fpu_init_state:
+	mov	epsw,d0
+	or	EPSW_FE,epsw
+
+#ifdef CONFIG_MN10300_PROC_MN103E010
+	nop
+	nop
+	nop
+#endif
+	FPU_INIT_STATE_ALL
 #ifdef CONFIG_MN10300_PROC_MN103E010
 	nop
 	nop
@@ -89,40 +171,7 @@ fpu_save:
 	nop
 #endif
 	mov	d0,a0
-	fmov	fs0,(a0+)
-	fmov	fs1,(a0+)
-	fmov	fs2,(a0+)
-	fmov	fs3,(a0+)
-	fmov	fs4,(a0+)
-	fmov	fs5,(a0+)
-	fmov	fs6,(a0+)
-	fmov	fs7,(a0+)
-	fmov	fs8,(a0+)
-	fmov	fs9,(a0+)
-	fmov	fs10,(a0+)
-	fmov	fs11,(a0+)
-	fmov	fs12,(a0+)
-	fmov	fs13,(a0+)
-	fmov	fs14,(a0+)
-	fmov	fs15,(a0+)
-	fmov	fs16,(a0+)
-	fmov	fs17,(a0+)
-	fmov	fs18,(a0+)
-	fmov	fs19,(a0+)
-	fmov	fs20,(a0+)
-	fmov	fs21,(a0+)
-	fmov	fs22,(a0+)
-	fmov	fs23,(a0+)
-	fmov	fs24,(a0+)
-	fmov	fs25,(a0+)
-	fmov	fs26,(a0+)
-	fmov	fs27,(a0+)
-	fmov	fs28,(a0+)
-	fmov	fs29,(a0+)
-	fmov	fs30,(a0+)
-	fmov	fs31,(a0+)
-	fmov	fpcr,d0
-	mov	d0,(a0)
+	FPU_SAVE_ALL	a0,d0
 #ifdef CONFIG_MN10300_PROC_MN103E010
 	nop
 	nop
@@ -135,63 +184,75 @@ fpu_save:
 
 ###############################################################################
 #
-# void fpu_restore(struct fpu_state_struct *)
-# - restore the fpu state
-# - note that an FPU Operational exception might occur during this process
+# void fpu_disabled(void)
+# - handle an exception due to the FPU being disabled
+#   when CONFIG_FPU is enabled
 #
 ###############################################################################
-	.globl	fpu_restore
-	.type	fpu_restore,@function
-fpu_restore:
-	mov	epsw,d1
-	or	EPSW_FE,epsw		/* enable the FPU so we can access it */
-
-#ifdef CONFIG_MN10300_PROC_MN103E010
+	.type	fpu_disabled,@function
+	.globl	fpu_disabled
+fpu_disabled:
+	or	EPSW_nAR|EPSW_FE,epsw
 	nop
 	nop
 	nop
 	nop
-#endif
-	mov	d0,a0
-	fmov	(a0+),fs0
-	fmov	(a0+),fs1
-	fmov	(a0+),fs2
-	fmov	(a0+),fs3
-	fmov	(a0+),fs4
-	fmov	(a0+),fs5
-	fmov	(a0+),fs6
-	fmov	(a0+),fs7
-	fmov	(a0+),fs8
-	fmov	(a0+),fs9
-	fmov	(a0+),fs10
-	fmov	(a0+),fs11
-	fmov	(a0+),fs12
-	fmov	(a0+),fs13
-	fmov	(a0+),fs14
-	fmov	(a0+),fs15
-	fmov	(a0+),fs16
-	fmov	(a0+),fs17
-	fmov	(a0+),fs18
-	fmov	(a0+),fs19
-	fmov	(a0+),fs20
-	fmov	(a0+),fs21
-	fmov	(a0+),fs22
-	fmov	(a0+),fs23
-	fmov	(a0+),fs24
-	fmov	(a0+),fs25
-	fmov	(a0+),fs26
-	fmov	(a0+),fs27
-	fmov	(a0+),fs28
-	fmov	(a0+),fs29
-	fmov	(a0+),fs30
-	fmov	(a0+),fs31
-	mov	(a0),d0
-	fmov	d0,fpcr
-#ifdef CONFIG_MN10300_PROC_MN103E010
 	nop
 	nop
+
+	mov	sp,a1
+	mov	(a1),d1			/* get epsw of user context */
+	and	~(THREAD_SIZE-1),a1	/* a1: (thread_info *ti) */
+	mov	(TI_task,a1),a2		/* a2: (task_struct *tsk) */
+	btst	EPSW_nSL,d1
+	beq	fpu_used_in_kernel
+
+	or	EPSW_FE,d1
+	mov	d1,(sp)
+	mov	(TASK_THREAD+THREAD_FPU_FLAGS,a2),d1
+#ifndef CONFIG_LAZY_SAVE_FPU
+	or	__THREAD_HAS_FPU,d1
+	mov	d1,(TASK_THREAD+THREAD_FPU_FLAGS,a2)
+#else  /* !CONFIG_LAZY_SAVE_FPU */
+	mov	(fpu_state_owner),a0
+	cmp	0,a0
+	beq	fpu_regs_save_end
+
+	mov	(TASK_THREAD+THREAD_UREGS,a0),a1
+	add	TASK_THREAD+THREAD_FPU_STATE,a0
+	FPU_SAVE_ALL a0,d0
+
+	mov	(REG_EPSW,a1),d0
+	and	~EPSW_FE,d0
+	mov	d0,(REG_EPSW,a1)
+
+fpu_regs_save_end:
+	mov	a2,(fpu_state_owner)
+#endif /* !CONFIG_LAZY_SAVE_FPU */
+
+	btst	__THREAD_USING_FPU,d1
+	beq	fpu_regs_init
+	add	TASK_THREAD+THREAD_FPU_STATE,a2
+	FPU_RESTORE_ALL a2,d0
+	rti
+
+fpu_regs_init:
+	FPU_INIT_STATE_ALL
+	add	TASK_THREAD+THREAD_FPU_FLAGS,a2
+	bset	__THREAD_USING_FPU,(0,a2)
+	rti
+
+fpu_used_in_kernel:
+	and	~(EPSW_nAR|EPSW_FE),epsw
 	nop
 	nop
 	nop
 	nop
-#endif
 
-	mov	d1,epsw
-	ret	[],0
+	add	-4,sp
+	SAVE_ALL
+	mov	-1,d0
+	mov	d0,(REG_ORIG_D0,fp)
+
+	and	~EPSW_NMID,epsw
+
+	mov	fp,d0
+	call	fpu_disabled_in_kernel[],0
+	jmp	ret_from_exception
 
-	.size	fpu_restore,.-fpu_restore
+	.size	fpu_disabled,.-fpu_disabled

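Note: fpu_disabled above is the exception-path half of the FPU context switch; the process-switch half is the unlazy/ownership logic in asm/fpu.h. A hedged C sketch of what that logic amounts to (the names follow the flags used elsewhere in this series, but this is an illustration, not the header's exact code):

/* Sketch: flush the live FPU contents back to tsk's save area.  Under
 * CONFIG_LAZY_SAVE_FPU a single global owner pointer is tracked;
 * otherwise each task carries a THREAD_HAS_FPU flag. */
static inline void sketch_unlazy_fpu(struct task_struct *tsk)
{
	preempt_disable();
#ifdef CONFIG_LAZY_SAVE_FPU
	if (fpu_state_owner == tsk)
		fpu_save(&tsk->thread.fpu_state);
#else
	if (tsk->thread.fpu_flags & THREAD_HAS_FPU)
		fpu_save(&tsk->thread.fpu_state);
#endif
	preempt_enable();
}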
+ 39 - 0
arch/mn10300/kernel/fpu-nofpu-low.S

@@ -0,0 +1,39 @@
+/* MN10300 Low level FPU management operations
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/linkage.h>
+#include <asm/cpu-regs.h>
+#include <asm/smp.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/frame.inc>
+
+###############################################################################
+#
+# void fpu_disabled(void)
+# - handle an exception due to the FPU being disabled
+#   when CONFIG_FPU is disabled
+#
+###############################################################################
+	.type	fpu_disabled,@function
+	.globl	fpu_disabled
+fpu_disabled:
+	add	-4,sp
+	SAVE_ALL
+	mov	-1,d0
+	mov	d0,(REG_ORIG_D0,fp)
+
+	and	~EPSW_NMID,epsw
+
+	mov	fp,d0
+	call	unexpected_fpu_exception[],0
+	jmp	ret_from_exception
+
+	.size	fpu_disabled,.-fpu_disabled

+ 30 - 0
arch/mn10300/kernel/fpu-nofpu.c

@@ -0,0 +1,30 @@
+/* MN10300 FPU management
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <asm/fpu.h>
+
+/*
+ * handle an FPU operational exception
+ * - there's a possibility that if the FPU is asynchronous, the signal might
+ *   be meant for a process other than the current one
+ */
+asmlinkage
+void unexpected_fpu_exception(struct pt_regs *regs, enum exception_code code)
+{
+	panic("An FPU exception was received, but there's no FPU enabled.");
+}
+
+/*
+ * fill in the FPU structure for a core dump
+ */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpreg)
+{
+	return 0; /* not valid */
+}

+ 56 - 85
arch/mn10300/kernel/fpu.c

@@ -12,56 +12,19 @@
 #include <asm/fpu.h>
 #include <asm/elf.h>
 #include <asm/exceptions.h>
+#include <asm/system.h>
 
+#ifdef CONFIG_LAZY_SAVE_FPU
 struct task_struct *fpu_state_owner;
+#endif
 
 /*
- * handle an exception due to the FPU being disabled
+ * error functions in FPU disabled exception
  */
-asmlinkage void fpu_disabled(struct pt_regs *regs, enum exception_code code)
+asmlinkage void fpu_disabled_in_kernel(struct pt_regs *regs)
 {
-	struct task_struct *tsk = current;
-
-	if (!user_mode(regs))
-		die_if_no_fixup("An FPU Disabled exception happened in"
-				" kernel space\n",
-				regs, code);
-
-#ifdef CONFIG_FPU
-	preempt_disable();
-
-	/* transfer the last process's FPU state to memory */
-	if (fpu_state_owner) {
-		fpu_save(&fpu_state_owner->thread.fpu_state);
-		fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
-	}
-
-	/* the current process now owns the FPU state */
-	fpu_state_owner = tsk;
-	regs->epsw |= EPSW_FE;
-
-	/* load the FPU with the current process's FPU state or invent a new
-	 * clean one if the process doesn't have one */
-	if (is_using_fpu(tsk)) {
-		fpu_restore(&tsk->thread.fpu_state);
-	} else {
-		fpu_init_state();
-		set_using_fpu(tsk);
-	}
-
-	preempt_enable();
-#else
-	{
-		siginfo_t info;
-
-		info.si_signo = SIGFPE;
-		info.si_errno = 0;
-		info.si_addr = (void *) tsk->thread.uregs->pc;
-		info.si_code = FPE_FLTINV;
-
-		force_sig_info(SIGFPE, &info, tsk);
-	}
-#endif  /* CONFIG_FPU */
+	die_if_no_fixup("An FPU Disabled exception happened in kernel space\n",
+			regs, EXCEP_FPU_DISABLED);
 }
 
 /*
@@ -71,15 +34,16 @@ asmlinkage void fpu_disabled(struct pt_regs *regs, enum exception_code code)
  */
 asmlinkage void fpu_exception(struct pt_regs *regs, enum exception_code code)
 {
-	struct task_struct *tsk = fpu_state_owner;
+	struct task_struct *tsk = current;
 	siginfo_t info;
+	u32 fpcr;
 
 	if (!user_mode(regs))
 		die_if_no_fixup("An FPU Operation exception happened in"
 				" kernel space\n",
 				regs, code);
 
-	if (!tsk)
+	if (!is_using_fpu(tsk))
 		die_if_no_fixup("An FPU Operation exception happened,"
 				" but the FPU is not in use",
 				regs, code);
@@ -89,48 +53,45 @@ asmlinkage void fpu_exception(struct pt_regs *regs, enum exception_code code)
 	info.si_addr = (void *) tsk->thread.uregs->pc;
 	info.si_code = FPE_FLTINV;
 
-#ifdef CONFIG_FPU
-	{
-		u32 fpcr;
+	unlazy_fpu(tsk);
 
-		/* get FPCR (we need to enable the FPU whilst we do this) */
-		asm volatile("	or	%1,epsw		\n"
-#ifdef CONFIG_MN10300_PROC_MN103E010
-			     "	nop			\n"
-			     "	nop			\n"
-			     "	nop			\n"
-#endif
-			     "	fmov	fpcr,%0		\n"
-#ifdef CONFIG_MN10300_PROC_MN103E010
-			     "	nop			\n"
-			     "	nop			\n"
-			     "	nop			\n"
-#endif
-			     "	and	%2,epsw		\n"
-			     : "=&d"(fpcr)
-			     : "i"(EPSW_FE), "i"(~EPSW_FE)
-			     );
-
-		if (fpcr & FPCR_EC_Z)
-			info.si_code = FPE_FLTDIV;
-		else if	(fpcr & FPCR_EC_O)
-			info.si_code = FPE_FLTOVF;
-		else if	(fpcr & FPCR_EC_U)
-			info.si_code = FPE_FLTUND;
-		else if	(fpcr & FPCR_EC_I)
-			info.si_code = FPE_FLTRES;
-	}
-#endif
+	fpcr = tsk->thread.fpu_state.fpcr;
+
+	if (fpcr & FPCR_EC_Z)
+		info.si_code = FPE_FLTDIV;
+	else if	(fpcr & FPCR_EC_O)
+		info.si_code = FPE_FLTOVF;
+	else if	(fpcr & FPCR_EC_U)
+		info.si_code = FPE_FLTUND;
+	else if	(fpcr & FPCR_EC_I)
+		info.si_code = FPE_FLTRES;
 
 	force_sig_info(SIGFPE, &info, tsk);
 }
 
+/*
+ * handle an FPU invalid_op exception
+ * - Derived from DO_EINFO() macro in arch/mn10300/kernel/traps.c
+ */
+asmlinkage void fpu_invalid_op(struct pt_regs *regs, enum exception_code code)
+{
+	siginfo_t info;
+
+	if (!user_mode(regs))
+		die_if_no_fixup("FPU invalid opcode", regs, code);
+
+	info.si_signo = SIGILL;
+	info.si_errno = 0;
+	info.si_code = ILL_COPROC;
+	info.si_addr = (void *) regs->pc;
+	force_sig_info(info.si_signo, &info, current);
+}
+
 /*
  * save the FPU state to a signal context
  */
 int fpu_setup_sigcontext(struct fpucontext *fpucontext)
 {
-#ifdef CONFIG_FPU
 	struct task_struct *tsk = current;
 
 	if (!is_using_fpu(tsk))
@@ -142,11 +103,19 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext)
 	 */
 	preempt_disable();
 
+#ifndef CONFIG_LAZY_SAVE_FPU
+	if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+		fpu_save(&tsk->thread.fpu_state);
+		tsk->thread.uregs->epsw &= ~EPSW_FE;
+		tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+	}
+#else /* !CONFIG_LAZY_SAVE_FPU */
 	if (fpu_state_owner == tsk) {
 		fpu_save(&tsk->thread.fpu_state);
 		fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
 		fpu_state_owner = NULL;
 	}
+#endif /* !CONFIG_LAZY_SAVE_FPU */
 
 	preempt_enable();
 
@@ -161,9 +130,6 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext)
 		return -1;
 
 	return 1;
-#else
-	return 0;
-#endif
 }
 
 /*
@@ -171,17 +137,23 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext)
  */
 void fpu_kill_state(struct task_struct *tsk)
 {
-#ifdef CONFIG_FPU
 	/* disown anything left in the FPU */
 	preempt_disable();
 
+#ifndef CONFIG_LAZY_SAVE_FPU
+	if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+		tsk->thread.uregs->epsw &= ~EPSW_FE;
+		tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+	}
+#else /* !CONFIG_LAZY_SAVE_FPU */
 	if (fpu_state_owner == tsk) {
 		fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
 		fpu_state_owner = NULL;
 	}
+#endif /* !CONFIG_LAZY_SAVE_FPU */
 
 	preempt_enable();
-#endif
+
 	/* we no longer have a valid current FPU state */
 	clear_using_fpu(tsk);
 }
@@ -195,8 +167,7 @@ int fpu_restore_sigcontext(struct fpucontext *fpucontext)
 	int ret;
 
 	/* load up the old FPU state */
-	ret = copy_from_user(&tsk->thread.fpu_state,
-			     fpucontext,
+	ret = copy_from_user(&tsk->thread.fpu_state, fpucontext,
 			     min(sizeof(struct fpu_state_struct),
 				 sizeof(struct fpucontext)));
 	if (!ret)

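Note: after unlazy_fpu() the task's saved FPCR is authoritative, which is why fpu_exception() can now decode the exception cause from memory rather than from the live FPU. The decode, condensed into one hedged helper (same mapping as the if/else chain in the hunk above):

/* Sketch of the FPCR exception-cause to siginfo si_code mapping. */
static int sketch_fpcr_to_si_code(u32 fpcr)
{
	if (fpcr & FPCR_EC_Z)
		return FPE_FLTDIV;	/* divide by zero */
	if (fpcr & FPCR_EC_O)
		return FPE_FLTOVF;	/* overflow */
	if (fpcr & FPCR_EC_U)
		return FPE_FLTUND;	/* underflow */
	if (fpcr & FPCR_EC_I)
		return FPE_FLTRES;	/* inexact result */
	return FPE_FLTINV;		/* invalid operation */
}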
+ 3 - 2
arch/mn10300/kernel/gdb-io-serial-low.S

@@ -18,6 +18,7 @@
 #include <asm/thread_info.h>
 #include <asm/frame.inc>
 #include <asm/intctl-regs.h>
+#include <asm/irqflags.h>
 #include <unit/serial.h>
 
 	.text
@@ -69,7 +70,7 @@ gdbstub_io_rx_overflow:
 	bra	gdbstub_io_rx_done
 
 gdbstub_io_rx_enter:
-	or	EPSW_IE|EPSW_IM_1,epsw
+	LOCAL_CHANGE_INTR_MASK_LEVEL(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL+1))
 	add	-4,sp
 	SAVE_ALL
 
@@ -80,7 +81,7 @@ gdbstub_io_rx_enter:
 	mov	fp,d0
 	call	gdbstub_rx_irq[],0	# gdbstub_rx_irq(regs,excep)
 
-	and	~EPSW_IE,epsw
+	LOCAL_CLI
 	bclr	0x01,(gdbstub_busy)
 
 	.globl gdbstub_return

+ 27 - 10
arch/mn10300/kernel/gdb-io-serial.c

@@ -23,6 +23,7 @@
 #include <asm/exceptions.h>
 #include <asm/serial-regs.h>
 #include <unit/serial.h>
+#include <asm/smp.h>
 
 /*
  * initialise the GDB stub
@@ -45,22 +46,34 @@ void gdbstub_io_init(void)
 	XIRQxICR(GDBPORT_SERIAL_IRQ) = 0;
 	tmp = XIRQxICR(GDBPORT_SERIAL_IRQ);
 
+#if   CONFIG_GDBSTUB_IRQ_LEVEL == 0
 	IVAR0 = EXCEP_IRQ_LEVEL0;
 	IVAR0 = EXCEP_IRQ_LEVEL0;
-	set_intr_stub(EXCEP_IRQ_LEVEL0, gdbstub_io_rx_handler);
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 1
+	IVAR1 = EXCEP_IRQ_LEVEL1;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 2
+	IVAR2 = EXCEP_IRQ_LEVEL2;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 3
+	IVAR3 = EXCEP_IRQ_LEVEL3;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 4
+	IVAR4 = EXCEP_IRQ_LEVEL4;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 5
+	IVAR5 = EXCEP_IRQ_LEVEL5;
+#else
+#error "Unknown irq level for gdbstub."
+#endif
+
+	set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL),
+		gdbstub_io_rx_handler);
 
 	XIRQxICR(GDBPORT_SERIAL_IRQ) &= ~GxICR_REQUEST;
-	XIRQxICR(GDBPORT_SERIAL_IRQ) = GxICR_ENABLE | GxICR_LEVEL_0;
+	XIRQxICR(GDBPORT_SERIAL_IRQ) =
+		GxICR_ENABLE | NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL);
 	tmp = XIRQxICR(GDBPORT_SERIAL_IRQ);
 
 	GDBPORT_SERIAL_IER = UART_IER_RDI | UART_IER_RLSI;
 
 	/* permit level 0 IRQs to take place */
-	asm volatile(
-		"	and %0,epsw	\n"
-		"	or %1,epsw	\n"
-		:
-		: "i"(~EPSW_IM), "i"(EPSW_IE | EPSW_IM_1)
-		);
+	local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
 }
 
 /*
@@ -87,6 +100,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
 {
 	unsigned ix;
 	u8 ch, st;
+#if defined(CONFIG_MN10300_WD_TIMER)
+	int cpu;
+#endif
 
 	*_ch = 0xff;
 
@@ -104,8 +120,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
 		if (nonblock)
 			return -EAGAIN;
 #ifdef CONFIG_MN10300_WD_TIMER
-		watchdog_alert_counter = 0;
-#endif /* CONFIG_MN10300_WD_TIMER */
+	for (cpu = 0; cpu < NR_CPUS; cpu++)
+		watchdog_alert_counter[cpu] = 0;
+#endif
 		goto try_again;
 	}
 

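Note: both gdbstub I/O drivers now parameterise on CONFIG_GDBSTUB_IRQ_LEVEL through NUM2GxICR_LEVEL() and NUM2EPSW_IM(). Their definitions are not part of this excerpt; the sketch below shows what they are assumed to expand to, with the field positions (GxICR level bits, EPSW.IM bits) being assumptions about the MN10300 register layout rather than quotes from the headers.

/* Sketch only: illustrative reimplementations with SK_ names. */
#define SK_GxICR_LEVEL_SHIFT	12
#define SK_EPSW_IM_SHIFT	8

#define SK_NUM2GxICR_LEVEL(num)	((num) << SK_GxICR_LEVEL_SHIFT)
#define SK_NUM2EPSW_IM(num)	((num) << SK_EPSW_IM_SHIFT)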
+ 13 - 11
arch/mn10300/kernel/gdb-io-ttysm.c

@@ -58,9 +58,12 @@ void __init gdbstub_io_init(void)
 	gdbstub_io_set_baud(115200);
 
 	/* we want to get serial receive interrupts */
-	set_intr_level(gdbstub_port->rx_irq, GxICR_LEVEL_0);
-	set_intr_level(gdbstub_port->tx_irq, GxICR_LEVEL_0);
-	set_intr_stub(EXCEP_IRQ_LEVEL0, gdbstub_io_rx_handler);
+	set_intr_level(gdbstub_port->rx_irq,
+		NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL));
+	set_intr_level(gdbstub_port->tx_irq,
+		NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL));
+	set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL),
+		gdbstub_io_rx_handler);
 
 	*gdbstub_port->rx_icr |= GxICR_ENABLE;
 	tmp = *gdbstub_port->rx_icr;
@@ -84,12 +87,7 @@ void __init gdbstub_io_init(void)
 	tmp = *gdbstub_port->_control;
 
 	/* permit level 0 IRQs only */
-	asm volatile(
-		"	and %0,epsw	\n"
-		"	or %1,epsw	\n"
-		:
-		: "i"(~EPSW_IM), "i"(EPSW_IE|EPSW_IM_1)
-		);
+	local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
 }
 
 /*
@@ -184,6 +182,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
 {
 	unsigned ix;
 	u8 ch, st;
+#if defined(CONFIG_MN10300_WD_TIMER)
+	int cpu;
+#endif
 
 	*_ch = 0xff;
 
@@ -201,8 +202,9 @@ try_again:
 		if (nonblock)
 			return -EAGAIN;
 #ifdef CONFIG_MN10300_WD_TIMER
-		watchdog_alert_counter = 0;
-#endif /* CONFIG_MN10300_WD_TIMER */
+	for (cpu = 0; cpu < NR_CPUS; cpu++)
+		watchdog_alert_counter[cpu] = 0;
+#endif
 		goto try_again;
 	}
 

+ 6 - 11
arch/mn10300/kernel/gdb-stub.c

@@ -440,15 +440,11 @@ static const unsigned char gdbstub_insn_sizes[256] =
 
 static int __gdbstub_mark_bp(u8 *addr, int ix)
 {
-	if (addr < (u8 *) 0x70000000UL)
-		return 0;
-	/* 70000000-7fffffff: vmalloc area */
-	if (addr < (u8 *) 0x80000000UL)
+	/* vmalloc area */
+	if (((u8 *) VMALLOC_START <= addr) && (addr < (u8 *) VMALLOC_END))
 		goto okay;
-	if (addr < (u8 *) 0x8c000000UL)
-		return 0;
-	/* 8c000000-93ffffff: SRAM, SDRAM */
-	if (addr < (u8 *) 0x94000000UL)
+	/* SRAM, SDRAM */
+	if (((u8 *) 0x80000000UL <= addr) && (addr < (u8 *) 0xa0000000UL))
 		goto okay;
 	return 0;
 
@@ -1197,9 +1193,8 @@ static int gdbstub(struct pt_regs *regs, enum exception_code excep)
 	mn10300_set_gdbleds(1);
 
 	asm volatile("mov mdr,%0" : "=d"(mdr));
-	asm volatile("mov epsw,%0" : "=d"(epsw));
-	asm volatile("mov %0,epsw"
-		     :: "d"((epsw & ~EPSW_IM) | EPSW_IE | EPSW_IM_1));
+	local_save_flags(epsw);
+	local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
 
 	gdbstub_store_fpu();
 

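Note: the __gdbstub_mark_bp() rewrite above replaces chained comparisons against magic addresses with symbolic half-open range tests (VMALLOC_START/VMALLOC_END, plus one literal SRAM/SDRAM window). The pattern, isolated as a hedged standalone helper:

/* Sketch: true when addr lies inside [start, end). */
static inline int sketch_in_window(unsigned long addr,
				   unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}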
+ 198 - 4
arch/mn10300/kernel/head.S

@@ -19,6 +19,12 @@
 #include <asm/frame.inc>
 #include <asm/frame.inc>
 #include <asm/param.h>
 #include <unit/serial.h>
+#include <asm/smp.h>
+#include <asm/intctl-regs.h>
+#include <asm/cpu-regs.h>
+#include <proc/smp-regs.h>
+#endif /* CONFIG_SMP */
 
 
 	__HEAD
 
 	.globl	_start
 	.globl	_start
 	.type	_start,@function
 _start:
+	#
+	# If this is a secondary CPU (AP), then deal with that elsewhere
+	#
+	mov	(CPUID),d3
+	and	CPUID_MASK,d3
+	bne	startup_secondary
+
+	#
+	# We're dealing with the primary CPU (BP) here, then.
+	# Keep BP's D0,D1,D2 register for boot check.
+	#
+
+	# Set up the Boot IPI for each secondary CPU
+	mov	0x1,a0
+loop_set_secondary_icr:
+	mov	a0,a1
+	asl	CROSS_ICR_CPU_SHIFT,a1
+	add	CROSS_GxICR(SMP_BOOT_IRQ,0),a1
+	movhu	(a1),d3
+	or	GxICR_ENABLE|GxICR_LEVEL_0,d3
+	movhu	d3,(a1)
+	movhu	(a1),d3				# flush
+	inc	a0
+	cmp	NR_CPUS,a0
+	bne	loop_set_secondary_icr
+#endif /* CONFIG_SMP */
+
 	# save commandline pointer
 	# save commandline pointer
 	mov	d0,a3
 
 	# preload the PGD pointer register
 	mov	swapper_pg_dir,d0
 	mov	d0,(PTBR)
+	movbu	d0,(PIDR)
 
 
 	# turn on the TLBs
 	mov	MMUCTR_IIV|MMUCTR_DIV,d0
 	mov	d0,(MMUCTR)
+	mov	MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE|MMUCTR_WTE,d0
+#else
 	mov	MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE,d0
 	mov	MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE,d0
 	mov	d0,(MMUCTR)
 	mov	d0,(MMUCTR)
 
 	# turn on AM33v2 exception handling mode and set the trap table base
 	mov	d0,(TBR)
 	mov	d0,(TBR)
 
 	# invalidate and enable both of the caches
+	mov	ECHCTR,a0
+	clr	d0
+	mov	d0,(a0)
+#endif
 	mov	CHCTR,a0
 	mov	CHCTR,a0
 	clr	d0
 	movhu	d0,(a0)					# turn off first
 	btst	CHCTR_ICBUSY|CHCTR_DCBUSY,d0		# wait till not busy
 	btst	CHCTR_ICBUSY|CHCTR_DCBUSY,d0		# wait till not busy
 	lne
 
+#ifdef CONFIG_MN10300_CACHE_ENABLED
 #ifdef CONFIG_MN10300_CACHE_WBACK
 #ifdef CONFIG_MN10300_CACHE_WBACK
 #ifndef CONFIG_MN10300_CACHE_WBACK_NOWRALLOC
 	mov	CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0
 #else
 	mov	CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK|CHCTR_DCALMD,d0
+#endif /* NOWRALLOC */
 #else
 #else
 	mov	CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0
 #endif /* WBACK */
 	movhu	d0,(a0)					# enable
+#endif /* ENABLED */
 
 
 	# turn on RTS on the debug serial port if applicable
 #ifdef CONFIG_MN10300_UNIT_ASB2305
 	call	processor_init[],0
 	call	processor_init[],0
 	call	unit_init[],0
 
+	# mark the primary CPU in cpu_boot_map
+	mov	cpu_boot_map,a0
+	mov	0x1,d0
+	mov	d0,(a0)
+
+	# signal each secondary CPU to begin booting
+	mov	0x1,d2				# CPU ID
+
+loop_request_boot_secondary:
+	mov	d2,a0
+	# send SMP_BOOT_IPI to secondary CPU
+	asl	CROSS_ICR_CPU_SHIFT,a0
+	add	CROSS_GxICR(SMP_BOOT_IRQ,0),a0
+	movhu	(a0),d0
+	or	GxICR_REQUEST|GxICR_DETECT,d0
+	movhu	d0,(a0)
+	movhu	(a0),d0				# flush
+
+	# wait up to 100ms for AP's IPI to be received
+	clr	d3
+wait_on_secondary_boot:
+	mov	DELAY_TIME_BOOT_IPI,d0
+	call	__delay[],0
+	inc	d3
+	mov	cpu_boot_map,a0
+	mov	(a0),d0
+	lsr	d2,d0
+	btst	0x1,d0
+	bne	1f
+	cmp	TIME_OUT_COUNT_BOOT_IPI,d3
+	bne	wait_on_secondary_boot
+1:
+	inc	d2
+	cmp	NR_CPUS,d2
+	bne	loop_request_boot_secondary
+#endif /* CONFIG_SMP */
+
 #ifdef CONFIG_GDBSTUB
 #ifdef CONFIG_GDBSTUB
 	call	gdbstub_init[],0
 
 #endif
 #endif
 
 	jmp	start_kernel
+	.size	_start,.-_start
+
+###############################################################################
+#
+# Secondary CPU boot point
+#
+###############################################################################
+#ifdef CONFIG_SMP
+startup_secondary:
+	# preload the PGD pointer register
+	mov	swapper_pg_dir,d0
+	mov	d0,(PTBR)
+	clr	d0
+	movbu	d0,(PIDR)
+
+	# turn on the TLBs
+	mov	MMUCTR_IIV|MMUCTR_DIV,d0
+	mov	d0,(MMUCTR)
+#ifdef CONFIG_AM34_2
+	mov	MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE|MMUCTR_WTE,d0
+#else
+	mov	MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE,d0
+#endif
+	mov	d0,(MMUCTR)
+
+	# turn on AM33v2 exception handling mode and set the trap table base
+	movhu	(CPUP),d0
+	or	CPUP_EXM_AM33V2,d0
+	movhu	d0,(CPUP)
+
+	# set the interrupt vector table
+	mov	CONFIG_INTERRUPT_VECTOR_BASE,d0
+	mov	d0,(TBR)
+
+	# invalidate and enable both of the caches
+	mov	ECHCTR,a0
+	clr	d0
+	mov	d0,(a0)
+	mov	CHCTR,a0
+	clr	d0
+	movhu	d0,(a0)					# turn off first
+	mov	CHCTR_ICINV|CHCTR_DCINV,d0
+	movhu	d0,(a0)
+	setlb
+	mov	(a0),d0
+	btst	CHCTR_ICBUSY|CHCTR_DCBUSY,d0		# wait till not busy (use CPU loop buffer)
+	lne
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+#ifdef  CONFIG_MN10300_CACHE_WBACK
+#ifndef CONFIG_MN10300_CACHE_WBACK_NOWRALLOC
+	mov	CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0
+#else
+	mov	CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK|CHCTR_DCALMD,d0
+#endif  /* !NOWRALLOC */
+#else
+	mov	CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0
+#endif  /* WBACK */
+	movhu	d0,(a0)					# enable
+#endif  /* ENABLED */
+
+	# Clear the boot IPI interrupt for this CPU
+	movhu	(GxICR(SMP_BOOT_IRQ)),d0
+	and	~GxICR_REQUEST,d0
+	movhu	d0,(GxICR(SMP_BOOT_IRQ))
+	movhu	(GxICR(SMP_BOOT_IRQ)),d0		# flush
+
+	/* get stack */
+	mov	CONFIG_INTERRUPT_VECTOR_BASE + CONFIG_BOOT_STACK_OFFSET,a0
+	mov	(CPUID),d0
+	and	CPUID_MASK,d0
+	mulu	CONFIG_BOOT_STACK_SIZE,d0
+	sub	d0,a0
+	mov	a0,sp
+
+	# init interrupt for AP
+	call	smp_prepare_cpu_init[],0
+
+	# mark this secondary CPU in cpu_boot_map
+	mov	(CPUID),d0
+	mov	0x1,d1
+	asl	d0,d1
+	mov	cpu_boot_map,a0
+	bset	d1,(a0)
+
+	or	EPSW_IE|EPSW_IM_1,epsw  # permit level 0 interrupts
+	nop
+	nop
+#ifdef  CONFIG_MN10300_CACHE_WBACK
+	# flush the local cache if it's in writeback mode
+	call	mn10300_local_dcache_flush_inv[],0
+	setlb
+	mov	(CHCTR),d0
+	btst	CHCTR_DCBUSY,d0		# wait till not busy (use CPU loop buffer)
+	lne
+#endif
+
+	# now sleep waiting for further instructions
+secondary_sleep:
+	mov	CPUM_SLEEP,d0
+	movhu	d0,(CPUM)
+	nop
+	nop
+	bra	secondary_sleep
+	.size	startup_secondary,.-startup_secondary
+#endif /* CONFIG_SMP */
+
+###############################################################################
+#
+#
+#
+###############################################################################
 ENTRY(__head_end)
 
 /*

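Note: the SMP boot protocol in head.S works in three steps: the boot CPU primes each AP's boot-IPI ICR, fires SMP_BOOT_IRQ, and then polls cpu_boot_map with a bounded delay while the AP runs startup_secondary and sets its bit. A hedged C rendering of the boot-CPU side; send_boot_ipi() is a hypothetical stand-in for the GxICR_REQUEST|GxICR_DETECT write, while cpu_boot_map, __delay() and the two constants appear in the assembly above.

/* Sketch of the wait loop head.S implements in assembly. */
static int sketch_wait_for_secondary(int cpu)
{
	int tries;

	send_boot_ipi(cpu);	/* hypothetical: kick SMP_BOOT_IRQ on cpu */
	for (tries = 0; tries < TIME_OUT_COUNT_BOOT_IPI; tries++) {
		if (cpu_boot_map & (1UL << cpu))
			return 0;	/* AP has checked in */
		__delay(DELAY_TIME_BOOT_IPI);
	}
	return -1;			/* AP never arrived; boot continues */
}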
+ 25 - 0
arch/mn10300/kernel/internal.h

@@ -9,6 +9,9 @@
  * 2 of the Licence, or (at your option) any later version.
  * 2 of the Licence, or (at your option) any later version.
  */
 
+struct clock_event_device;
+
 /*
 /*
  * kthread.S
  */
  * entry.S
  * entry.S
  */
 extern void ret_from_fork(struct task_struct *) __attribute__((noreturn));
+/*
+ * smp-low.S
+ */
+#ifdef CONFIG_SMP
+extern void mn10300_low_ipi_handler(void);
+#endif
+
+/*
+ * time.c
+ */
+extern irqreturn_t local_timer_interrupt(void);
+
+/*
+ * time.c
+ */
+#ifdef CONFIG_CEVT_MN10300
+extern void clockevent_set_clock(struct clock_event_device *, unsigned int);
+#endif
+#ifdef CONFIG_CSRC_MN10300
+extern void clocksource_set_clock(struct clocksource *, unsigned int);
+#endif

+ 246 - 30
arch/mn10300/kernel/irq.c

@@ -12,11 +12,26 @@
 #include <linux/interrupt.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
 #include <asm/setup.h>
 #include <asm/setup.h>
 
 
+unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = {
+	[0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7
+};
 EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);
 EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);
 
+static char irq_affinity_online[NR_IRQS] = {
+	[0 ... NR_IRQS - 1] = 0
+};
+
+#define NR_IRQ_WORDS	((NR_IRQS + 31) / 32)
+static unsigned long irq_affinity_request[NR_IRQ_WORDS] = {
+	[0 ... NR_IRQ_WORDS - 1] = 0
+};
+#endif  /* CONFIG_SMP */
+
 atomic_t irq_err_count;
 atomic_t irq_err_count;
 
 /*
  */
  */
 static void mn10300_cpupic_ack(unsigned int irq)
 {
 	u16 tmp;
 	u16 tmp;
-	*(volatile u8 *) &GxICR(irq) = GxICR_DETECT;
+
+	flags = arch_local_cli_save();
+	GxICR_u8(irq) = GxICR_DETECT;
 	tmp = GxICR(irq);
+	arch_local_irq_restore(flags);
 }
 
-static void mn10300_cpupic_mask(unsigned int irq)
+static void __mask_and_set_icr(unsigned int irq,
+			       unsigned int mask, unsigned int set)
 {
-	u16 tmp = GxICR(irq);
-	GxICR(irq) = (tmp & GxICR_LEVEL);
+	unsigned long flags;
+	u16 tmp;
+
+	flags = arch_local_cli_save();
+	tmp = GxICR(irq);
+	GxICR(irq) = (tmp & mask) | set;
 	tmp = GxICR(irq);
+	arch_local_irq_restore(flags);
+}
+
+static void mn10300_cpupic_mask(unsigned int irq)
+{
+	__mask_and_set_icr(irq, GxICR_LEVEL, 0);
 }
 
 static void mn10300_cpupic_mask_ack(unsigned int irq)
 {
-	u16 tmp = GxICR(irq);
-	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
-	tmp = GxICR(irq);
+#ifdef CONFIG_SMP
+	unsigned long flags;
+	u16 tmp;
+
+	flags = arch_local_cli_save();
+
+	if (!test_and_clear_bit(irq, irq_affinity_request)) {
+		tmp = GxICR(irq);
+		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
+		tmp = GxICR(irq);
+	} else {
+		u16 tmp2;
+		tmp = GxICR(irq);
+		GxICR(irq) = (tmp & GxICR_LEVEL);
+		tmp2 = GxICR(irq);
+
+		irq_affinity_online[irq] =
+			any_online_cpu(*irq_desc[irq].affinity);
+		CROSS_GxICR(irq, irq_affinity_online[irq]) =
+			(tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
+		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
+	}
+
+	arch_local_irq_restore(flags);
+#else  /* CONFIG_SMP */
+	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT);
+#endif /* CONFIG_SMP */
 }
 
 static void mn10300_cpupic_unmask(unsigned int irq)
 {
-	u16 tmp = GxICR(irq);
-	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
-	tmp = GxICR(irq);
+	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE);
 }
 
 static void mn10300_cpupic_unmask_clear(unsigned int irq)
@@ -56,11 +108,89 @@ static void mn10300_cpupic_unmask_clear(unsigned int irq)
 	 * device has ceased to assert its interrupt line and the interrupt
 	 * channel has been disabled in the PIC, so for level-triggered
 	 * interrupts we need to clear the request bit when we re-enable */
-	u16 tmp = GxICR(irq);
-	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
-	tmp = GxICR(irq);
+#ifdef CONFIG_SMP
+	unsigned long flags;
+	u16 tmp;
+
+	flags = arch_local_cli_save();
+
+	if (!test_and_clear_bit(irq, irq_affinity_request)) {
+		tmp = GxICR(irq);
+		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
+		tmp = GxICR(irq);
+	} else {
+		tmp = GxICR(irq);
+
+		irq_affinity_online[irq] = any_online_cpu(*irq_desc[irq].affinity);
+		CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
+		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
+	}
+
+	arch_local_irq_restore(flags);
+#else  /* CONFIG_SMP */
+	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT);
+#endif /* CONFIG_SMP */
 }
 }
 
+static int
+mn10300_cpupic_setaffinity(unsigned int irq, const struct cpumask *mask)
+{
+	unsigned long flags;
+	int err;
+
+	flags = arch_local_cli_save();
+
+	/* check irq no */
+	switch (irq) {
+	case TMJCIRQ:
+	case RESCHEDULE_IPI:
+	case CALL_FUNC_SINGLE_IPI:
+	case LOCAL_TIMER_IPI:
+	case FLUSH_CACHE_IPI:
+	case CALL_FUNCTION_NMI_IPI:
+	case GDB_NMI_IPI:
+#ifdef CONFIG_MN10300_TTYSM0
+	case SC0RXIRQ:
+	case SC0TXIRQ:
+#ifdef CONFIG_MN10300_TTYSM0_TIMER8
+	case TM8IRQ:
+#elif CONFIG_MN10300_TTYSM0_TIMER2
+	case TM2IRQ:
+#endif /* CONFIG_MN10300_TTYSM0_TIMER8 */
+#endif /* CONFIG_MN10300_TTYSM0 */
+
+#ifdef CONFIG_MN10300_TTYSM1
+	case SC1RXIRQ:
+	case SC1TXIRQ:
+#ifdef CONFIG_MN10300_TTYSM1_TIMER12
+	case TM12IRQ:
+#elif CONFIG_MN10300_TTYSM1_TIMER9
+	case TM9IRQ:
+#elif CONFIG_MN10300_TTYSM1_TIMER3
+	case TM3IRQ:
+#endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
+#endif /* CONFIG_MN10300_TTYSM1 */
+
+#ifdef CONFIG_MN10300_TTYSM2
+	case SC2RXIRQ:
+	case SC2TXIRQ:
+	case TM10IRQ:
+#endif /* CONFIG_MN10300_TTYSM2 */
+		err = -1;
+		break;
+
+	default:
+		set_bit(irq, irq_affinity_request);
+		err = 0;
+		break;
+	}
+
+	arch_local_irq_restore(flags);
+	return err;
+}
+#endif /* CONFIG_SMP */
+
 /*
 /*
  * MN10300 PIC level-triggered IRQ handling.
  *
@@ -79,6 +209,9 @@ static struct irq_chip mn10300_cpu_pic_level = {
 	.mask		= mn10300_cpupic_mask,
 	.mask_ack	= mn10300_cpupic_mask,
 	.unmask		= mn10300_cpupic_unmask_clear,
+#ifdef CONFIG_SMP
+	.set_affinity	= mn10300_cpupic_setaffinity,
+#endif
 };
 
 /*
@@ -94,6 +227,9 @@ static struct irq_chip mn10300_cpu_pic_edge = {
 	.mask		= mn10300_cpupic_mask,
 	.mask_ack	= mn10300_cpupic_mask_ack,
 	.unmask		= mn10300_cpupic_unmask,
+#ifdef CONFIG_SMP
+	.set_affinity	= mn10300_cpupic_setaffinity,
+#endif
 };
 
 /*
@@ -111,14 +247,34 @@ void ack_bad_irq(int irq)
  */
 void set_intr_level(int irq, u16 level)
 {
-	u16 tmp;
+	BUG_ON(in_interrupt());
 
-	if (in_interrupt())
-		BUG();
+	__mask_and_set_icr(irq, GxICR_ENABLE, level);
+}
 
-	tmp = GxICR(irq);
-	GxICR(irq) = (tmp & GxICR_ENABLE) | level;
-	tmp = GxICR(irq);
+void mn10300_intc_set_level(unsigned int irq, unsigned int level)
+{
+	set_intr_level(irq, NUM2GxICR_LEVEL(level) & GxICR_LEVEL);
+}
+
+void mn10300_intc_clear(unsigned int irq)
+{
+	__mask_and_set_icr(irq, GxICR_LEVEL | GxICR_ENABLE, GxICR_DETECT);
+}
+
+void mn10300_intc_set(unsigned int irq)
+{
+	__mask_and_set_icr(irq, 0, GxICR_REQUEST | GxICR_DETECT);
+}
+
+void mn10300_intc_enable(unsigned int irq)
+{
+	mn10300_cpupic_unmask(irq);
+}
+
+void mn10300_intc_disable(unsigned int irq)
+{
+	mn10300_cpupic_mask(irq);
 }
 
 /*
@@ -126,7 +282,7 @@ void set_intr_level(int irq, u16 level)
  * than before
  * - see Documentation/mn10300/features.txt
  */
-void set_intr_postackable(int irq)
+void mn10300_set_lateack_irq_type(int irq)
 {
 	set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level,
 				 handle_level_irq);
@@ -147,6 +303,7 @@ void __init init_IRQ(void)
 			 * interrupts */
 			set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge,
 						 handle_level_irq);
+
 	unit_init_IRQ();
 }
 
@@ -156,20 +313,22 @@ void __init init_IRQ(void)
 asmlinkage void do_IRQ(void)
 {
 	unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
+	unsigned int cpu_id = smp_processor_id();
 	int irq;
 
 	sp = current_stack_pointer();
-	if (sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN)
-		BUG();
+	BUG_ON(sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN);
 
 	/* make sure local_irq_enable() doesn't muck up the interrupt priority
 	 * setting in EPSW */
-	old_irq_enabled_epsw = __mn10300_irq_enabled_epsw;
+	old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id];
 	local_save_flags(epsw);
-	__mn10300_irq_enabled_epsw = EPSW_IE | (EPSW_IM & epsw);
+	__mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw);
 	irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;
 
-	__IRQ_STAT(smp_processor_id(), __irq_count)++;
+#ifdef CONFIG_MN10300_WD_TIMER
+	__IRQ_STAT(cpu_id, __irq_count)++;
+#endif
 
 	irq_enter();
 
@@ -189,7 +348,7 @@ asmlinkage void do_IRQ(void)
 		local_irq_restore(epsw);
 	}
 
-	__mn10300_irq_enabled_epsw = old_irq_enabled_epsw;
+	__mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw;
 
 	irq_exit();
 }
@@ -222,9 +381,16 @@ int show_interrupts(struct seq_file *p, void *v)
 			seq_printf(p, "%3d: ", i);
 			for_each_present_cpu(cpu)
 				seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
-			seq_printf(p, " %14s.%u", irq_desc[i].chip->name,
-				   (GxICR(i) & GxICR_LEVEL) >>
-				   GxICR_LEVEL_SHIFT);
+
+			if (i < NR_CPU_IRQS)
+				seq_printf(p, " %14s.%u",
+					   irq_desc[i].chip->name,
+					   (GxICR(i) & GxICR_LEVEL) >>
+					   GxICR_LEVEL_SHIFT);
+			else
+				seq_printf(p, " %14s",
+					   irq_desc[i].chip->name);
+
 			seq_printf(p, "  %s", action->name);
 
 			for (action = action->next;
@@ -240,11 +406,13 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		/* polish off with NMI and error counters */
 	case NR_IRQS:
+#ifdef CONFIG_MN10300_WD_TIMER
 		seq_printf(p, "NMI: ");
 		for (j = 0; j < NR_CPUS; j++)
 			if (cpu_online(j))
 				seq_printf(p, "%10u ", nmi_count(j));
 		seq_putc(p, '\n');
+#endif
 
 		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
 		break;
@@ -252,3 +420,51 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	return 0;
 }
+
+#ifdef CONFIG_HOTPLUG_CPU
+void migrate_irqs(void)
+{
+	irq_desc_t *desc;
+	int irq;
+	unsigned int self, new;
+	unsigned long flags;
+
+	self = smp_processor_id();
+	for (irq = 0; irq < NR_IRQS; irq++) {
+		desc = irq_desc + irq;
+
+		if (desc->status == IRQ_PER_CPU)
+			continue;
+
+		if (cpu_isset(self, irq_desc[irq].affinity) &&
+		    !cpus_intersects(irq_affinity[irq], cpu_online_map)) {
+			int cpu_id;
+			cpu_id = first_cpu(cpu_online_map);
+			cpu_set(cpu_id, irq_desc[irq].affinity);
+		}
+		/* We need to operate irq_affinity_online atomically. */
+		flags = arch_local_cli_save();
+		if (irq_affinity_online[irq] == self) {
+			u16 x, tmp;
+
+			x = GxICR(irq);
+			GxICR(irq) = x & GxICR_LEVEL;
+			tmp = GxICR(irq);
+
+			new = any_online_cpu(irq_desc[irq].affinity);
+			irq_affinity_online[irq] = new;
+
+			CROSS_GxICR(irq, new) =
+				(x & GxICR_LEVEL) | GxICR_DETECT;
+			tmp = CROSS_GxICR(irq, new);
+
+			x &= GxICR_LEVEL | GxICR_ENABLE;
+			if (GxICR(irq) & GxICR_REQUEST) {
+				x |= GxICR_REQUEST | GxICR_DETECT;
+				CROSS_GxICR(irq, new) = x;
+				tmp = CROSS_GxICR(irq, new);
+			}
+		}
+		arch_local_irq_restore(flags);
+	}
+}
+#endif /* CONFIG_HOTPLUG_CPU */

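Note: the set_affinity implementation above never rewrites an ICR directly: it only records a request bit in irq_affinity_request, and the actual retarget happens on the next mask_ack/unmask, when the handler is known not to be running on the old CPU. The shared step, condensed into one hedged helper mirroring the code in mask_ack and unmask_clear:

/* Sketch of the deferred-retarget step; icr is the value just read
 * from the old CPU's GxICR for this irq. */
static void sketch_retarget_if_requested(unsigned int irq, u16 icr)
{
	unsigned int cpu;

	if (!test_and_clear_bit(irq, irq_affinity_request))
		return;
	cpu = any_online_cpu(*irq_desc[irq].affinity);
	irq_affinity_online[irq] = cpu;
	CROSS_GxICR(irq, cpu) =
		(icr & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
}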
+ 4 - 0
arch/mn10300/kernel/kprobes.c

@@ -377,8 +377,10 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
 
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
+#ifndef CONFIG_MN10300_CACHE_SNOOP
 	mn10300_dcache_flush();
 	mn10300_icache_inv();
+#endif
 }
 
 void arch_remove_kprobe(struct kprobe *p)
@@ -390,8 +392,10 @@ void __kprobes disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
 {
 	*p->addr = p->opcode;
 	regs->pc = (unsigned long) p->addr;
+#ifndef CONFIG_MN10300_CACHE_SNOOP
 	mn10300_dcache_flush();
 	mn10300_icache_inv();
+#endif
 }
 
 static inline

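Note: the kprobes change compiles the cache maintenance out when CONFIG_MN10300_CACHE_SNOOP is set, on the assumption that snooping hardware keeps the instruction cache coherent with data-side stores. The guard, isolated as a hedged helper:

/* Sketch: make a just-patched opcode visible to instruction fetch;
 * a no-op when the caches snoop each other. */
static void sketch_sync_icache_after_patch(void)
{
#ifndef CONFIG_MN10300_CACHE_SNOOP
	mn10300_dcache_flush();		/* push the new opcode to memory */
	mn10300_icache_inv();		/* discard stale instruction lines */
#endif
}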
+ 3 - 3
arch/mn10300/kernel/mn10300-serial-low.S

@@ -39,7 +39,7 @@
 ###############################################################################
 	.balign	L1_CACHE_BYTES
 ENTRY(mn10300_serial_vdma_interrupt)
-	or	EPSW_IE,psw			# permit overriding by
+#	or	EPSW_IE,psw			# permit overriding by
 						# debugging interrupts
 	movm	[d2,d3,a2,a3,exreg0],(sp)
 
@@ -164,7 +164,7 @@ mnsc_vdma_tx_noint:
 	rti
 
 mnsc_vdma_tx_empty:
-	mov	+(GxICR_LEVEL_1|GxICR_DETECT),d2
+	mov	+(NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)|GxICR_DETECT),d2
 	movhu	d2,(e3)			# disable the interrupt
 	movhu	(e3),d2			# flush
 
@@ -175,7 +175,7 @@ mnsc_vdma_tx_break:
 	movhu	(SCxCTR,e2),d2		# turn on break mode
 	or	SC01CTR_BKE,d2
 	movhu	d2,(SCxCTR,e2)
-	mov	+(GxICR_LEVEL_1|GxICR_DETECT),d2
+	mov	+(NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)|GxICR_DETECT),d2
 	movhu	d2,(e3)			# disable transmit interrupts on this
 					# channel
 	movhu	(e3),d2			# flush

+ 181 - 29
arch/mn10300/kernel/mn10300-serial.c

@@ -44,6 +44,11 @@ static const char serial_revdate[] = "2007-11-06";
 #include <unit/timex.h>
 #include <unit/timex.h>
 #include "mn10300-serial.h"
 
+#undef  GxICR
+#define GxICR(X) CROSS_GxICR(X, 0)
+#endif /* CONFIG_SMP */
+
 #define kenter(FMT, ...) \
 #define kenter(FMT, ...) \
 	printk(KERN_DEBUG "-->%s(" FMT ")\n", __func__, ##__VA_ARGS__)
 #define _enter(FMT, ...) \
 #define _proto(FMT, ...) \
 #define _proto(FMT, ...) \
 	no_printk(KERN_DEBUG "### MNSERIAL " FMT " ###\n", ##__VA_ARGS__)
 
+/* c_cflag bit meaning */
+#define CODMSB	004000000000	/* change Transfer bit-order */
+#endif
+
 #define NR_UARTS 3
 #define NR_UARTS 3
 
 #ifdef CONFIG_MN10300_TTYSM_CONSOLE
 	.name		= "ttySM0",
 	.name		= "ttySM0",
 	._iobase	= &SC0CTR,
 	._control	= &SC0CTR,
+	._status	= (volatile u8 *)&SC0STR,
 	._intr		= &SC0ICR,
 	._intr		= &SC0ICR,
 	._rxb		= &SC0RXB,
 	._txb		= &SC0TXB,
 	.rx_name	= "ttySM0:Rx",
 	.tx_name	= "ttySM0:Tx",
+#if defined(CONFIG_MN10300_TTYSM0_TIMER8)
 	.tm_name	= "ttySM0:Timer8",
 	.tm_name	= "ttySM0:Timer8",
 	._tmxmd		= &TM8MD,
 	._tmxbr		= &TM8BR,
 	._tmicr		= &TM8ICR,
 	.tm_irq		= TM8IRQ,
 	.div_timer	= MNSCx_DIV_TIMER_16BIT,
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER0)
+	.tm_name	= "ttySM0:Timer0",
+	._tmxmd		= &TM0MD,
+	._tmxbr		= (volatile u16 *)&TM0BR,
+	._tmicr		= &TM0ICR,
+	.tm_irq		= TM0IRQ,
+	.div_timer	= MNSCx_DIV_TIMER_8BIT,
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
 	.tm_name	= "ttySM0:Timer2",
 	.tm_name	= "ttySM0:Timer2",
 	._tmxmd		= &TM2MD,
+	._tmxbr		= (volatile u16 *)&TM2BR,
 	._tmicr		= &TM2ICR,
 	._tmicr		= &TM2ICR,
 	.tm_irq		= TM2IRQ,
 	.div_timer	= MNSCx_DIV_TIMER_8BIT,
+#error "Unknown config for ttySM0"
 #endif
 #endif
 	.rx_irq		= SC0RXIRQ,
 	.tx_irq		= SC0TXIRQ,
 	.name		= "ttySM1",
 	.name		= "ttySM1",
 	._iobase	= &SC1CTR,
 	._control	= &SC1CTR,
+	._status	= (volatile u8 *)&SC1STR,
 	._intr		= &SC1ICR,
 	._intr		= &SC1ICR,
 	._rxb		= &SC1RXB,
 	._txb		= &SC1TXB,
 	.rx_name	= "ttySM1:Rx",
 	.tx_name	= "ttySM1:Tx",
+#if defined(CONFIG_MN10300_TTYSM1_TIMER9)
 	.tm_name	= "ttySM1:Timer9",
 	._tmxmd		= &TM9MD,
 	._tmxbr		= &TM9BR,
 	._tmicr		= &TM9ICR,
 	.tm_irq		= TM9IRQ,
 	.div_timer	= MNSCx_DIV_TIMER_16BIT,
-#else /* CONFIG_MN10300_TTYSM1_TIMER3 */
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
 	.tm_name	= "ttySM1:Timer3",
 	._tmxmd		= &TM3MD,
-	._tmxbr		= (volatile u16 *) &TM3BR,
+	._tmxbr		= (volatile u16 *)&TM3BR,
 	._tmicr		= &TM3ICR,
 	.tm_irq		= TM3IRQ,
 	.div_timer	= MNSCx_DIV_TIMER_8BIT,
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER12)
+	.tm_name	= "ttySM1/Timer12",
+	._tmxmd		= &TM12MD,
+	._tmxbr		= &TM12BR,
+	._tmicr		= &TM12ICR,
+	.tm_irq		= TM12IRQ,
+	.div_timer	= MNSCx_DIV_TIMER_16BIT,
+#else
+#error "Unknown config for ttySM1"
 #endif
 	.rx_irq		= SC1RXIRQ,
 	.tx_irq		= SC1TXIRQ,
@@ -260,20 +288,45 @@ struct mn10300_serial_port mn10300_serial_port_sif2 = {
 	.uart.lock	=
 	__SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock),
 	.name		= "ttySM2",
-	.rx_name	= "ttySM2:Rx",
-	.tx_name	= "ttySM2:Tx",
-	.tm_name	= "ttySM2:Timer10",
 	._iobase	= &SC2CTR,
 	._control	= &SC2CTR,
-	._status	= &SC2STR,
+	._status	= (volatile u8 *)&SC2STR,
 	._intr		= &SC2ICR,
 	._rxb		= &SC2RXB,
 	._txb		= &SC2TXB,
+	.rx_name	= "ttySM2:Rx",
+	.tx_name	= "ttySM2:Tx",
+#if defined(CONFIG_MN10300_TTYSM2_TIMER10)
+	.tm_name	= "ttySM2/Timer10",
 	._tmxmd		= &TM10MD,
 	._tmxbr		= &TM10BR,
 	._tmicr		= &TM10ICR,
 	.tm_irq		= TM10IRQ,
 	.div_timer	= MNSCx_DIV_TIMER_16BIT,
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER9)
+	.tm_name	= "ttySM2/Timer9",
+	._tmxmd		= &TM9MD,
+	._tmxbr		= &TM9BR,
+	._tmicr		= &TM9ICR,
+	.tm_irq		= TM9IRQ,
+	.div_timer	= MNSCx_DIV_TIMER_16BIT,
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER1)
+	.tm_name	= "ttySM2/Timer1",
+	._tmxmd		= &TM1MD,
+	._tmxbr		= (volatile u16 *)&TM1BR,
+	._tmicr		= &TM1ICR,
+	.tm_irq		= TM1IRQ,
+	.div_timer	= MNSCx_DIV_TIMER_8BIT,
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER3)
+	.tm_name	= "ttySM2/Timer3",
+	._tmxmd		= &TM3MD,
+	._tmxbr		= (volatile u16 *)&TM3BR,
+	._tmicr		= &TM3ICR,
+	.tm_irq		= TM3IRQ,
+	.div_timer	= MNSCx_DIV_TIMER_8BIT,
+#else
+#error "Unknown config for ttySM2"
+#endif
 	.rx_irq		= SC2RXIRQ,
 	.tx_irq		= SC2TXIRQ,
 	.rx_icr		= &GxICR(SC2RXIRQ),
@@ -322,9 +375,13 @@ struct mn10300_serial_port *mn10300_serial_ports[NR_UARTS + 1] = {
  */
 static void mn10300_serial_mask_ack(unsigned int irq)
 {
+	unsigned long flags;
 	u16 tmp;
+
+	flags = arch_local_cli_save();
 	GxICR(irq) = GxICR_LEVEL_6;
 	tmp = GxICR(irq); /* flush write buffer */
+	arch_local_irq_restore(flags);
 }
 
 static void mn10300_serial_nop(unsigned int irq)
@@ -348,23 +405,36 @@ struct mn10300_serial_int mn10300_serial_int_tbl[NR_IRQS];
 
 static void mn10300_serial_dis_tx_intr(struct mn10300_serial_port *port)
 {
+	unsigned long flags;
 	u16 x;
-	*port->tx_icr = GxICR_LEVEL_1 | GxICR_DETECT;
+
+	flags = arch_local_cli_save();
+	*port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
 	x = *port->tx_icr;
+	arch_local_irq_restore(flags);
 }
 
 static void mn10300_serial_en_tx_intr(struct mn10300_serial_port *port)
 {
+	unsigned long flags;
 	u16 x;
-	*port->tx_icr = GxICR_LEVEL_1 | GxICR_ENABLE;
+
+	flags = arch_local_cli_save();
+	*port->tx_icr =
+		NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL) | GxICR_ENABLE;
 	x = *port->tx_icr;
+	arch_local_irq_restore(flags);
 }
 
 static void mn10300_serial_dis_rx_intr(struct mn10300_serial_port *port)
 {
+	unsigned long flags;
 	u16 x;
-	*port->rx_icr = GxICR_LEVEL_1 | GxICR_DETECT;
+
+	flags = arch_local_cli_save();
+	*port->rx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
 	x = *port->rx_icr;
+	arch_local_irq_restore(flags);
 }
 
 /*
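(The three helpers above share one shape: with SMP in the picture, each ICR read-modify-write is now bracketed by arch_local_cli_save()/arch_local_irq_restore() and followed by a flushing read. Condensed into a sketch — names mirror the code above, the helper itself is illustrative:)

	/* Sketch of the locked ICR update pattern used above. */
	static void icr_update_locked(volatile u16 *icr, u16 val)
	{
		unsigned long flags;
		u16 tmp;

		flags = arch_local_cli_save();	/* keep the VDMA path out */
		*icr = val;
		tmp = *icr;			/* flush the posted write */
		arch_local_irq_restore(flags);
		(void) tmp;
	}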
@@ -650,7 +720,7 @@ static unsigned int mn10300_serial_tx_empty(struct uart_port *_port)
 static void mn10300_serial_set_mctrl(struct uart_port *_port,
 				     unsigned int mctrl)
 {
-	struct mn10300_serial_port *port =
+	struct mn10300_serial_port *port __attribute__ ((unused)) =
 		container_of(_port, struct mn10300_serial_port, uart);
 
 	_enter("%s,%x", port->name, mctrl);
@@ -706,6 +776,7 @@ static void mn10300_serial_start_tx(struct uart_port *_port)
 			UART_XMIT_SIZE));
 
 	/* kick the virtual DMA controller */
+	arch_local_cli();
 	x = *port->tx_icr;
 	x |= GxICR_ENABLE;
 
@@ -716,10 +787,14 @@ static void mn10300_serial_start_tx(struct uart_port *_port)
 
 	_debug("CTR=%04hx ICR=%02hx STR=%04x TMD=%02hx TBR=%04hx ICR=%04hx",
 	       *port->_control, *port->_intr, *port->_status,
-	       *port->_tmxmd, *port->_tmxbr, *port->tx_icr);
+	       *port->_tmxmd,
+	       (port->div_timer == MNSCx_DIV_TIMER_8BIT) ?
+	           *(volatile u8 *)port->_tmxbr : *port->_tmxbr,
+	       *port->tx_icr);
 
 	*port->tx_icr = x;
 	x = *port->tx_icr;
+	arch_local_sti();
 }
 
 /*
@@ -842,8 +917,10 @@ static int mn10300_serial_startup(struct uart_port *_port)
 	pint->port = port;
 	pint->vdma = mn10300_serial_vdma_tx_handler;
 
-	set_intr_level(port->rx_irq, GxICR_LEVEL_1);
-	set_intr_level(port->tx_irq, GxICR_LEVEL_1);
+	set_intr_level(port->rx_irq,
+		NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL));
+	set_intr_level(port->tx_irq,
+		NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL));
 	set_irq_chip(port->tm_irq, &mn10300_serial_pic);
 
 	if (request_irq(port->rx_irq, mn10300_serial_interrupt,
@@ -876,6 +953,7 @@ error:
  */
 static void mn10300_serial_shutdown(struct uart_port *_port)
 {
+	u16 x;
 	struct mn10300_serial_port *port =
 		container_of(_port, struct mn10300_serial_port, uart);
 
@@ -897,8 +975,12 @@ static void mn10300_serial_shutdown(struct uart_port *_port)
 	free_irq(port->rx_irq, port);
 	free_irq(port->tx_irq, port);
 
-	*port->rx_icr = GxICR_LEVEL_1;
-	*port->tx_icr = GxICR_LEVEL_1;
+	arch_local_cli();
+	*port->rx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
+	x = *port->rx_icr;
+	*port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
+	x = *port->tx_icr;
+	arch_local_sti();
 }
 
 /*
@@ -947,11 +1029,66 @@ static void mn10300_serial_change_speed(struct mn10300_serial_port *port,
 	/* Determine divisor based on baud rate */
 	battempt = 0;
 
-	if (div_timer == MNSCx_DIV_TIMER_16BIT)
-		scxctr |= SC0CTR_CK_TM8UFLOW_8; /* ( == SC1CTR_CK_TM9UFLOW_8
-						 *   == SC2CTR_CK_TM10UFLOW) */
-	else if (div_timer == MNSCx_DIV_TIMER_8BIT)
+	switch (port->uart.line) {
+#ifdef CONFIG_MN10300_TTYSM0
+	case 0: /* ttySM0 */
+#if   defined(CONFIG_MN10300_TTYSM0_TIMER8)
+		scxctr |= SC0CTR_CK_TM8UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER0)
+		scxctr |= SC0CTR_CK_TM0UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
 		scxctr |= SC0CTR_CK_TM2UFLOW_8;
+#else
+#error "Unknown config for ttySM0"
+#endif
+		break;
+#endif /* CONFIG_MN10300_TTYSM0 */
+
+#ifdef CONFIG_MN10300_TTYSM1
+	case 1: /* ttySM1 */
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+#if   defined(CONFIG_MN10300_TTYSM1_TIMER9)
+		scxctr |= SC1CTR_CK_TM9UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
+		scxctr |= SC1CTR_CK_TM3UFLOW_8;
+#else
+#error "Unknown config for ttySM1"
+#endif
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+#if defined(CONFIG_MN10300_TTYSM1_TIMER12)
+		scxctr |= SC1CTR_CK_TM12UFLOW_8;
+#else
+#error "Unknown config for ttySM1"
+#endif
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+		break;
+#endif /* CONFIG_MN10300_TTYSM1 */
+
+#ifdef CONFIG_MN10300_TTYSM2
+	case 2: /* ttySM2 */
+#if defined(CONFIG_AM33_2)
+#if   defined(CONFIG_MN10300_TTYSM2_TIMER10)
+		scxctr |= SC2CTR_CK_TM10UFLOW;
+#else
+#error "Unknown config for ttySM2"
+#endif
+#else /* CONFIG_AM33_2 */
+#if   defined(CONFIG_MN10300_TTYSM2_TIMER9)
+		scxctr |= SC2CTR_CK_TM9UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER1)
+		scxctr |= SC2CTR_CK_TM1UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER3)
+		scxctr |= SC2CTR_CK_TM3UFLOW_8;
+#else
+#error "Unknown config for ttySM2"
+#endif
+#endif /* CONFIG_AM33_2 */
+		break;
+#endif /* CONFIG_MN10300_TTYSM2 */
+
+	default:
+		break;
+	}
 
 try_alternative:
 	baud = uart_get_baud_rate(&port->uart, new, old, 0,
@@ -1195,6 +1332,12 @@ static void mn10300_serial_set_termios(struct uart_port *_port,
 		ctr &= ~SC2CTR_TWE;
 		*port->_control = ctr;
 	}
+
+	/* change Transfer bit-order (LSB/MSB) */
+	if (new->c_cflag & CODMSB)
+		*port->_control |= SC01CTR_OD_MSBFIRST; /* MSB MODE */
+	else
+		*port->_control &= ~SC01CTR_OD_MSBFIRST; /* LSB MODE */
 }
 
 /*
@@ -1302,11 +1445,16 @@ static int __init mn10300_serial_init(void)
 	printk(KERN_INFO "%s version %s (%s)\n",
 	       serial_name, serial_version, serial_revdate);
 
-#ifdef CONFIG_MN10300_TTYSM2
-	SC2TIM = 8; /* make the baud base of timer 2 IOCLK/8 */
+#if defined(CONFIG_MN10300_TTYSM2) && defined(CONFIG_AM33_2)
+	{
+		int tmp;
+		SC2TIM = 8; /* make the baud base of timer 2 IOCLK/8 */
+		tmp = SC2TIM;
+	}
 #endif
 
-	set_intr_stub(EXCEP_IRQ_LEVEL1, mn10300_serial_vdma_interrupt);
+	set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL),
+		mn10300_serial_vdma_interrupt);
 
 	ret = uart_register_driver(&mn10300_serial_driver);
 	if (!ret) {
@@ -1366,9 +1514,11 @@ static void mn10300_serial_console_write(struct console *co,
 	port = mn10300_serial_ports[co->index];
 
 	/* firstly hijack the serial port from the "virtual DMA" controller */
+	arch_local_cli();
 	txicr = *port->tx_icr;
-	*port->tx_icr = GxICR_LEVEL_1;
+	*port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
 	tmp = *port->tx_icr;
+	arch_local_sti();
 
 	/* the transmitter may be disabled */
 	scxctr = *port->_control;
@@ -1422,8 +1572,10 @@ static void mn10300_serial_console_write(struct console *co,
 	if (!(scxctr & SC01CTR_TXE))
 		*port->_control = scxctr;
 
+	arch_local_cli();
 	*port->tx_icr = txicr;
 	tmp = *port->tx_icr;
+	arch_local_sti();
 }
 
 /*

+ 8 - 1
arch/mn10300/kernel/mn10300-watchdog-low.S

@@ -16,6 +16,7 @@
 #include <asm/intctl-regs.h>
 #include <asm/timer-regs.h>
 #include <asm/frame.inc>
+#include <linux/threads.h>
 
 	.text
 
@@ -53,7 +54,13 @@ watchdog_handler:
 	.type	touch_nmi_watchdog,@function
 touch_nmi_watchdog:
 	clr	d0
-	mov	d0,(watchdog_alert_counter)
+	clr	d1
+	mov	watchdog_alert_counter, a0
+	setlb
+	mov	d0, (a0+)
+	inc	d1
+	cmp	NR_CPUS, d1
+	lne
 	ret	[],0
 
 	.size	touch_nmi_watchdog,.-touch_nmi_watchdog
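(In C terms, the setlb/lne loop above is roughly the following — a sketch matching the watchdog_alert_counter[NR_CPUS] array introduced in the next file:)

	/* Rough C equivalent of the assembly loop above. */
	void touch_nmi_watchdog(void)
	{
		int i;

		for (i = 0; i < NR_CPUS; i++)
			watchdog_alert_counter[i] = 0;
	}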

+ 61 - 39
arch/mn10300/kernel/mn10300-watchdog.c

@@ -30,7 +30,7 @@
 static DEFINE_SPINLOCK(watchdog_print_lock);
 static unsigned int watchdog;
 static unsigned int watchdog_hz = 1;
-unsigned int watchdog_alert_counter;
+unsigned int watchdog_alert_counter[NR_CPUS];
 
 EXPORT_SYMBOL(touch_nmi_watchdog);
 
@@ -39,9 +39,6 @@ EXPORT_SYMBOL(touch_nmi_watchdog);
  * is to check its timer makes IRQ counts. If they are not
  * changing then that CPU has some problem.
  *
- * as these watchdog NMI IRQs are generated on every CPU, we only
- * have to check the current processor.
- *
  * since NMIs dont listen to _any_ locks, we have to be extremely
  * careful not to rely on unsafe variables. The printk might lock
  * up though, so we have to break up any console locks first ...
@@ -69,8 +66,8 @@ int __init check_watchdog(void)
 
 	printk(KERN_INFO "OK.\n");
 
-	/* now that we know it works we can reduce NMI frequency to
-	 * something more reasonable; makes a difference in some configs
+	/* now that we know it works we can reduce NMI frequency to something
+	 * more reasonable; makes a difference in some configs
 	 */
 	watchdog_hz = 1;
 
@@ -121,15 +118,22 @@ void __init watchdog_go(void)
 	}
 }
 
+#ifdef CONFIG_SMP
+static void watchdog_dump_register(void *dummy)
+{
+	printk(KERN_ERR "--- Register Dump (CPU%d) ---\n", CPUID);
+	show_registers(current_frame());
+}
+#endif
+
 asmlinkage
 void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
 {
-
 	/*
 	 * Since current-> is always on the stack, and we always switch
 	 * the stack NMI-atomically, it's safe to use smp_processor_id().
 	 */
-	int sum, cpu = smp_processor_id();
+	int sum, cpu;
 	int irq = NMIIRQ;
 	u8 wdt, tmp;
 
@@ -138,43 +142,61 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
 	tmp = WDCTR;
 	NMICR = NMICR_WDIF;
 
-	nmi_count(cpu)++;
+	nmi_count(smp_processor_id())++;
 	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
-	sum = irq_stat[cpu].__irq_count;
-
-	if (last_irq_sums[cpu] == sum) {
-		/*
-		 * Ayiee, looks like this CPU is stuck ...
-		 * wait a few IRQs (5 seconds) before doing the oops ...
-		 */
-		watchdog_alert_counter++;
-		if (watchdog_alert_counter == 5 * watchdog_hz) {
-			spin_lock(&watchdog_print_lock);
+
+	for_each_online_cpu(cpu) {
+
+		sum = irq_stat[cpu].__irq_count;
+
+		if ((last_irq_sums[cpu] == sum)
+#if defined(CONFIG_GDBSTUB) && defined(CONFIG_SMP)
+			&& !(CHK_GDBSTUB_BUSY()
+			     || atomic_read(&cpu_doing_single_step))
+#endif
+			) {
 			/*
-			 * We are in trouble anyway, lets at least try
-			 * to get a message out.
+			 * Ayiee, looks like this CPU is stuck ...
+			 * wait a few IRQs (5 seconds) before doing the oops ...
 			 */
-			bust_spinlocks(1);
-			printk(KERN_ERR
-			       "NMI Watchdog detected LOCKUP on CPU%d,"
-			       " pc %08lx, registers:\n",
-			       cpu, regs->pc);
-			show_registers(regs);
-			printk("console shuts up ...\n");
-			console_silent();
-			spin_unlock(&watchdog_print_lock);
-			bust_spinlocks(0);
+			watchdog_alert_counter[cpu]++;
+			if (watchdog_alert_counter[cpu] == 5 * watchdog_hz) {
+				spin_lock(&watchdog_print_lock);
+				/*
+				 * We are in trouble anyway, lets at least try
+				 * to get a message out.
+				 */
+				bust_spinlocks(1);
+				printk(KERN_ERR
+				       "NMI Watchdog detected LOCKUP on CPU%d,"
+				       " pc %08lx, registers:\n",
+				       cpu, regs->pc);
+#ifdef CONFIG_SMP
+				printk(KERN_ERR
+				       "--- Register Dump (CPU%d) ---\n",
+				       CPUID);
+#endif
+				show_registers(regs);
+#ifdef CONFIG_SMP
+				smp_nmi_call_function(watchdog_dump_register,
+					NULL, 1);
+#endif
+				printk(KERN_NOTICE "console shuts up ...\n");
+				console_silent();
+				spin_unlock(&watchdog_print_lock);
+				bust_spinlocks(0);
 #ifdef CONFIG_GDBSTUB
 #ifdef CONFIG_GDBSTUB
-			if (gdbstub_busy)
-				gdbstub_exception(regs, excep);
-			else
-				gdbstub_intercept(regs, excep);
+				if (CHK_GDBSTUB_BUSY_AND_ACTIVE())
+					gdbstub_exception(regs, excep);
+				else
+					gdbstub_intercept(regs, excep);
 #endif
 #endif
-			do_exit(SIGSEGV);
+				do_exit(SIGSEGV);
+			}
+		} else {
+			last_irq_sums[cpu] = sum;
+			watchdog_alert_counter[cpu] = 0;
 		}
-	} else {
-		last_irq_sums[cpu] = sum;
-		watchdog_alert_counter = 0;
 	}
 
 	WDCTR = wdt | WDCTR_WDRST;
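(Stripped of the locking, GDB-stub hooks and printks, the reworked detector above reduces to this skeleton — each NMI samples every online CPU's interrupt count, and five seconds of no progress triggers the oops path; report_lockup() is a hypothetical placeholder for the printk/do_exit sequence:)

	/* Skeleton of the per-CPU lockup check above (sketch only). */
	for_each_online_cpu(cpu) {
		sum = irq_stat[cpu].__irq_count;
		if (last_irq_sums[cpu] == sum) {
			if (++watchdog_alert_counter[cpu] == 5 * watchdog_hz)
				report_lockup(cpu);	/* hypothetical helper */
		} else {
			last_irq_sums[cpu] = sum;
			watchdog_alert_counter[cpu] = 0;
		}
	}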

+ 47 - 14
arch/mn10300/kernel/process.c

@@ -57,6 +57,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
+#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
 /*
  * we use this if we don't have any better idle routine
  */
@@ -69,6 +70,35 @@ static void default_idle(void)
 		local_irq_enable();
 }
 
+#else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU  */
+/*
+ * On SMP it's slightly faster (but much more power-consuming!)
+ * to poll the ->work.need_resched flag instead of waiting for the
+ * cross-CPU IPI to arrive. Use this option with caution.
+ */
+static inline void poll_idle(void)
+{
+	int oldval;
+
+	local_irq_enable();
+
+	/*
+	 * Deal with another CPU just having chosen a thread to
+	 * run here:
+	 */
+	oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
+
+	if (!oldval) {
+		set_thread_flag(TIF_POLLING_NRFLAG);
+		while (!need_resched())
+			cpu_relax();
+		clear_thread_flag(TIF_POLLING_NRFLAG);
+	} else {
+		set_need_resched();
+	}
+}
+#endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
+
 /*
  * the idle thread
  * - there's no useful work to be done, so just try to conserve power and have
@@ -77,8 +107,6 @@ static void default_idle(void)
  */
 void cpu_idle(void)
 {
-	int cpu = smp_processor_id();
-
 	/* endless idle loop with no priority at all */
 	for (;;) {
 		while (!need_resched()) {
@@ -86,10 +114,13 @@ void cpu_idle(void)
 
 			smp_rmb();
 			idle = pm_idle;
-			if (!idle)
+			if (!idle) {
+#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
+				idle = poll_idle;
+#else  /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
 				idle = default_idle;
-
-			irq_stat[cpu].idle_timestamp = jiffies;
+#endif /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
+			}
 			idle();
 		}
 
@@ -197,6 +228,7 @@ int copy_thread(unsigned long clone_flags,
 		unsigned long c_usp, unsigned long ustk_size,
 		struct task_struct *p, struct pt_regs *kregs)
 {
+	struct thread_info *ti = task_thread_info(p);
 	struct pt_regs *c_uregs, *c_kregs, *uregs;
 	unsigned long c_ksp;
 
@@ -217,7 +249,7 @@ int copy_thread(unsigned long clone_flags,
 
 	/* the new TLS pointer is passed in as arg #5 to sys_clone() */
 	if (clone_flags & CLONE_SETTLS)
-		c_uregs->e2 = __frame->d3;
+		c_uregs->e2 = current_frame()->d3;
 
 	/* set up the return kernel frame if called from kernel_thread() */
 	c_kregs = c_uregs;
@@ -235,7 +267,7 @@ int copy_thread(unsigned long clone_flags,
 	}
 
 	/* set up things up so the scheduler can start the new task */
-	p->thread.__frame = c_kregs;
+	ti->frame	= c_kregs;
 	p->thread.a3	= (unsigned long) c_kregs;
 	p->thread.sp	= c_ksp;
 	p->thread.pc	= (unsigned long) ret_from_fork;
@@ -247,25 +279,26 @@ int copy_thread(unsigned long clone_flags,
 
 /*
  * clone a process
- * - tlsptr is retrieved by copy_thread() from __frame->d3
+ * - tlsptr is retrieved by copy_thread() from current_frame()->d3
  */
 asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
 			  int __user *parent_tidptr, int __user *child_tidptr,
 			  int __user *tlsptr)
 {
-	return do_fork(clone_flags, newsp ?: __frame->sp, __frame, 0,
-		       parent_tidptr, child_tidptr);
+	return do_fork(clone_flags, newsp ?: current_frame()->sp,
+		       current_frame(), 0, parent_tidptr, child_tidptr);
 }
 
 asmlinkage long sys_fork(void)
 {
-	return do_fork(SIGCHLD, __frame->sp, __frame, 0, NULL, NULL);
+	return do_fork(SIGCHLD, current_frame()->sp,
+		       current_frame(), 0, NULL, NULL);
 }
 
 asmlinkage long sys_vfork(void)
 {
-	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, __frame->sp, __frame,
-		       0, NULL, NULL);
+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, current_frame()->sp,
+		       current_frame(), 0, NULL, NULL);
 }
 
 asmlinkage long sys_execve(const char __user *name,
@@ -279,7 +312,7 @@ asmlinkage long sys_execve(const char __user *name,
 	error = PTR_ERR(filename);
 	if (IS_ERR(filename))
 		return error;
-	error = do_execve(filename, argv, envp, __frame);
+	error = do_execve(filename, argv, envp, current_frame());
 	putname(filename);
 	return error;
 }

+ 1 - 1
arch/mn10300/kernel/profile.c

@@ -41,7 +41,7 @@ static __init int profile_init(void)
 	tmp = TM11ICR;
 
 	printk(KERN_INFO "Profiling initiated on timer 11, priority 0, %uHz\n",
-	       mn10300_ioclk / 8 / (TM11BR + 1));
+	       MN10300_IOCLK / 8 / (TM11BR + 1));
 	printk(KERN_INFO "Profile histogram stored %p-%p\n",
 	printk(KERN_INFO "Profile histogram stored %p-%p\n",
 	       prof_buffer, (u8 *)(prof_buffer + prof_len) - 1);
 	       prof_buffer, (u8 *)(prof_buffer + prof_len) - 1);
 
 

+ 10 - 31
arch/mn10300/kernel/rtc.c

@@ -20,18 +20,22 @@
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL(rtc_lock);
 
-/* time for RTC to update itself in ioclks */
-static unsigned long mn10300_rtc_update_period;
-
+/*
+ * Read the current RTC time
+ */
 void read_persistent_clock(struct timespec *ts)
 {
 	struct rtc_time tm;
 
 	get_rtc_time(&tm);
 
-	ts->tv_sec = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday,
-		      tm.tm_hour, tm.tm_min, tm.tm_sec);
 	ts->tv_nsec = 0;
+	ts->tv_sec = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday,
+			    tm.tm_hour, tm.tm_min, tm.tm_sec);
+
+	/* if rtc is way off in the past, set something reasonable */
+	if (ts->tv_sec < 0)
+		ts->tv_sec = mktime(2009, 1, 1, 12, 0, 0);
 }
 
 /*
@@ -115,39 +119,14 @@ int update_persistent_clock(struct timespec now)
  */
 void __init calibrate_clock(void)
 {
-	unsigned long count0, counth, count1;
 	unsigned char status;
 
 	/* make sure the RTC is running and is set to operate in 24hr mode */
 	status = RTSRC;
 	RTCRB |= RTCRB_SET;
 	RTCRB |= RTCRB_TM_24HR;
+	RTCRB &= ~RTCRB_DM_BINARY;
 	RTCRA |= RTCRA_DVR;
 	RTCRA &= ~RTCRA_DVR;
 	RTCRB &= ~RTCRB_SET;
-
-	/* work out the clock speed by counting clock cycles between ends of
-	 * the RTC update cycle - track the RTC through one complete update
-	 * cycle (1 second)
-	 */
-	startup_timestamp_counter();
-
-	while (!(RTCRA & RTCRA_UIP)) {}
-	while ((RTCRA & RTCRA_UIP)) {}
-
-	count0 = TMTSCBC;
-
-	while (!(RTCRA & RTCRA_UIP)) {}
-
-	counth = TMTSCBC;
-
-	while ((RTCRA & RTCRA_UIP)) {}
-
-	count1 = TMTSCBC;
-
-	shutdown_timestamp_counter();
-
-	MN10300_TSCCLK = count0 - count1; /* the timers count down */
-	mn10300_rtc_update_period = counth - count1;
-	MN10300_TSC_PER_HZ = MN10300_TSCCLK / HZ;
 }

+ 43 - 36
arch/mn10300/kernel/setup.c

@@ -22,6 +22,7 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/seq_file.h>
+#include <linux/cpu.h>
 #include <asm/processor.h>
 #include <linux/console.h>
 #include <asm/uaccess.h>
@@ -30,7 +31,6 @@
 #include <asm/io.h>
 #include <asm/smp.h>
 #include <proc/proc.h>
-#include <asm/busctl-regs.h>
 #include <asm/fpu.h>
 #include <asm/sections.h>
 
@@ -64,11 +64,13 @@ unsigned long memory_size;
 struct thread_info *__current_ti = &init_thread_union.thread_info;
 struct task_struct *__current = &init_task;
 
-#define mn10300_known_cpus 3
+#define mn10300_known_cpus 5
 static const char *const mn10300_cputypes[] = {
-	"am33v1",
-	"am33v2",
-	"am34v1",
+	"am33-1",
+	"am33-2",
+	"am34-1",
+	"am33-3",
+	"am34-2",
 	"unknown"
 	"unknown"
 };
 };
 
 
@@ -123,6 +125,7 @@ void __init setup_arch(char **cmdline_p)
 
 	cpu_init();
 	unit_setup();
+	smp_init_cpus();
 	parse_mem_cmdline(cmdline_p);
 
 	init_mm.start_code = (unsigned long)&_text;
@@ -179,57 +182,55 @@ void __init setup_arch(char **cmdline_p)
 void __init cpu_init(void)
 {
 	unsigned long cpurev = CPUREV, type;
-	unsigned long base, size;
 
 	type = (CPUREV & CPUREV_TYPE) >> CPUREV_TYPE_S;
 	if (type > mn10300_known_cpus)
 		type = mn10300_known_cpus;
 
 
-	printk(KERN_INFO "Matsushita %s, rev %ld\n",
+	printk(KERN_INFO "Panasonic %s, rev %ld\n",
 	       mn10300_cputypes[type],
 	       (cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S);
 
-	/* determine the memory size and base from the memory controller regs */
-	memory_size = 0;
-
-	base = SDBASE(0);
-	if (base & SDBASE_CE) {
-		size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
-		size = ~size + 1;
-		base &= SDBASE_CBA;
+	get_mem_info(&phys_memory_base, &memory_size);
+	phys_memory_end = phys_memory_base + memory_size;
 
-		printk(KERN_INFO "SDRAM[0]: %luMb @%08lx\n", size >> 20, base);
-		memory_size += size;
-		phys_memory_base = base;
-	}
+	fpu_init_state();
+}
 
-	base = SDBASE(1);
-	if (base & SDBASE_CE) {
-		size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
-		size = ~size + 1;
-		base &= SDBASE_CBA;
+static struct cpu cpu_devices[NR_CPUS];
 
-		printk(KERN_INFO "SDRAM[1]: %luMb @%08lx\n", size >> 20, base);
-		memory_size += size;
-		if (phys_memory_base == 0)
-			phys_memory_base = base;
-	}
+static int __init topology_init(void)
+{
+	int i;
 
-	phys_memory_end = phys_memory_base + memory_size;
+	for_each_present_cpu(i)
+		register_cpu(&cpu_devices[i], i);
 
-#ifdef CONFIG_FPU
-	fpu_init_state();
-#endif
+	return 0;
 }
 
+subsys_initcall(topology_init);
+
 /*
  * Get CPU information for use by the procfs.
  */
 static int show_cpuinfo(struct seq_file *m, void *v)
 {
+#ifdef CONFIG_SMP
+	struct mn10300_cpuinfo *c = v;
+	unsigned long cpu_id = c - cpu_data;
+	unsigned long cpurev = c->type, type, icachesz, dcachesz;
+#else  /* CONFIG_SMP */
+	unsigned long cpu_id = 0;
 	unsigned long cpurev = CPUREV, type, icachesz, dcachesz;
+#endif /* CONFIG_SMP */
 
-	type = (CPUREV & CPUREV_TYPE) >> CPUREV_TYPE_S;
+#ifdef CONFIG_SMP
+	if (!cpu_online(cpu_id))
+		return 0;
+#endif
+
+	type = (cpurev & CPUREV_TYPE) >> CPUREV_TYPE_S;
 	if (type > mn10300_known_cpus)
 		type = mn10300_known_cpus;
 
@@ -244,13 +245,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		1024;
 
 	seq_printf(m,
-		   "processor  : 0\n"
-		   "vendor_id  : Matsushita\n"
+		   "processor  : %ld\n"
+		   "vendor_id  : " PROCESSOR_VENDOR_NAME "\n"
 		   "cpu core   : %s\n"
 		   "cpu core   : %s\n"
 		   "cpu rev    : %lu\n"
 		   "cpu rev    : %lu\n"
 		   "model name : " PROCESSOR_MODEL_NAME		"\n"
 		   "model name : " PROCESSOR_MODEL_NAME		"\n"
 		   "icache size: %lu\n"
 		   "icache size: %lu\n"
 		   "dcache size: %lu\n",
 		   "dcache size: %lu\n",
+		   cpu_id,
 		   mn10300_cputypes[type],
 		   (cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S,
 		   icachesz,
@@ -262,8 +264,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		   "bogomips   : %lu.%02lu\n\n",
 		   "bogomips   : %lu.%02lu\n\n",
 		   MN10300_IOCLK / 1000000,
 		   MN10300_IOCLK / 1000000,
 		   (MN10300_IOCLK / 10000) % 100,
 		   (MN10300_IOCLK / 10000) % 100,
+#ifdef CONFIG_SMP
+		   c->loops_per_jiffy / (500000 / HZ),
+		   (c->loops_per_jiffy / (5000 / HZ)) % 100
+#else  /* CONFIG_SMP */
 		   loops_per_jiffy / (500000 / HZ),
 		   (loops_per_jiffy / (5000 / HZ)) % 100
+#endif /* CONFIG_SMP */
 		   );
 
 	return 0;

+ 11 - 9
arch/mn10300/kernel/signal.c

@@ -91,7 +91,7 @@ asmlinkage long sys_sigaction(int sig,
  */
 asmlinkage long sys_sigaltstack(const stack_t __user *uss, stack_t *uoss)
 {
-	return do_sigaltstack(uss, uoss, __frame->sp);
+	return do_sigaltstack(uss, uoss, current_frame()->sp);
 }
 
 /*
@@ -156,10 +156,11 @@ badframe:
  */
 asmlinkage long sys_sigreturn(void)
 {
-	struct sigframe __user *frame = (struct sigframe __user *) __frame->sp;
+	struct sigframe __user *frame;
 	sigset_t set;
 	long d0;
 
+	frame = (struct sigframe __user *) current_frame()->sp;
 	if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
 	if (__get_user(set.sig[0], &frame->sc.oldmask))
@@ -176,7 +177,7 @@ asmlinkage long sys_sigreturn(void)
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	if (restore_sigcontext(__frame, &frame->sc, &d0))
+	if (restore_sigcontext(current_frame(), &frame->sc, &d0))
 		goto badframe;
 
 	return d0;
@@ -191,11 +192,11 @@ badframe:
  */
 asmlinkage long sys_rt_sigreturn(void)
 {
-	struct rt_sigframe __user *frame =
-		(struct rt_sigframe __user *) __frame->sp;
+	struct rt_sigframe __user *frame;
 	sigset_t set;
-	unsigned long d0;
+	long d0;
 
+	frame = (struct rt_sigframe __user *) current_frame()->sp;
 	if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
 	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
@@ -207,10 +208,11 @@ asmlinkage long sys_rt_sigreturn(void)
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	if (restore_sigcontext(__frame, &frame->uc.uc_mcontext, &d0))
+	if (restore_sigcontext(current_frame(), &frame->uc.uc_mcontext, &d0))
 		goto badframe;
 
-	if (do_sigaltstack(&frame->uc.uc_stack, NULL, __frame->sp) == -EFAULT)
+	if (do_sigaltstack(&frame->uc.uc_stack, NULL, current_frame()->sp) ==
+	    -EFAULT)
 		goto badframe;
 
 	return d0;
@@ -572,7 +574,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
 
 	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
 		clear_thread_flag(TIF_NOTIFY_RESUME);
-		tracehook_notify_resume(__frame);
+		tracehook_notify_resume(current_frame());
 		if (current->replacement_session_keyring)
 			key_replace_session_keyring();
 	}

+ 97 - 0
arch/mn10300/kernel/smp-low.S

@@ -0,0 +1,97 @@
+/* SMP IPI low-level handler
+ *
+ * Copyright (C) 2006-2007 Matsushita Electric Industrial Co., Ltd.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/system.h>
+#include <asm/thread_info.h>
+#include <asm/cpu-regs.h>
+#include <proc/smp-regs.h>
+#include <asm/asm-offsets.h>
+#include <asm/frame.inc>
+
+	.am33_2
+
+###############################################################################
+#
+# IPI interrupt handler
+#
+###############################################################################
+	.globl mn10300_low_ipi_handler
+mn10300_low_ipi_handler:
+	add	-4,sp
+	mov	d0,(sp)
+	movhu	(IAGR),d0
+	and	IAGR_GN,d0
+	lsr	0x2,d0
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+	cmp	FLUSH_CACHE_IPI,d0
+	beq	mn10300_flush_cache_ipi
+#endif
+	cmp	SMP_BOOT_IRQ,d0
+	beq	mn10300_smp_boot_ipi
+	/* OTHERS */
+	mov	(sp),d0
+	add	4,sp
+#ifdef CONFIG_GDBSTUB
+	jmp	gdbstub_io_rx_handler
+#else
+	jmp	end
+#endif
+
+###############################################################################
+#
+# Cache flush IPI interrupt handler
+#
+###############################################################################
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+mn10300_flush_cache_ipi:
+	mov	(sp),d0
+	add	4,sp
+
+	/* FLUSH_CACHE_IPI */
+	add	-4,sp
+	SAVE_ALL
+	mov	GxICR_DETECT,d2
+	movbu	d2,(GxICR(FLUSH_CACHE_IPI))	# ACK the interrupt
+	movhu	(GxICR(FLUSH_CACHE_IPI)),d2
+	call	smp_cache_interrupt[],0
+	RESTORE_ALL
+	jmp	end
+#endif
+
+###############################################################################
+#
+# SMP boot CPU IPI interrupt handler
+#
+###############################################################################
+mn10300_smp_boot_ipi:
+	/* clear interrupt */
+	movhu	(GxICR(SMP_BOOT_IRQ)),d0
+	and	~GxICR_REQUEST,d0
+	movhu	d0,(GxICR(SMP_BOOT_IRQ))
+	mov	(sp),d0
+	add	4,sp
+
+	# get stack
+	mov	(CPUID),a0
+	add	-1,a0
+	add	a0,a0
+	add	a0,a0
+	mov	(start_stack,a0),a0
+	mov	a0,sp
+	jmp	initialize_secondary
+
+
+# Jump here after RTI to suppress the icache lookahead
+end:
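(A C paraphrase of the demultiplexer above, purely illustrative — the real code has to stay in assembly because it runs before the normal IRQ entry path has saved any state:)

	/* Sketch: what mn10300_low_ipi_handler decides, expressed in C. */
	unsigned int grp = (IAGR & IAGR_GN) >> 2;  /* group number of the IRQ */

	if (grp == FLUSH_CACHE_IPI)
		smp_cache_interrupt();		/* after acking its GxICR */
	else if (grp == SMP_BOOT_IRQ)
		initialize_secondary();		/* on the stack from start_stack[] */
	else
		gdbstub_io_rx_handler();	/* CONFIG_GDBSTUB fallback */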

+ 1152 - 0
arch/mn10300/kernel/smp.c

@@ -0,0 +1,1152 @@
+/* SMP support routines.
+ *
+ * Copyright (C) 2006-2008 Panasonic Corporation
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/profile.h>
+#include <linux/smp.h>
+#include <asm/tlbflush.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/processor.h>
+#include <asm/bug.h>
+#include <asm/exceptions.h>
+#include <asm/hardirq.h>
+#include <asm/fpu.h>
+#include <asm/mmu_context.h>
+#include <asm/thread_info.h>
+#include <asm/cpu-regs.h>
+#include <asm/intctl-regs.h>
+#include "internal.h"
+
+#ifdef CONFIG_HOTPLUG_CPU
+#include <linux/cpu.h>
+#include <asm/cacheflush.h>
+
+static unsigned long sleep_mode[NR_CPUS];
+
+static void run_sleep_cpu(unsigned int cpu);
+static void run_wakeup_cpu(unsigned int cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * Debug Message function
+ */
+
+#undef DEBUG_SMP
+#ifdef DEBUG_SMP
+#define Dprintk(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
+#else
+#define Dprintk(fmt, ...) no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
+#endif
+
+/* timeout value in msec for smp_nmi_call_function. zero is no timeout. */
+#define	CALL_FUNCTION_NMI_IPI_TIMEOUT	0
+
+/*
+ * Structure and data for smp_nmi_call_function().
+ */
+struct nmi_call_data_struct {
+	smp_call_func_t	func;
+	void		*info;
+	cpumask_t	started;
+	cpumask_t	finished;
+	int		wait;
+	char		size_alignment[0]
+	__attribute__ ((__aligned__(SMP_CACHE_BYTES)));
+} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
+
+static DEFINE_SPINLOCK(smp_nmi_call_lock);
+static struct nmi_call_data_struct *nmi_call_data;
+
+/*
+ * Data structures and variables
+ */
+static cpumask_t cpu_callin_map;	/* Bitmask of callin CPUs */
+static cpumask_t cpu_callout_map;	/* Bitmask of callout CPUs */
+cpumask_t cpu_boot_map;			/* Bitmask of boot APs */
+unsigned long start_stack[NR_CPUS - 1];
+
+/*
+ * Per CPU parameters
+ */
+struct mn10300_cpuinfo cpu_data[NR_CPUS] __cacheline_aligned;
+
+static int cpucount;			/* The count of boot CPUs */
+static cpumask_t smp_commenced_mask;
+cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
+
+/*
+ * Function Prototypes
+ */
+static int do_boot_cpu(int);
+static void smp_show_cpu_info(int cpu_id);
+static void smp_callin(void);
+static void smp_online(void);
+static void smp_store_cpu_info(int);
+static void smp_cpu_init(void);
+static void smp_tune_scheduling(void);
+static void send_IPI_mask(const cpumask_t *cpumask, int irq);
+static void init_ipi(void);
+
+/*
+ * IPI Initialization interrupt definitions
+ */
+static void mn10300_ipi_disable(unsigned int irq);
+static void mn10300_ipi_enable(unsigned int irq);
+static void mn10300_ipi_ack(unsigned int irq);
+static void mn10300_ipi_nop(unsigned int irq);
+
+static struct irq_chip mn10300_ipi_type = {
+	.name		= "cpu_ipi",
+	.disable	= mn10300_ipi_disable,
+	.enable		= mn10300_ipi_enable,
+	.ack		= mn10300_ipi_ack,
+	.eoi		= mn10300_ipi_nop
+};
+
+static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id);
+static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id);
+
+static struct irqaction reschedule_ipi = {
+	.handler	= smp_reschedule_interrupt,
+	.name		= "smp reschedule IPI"
+};
+static struct irqaction call_function_ipi = {
+	.handler	= smp_call_function_interrupt,
+	.name		= "smp call function IPI"
+};
+
+#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id);
+static struct irqaction local_timer_ipi = {
+	.handler	= smp_ipi_timer_interrupt,
+	.flags		= IRQF_DISABLED,
+	.name		= "smp local timer IPI"
+};
+#endif
+
+/**
+ * init_ipi - Initialise the IPI mechanism
+ */
+static void init_ipi(void)
+{
+	unsigned long flags;
+	u16 tmp16;
+
+	/* set up the reschedule IPI */
+	set_irq_chip_and_handler(RESCHEDULE_IPI,
+				 &mn10300_ipi_type, handle_percpu_irq);
+	setup_irq(RESCHEDULE_IPI, &reschedule_ipi);
+	set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV);
+	mn10300_ipi_enable(RESCHEDULE_IPI);
+
+	/* set up the call function IPI */
+	set_irq_chip_and_handler(CALL_FUNC_SINGLE_IPI,
+				 &mn10300_ipi_type, handle_percpu_irq);
+	setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi);
+	set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV);
+	mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);
+
+	/* set up the local timer IPI */
+#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
+    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+	set_irq_chip_and_handler(LOCAL_TIMER_IPI,
+				 &mn10300_ipi_type, handle_percpu_irq);
+	setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi);
+	set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV);
+	mn10300_ipi_enable(LOCAL_TIMER_IPI);
+#endif
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+	/* set up the cache flush IPI */
+	flags = arch_local_cli_save();
+	__set_intr_stub(NUM2EXCEP_IRQ_LEVEL(FLUSH_CACHE_GxICR_LV),
+			mn10300_low_ipi_handler);
+	GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
+	mn10300_ipi_enable(FLUSH_CACHE_IPI);
+	arch_local_irq_restore(flags);
+#endif
+
+	/* set up the NMI call function IPI */
+	flags = arch_local_cli_save();
+	GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
+	tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
+	arch_local_irq_restore(flags);
+
+	/* set up the SMP boot IPI */
+	flags = arch_local_cli_save();
+	__set_intr_stub(NUM2EXCEP_IRQ_LEVEL(SMP_BOOT_GxICR_LV),
+			mn10300_low_ipi_handler);
+	arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_shutdown - Shut down handling of an IPI
+ * @irq: The IPI to be shut down.
+ */
+static void mn10300_ipi_shutdown(unsigned int irq)
+{
+	unsigned long flags;
+	u16 tmp;
+
+	flags = arch_local_cli_save();
+
+	tmp = GxICR(irq);
+	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
+	tmp = GxICR(irq);
+
+	arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_enable - Enable an IPI
+ * @irq: The IPI to be enabled.
+ */
+static void mn10300_ipi_enable(unsigned int irq)
+{
+	unsigned long flags;
+	u16 tmp;
+
+	flags = arch_local_cli_save();
+
+	tmp = GxICR(irq);
+	GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
+	tmp = GxICR(irq);
+
+	arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_disable - Disable an IPI
+ * @irq: The IPI to be disabled.
+ */
+static void mn10300_ipi_disable(unsigned int irq)
+{
+	unsigned long flags;
+	u16 tmp;
+
+	flags = arch_local_cli_save();
+
+	tmp = GxICR(irq);
+	GxICR(irq) = tmp & GxICR_LEVEL;
+	tmp = GxICR(irq);
+
+	arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_ack - Acknowledge an IPI interrupt in the PIC
+ * @irq: The IPI to be acknowledged.
+ *
+ * Clear the interrupt detection flag for the IPI on the appropriate interrupt
+ * channel in the PIC.
+ */
+static void mn10300_ipi_ack(unsigned int irq)
+{
+	unsigned long flags;
+	u16 tmp;
+
+	flags = arch_local_cli_save();
+	GxICR_u8(irq) = GxICR_DETECT;
+	tmp = GxICR(irq);
+	arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_nop - Dummy IPI action
+ * @irq: The IPI to be acted upon.
+ */
+static void mn10300_ipi_nop(unsigned int irq)
+{
+}
+
+/**
+ * send_IPI_mask - Send IPIs to all CPUs in list
+ * @cpumask: The list of CPUs to target.
+ * @irq: The IPI request to be sent.
+ *
+ * Send the specified IPI to all the CPUs in the list, not waiting for them to
+ * finish before returning.  The caller is responsible for synchronisation if
+ * that is needed.
+ */
+static void send_IPI_mask(const cpumask_t *cpumask, int irq)
+{
+	int i;
+	u16 tmp;
+
+	for (i = 0; i < NR_CPUS; i++) {
+		if (cpu_isset(i, *cpumask)) {
+			/* send IPI */
+			tmp = CROSS_GxICR(irq, i);
+			CROSS_GxICR(irq, i) =
+				tmp | GxICR_REQUEST | GxICR_DETECT;
+			tmp = CROSS_GxICR(irq, i); /* flush write buffer */
+		}
+	}
+}
+
+/**
+ * send_IPI_self - Send an IPI to this CPU.
+ * @irq: The IPI request to be sent.
+ *
+ * Send the specified IPI to the current CPU.
+ */
+void send_IPI_self(int irq)
+{
+	send_IPI_mask(cpumask_of(smp_processor_id()), irq);
+}
+
+/**
+ * send_IPI_allbutself - Send IPIs to all the other CPUs.
+ * @irq: The IPI request to be sent.
+ *
+ * Send the specified IPI to all CPUs in the system barring the current one,
+ * not waiting for them to finish before returning.  The caller is responsible
+ * for synchronisation if that is needed.
+ */
+void send_IPI_allbutself(int irq)
+{
+	cpumask_t cpumask;
+
+	cpumask = cpu_online_map;
+	cpu_clear(smp_processor_id(), cpumask);
+	send_IPI_mask(&cpumask, irq);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+	BUG();
+	/*send_IPI_mask(mask, CALL_FUNCTION_IPI);*/
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+	send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI);
+}
+
+/**
+ * smp_send_reschedule - Send reschedule IPI to a CPU
+ * @cpu: The CPU to target.
+ */
+void smp_send_reschedule(int cpu)
+{
+	send_IPI_mask(cpumask_of(cpu), RESCHEDULE_IPI);
+}
+
+/**
+ * smp_nmi_call_function - Send a call function NMI IPI to all CPUs
+ * @func: The function to ask to be run.
+ * @info: The context data to pass to that function.
+ * @wait: If true, wait (atomically) until function is run on all CPUs.
+ *
+ * Send a non-maskable request to all CPUs in the system, requesting them to
+ * run the specified function with the given context data, and, potentially, to
+ * wait for completion of that function on all CPUs.
+ *
+ * Returns 0 if successful, -ETIMEDOUT if we were asked to wait, but hit the
+ * timeout.
+ */
+int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
+{
+	struct nmi_call_data_struct data;
+	unsigned long flags;
+	unsigned int cnt;
+	int cpus, ret = 0;
+
+	cpus = num_online_cpus() - 1;
+	if (cpus < 1)
+		return 0;
+
+	data.func = func;
+	data.info = info;
+	data.started = cpu_online_map;
+	cpu_clear(smp_processor_id(), data.started);
+	data.wait = wait;
+	if (wait)
+		data.finished = data.started;
+
+	spin_lock_irqsave(&smp_nmi_call_lock, flags);
+	nmi_call_data = &data;
+	smp_mb();
+
+	/* Send a message to all other CPUs and wait for them to respond */
+	send_IPI_allbutself(CALL_FUNCTION_NMI_IPI);
+
+	/* Wait for response */
+	if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) {
+		for (cnt = 0;
+		     cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
+			     !cpus_empty(data.started);
+		     cnt++)
+			mdelay(1);
+
+		if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) {
+			for (cnt = 0;
+			     cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
+				     !cpus_empty(data.finished);
+			     cnt++)
+				mdelay(1);
+		}
+
+		if (cnt >= CALL_FUNCTION_NMI_IPI_TIMEOUT)
+			ret = -ETIMEDOUT;
+
+	} else {
+		/* If timeout value is zero, wait until cpumask has been
+		 * cleared */
+		while (!cpus_empty(data.started))
+			barrier();
+		if (wait)
+			while (!cpus_empty(data.finished))
+				barrier();
+	}
+
+	spin_unlock_irqrestore(&smp_nmi_call_lock, flags);
+	return ret;
+}
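(Usage: the watchdog code earlier in this series is the canonical caller — it broadcasts a register dump to the other CPUs and waits for every one of them to run it:)

	/* As used in watchdog_interrupt() in mn10300-watchdog.c above. */
	smp_nmi_call_function(watchdog_dump_register, NULL, 1);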
+
+/**
+ * stop_this_cpu - Callback to stop a CPU.
+ * @unused: Callback context (ignored).
+ */
+void stop_this_cpu(void *unused)
+{
+	static volatile int stopflag;
+	unsigned long flags;
+
+#ifdef CONFIG_GDBSTUB
+	/* In case of single stepping smp_send_stop by other CPU,
+	 * clear procindebug to avoid deadlock.
+	 */
+	atomic_set(&procindebug[smp_processor_id()], 0);
+#endif	/* CONFIG_GDBSTUB */
+
+	flags = arch_local_cli_save();
+	cpu_clear(smp_processor_id(), cpu_online_map);
+
+	while (!stopflag)
+		cpu_relax();
+
+	cpu_set(smp_processor_id(), cpu_online_map);
+	arch_local_irq_restore(flags);
+}
+
+/**
+ * smp_send_stop - Send a stop request to all CPUs.
+ */
+void smp_send_stop(void)
+{
+	smp_nmi_call_function(stop_this_cpu, NULL, 0);
+}
+
+/**
+ * smp_reschedule_interrupt - Reschedule IPI handler
+ * @irq: The interrupt number.
+ * @dev_id: The device ID.
+ *
+ * We need do nothing here, since the scheduling will be effected on our way
+ * back through entry.S.
+ *
+ * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
+ */
+static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
+{
+	/* do nothing */
+	return IRQ_HANDLED;
+}
+
+/**
+ * smp_call_function_interrupt - Call function IPI handler
+ * @irq: The interrupt number.
+ * @dev_id: The device ID.
+ *
+ * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
+ */
+static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id)
+{
+	/* generic_smp_call_function_interrupt(); */
+	generic_smp_call_function_single_interrupt();
+	return IRQ_HANDLED;
+}
+
+/**
+ * smp_nmi_call_function_interrupt - Non-maskable call function IPI handler
+ */
+void smp_nmi_call_function_interrupt(void)
+{
+	smp_call_func_t func = nmi_call_data->func;
+	void *info = nmi_call_data->info;
+	int wait = nmi_call_data->wait;
+
+	/* Notify the initiating CPU that I've grabbed the data and am about to
+	 * execute the function
+	 */
+	smp_mb();
+	cpu_clear(smp_processor_id(), nmi_call_data->started);
+	(*func)(info);
+
+	if (wait) {
+		smp_mb();
+		cpu_clear(smp_processor_id(), nmi_call_data->finished);
+	}
+}
+
+#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
+    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+/**
+ * smp_ipi_timer_interrupt - Local timer IPI handler
+ * @irq: The interrupt number.
+ * @dev_id: The device ID.
+ *
+ * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
+ */
+static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id)
+{
+	return local_timer_interrupt();
+}
+#endif
+
+void __init smp_init_cpus(void)
+{
+	int i;
+	for (i = 0; i < NR_CPUS; i++) {
+		set_cpu_possible(i, true);
+		set_cpu_present(i, true);
+	}
+}
+
+/**
+ * smp_cpu_init - Initialise AP in start_secondary.
+ *
+ * For this Application Processor, set up init_mm, initialise FPU and set
+ * interrupt level 0-6 setting.
+ */
+static void __init smp_cpu_init(void)
+{
+	unsigned long flags;
+	int cpu_id = smp_processor_id();
+	u16 tmp16;
+
+	if (test_and_set_bit(cpu_id, &cpu_initialized)) {
+		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id);
+		for (;;)
+			local_irq_enable();
+	}
+	printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);
+
+	atomic_inc(&init_mm.mm_count);
+	current->active_mm = &init_mm;
+	BUG_ON(current->mm);
+
+	enter_lazy_tlb(&init_mm, current);
+
+	/* Force FPU initialization */
+	clear_using_fpu(current);
+
+	GxICR(CALL_FUNC_SINGLE_IPI) = CALL_FUNCTION_GxICR_LV | GxICR_DETECT;
+	mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);
+
+	GxICR(LOCAL_TIMER_IPI) = LOCAL_TIMER_GxICR_LV | GxICR_DETECT;
+	mn10300_ipi_enable(LOCAL_TIMER_IPI);
+
+	GxICR(RESCHEDULE_IPI) = RESCHEDULE_GxICR_LV | GxICR_DETECT;
+	mn10300_ipi_enable(RESCHEDULE_IPI);
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+	GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
+	mn10300_ipi_enable(FLUSH_CACHE_IPI);
+#endif
+
+	mn10300_ipi_shutdown(SMP_BOOT_IRQ);
+
+	/* Set up the non-maskable call function IPI */
+	flags = arch_local_cli_save();
+	GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
+	tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
+	arch_local_irq_restore(flags);
+}
+
+/**
+ * smp_prepare_cpu_init - Initialise CPU in startup_secondary
+ *
+ * Set interrupt level 0-6 setting and init ICR of gdbstub.
+ */
+void smp_prepare_cpu_init(void)
+{
+	int loop;
+
+	/* Set the interrupt vector registers */
+	IVAR0 = EXCEP_IRQ_LEVEL0;
+	IVAR1 = EXCEP_IRQ_LEVEL1;
+	IVAR2 = EXCEP_IRQ_LEVEL2;
+	IVAR3 = EXCEP_IRQ_LEVEL3;
+	IVAR4 = EXCEP_IRQ_LEVEL4;
+	IVAR5 = EXCEP_IRQ_LEVEL5;
+	IVAR6 = EXCEP_IRQ_LEVEL6;
+
+	/* Disable all interrupts and set to priority 6 (lowest) */
+	for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
+		GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
+
+#ifdef CONFIG_GDBSTUB
+	/* initialise GDB-stub */
+	do {
+		unsigned long flags;
+		u16 tmp16;
+
+		flags = arch_local_cli_save();
+		GxICR(GDB_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
+		tmp16 = GxICR(GDB_NMI_IPI);
+		arch_local_irq_restore(flags);
+	} while (0);
+#endif
+}
+
+/**
+ * start_secondary - Activate a secondary CPU (AP)
+ * @unused: Thread parameter (ignored).
+ */
+int __init start_secondary(void *unused)
+{
+	smp_cpu_init();
+	smp_callin();
+	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
+		cpu_relax();
+
+	local_flush_tlb();
+	preempt_disable();
+	smp_online();
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+	init_clockevents();
+#endif
+	cpu_idle();
+	return 0;
+}
+
+/**
+ * smp_prepare_cpus - Boot up secondary CPUs (APs)
+ * @max_cpus: Maximum number of CPUs to boot.
+ *
+ * Call do_boot_cpu, and boot up APs.
+ */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+	int phy_id;
+
+	/* Setup boot CPU information */
+	smp_store_cpu_info(0);
+	smp_tune_scheduling();
+
+	init_ipi();
+
+	/* If SMP should be disabled, then finish */
+	if (max_cpus == 0) {
+		printk(KERN_INFO "SMP mode deactivated.\n");
+		goto smp_done;
+	}
+
+	/* Boot secondary CPUs (for which phy_id > 0) */
+	for (phy_id = 0; phy_id < NR_CPUS; phy_id++) {
+		/* Don't boot primary CPU */
+		if (max_cpus <= cpucount + 1)
+			continue;
+		if (phy_id != 0)
+			do_boot_cpu(phy_id);
+		set_cpu_possible(phy_id, true);
+		smp_show_cpu_info(phy_id);
+	}
+
+smp_done:
+	Dprintk("Boot done.\n");
+}
+
+/**
+ * smp_store_cpu_info - Save a CPU's information
+ * @cpu: The CPU to save for.
+ *
+ * Save boot_cpu_data and jiffy for the specified CPU.
+ */
+static void __init smp_store_cpu_info(int cpu)
+{
+	struct mn10300_cpuinfo *ci = &cpu_data[cpu];
+
+	*ci = boot_cpu_data;
+	ci->loops_per_jiffy = loops_per_jiffy;
+	ci->type = CPUREV;
+}
+
+/**
+ * smp_tune_scheduling - Set time slice value
+ *
+ * Nothing to do here.
+ */
+static void __init smp_tune_scheduling(void)
+{
+}
+
+/**
+ * do_boot_cpu: Boot up one CPU
+ * @phy_id: Physical ID of CPU to boot.
+ *
+ * Send an IPI to a secondary CPU to boot it.  Returns 0 on success, 1
+ * otherwise.
+ */
+static int __init do_boot_cpu(int phy_id)
+{
+	struct task_struct *idle;
+	unsigned long send_status, callin_status;
+	int timeout, cpu_id;
+
+	send_status = GxICR_REQUEST;
+	callin_status = 0;
+	timeout = 0;
+	cpu_id = phy_id;
+
+	cpucount++;
+
+	/* Create idle thread for this CPU */
+	idle = fork_idle(cpu_id);
+	if (IS_ERR(idle))
+		panic("Failed fork for CPU#%d.", cpu_id);
+
+	idle->thread.pc = (unsigned long)start_secondary;
+
+	printk(KERN_NOTICE "Booting CPU#%d\n", cpu_id);
+	start_stack[cpu_id - 1] = idle->thread.sp;
+
+	task_thread_info(idle)->cpu = cpu_id;
+
+	/* Send boot IPI to AP */
+	send_IPI_mask(cpumask_of(phy_id), SMP_BOOT_IRQ);
+
+	Dprintk("Waiting for send to finish...\n");
+
+	/* Wait up to 100ms for the AP to receive the boot IPI */
+	do {
+		udelay(1000);
+		send_status =
+			CROSS_GxICR(SMP_BOOT_IRQ, phy_id) & GxICR_REQUEST;
+	} while (send_status == GxICR_REQUEST && timeout++ < 100);
+
+	Dprintk("Waiting for cpu_callin_map.\n");
+
+	if (send_status == 0) {
+		/* Allow AP to start initializing */
+		cpu_set(cpu_id, cpu_callout_map);
+
+		/* Wait for the AP to set cpu_callin_map */
+		timeout = 0;
+		do {
+			udelay(1000);
+			callin_status = cpu_isset(cpu_id, cpu_callin_map);
+		} while (callin_status == 0 && timeout++ < 5000);
+
+		if (callin_status == 0)
+			Dprintk("Not responding.\n");
+	} else {
+		printk(KERN_WARNING "IPI not delivered.\n");
+	}
+
+	if (send_status == GxICR_REQUEST || callin_status == 0) {
+		cpu_clear(cpu_id, cpu_callout_map);
+		cpu_clear(cpu_id, cpu_callin_map);
+		cpu_clear(cpu_id, cpu_initialized);
+		cpucount--;
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * smp_show_cpu_info - Show SMP CPU information
+ * @cpu: The CPU of interest.
+ */
+static void __init smp_show_cpu_info(int cpu)
+{
+	struct mn10300_cpuinfo *ci = &cpu_data[cpu];
+
+	printk(KERN_INFO
+	       "CPU#%d : ioclk speed: %lu.%02luMHz : bogomips : %lu.%02lu\n",
+	       cpu,
+	       MN10300_IOCLK / 1000000,
+	       (MN10300_IOCLK / 10000) % 100,
+	       ci->loops_per_jiffy / (500000 / HZ),
+	       (ci->loops_per_jiffy / (5000 / HZ)) % 100);
+}
+
+/**
+ * smp_callin - Set cpu_callin_map of the current CPU ID
+ */
+static void __init smp_callin(void)
+{
+	unsigned long timeout;
+	int cpu;
+
+	cpu = smp_processor_id();
+	timeout = jiffies + (2 * HZ);
+
+	if (cpu_isset(cpu, cpu_callin_map)) {
+		printk(KERN_ERR "CPU#%d already present.\n", cpu);
+		BUG();
+	}
+	Dprintk("CPU#%d waiting for CALLOUT\n", cpu);
+
+	/* Wait up to 2s total for the boot CPU's callout */
+	while (time_before(jiffies, timeout)) {
+		if (cpu_isset(cpu, cpu_callout_map))
+			break;
+		cpu_relax();
+	}
+
+	if (!time_before(jiffies, timeout)) {
+		printk(KERN_ERR
+		       "BUG: CPU#%d started up but did not get a callout!\n",
+		       cpu);
+		BUG();
+	}
+
+#ifdef CONFIG_CALIBRATE_DELAY
+	calibrate_delay();		/* Get our bogomips */
+#endif
+
+	/* Save our processor parameters */
+	smp_store_cpu_info(cpu);
+
+	/* Allow the boot processor to continue */
+	cpu_set(cpu, cpu_callin_map);
+}
+
+/**
+ * smp_online - Set cpu_online_map
+ */
+static void __init smp_online(void)
+{
+	int cpu;
+
+	cpu = smp_processor_id();
+
+	local_irq_enable();
+
+	cpu_set(cpu, cpu_online_map);
+	smp_wmb();
+}
+
+/**
+ * smp_cpus_done - Finish up once all CPUs have been booted
+ * @max_cpus: Maximum CPU count.
+ *
+ * Do nothing.
+ */
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+/*
+ * smp_prepare_boot_cpu - Set up stuff for the boot processor.
+ *
+ * Set up the cpu_online_map, cpu_callout_map and cpu_callin_map of the boot
+ * processor (CPU 0).
+ */
+void __devinit smp_prepare_boot_cpu(void)
+{
+	cpu_set(0, cpu_callout_map);
+	cpu_set(0, cpu_callin_map);
+	current_thread_info()->cpu = 0;
+}
+
+/*
+ * initialize_secondary - Initialise a secondary CPU (Application Processor).
+ *
+ * Set SP register and jump to thread's PC address.
+ */
+void initialize_secondary(void)
+{
+	asm volatile (
+		"mov	%0,sp	\n"
+		"jmp	(%1)	\n"
+		:
+		: "a"(current->thread.sp), "a"(current->thread.pc));
+}
+
+/**
+ * __cpu_up - Set smp_commenced_mask for the nominated CPU
+ * @cpu: The target CPU.
+ */
+int __devinit __cpu_up(unsigned int cpu)
+{
+	int timeout;
+
+#ifdef CONFIG_HOTPLUG_CPU
+	if (num_online_cpus() == 1)
+		disable_hlt();
+	if (sleep_mode[cpu])
+		run_wakeup_cpu(cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+	cpu_set(cpu, smp_commenced_mask);
+
+	/* Wait 5s total for a response */
+	for (timeout = 0 ; timeout < 5000 ; timeout++) {
+		if (cpu_isset(cpu, cpu_online_map))
+			break;
+		udelay(1000);
+	}
+
+	BUG_ON(!cpu_isset(cpu, cpu_online_map));
+	return 0;
+}
+
+/**
+ * setup_profiling_timer - Set up the profiling timer
+ * @multiplier: The frequency multiplier to use
+ *
+ * The frequency of the profiling timer can be changed by writing a multiplier
+ * value into /proc/profile.
+ */
+int setup_profiling_timer(unsigned int multiplier)
+{
+	return -EINVAL;
+}
+
+/*
+ * CPU hotplug routines
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+static int __init topology_init(void)
+{
+	int cpu, ret;
+
+	for_each_present_cpu(cpu) {
+		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
+		if (ret)
+			printk(KERN_WARNING
+			       "topology_init: register_cpu %d failed (%d)\n",
+			       cpu, ret);
+	}
+	return 0;
+}
+
+subsys_initcall(topology_init);
+
+int __cpu_disable(void)
+{
+	int cpu = smp_processor_id();
+	if (cpu == 0)
+		return -EBUSY;
+
+	migrate_irqs();
+	cpu_clear(cpu, current->active_mm->cpu_vm_mask);
+	return 0;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+	run_sleep_cpu(cpu);
+
+	if (num_online_cpus() == 1)
+		enable_hlt();
+}
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+static inline void hotplug_cpu_disable_cache(void)
+{
+	int tmp;
+	asm volatile(
+		"	movhu	(%1),%0	\n"
+		"	and	%2,%0	\n"
+		"	movhu	%0,(%1)	\n"
+		"1:	movhu	(%1),%0	\n"
+		"	btst	%3,%0	\n"
+		"	bne	1b	\n"
+		: "=&r"(tmp)
+		: "a"(&CHCTR),
+		  "i"(~(CHCTR_ICEN | CHCTR_DCEN)),
+		  "i"(CHCTR_ICBUSY | CHCTR_DCBUSY)
+		: "memory", "cc");
+}
+
+static inline void hotplug_cpu_enable_cache(void)
+{
+	int tmp;
+	asm volatile(
+		"movhu	(%1),%0	\n"
+		"or	%2,%0	\n"
+		"movhu	%0,(%1)	\n"
+		: "=&r"(tmp)
+		: "a"(&CHCTR),
+		  "i"(CHCTR_ICEN | CHCTR_DCEN)
+		: "memory", "cc");
+}
+
+static inline void hotplug_cpu_invalidate_cache(void)
+{
+	int tmp;
+	asm volatile (
+		"movhu	(%1),%0	\n"
+		"or	%2,%0	\n"
+		"movhu	%0,(%1)	\n"
+		: "=&r"(tmp)
+		: "a"(&CHCTR),
+		  "i"(CHCTR_ICINV | CHCTR_DCINV)
+		: "cc");
+}
+
+#else /* CONFIG_MN10300_CACHE_ENABLED */
+#define hotplug_cpu_disable_cache()	do {} while (0)
+#define hotplug_cpu_enable_cache()	do {} while (0)
+#define hotplug_cpu_invalidate_cache()	do {} while (0)
+#endif /* CONFIG_MN10300_CACHE_ENABLED */
+
+/**
+ * hotplug_cpu_nmi_call_function - Call a function on other CPUs for hotplug
+ * @cpumask: List of target CPUs.
+ * @func: The function to call on those CPUs.
+ * @info: The context data for the function to be called.
+ * @wait: Whether to wait for the calls to complete.
+ *
+ * Non-maskably call a function on another CPU for hotplug purposes.
+ *
+ * This function must be called with maskable interrupts disabled.
+ */
+static int hotplug_cpu_nmi_call_function(cpumask_t cpumask,
+					 smp_call_func_t func, void *info,
+					 int wait)
+{
+	/*
+	 * The address and the size of nmi_call_func_mask_data
+	 * need to be aligned on L1_CACHE_BYTES.
+	 */
+	static struct nmi_call_data_struct nmi_call_func_mask_data
+		__cacheline_aligned;
+	unsigned long start, end;
+
+	start = (unsigned long)&nmi_call_func_mask_data;
+	end = start + sizeof(struct nmi_call_data_struct);
+
+	nmi_call_func_mask_data.func = func;
+	nmi_call_func_mask_data.info = info;
+	nmi_call_func_mask_data.started = cpumask;
+	nmi_call_func_mask_data.wait = wait;
+	if (wait)
+		nmi_call_func_mask_data.finished = cpumask;
+
+	spin_lock(&smp_nmi_call_lock);
+	nmi_call_data = &nmi_call_func_mask_data;
+	mn10300_local_dcache_flush_range(start, end);
+	smp_wmb();
+
+	send_IPI_mask(&cpumask, CALL_FUNCTION_NMI_IPI);
+
+	do {
+		mn10300_local_dcache_inv_range(start, end);
+		barrier();
+	} while (!cpus_empty(nmi_call_func_mask_data.started));
+
+	if (wait) {
+		do {
+			mn10300_local_dcache_inv_range(start, end);
+			barrier();
+		} while (!cpus_empty(nmi_call_func_mask_data.finished));
+	}
+
+	spin_unlock(&smp_nmi_call_lock);
+	return 0;
+}
+
+static void restart_wakeup_cpu(void)
+{
+	unsigned int cpu = smp_processor_id();
+
+	cpu_set(cpu, cpu_callin_map);
+	local_flush_tlb();
+	cpu_set(cpu, cpu_online_map);
+	smp_wmb();
+}
+
+static void prepare_sleep_cpu(void *unused)
+{
+	sleep_mode[smp_processor_id()] = 1;
+	smp_mb();
+	mn10300_local_dcache_flush_inv();
+	hotplug_cpu_disable_cache();
+	hotplug_cpu_invalidate_cache();
+}
+
+/* When this function is called, IE=0 and NMID=0. */
+static void sleep_cpu(void *unused)
+{
+	unsigned int cpu_id = smp_processor_id();
+	/*
+	 * The CALL_FUNCTION_NMI_IPI for wakeup_cpu() must not be requested
+	 * before this CPU goes into SLEEP mode.
+	 */
+	do {
+		smp_mb();
+		__sleep_cpu();
+	} while (sleep_mode[cpu_id]);
+	restart_wakeup_cpu();
+}
+
+static void run_sleep_cpu(unsigned int cpu)
+{
+	unsigned long flags;
+	cpumask_t cpumask = cpumask_of_cpu(cpu);
+
+	flags = arch_local_cli_save();
+	hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1);
+	hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0);
+	udelay(1);		/* delay for the cpu to sleep. */
+	arch_local_irq_restore(flags);
+}
+
+static void wakeup_cpu(void)
+{
+	hotplug_cpu_invalidate_cache();
+	hotplug_cpu_enable_cache();
+	smp_mb();
+	sleep_mode[smp_processor_id()] = 0;
+}
+
+static void run_wakeup_cpu(unsigned int cpu)
+{
+	unsigned long flags;
+
+	flags = arch_local_cli_save();
+#if NR_CPUS == 2
+	mn10300_local_dcache_flush_inv();
+#else
+	/*
+	 * Before waking up the CPU, all online CPUs must stop and
+	 * flush their dcaches so that global data is consistent.
+	 */
+#error NR_CPUS > 2 is not supported when CONFIG_HOTPLUG_CPU=y.
+#endif
+	hotplug_cpu_nmi_call_function(cpumask_of_cpu(cpu), wakeup_cpu, NULL, 1);
+	arch_local_irq_restore(flags);
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */

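The AP boot sequence above is a two-phase flag handshake: do_boot_cpu() sets the target's bit in cpu_callout_map and polls cpu_callin_map, while the AP in smp_callin() polls for the callout and then acknowledges it. A minimal userspace C model of that pattern, assuming C11 atomics and a pthread standing in for the AP (the names here are illustrative, not kernel APIs):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int callout, callin;	/* stand-ins for the cpumask bits */

static void *ap_thread(void *unused)
{
	(void)unused;
	while (!atomic_load(&callout))	/* smp_callin(): wait for the callout */
		;
	/* per-CPU initialisation would run here */
	atomic_store(&callin, 1);	/* let the boot CPU continue */
	return NULL;
}

int main(void)
{
	pthread_t ap;

	pthread_create(&ap, NULL, ap_thread, NULL);
	atomic_store(&callout, 1);	/* do_boot_cpu(): allow the AP to start */
	while (!atomic_load(&callin))	/* wait for the AP to check in */
		;
	pthread_join(ap, NULL);
	printf("AP checked in\n");
	return 0;
}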
+ 3 - 4
arch/mn10300/kernel/switch_to.S

@@ -15,6 +15,9 @@
 #include <linux/linkage.h>
 #include <asm/thread_info.h>
 #include <asm/cpu-regs.h>
+#ifdef CONFIG_SMP
+#include <proc/smp-regs.h>
+#endif /* CONFIG_SMP */
 
 	.text
 
@@ -35,8 +38,6 @@ ENTRY(__switch_to)
 	mov	d1,a1
 
 	# save prev context
-	mov	(__frame),d0
-	mov	d0,(THREAD_FRAME,a0)
 	mov	__switch_back,d0
 	mov	d0,(THREAD_PC,a0)
 	mov	sp,a2
@@ -58,8 +59,6 @@ ENTRY(__switch_to)
 	mov	a2,e2
 #endif
 
-	mov	(THREAD_FRAME,a1),a2
-	mov	a2,(__frame)
 	mov	(THREAD_PC,a1),a2
 	mov	d2,d0			# for ret_from_fork
 	mov	d0,a0			# for __switch_to

+ 85 - 27
arch/mn10300/kernel/time.c

@@ -17,29 +17,18 @@
 #include <linux/smp.h>
 #include <linux/profile.h>
 #include <linux/cnt32_to_63.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
 #include <asm/irq.h>
 #include <asm/div64.h>
 #include <asm/processor.h>
 #include <asm/intctl-regs.h>
 #include <asm/rtc.h>
-
-#ifdef CONFIG_MN10300_RTC
-unsigned long mn10300_ioclk;		/* system I/O clock frequency */
-unsigned long mn10300_iobclk;		/* system I/O clock frequency */
-unsigned long mn10300_tsc_per_HZ;	/* number of ioclks per jiffy */
-#endif /* CONFIG_MN10300_RTC */
+#include "internal.h"
 
 static unsigned long mn10300_last_tsc;	/* time-stamp counter at last time
 					 * interrupt occurred */
 
-static irqreturn_t timer_interrupt(int irq, void *dev_id);
-
-static struct irqaction timer_irq = {
-	.handler	= timer_interrupt,
-	.flags		= IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER,
-	.name		= "timer",
-};
-
 static unsigned long sched_clock_multiplier;
 
 /*
@@ -54,9 +43,12 @@ unsigned long long sched_clock(void)
 	unsigned long tsc, tmp;
 	unsigned product[3]; /* 96-bit intermediate value */
 
+	/* cnt32_to_63() is not safe with preemption */
+	preempt_disable();
+
 	/* read the TSC value
 	 */
-	tsc = 0 - get_cycles(); /* get_cycles() counts down */
+	tsc = get_cycles();
 
 	/* expand to 64-bits.
 	 * - sched_clock() must be called once a minute or better or the
@@ -64,6 +56,8 @@ unsigned long long sched_clock(void)
 	 */
 	tsc64.ll = cnt32_to_63(tsc) & 0x7fffffffffffffffULL;
 
+	preempt_enable();
+
 	/* scale the 64-bit TSC value to a nanosecond value via a 96-bit
 	 * intermediate
 	 */
@@ -90,6 +84,20 @@ static void __init mn10300_sched_clock_init(void)
 		__muldiv64u(NSEC_PER_SEC, 1 << 16, MN10300_TSCCLK);
 }
 
+/**
+ * local_timer_interrupt - Local timer interrupt handler
+ *
+ * Handle local timer interrupts for this CPU.  They may have been propagated
+ * to this CPU from the CPU that actually gets them by way of an IPI.
+ */
+irqreturn_t local_timer_interrupt(void)
+{
+	profile_tick(CPU_PROFILING);
+	update_process_times(user_mode(get_irq_regs()));
+	return IRQ_HANDLED;
+}
+
+#ifndef CONFIG_GENERIC_TIME
 /*
  * advance the kernel's time keeping clocks (xtime and jiffies)
  * - we use Timer 0 & 1 cascaded as a clock to nudge us the next time
@@ -98,27 +106,73 @@ static void __init mn10300_sched_clock_init(void)
 static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
 	unsigned tsc, elapse;
+	irqreturn_t ret;
 
 	write_seqlock(&xtime_lock);
 
 	while (tsc = get_cycles(),
-	       elapse = mn10300_last_tsc - tsc, /* time elapsed since last
+	       elapse = tsc - mn10300_last_tsc, /* time elapsed since last
 						 * tick */
 	       elapse > MN10300_TSC_PER_HZ
 	       ) {
-		mn10300_last_tsc -= MN10300_TSC_PER_HZ;
+		mn10300_last_tsc += MN10300_TSC_PER_HZ;
 
 		/* advance the kernel's time tracking system */
-		profile_tick(CPU_PROFILING);
 		do_timer(1);
 	}
 
 	write_sequnlock(&xtime_lock);
 
-	update_process_times(user_mode(get_irq_regs()));
+	ret = local_timer_interrupt();
+#ifdef CONFIG_SMP
+	send_IPI_allbutself(LOCAL_TIMER_IPI);
+#endif
+	return ret;
+}
 
-	return IRQ_HANDLED;
+static struct irqaction timer_irq = {
+	.handler	= timer_interrupt,
+	.flags		= IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER,
+	.name		= "timer",
+};
+#endif /* CONFIG_GENERIC_TIME */
+
+#ifdef CONFIG_CSRC_MN10300
+void __init clocksource_set_clock(struct clocksource *cs, unsigned int clock)
+{
+	u64 temp;
+	u32 shift;
+
+	/* Find a shift value */
+	for (shift = 32; shift > 0; shift--) {
+		temp = (u64) NSEC_PER_SEC << shift;
+		do_div(temp, clock);
+		if ((temp >> 32) == 0)
+			break;
+	}
+	cs->shift = shift;
+	cs->mult = (u32) temp;
 }
+#endif
+
+#ifdef CONFIG_CEVT_MN10300
+void __cpuinit clockevent_set_clock(struct clock_event_device *cd,
+				    unsigned int clock)
+{
+	u64 temp;
+	u32 shift;
+
+	/* Find a shift value */
+	for (shift = 32; shift > 0; shift--) {
+		temp = (u64) clock << shift;
+		do_div(temp, NSEC_PER_SEC);
+		if ((temp >> 32) == 0)
+			break;
+	}
+	cd->shift = shift;
+	cd->mult = (u32) temp;
+}
+#endif
 
 /*
  * initialise the various timers used by the main part of the kernel
@@ -131,21 +185,25 @@ void __init time_init(void)
 	 */
 	TMPSCNT |= TMPSCNT_ENABLE;
 
+#ifdef CONFIG_GENERIC_TIME
+	init_clocksource();
+#else
 	startup_timestamp_counter();
+#endif
 
 	printk(KERN_INFO
 	       "timestamp counter I/O clock running at %lu.%02lu"
 	       " (calibrated against RTC)\n",
 	       MN10300_TSCCLK / 1000000, (MN10300_TSCCLK / 10000) % 100);
 
-	mn10300_last_tsc = TMTSCBC;
-
-	/* use timer 0 & 1 cascaded to tick at as close to HZ as possible */
-	setup_irq(TMJCIRQ, &timer_irq);
+	mn10300_last_tsc = read_timestamp_counter();
 
-	set_intr_level(TMJCIRQ, TMJCICR_LEVEL);
-
-	startup_jiffies_counter();
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+	init_clockevents();
+#else
+	reload_jiffies_counter(MN10300_JC_PER_HZ - 1);
+	setup_jiffies_interrupt(TMJCIRQ, &timer_irq, CONFIG_TIMER_IRQ_LEVEL);
+#endif
 
 #ifdef CONFIG_MN10300_WD_TIMER
 	/* start the watchdog timer */

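The clocksource_set_clock() and clockevent_set_clock() helpers added to time.c both search for the largest shift such that the corresponding multiplier still fits in 32 bits; conversion between cycles and nanoseconds is then a multiply and a right shift. A standalone C sketch of the clocksource case, with an assumed example clock rate and the kernel's do_div() replaced by plain 64-bit division:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t clock = 33333333;	/* assumed example ioclk rate in Hz */
	uint64_t temp = 0;
	uint32_t shift, mult;

	/* find the largest shift for which mult still fits in 32 bits */
	for (shift = 32; shift > 0; shift--) {
		temp = (NSEC_PER_SEC << shift) / clock;
		if ((temp >> 32) == 0)
			break;
	}
	mult = (uint32_t)temp;

	/* one second's worth of cycles should convert to ~1e9 ns */
	uint64_t cycles = clock;
	printf("shift=%u mult=%u ns=%llu\n", shift, mult,
	       (unsigned long long)((cycles * mult) >> shift));
	return 0;
}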
+ 13 - 29
arch/mn10300/kernel/traps.c

@@ -45,9 +45,6 @@
 #error "INTERRUPT_VECTOR_BASE not aligned to 16MiB boundary!"
 #endif
 
-struct pt_regs *__frame; /* current frame pointer */
-EXPORT_SYMBOL(__frame);
-
 int kstack_depth_to_print = 24;
 
 spinlock_t die_lock = __SPIN_LOCK_UNLOCKED(die_lock);
@@ -101,7 +98,6 @@ DO_EINFO(SIGILL,  {}, "invalid opcode",		invalid_op,	ILL_ILLOPC);
 DO_EINFO(SIGILL,  {}, "invalid ex opcode",	invalid_exop,	ILL_ILLOPC);
 DO_EINFO(SIGBUS,  {}, "invalid address",	mem_error,	BUS_ADRERR);
 DO_EINFO(SIGBUS,  {}, "bus error",		bus_error,	BUS_ADRERR);
-DO_EINFO(SIGILL,  {}, "FPU invalid opcode", 	fpu_invalid_op,	ILL_COPROC);
 
 DO_ERROR(SIGTRAP,
 #ifndef CONFIG_MN10300_USING_JTAG
@@ -222,11 +218,14 @@ void show_registers_only(struct pt_regs *regs)
 	printk(KERN_EMERG "threadinfo=%p task=%p)\n",
 	       current_thread_info(), current);
 
-	if ((unsigned long) current >= 0x90000000UL &&
-	    (unsigned long) current < 0x94000000UL)
+	if ((unsigned long) current >= PAGE_OFFSET &&
+	    (unsigned long) current < (unsigned long)high_memory)
 		printk(KERN_EMERG "Process %s (pid: %d)\n",
 		       current->comm, current->pid);
 
+#ifdef CONFIG_SMP
+	printk(KERN_EMERG "CPUID:  %08x\n", CPUID);
+#endif
 	printk(KERN_EMERG "CPUP:   %04hx\n", CPUP);
 	printk(KERN_EMERG "TBR:    %08x\n", TBR);
 	printk(KERN_EMERG "DEAR:   %08x\n", DEAR);
@@ -522,8 +521,12 @@ void __init set_intr_stub(enum exception_code code, void *handler)
 {
 	unsigned long addr;
 	u8 *vector = (u8 *)(CONFIG_INTERRUPT_VECTOR_BASE + code);
+	unsigned long flags;
 
 	addr = (unsigned long) handler - (unsigned long) vector;
+
+	flags = arch_local_cli_save();
+
 	vector[0] = 0xdc;		/* JMP handler */
 	vector[1] = addr;
 	vector[2] = addr >> 8;
@@ -533,30 +536,12 @@ void __init set_intr_stub(enum exception_code code, void *handler)
 	vector[6] = 0xcb;
 	vector[7] = 0xcb;
 
-	mn10300_dcache_flush_inv();
-	mn10300_icache_inv();
-}
-
-/*
- * set an interrupt stub to invoke the JTAG unit and then jump to a handler
- */
-void __init set_jtag_stub(enum exception_code code, void *handler)
-{
-	unsigned long addr;
-	u8 *vector = (u8 *)(CONFIG_INTERRUPT_VECTOR_BASE + code);
-
-	addr = (unsigned long) handler - ((unsigned long) vector + 1);
-	vector[0] = 0xff;		/* PI to jump into JTAG debugger */
-	vector[1] = 0xdc;		/* jmp handler */
-	vector[2] = addr;
-	vector[3] = addr >> 8;
-	vector[4] = addr >> 16;
-	vector[5] = addr >> 24;
-	vector[6] = 0xcb;
-	vector[7] = 0xcb;
+	arch_local_irq_restore(flags);
 
+#ifndef CONFIG_MN10300_CACHE_SNOOP
 	mn10300_dcache_flush_inv();
-	flush_icache_range((unsigned long) vector, (unsigned long) vector + 8);
+	mn10300_icache_inv();
+#endif
 }
 
 /*
@@ -581,7 +566,6 @@ void __init trap_init(void)
 	set_excp_vector(EXCEP_PRIVINSACC,	insn_acc_error);
 	set_excp_vector(EXCEP_PRIVDATACC,	data_acc_error);
 	set_excp_vector(EXCEP_DATINSACC,	insn_acc_error);
-	set_excp_vector(EXCEP_FPU_DISABLED,	fpu_disabled);
 	set_excp_vector(EXCEP_FPU_UNIMPINS,	fpu_invalid_op);
 	set_excp_vector(EXCEP_FPU_OPERATION,	fpu_exception);
 

+ 2 - 2
arch/mn10300/lib/bitops.c

@@ -15,7 +15,7 @@
 /*
  * try flipping a bit using BSET and BCLR
  */
-void change_bit(int nr, volatile void *addr)
+void change_bit(unsigned long nr, volatile void *addr)
 {
 	if (test_bit(nr, addr))
 		goto try_clear_bit;
@@ -34,7 +34,7 @@ try_clear_bit:
 /*
  * try flipping a bit using BSET and BCLR and returning the old value
  */
-int test_and_change_bit(int nr, volatile void *addr)
+int test_and_change_bit(unsigned long nr, volatile void *addr)
 {
 	if (test_bit(nr, addr))
 		goto try_clear_bit;

+ 4 - 4
arch/mn10300/lib/delay.c

@@ -38,14 +38,14 @@ EXPORT_SYMBOL(__delay);
  */
 void __udelay(unsigned long usecs)
 {
-	signed long ioclk, stop;
+	unsigned long start, stop, cnt;
 
 	/* usecs * CLK / 1E6 */
 	stop = __muldiv64u(usecs, MN10300_TSCCLK, 1000000);
-	stop = TMTSCBC - stop;
+	start = TMTSCBC;
 
 	do {
-		ioclk = TMTSCBC;
-	} while (stop < ioclk);
+		cnt = start - TMTSCBC;
+	} while (cnt < stop);
 }
 EXPORT_SYMBOL(__udelay);

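The __udelay() rewrite above is the standard wrap-safe idiom for a free-running counter: elapsed ticks are computed as start - now in unsigned arithmetic (TMTSCBC counts down), which remains correct modulo 2^32 across a wrap, whereas the old signed comparison against a precomputed stop value went wrong at the wrap point. A tiny C demonstration:

#include <stdint.h>
#include <stdio.h>

/* elapsed ticks of a wrapping 32-bit down-counter */
static uint32_t elapsed(uint32_t start, uint32_t now)
{
	return start - now;	/* well-defined modulo 2^32 */
}

int main(void)
{
	/* the counter started at 5 and wrapped past 0 down to 0xfffffffb */
	printf("%u ticks\n", elapsed(5, 0xfffffffbu));	/* prints "10 ticks" */
	return 0;
}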
+ 22 - 27
arch/mn10300/lib/do_csum.S

@@ -10,26 +10,25 @@
  */
 #include <asm/cache.h>
 
-        .section .text
-        .balign	L1_CACHE_BYTES
+	.section .text
+	.balign	L1_CACHE_BYTES
 
 ###############################################################################
 #
-# unsigned int do_csum(const unsigned char *buff, size_t len)
+# unsigned int do_csum(const unsigned char *buff, int len)
 #
 ###############################################################################
 	.globl	do_csum
-        .type	do_csum,@function
+	.type	do_csum,@function
 do_csum:
 	movm	[d2,d3],(sp)
-	mov	d0,(12,sp)
-	mov	d1,(16,sp)
 	mov	d1,d2				# count
 	mov	d0,a0				# buff
+	mov	a0,a1
 	clr	d1				# accumulator
 
 	cmp	+0,d2
-	beq	do_csum_done			# return if zero-length buffer
+	ble	do_csum_done			# check for zero length or negative
 
 	# 4-byte align the buffer pointer
 	btst	+3,a0
@@ -41,17 +40,15 @@ do_csum:
 	inc	a0
 	asl	+8,d0
 	add	d0,d1
-	addc	+0,d1
 	add	-1,d2
-do_csum_addr_not_odd:
 
+do_csum_addr_not_odd:
 	cmp	+2,d2
 	bcs	do_csum_fewer_than_4
 	btst	+2,a0
 	beq	do_csum_now_4b_aligned
 	movhu	(a0+),d0
 	add	d0,d1
-	addc	+0,d1
 	add	-2,d2
 	cmp	+4,d2
 	bcs	do_csum_fewer_than_4
@@ -66,20 +63,20 @@ do_csum_now_4b_aligned:
 
 do_csum_loop:
 	mov	(a0+),d0
-	add	d0,d1
 	mov	(a0+),e0
-	addc	e0,d1
 	mov	(a0+),e1
-	addc	e1,d1
 	mov	(a0+),e3
+	add	d0,d1
+	addc	e0,d1
+	addc	e1,d1
 	addc	e3,d1
 	mov	(a0+),d0
-	addc	d0,d1
 	mov	(a0+),e0
-	addc	e0,d1
 	mov	(a0+),e1
-	addc	e1,d1
 	mov	(a0+),e3
+	addc	d0,d1
+	addc	e0,d1
+	addc	e1,d1
 	addc	e3,d1
 	addc	+0,d1
 
@@ -94,12 +91,12 @@ do_csum_remainder:
 	cmp	+16,d2
 	bcs	do_csum_fewer_than_16
 	mov	(a0+),d0
-	add	d0,d1
 	mov	(a0+),e0
-	addc	e0,d1
 	mov	(a0+),e1
-	addc	e1,d1
 	mov	(a0+),e3
+	add	d0,d1
+	addc	e0,d1
+	addc	e1,d1
 	addc	e3,d1
 	addc	+0,d1
 	add	-16,d2
@@ -131,9 +128,9 @@ do_csum_fewer_than_4:
 	xor_cmp	d0,d0,+2,d2
 	bcs	do_csum_fewer_than_2
 	movhu	(a0+),d0
-do_csum_fewer_than_2:
 	and	+1,d2
 	beq	do_csum_add_last_bit
+do_csum_fewer_than_2:
 	movbu	(a0),d3
 	add	d3,d0
 do_csum_add_last_bit:
@@ -142,21 +139,19 @@ do_csum_add_last_bit:
 
 do_csum_done:
 	# compress the checksum down to 16 bits
-	mov	+0xffff0000,d2
-	and	d1,d2
+	mov	+0xffff0000,d0
+	and	d1,d0
 	asl	+16,d1
-	add	d2,d1,d0
+	add	d1,d0
 	addc	+0xffff,d0
 	lsr	+16,d0
 
 	# flip the halves of the word result if the buffer was oddly aligned
-	mov	(12,sp),d1
-	and	+1,d1
+	and	+1,a1
 	beq	do_csum_not_oddly_aligned
 	swaph	d0,d0				# exchange bits 15:8 with 7:0
 
 do_csum_not_oddly_aligned:
 	ret	[d2,d3],8
 
-do_csum_end:
-	.size	do_csum, do_csum_end-do_csum
+	.size	do_csum, .-do_csum

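For readers not fluent in MN10300 assembly, do_csum() above is the usual Internet-checksum inner loop: accumulate words with end-around carry, fold the 32-bit sum down to 16 bits, and swap the result's bytes if the buffer started at an odd address. A rough, unoptimised C sketch of the arithmetic (alignment handling and the odd-address swap are omitted; this is not the kernel's implementation):

#include <stdint.h>
#include <stdio.h>

static unsigned int do_csum_sketch(const unsigned char *buff, int len)
{
	uint64_t sum = 0;

	while (len > 1) {		/* sum 16-bit little-endian words */
		sum += buff[0] | (buff[1] << 8);
		buff += 2;
		len -= 2;
	}
	if (len == 1)			/* trailing odd byte */
		sum += buff[0];

	while (sum >> 16)		/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (unsigned int)sum;
}

int main(void)
{
	unsigned char data[] = { 0x45, 0x00, 0x00, 0x54, 0xa6, 0xf2 };
	printf("csum=%#x\n", do_csum_sketch(data, (int)sizeof(data)));
	return 0;
}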
+ 101 - 0
arch/mn10300/mm/Kconfig.cache

@@ -0,0 +1,101 @@
+#
+# MN10300 CPU cache options
+#
+
+choice
+	prompt "CPU Caching mode"
+	default MN10300_CACHE_WBACK
+	help
+	  This option determines the caching mode for the kernel.
+
+	  In Write-Back caching mode, all reads and writes cause the affected
+	  cacheline to be read into the cache first before being operated
+	  upon. Memory is then not updated by a write until the cache is full
+	  and a cacheline needs to be displaced from the cache to make room.
+	  Only at that point is it written back.
+
+	  Write-Through caching only fetches cachelines from memory on a
+	  read. Writes always get written directly to memory. If the affected
+	  cacheline is also in cache, it will be updated too.
+
+	  The final option is to turn off caching entirely.
+
+config MN10300_CACHE_WBACK
+	bool "Write-Back"
+	help
+	  The dcache operates in delayed write-back mode.  It must be manually
+	  flushed if writes are made that subsequently need to be executed or
+	  to be DMA'd by a device.
+
+config MN10300_CACHE_WTHRU
+	bool "Write-Through"
+	help
+	  The dcache operates in immediate write-through mode.  Writes are
+	  committed to RAM immediately in addition to being stored in the
+	  cache.  This means that the written data is immediately available for
+	  execution or DMA.
+
+	  This is not available for use with an SMP kernel if cache flushing
+	  and invalidation by automatic purge register is not selected.
+
+config MN10300_CACHE_DISABLED
+	bool "Disabled"
+	help
+	  The icache and dcache are disabled.
+
+endchoice
+
+config MN10300_CACHE_ENABLED
+	def_bool y if !MN10300_CACHE_DISABLED
+
+
+choice
+	prompt "CPU cache flush/invalidate method"
+	default MN10300_CACHE_MANAGE_BY_TAG if !AM34_2
+	default MN10300_CACHE_MANAGE_BY_REG if AM34_2
+	depends on MN10300_CACHE_ENABLED
+	help
+	  This determines the method by which CPU cache flushing and
+	  invalidation is performed.
+
+config MN10300_CACHE_MANAGE_BY_TAG
+	bool "Use the cache tag registers directly"
+	depends on !(SMP && MN10300_CACHE_WTHRU)
+
+config MN10300_CACHE_MANAGE_BY_REG
+	bool "Flush areas by way of automatic purge registers (AM34 only)"
+	depends on AM34_2
+
+endchoice
+
+config MN10300_CACHE_INV_BY_TAG
+	def_bool y if MN10300_CACHE_MANAGE_BY_TAG && MN10300_CACHE_ENABLED
+
+config MN10300_CACHE_INV_BY_REG
+	def_bool y if MN10300_CACHE_MANAGE_BY_REG && MN10300_CACHE_ENABLED
+
+config MN10300_CACHE_FLUSH_BY_TAG
+	def_bool y if MN10300_CACHE_MANAGE_BY_TAG && MN10300_CACHE_WBACK
+
+config MN10300_CACHE_FLUSH_BY_REG
+	def_bool y if MN10300_CACHE_MANAGE_BY_REG && MN10300_CACHE_WBACK
+
+
+config MN10300_HAS_CACHE_SNOOP
+	def_bool n
+
+config MN10300_CACHE_SNOOP
+	bool "Use CPU Cache Snooping"
+	depends on MN10300_CACHE_ENABLED && MN10300_HAS_CACHE_SNOOP
+	default y
+
+config MN10300_CACHE_FLUSH_ICACHE
+	def_bool y if MN10300_CACHE_WBACK && !MN10300_CACHE_SNOOP
+	help
+	  Set if we need the dcache flushing before the icache is invalidated.
+
+config MN10300_CACHE_INV_ICACHE
+	def_bool y if MN10300_CACHE_WTHRU && !MN10300_CACHE_SNOOP
+	help
+	  Set if we need the icache to be invalidated, even if the dcache is in
+	  write-through mode and doesn't need flushing.

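The practical consequence of the Write-Back option is the one its help text warns about: dirty data sits in the cache until eviction, so anything a device will read by DMA must be flushed to RAM first. A hypothetical driver fragment showing where the flush primitive provided by this series would be called (send_buffer() and start_dma() are illustrative, not real APIs):

#include <linux/string.h>
#include <asm/cacheflush.h>

extern void start_dma(void *buf, unsigned long len);	/* hypothetical */

static void send_buffer(void *buf, unsigned long len)
{
	memcpy(buf, "payload", 7);	/* dirties the cacheline(s) */
#ifdef CONFIG_MN10300_CACHE_WBACK
	/* push the written data out to RAM before the device reads it */
	mn10300_dcache_flush_range((unsigned long)buf,
				   (unsigned long)buf + len);
#endif
	start_dma(buf, len);
}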
+ 12 - 2
arch/mn10300/mm/Makefile

@@ -2,11 +2,21 @@
 # Makefile for the MN10300-specific memory management code
 #
 
-cacheflush-y	:= cache.o cache-mn10300.o
-cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o
+cache-smp-wback-$(CONFIG_MN10300_CACHE_WBACK) := cache-smp-flush.o
+
+cacheflush-y	:= cache.o
+cacheflush-$(CONFIG_SMP) += cache-smp.o cache-smp-inv.o $(cache-smp-wback-y)
+cacheflush-$(CONFIG_MN10300_CACHE_INV_ICACHE) += cache-inv-icache.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_ICACHE) += cache-flush-icache.o
+cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_TAG) += cache-inv-by-tag.o
+cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_REG) += cache-inv-by-reg.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_TAG) += cache-flush-by-tag.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_REG) += cache-flush-by-reg.o
 
 cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
 
 obj-y := \
 	init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
 	misalignment.o dma-alloc.o $(cacheflush-y)
+
+obj-$(CONFIG_SMP) += tlb-smp.o

+ 308 - 0
arch/mn10300/mm/cache-flush-by-reg.S

@@ -0,0 +1,308 @@
+/* MN10300 CPU core caching routines, using indirect regs on cache controller
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+
+	.am33_2
+
+#ifndef CONFIG_SMP
+	.globl mn10300_dcache_flush
+	.globl mn10300_dcache_flush_page
+	.globl mn10300_dcache_flush_range
+	.globl mn10300_dcache_flush_range2
+	.globl mn10300_dcache_flush_inv
+	.globl mn10300_dcache_flush_inv_page
+	.globl mn10300_dcache_flush_inv_range
+	.globl mn10300_dcache_flush_inv_range2
+
+mn10300_dcache_flush		= mn10300_local_dcache_flush
+mn10300_dcache_flush_page	= mn10300_local_dcache_flush_page
+mn10300_dcache_flush_range	= mn10300_local_dcache_flush_range
+mn10300_dcache_flush_range2	= mn10300_local_dcache_flush_range2
+mn10300_dcache_flush_inv	= mn10300_local_dcache_flush_inv
+mn10300_dcache_flush_inv_page	= mn10300_local_dcache_flush_inv_page
+mn10300_dcache_flush_inv_range	= mn10300_local_dcache_flush_inv_range
+mn10300_dcache_flush_inv_range2	= mn10300_local_dcache_flush_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush(void)
+# Flush the entire data cache back to RAM
+#
+###############################################################################
+	ALIGN
+	.globl	mn10300_local_dcache_flush
+	.type	mn10300_local_dcache_flush,@function
+mn10300_local_dcache_flush:
+	movhu	(CHCTR),d0
+	btst	CHCTR_DCEN,d0
+	beq	mn10300_local_dcache_flush_end
+
+	mov	DCPGCR,a0
+
+	LOCAL_CLI_SAVE(d1)
+
+	# wait for busy bit of area purge
+	setlb
+	mov	(a0),d0
+	btst	DCPGCR_DCPGBSY,d0
+	lne
+
+	# set mask
+	clr	d0
+	mov	d0,(DCPGMR)
+
+	# area purge
+	#
+	# DCPGCR = DCPGCR_DCP
+	#
+	mov	DCPGCR_DCP,d0
+	mov	d0,(a0)
+
+	# wait for busy bit of area purge
+	setlb
+	mov	(a0),d0
+	btst	DCPGCR_DCPGBSY,d0
+	lne
+
+	LOCAL_IRQ_RESTORE(d1)
+
+mn10300_local_dcache_flush_end:
+	ret	[],0
+	.size	mn10300_local_dcache_flush,.-mn10300_local_dcache_flush
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_page(unsigned long start)
+# void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size)
+# Flush a range of addresses on a page in the dcache
+#
+###############################################################################
+	ALIGN
+	.globl	mn10300_local_dcache_flush_page
+	.globl	mn10300_local_dcache_flush_range
+	.globl	mn10300_local_dcache_flush_range2
+	.type	mn10300_local_dcache_flush_page,@function
+	.type	mn10300_local_dcache_flush_range,@function
+	.type	mn10300_local_dcache_flush_range2,@function
+mn10300_local_dcache_flush_page:
+	and	~(PAGE_SIZE-1),d0
+	mov	PAGE_SIZE,d1
+mn10300_local_dcache_flush_range2:
+	add	d0,d1
+mn10300_local_dcache_flush_range:
+	movm	[d2,d3,a2],(sp)
+
+	movhu	(CHCTR),d2
+	btst	CHCTR_DCEN,d2
+	beq	mn10300_local_dcache_flush_range_end
+
+	# calculate alignsize
+	#
+	# alignsize = L1_CACHE_BYTES;
+	# for (i = (end - start - 1) / L1_CACHE_BYTES ;  i > 0; i >>= 1)
+	#     alignsize <<= 1;
+	# d2 = alignsize;
+	#
+	mov	L1_CACHE_BYTES,d2
+	sub	d0,d1,d3
+	add	-1,d3
+	lsr	L1_CACHE_SHIFT,d3
+	beq	2f
+1:
+	add     d2,d2
+	lsr     1,d3
+	bne     1b
+2:
+	mov	d1,a1		# a1 = end
+
+	LOCAL_CLI_SAVE(d3)
+	mov	DCPGCR,a0
+
+	# wait for busy bit of area purge
+	setlb
+	mov	(a0),d1
+	btst	DCPGCR_DCPGBSY,d1
+	lne
+
+	# determine the mask
+	mov	d2,d1
+	add	-1,d1
+	not	d1		# d1 = mask = ~(alignsize-1)
+	mov	d1,(DCPGMR)
+
+	and	d1,d0,a2	# a2 = mask & start
+
+dcpgloop:
+	# area purge
+	mov	a2,d0
+	or	DCPGCR_DCP,d0
+	mov	d0,(a0)		# DCPGCR = (mask & start) | DCPGCR_DCP
+
+	# wait for busy bit of area purge
+	setlb
+	mov	(a0),d1
+	btst	DCPGCR_DCPGBSY,d1
+	lne
+
+	# check purge of end address
+	add	d2,a2		# a2 += alignsize
+	cmp	a1,a2		# if (a2 < end) goto dcpgloop
+	bns	dcpgloop
+
+	LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_dcache_flush_range_end:
+	ret	[d2,d3,a2],12
+
+	.size	mn10300_local_dcache_flush_page,.-mn10300_local_dcache_flush_page
+	.size	mn10300_local_dcache_flush_range,.-mn10300_local_dcache_flush_range
+	.size	mn10300_local_dcache_flush_range2,.-mn10300_local_dcache_flush_range2
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv(void)
+# Flush the entire data cache and invalidate all entries
+#
+###############################################################################
+	ALIGN
+	.globl	mn10300_local_dcache_flush_inv
+	.type	mn10300_local_dcache_flush_inv,@function
+mn10300_local_dcache_flush_inv:
+	movhu	(CHCTR),d0
+	btst	CHCTR_DCEN,d0
+	beq	mn10300_local_dcache_flush_inv_end
+
+	mov	DCPGCR,a0
+
+	LOCAL_CLI_SAVE(d1)
+
+	# wait for busy bit of area purge & invalidate
+	setlb
+	mov	(a0),d0
+	btst	DCPGCR_DCPGBSY,d0
+	lne
+
+	# set the mask to cover everything
+	clr	d0
+	mov	d0,(DCPGMR)
+
+	# area purge & invalidate
+	mov	DCPGCR_DCP|DCPGCR_DCI,d0
+	mov	d0,(a0)
+
+	# wait for busy bit of area purge & invalidate
+	setlb
+	mov	(a0),d0
+	btst	DCPGCR_DCPGBSY,d0
+	lne
+
+	LOCAL_IRQ_RESTORE(d1)
+
+mn10300_local_dcache_flush_inv_end:
+	ret	[],0
+	.size	mn10300_local_dcache_flush_inv,.-mn10300_local_dcache_flush_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv_page(unsigned long start)
+# void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size)
+# Flush and invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+	ALIGN
+	.globl	mn10300_local_dcache_flush_inv_page
+	.globl	mn10300_local_dcache_flush_inv_range
+	.globl	mn10300_local_dcache_flush_inv_range2
+	.type	mn10300_local_dcache_flush_inv_page,@function
+	.type	mn10300_local_dcache_flush_inv_range,@function
+	.type	mn10300_local_dcache_flush_inv_range2,@function
+mn10300_local_dcache_flush_inv_page:
+	and	~(PAGE_SIZE-1),d0
+	mov	PAGE_SIZE,d1
+mn10300_local_dcache_flush_inv_range2:
+	add	d0,d1
+mn10300_local_dcache_flush_inv_range:
+	movm	[d2,d3,a2],(sp)
+
+	movhu	(CHCTR),d2
+	btst	CHCTR_DCEN,d2
+	beq	mn10300_local_dcache_flush_inv_range_end
+
+	# calculate alignsize
+	#
+	# alignsize = L1_CACHE_BYTES;
+	# for (i = (end - start - 1) / L1_CACHE_BYTES; i > 0; i >>= 1)
+	#     alignsize <<= 1;
+	# d2 = alignsize
+	#
+	mov	L1_CACHE_BYTES,d2
+	sub	d0,d1,d3
+	add	-1,d3
+	lsr	L1_CACHE_SHIFT,d3
+	beq	2f
+1:
+	add     d2,d2
+	lsr     1,d3
+	bne     1b
+2:
+	mov	d1,a1		# a1 = end
+
+	LOCAL_CLI_SAVE(d3)
+	mov	DCPGCR,a0
+
+	# wait for busy bit of area purge & invalidate
+	setlb
+	mov	(a0),d1
+	btst	DCPGCR_DCPGBSY,d1
+	lne
+
+	# set the mask
+	mov	d2,d1
+	add	-1,d1
+	not	d1		# d1 = mask = ~(alignsize-1)
+	mov	d1,(DCPGMR)
+
+	and	d1,d0,a2	# a2 = mask & start
+
+dcpgivloop:
+	# area purge & invalidate
+	mov	a2,d0
+	or	DCPGCR_DCP|DCPGCR_DCI,d0
+	mov	d0,(a0)		# DCPGCR = (mask & start)|DCPGCR_DCP|DCPGCR_DCI
+
+	# wait for busy bit of area purge & invalidate
+	setlb
+	mov	(a0),d1
+	btst	DCPGCR_DCPGBSY,d1
+	lne
+
+	# check purge & invalidate of end address
+	add	d2,a2		# a2 += alignsize
+	cmp	a1,a2		# if (a2 < end) goto dcpgivloop
+	bns	dcpgivloop
+
+	LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_dcache_flush_inv_range_end:
+	ret	[d2,d3,a2],12
+	.size	mn10300_local_dcache_flush_inv_page,.-mn10300_local_dcache_flush_inv_page
+	.size	mn10300_local_dcache_flush_inv_range,.-mn10300_local_dcache_flush_inv_range
+	.size	mn10300_local_dcache_flush_inv_range2,.-mn10300_local_dcache_flush_inv_range2

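The alignsize/mask computation described in the comments above rounds the requested range up to a power-of-two granule, writes the granule's complement into DCPGMR as a mask, and then purges one granule per loop iteration. A standalone C model of that computation (L1_CACHE_SHIFT is assumed to be 5 here; the real value comes from asm/cache.h):

#include <stdint.h>
#include <stdio.h>

#define L1_CACHE_SHIFT	5		/* assumed 32-byte cachelines */
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)

int main(void)
{
	uint32_t start = 0x90001234, end = 0x90001834;	/* example range */
	uint32_t alignsize = L1_CACHE_BYTES;
	uint32_t i;

	/* double alignsize for each halving of the line count, as above */
	for (i = (end - start - 1) >> L1_CACHE_SHIFT; i > 0; i >>= 1)
		alignsize <<= 1;

	uint32_t mask = ~(alignsize - 1);	/* the value given to DCPGMR */
	printf("alignsize=%#x mask=%#x first purge address=%#x\n",
	       alignsize, mask, start & mask);
	return 0;
}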
+ 251 - 0
arch/mn10300/mm/cache-flush-by-tag.S

@@ -0,0 +1,251 @@
+/* MN10300 CPU core caching routines, using direct tag flushing
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+
+	.am33_2
+
+#ifndef CONFIG_SMP
+	.globl mn10300_dcache_flush
+	.globl mn10300_dcache_flush_page
+	.globl mn10300_dcache_flush_range
+	.globl mn10300_dcache_flush_range2
+	.globl mn10300_dcache_flush_inv
+	.globl mn10300_dcache_flush_inv_page
+	.globl mn10300_dcache_flush_inv_range
+	.globl mn10300_dcache_flush_inv_range2
+
+mn10300_dcache_flush		= mn10300_local_dcache_flush
+mn10300_dcache_flush_page	= mn10300_local_dcache_flush_page
+mn10300_dcache_flush_range	= mn10300_local_dcache_flush_range
+mn10300_dcache_flush_range2	= mn10300_local_dcache_flush_range2
+mn10300_dcache_flush_inv	= mn10300_local_dcache_flush_inv
+mn10300_dcache_flush_inv_page	= mn10300_local_dcache_flush_inv_page
+mn10300_dcache_flush_inv_range	= mn10300_local_dcache_flush_inv_range
+mn10300_dcache_flush_inv_range2	= mn10300_local_dcache_flush_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush(void)
+# Flush the entire data cache back to RAM
+#
+###############################################################################
+	ALIGN
+	.globl	mn10300_local_dcache_flush
+	.type	mn10300_local_dcache_flush,@function
+mn10300_local_dcache_flush:
+	movhu	(CHCTR),d0
+	btst	CHCTR_DCEN,d0
+	beq	mn10300_local_dcache_flush_end
+
+	# read the addresses tagged in the cache's tag RAM and attempt to flush
+	# those addresses specifically
+	# - we rely on the hardware to filter out invalid tag entry addresses
+	mov	DCACHE_TAG(0,0),a0		# dcache tag RAM access address
+	mov	DCACHE_PURGE(0,0),a1		# dcache purge request address
+	mov	L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1  # total number of entries
+
+mn10300_local_dcache_flush_loop:
+	mov	(a0),d0
+	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
+	or	L1_CACHE_TAG_VALID,d0		# retain valid entries in the
+						# cache
+	mov	d0,(a1)				# conditional purge
+
+	add	L1_CACHE_BYTES,a0
+	add	L1_CACHE_BYTES,a1
+	add	-1,d1
+	bne	mn10300_local_dcache_flush_loop
+
+mn10300_local_dcache_flush_end:
+	ret	[],0
+	.size	mn10300_local_dcache_flush,.-mn10300_local_dcache_flush
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_page(unsigned long start)
+# void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size)
+# Flush a range of addresses on a page in the dcache
+#
+###############################################################################
+	ALIGN
+	.globl	mn10300_local_dcache_flush_page
+	.globl	mn10300_local_dcache_flush_range
+	.globl	mn10300_local_dcache_flush_range2
+	.type	mn10300_local_dcache_flush_page,@function
+	.type	mn10300_local_dcache_flush_range,@function
+	.type	mn10300_local_dcache_flush_range2,@function
+mn10300_local_dcache_flush_page:
+	and	~(PAGE_SIZE-1),d0
+	mov	PAGE_SIZE,d1
+mn10300_local_dcache_flush_range2:
+	add	d0,d1
+mn10300_local_dcache_flush_range:
+	movm	[d2],(sp)
+
+	movhu	(CHCTR),d2
+	btst	CHCTR_DCEN,d2
+	beq	mn10300_local_dcache_flush_range_end
+
+	sub	d0,d1,a0
+	cmp	MN10300_DCACHE_FLUSH_BORDER,a0
+	ble	1f
+
+	movm	(sp),[d2]
+	bra	mn10300_local_dcache_flush
+1:
+
+	# round start addr down
+	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
+	mov	d0,a1
+
+	add	L1_CACHE_BYTES,d1			# round end addr up
+	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+
+	# write a request to flush all instances of an address from the cache
+	mov	DCACHE_PURGE(0,0),a0
+	mov	a1,d0
+	and	L1_CACHE_TAG_ENTRY,d0
+	add	d0,a0				# starting dcache purge control
+						# reg address
+
+	sub	a1,d1
+	lsr	L1_CACHE_SHIFT,d1		# total number of entries to
+						# examine
+
+	or	L1_CACHE_TAG_VALID,a1		# retain valid entries in the
+						# cache
+
+mn10300_local_dcache_flush_range_loop:
+	mov	a1,(L1_CACHE_WAYDISP*0,a0)	# conditionally purge this line
+						# all ways
+
+	add	L1_CACHE_BYTES,a0
+	add	L1_CACHE_BYTES,a1
+	and	~L1_CACHE_WAYDISP,a0		# make sure we stay on way 0
+	add	-1,d1
+	bne	mn10300_local_dcache_flush_range_loop
+
+mn10300_local_dcache_flush_range_end:
+	ret	[d2],4
+
+	.size	mn10300_local_dcache_flush_page,.-mn10300_local_dcache_flush_page
+	.size	mn10300_local_dcache_flush_range,.-mn10300_local_dcache_flush_range
+	.size	mn10300_local_dcache_flush_range2,.-mn10300_local_dcache_flush_range2
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv(void)
+# Flush the entire data cache and invalidate all entries
+#
+###############################################################################
+	ALIGN
+	.globl	mn10300_local_dcache_flush_inv
+	.type	mn10300_local_dcache_flush_inv,@function
+mn10300_local_dcache_flush_inv:
+	movhu	(CHCTR),d0
+	btst	CHCTR_DCEN,d0
+	beq	mn10300_local_dcache_flush_inv_end
+
+	mov	L1_CACHE_NENTRIES,d1
+	clr	a1
+
+mn10300_local_dcache_flush_inv_loop:
+	mov	(DCACHE_PURGE_WAY0(0),a1),d0	# unconditional purge
+	mov	(DCACHE_PURGE_WAY1(0),a1),d0	# unconditional purge
+	mov	(DCACHE_PURGE_WAY2(0),a1),d0	# unconditional purge
+	mov	(DCACHE_PURGE_WAY3(0),a1),d0	# unconditional purge
+
+	add	L1_CACHE_BYTES,a1
+	add	-1,d1
+	bne	mn10300_local_dcache_flush_inv_loop
+
+mn10300_local_dcache_flush_inv_end:
+	ret	[],0
+	.size	mn10300_local_dcache_flush_inv,.-mn10300_local_dcache_flush_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv_page(unsigned long start)
+# void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size)
+# Flush and invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+	ALIGN
+	.globl	mn10300_local_dcache_flush_inv_page
+	.globl	mn10300_local_dcache_flush_inv_range
+	.globl	mn10300_local_dcache_flush_inv_range2
+	.type	mn10300_local_dcache_flush_inv_page,@function
+	.type	mn10300_local_dcache_flush_inv_range,@function
+	.type	mn10300_local_dcache_flush_inv_range2,@function
+mn10300_local_dcache_flush_inv_page:
+	and	~(PAGE_SIZE-1),d0
+	mov	PAGE_SIZE,d1
+mn10300_local_dcache_flush_inv_range2:
+	add	d0,d1
+mn10300_local_dcache_flush_inv_range:
+	movm	[d2],(sp)
+
+	movhu	(CHCTR),d2
+	btst	CHCTR_DCEN,d2
+	beq	mn10300_local_dcache_flush_inv_range_end
+
+	sub	d0,d1,a0
+	cmp	MN10300_DCACHE_FLUSH_INV_BORDER,a0
+	ble	1f
+
+	movm	(sp),[d2]
+	bra	mn10300_local_dcache_flush_inv
+1:
+
+	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0	# round start
+								# addr down
+	mov	d0,a1
+
+	add	L1_CACHE_BYTES,d1			# round end addr up
+	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+
+	# write a request to flush and invalidate all instances of an address
+	# from the cache
+	mov	DCACHE_PURGE(0,0),a0
+	mov	a1,d0
+	and	L1_CACHE_TAG_ENTRY,d0
+	add	d0,a0				# starting dcache purge control
+						# reg address
+
+	sub	a1,d1
+	lsr	L1_CACHE_SHIFT,d1		# total number of entries to
+						# examine
+
+mn10300_local_dcache_flush_inv_range_loop:
+	mov	a1,(L1_CACHE_WAYDISP*0,a0)	# conditionally purge this line
+						# in all ways
+
+	add	L1_CACHE_BYTES,a0
+	add	L1_CACHE_BYTES,a1
+	and	~L1_CACHE_WAYDISP,a0		# make sure we stay on way 0
+	add	-1,d1
+	bne	mn10300_local_dcache_flush_inv_range_loop
+
+mn10300_local_dcache_flush_inv_range_end:
+	ret	[d2],4
+	.size	mn10300_local_dcache_flush_inv_page,.-mn10300_local_dcache_flush_inv_page
+	.size	mn10300_local_dcache_flush_inv_range,.-mn10300_local_dcache_flush_inv_range
+	.size	mn10300_local_dcache_flush_inv_range2,.-mn10300_local_dcache_flush_inv_range2

+ 155 - 0
arch/mn10300/mm/cache-flush-icache.c

@@ -0,0 +1,155 @@
+/* Flush dcache and invalidate icache when the dcache is in writeback mode
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
+
+/**
+ * flush_icache_page - Flush a page from the dcache and invalidate the icache
+ * @vma: The VMA the page is part of.
+ * @page: The page to be flushed.
+ *
+ * Write a page back from the dcache and invalidate the icache so that we can
+ * run code from it that we've just written into it.
+ */
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+	unsigned long start = page_to_phys(page);
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+
+	mn10300_local_dcache_flush_page(start);
+	mn10300_local_icache_inv_page(start);
+
+	smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, start + PAGE_SIZE);
+	smp_unlock_cache(flags);
+}
+EXPORT_SYMBOL(flush_icache_page);
+
+/**
+ * flush_icache_page_range - Flush dcache and invalidate icache for part of a
+ *				single page
+ * @start: The starting virtual address of the page part.
+ * @end: The ending virtual address of the page part.
+ *
+ * Flush the dcache and invalidate the icache for part of a single page, as
+ * determined by the virtual addresses given.  The page must be in the paged
+ * area.
+ */
+static void flush_icache_page_range(unsigned long start, unsigned long end)
+{
+	unsigned long addr, size, off;
+	struct page *page;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *ppte, pte;
+
+	/* work out how much of the page to flush */
+	off = start & ~PAGE_MASK;
+	size = end - start;
+
+	/* get the physical address the page is mapped to from the page
+	 * tables */
+	pgd = pgd_offset(current->mm, start);
+	if (!pgd || !pgd_val(*pgd))
+		return;
+
+	pud = pud_offset(pgd, start);
+	if (!pud || !pud_val(*pud))
+		return;
+
+	pmd = pmd_offset(pud, start);
+	if (!pmd || !pmd_val(*pmd))
+		return;
+
+	ppte = pte_offset_map(pmd, start);
+	if (!ppte)
+		return;
+	pte = *ppte;
+	pte_unmap(ppte);
+
+	if (pte_none(pte))
+		return;
+
+	page = pte_page(pte);
+	if (!page)
+		return;
+
+	addr = page_to_phys(page);
+
+	/* flush the dcache and invalidate the icache coverage on that
+	 * region */
+	mn10300_local_dcache_flush_range2(addr + off, size);
+	mn10300_local_icache_inv_range2(addr + off, size);
+	smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);
+}
+
+/**
+ * flush_icache_range - Globally flush dcache and invalidate icache for region
+ * @start: The starting virtual address of the region.
+ * @end: The ending virtual address of the region.
+ *
+ * This is used by the kernel to globally flush some code it has just written
+ * from the dcache back to RAM and then to globally invalidate the icache over
+ * that region so that that code can be run on all CPUs in the system.
+ */
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+	unsigned long start_page, end_page;
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+
+	if (end > 0x80000000UL) {
+		/* addresses above 0xa0000000 do not go through the cache */
+		if (end > 0xa0000000UL) {
+			end = 0xa0000000UL;
+			if (start >= end)
+				goto done;
+		}
+
+		/* kernel addresses between 0x80000000 and 0x9fffffff do not
+		 * require page tables, so we just map such addresses
+		 * directly */
+		start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
+		mn10300_local_dcache_flush_range(start_page, end);
+		mn10300_local_icache_inv_range(start_page, end);
+		smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start_page, end);
+		if (start_page == start)
+			goto done;
+		end = start_page;
+	}
+
+	start_page = start & PAGE_MASK;
+	end_page = (end - 1) & PAGE_MASK;
+
+	if (start_page == end_page) {
+		/* the first and last bytes are on the same page */
+		flush_icache_page_range(start, end);
+	} else if (start_page + 1 == end_page) {
+		/* split over two virtually contiguous pages */
+		flush_icache_page_range(start, end_page);
+		flush_icache_page_range(end_page, end);
+	} else {
+		/* more than 2 pages; just flush the entire cache */
+		mn10300_dcache_flush();
+		mn10300_icache_inv();
+		smp_cache_call(SMP_IDCACHE_INV_FLUSH, 0, 0);
+	}
+
+done:
+	smp_unlock_cache(flags);
+}
+EXPORT_SYMBOL(flush_icache_range);

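A typical caller of the flush_icache_range() defined above is code that patches instructions at runtime: write the new bytes through the dcache, then make them visible to instruction fetch on every CPU. A hypothetical fragment (commit_patched_code(), patch_site and new_insn are illustrative, not kernel APIs):

#include <linux/types.h>
#include <asm/cacheflush.h>

static void commit_patched_code(u8 *patch_site, const u8 *new_insn,
				unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len; i++)	/* write the replacement opcodes */
		patch_site[i] = new_insn[i];

	/* flush the dcache to RAM and invalidate the icache over the range */
	flush_icache_range((unsigned long)patch_site,
			   (unsigned long)patch_site + len);
}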
+ 0 - 192
arch/mn10300/mm/cache-flush-mn10300.S

@@ -1,192 +0,0 @@
-/* MN10300 CPU core caching routines
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#include <linux/sys.h>
-#include <linux/linkage.h>
-#include <asm/smp.h>
-#include <asm/page.h>
-#include <asm/cache.h>
-
-	.am33_2
-	.globl mn10300_dcache_flush
-	.globl mn10300_dcache_flush_page
-	.globl mn10300_dcache_flush_range
-	.globl mn10300_dcache_flush_range2
-	.globl mn10300_dcache_flush_inv
-	.globl mn10300_dcache_flush_inv_page
-	.globl mn10300_dcache_flush_inv_range
-	.globl mn10300_dcache_flush_inv_range2
-
-###############################################################################
-#
-# void mn10300_dcache_flush(void)
-# Flush the entire data cache back to RAM
-#
-###############################################################################
-	ALIGN
-mn10300_dcache_flush:
-	movhu	(CHCTR),d0
-	btst	CHCTR_DCEN,d0
-	beq	mn10300_dcache_flush_end
-
-	# read the addresses tagged in the cache's tag RAM and attempt to flush
-	# those addresses specifically
-	# - we rely on the hardware to filter out invalid tag entry addresses
-	mov	DCACHE_TAG(0,0),a0		# dcache tag RAM access address
-	mov	DCACHE_PURGE(0,0),a1		# dcache purge request address
-	mov	L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1  # total number of entries
-
-mn10300_dcache_flush_loop:
-	mov	(a0),d0
-	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
-	or	L1_CACHE_TAG_VALID,d0		# retain valid entries in the
-						# cache
-	mov	d0,(a1)				# conditional purge
-
-mn10300_dcache_flush_skip:
-	add	L1_CACHE_BYTES,a0
-	add	L1_CACHE_BYTES,a1
-	add	-1,d1
-	bne	mn10300_dcache_flush_loop
-
-mn10300_dcache_flush_end:
-	ret	[],0
-
-###############################################################################
-#
-# void mn10300_dcache_flush_page(unsigned start)
-# void mn10300_dcache_flush_range(unsigned start, unsigned end)
-# void mn10300_dcache_flush_range2(unsigned start, unsigned size)
-# Flush a range of addresses on a page in the dcache
-#
-###############################################################################
-	ALIGN
-mn10300_dcache_flush_page:
-	mov	PAGE_SIZE,d1
-mn10300_dcache_flush_range2:
-	add	d0,d1
-mn10300_dcache_flush_range:
-	movm	[d2,d3],(sp)
-
-	movhu	(CHCTR),d2
-	btst	CHCTR_DCEN,d2
-	beq	mn10300_dcache_flush_range_end
-
-	# round start addr down
-	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
-	mov	d0,a1
-
-	add	L1_CACHE_BYTES,d1			# round end addr up
-	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
-
-	# write a request to flush all instances of an address from the cache
-	mov	DCACHE_PURGE(0,0),a0
-	mov	a1,d0
-	and	L1_CACHE_TAG_ENTRY,d0
-	add	d0,a0				# starting dcache purge control
-						# reg address
-
-	sub	a1,d1
-	lsr	L1_CACHE_SHIFT,d1		# total number of entries to
-						# examine
-
-	or	L1_CACHE_TAG_VALID,a1		# retain valid entries in the
-						# cache
-
-mn10300_dcache_flush_range_loop:
-	mov	a1,(L1_CACHE_WAYDISP*0,a0)	# conditionally purge this line
-						# all ways
-
-	add	L1_CACHE_BYTES,a0
-	add	L1_CACHE_BYTES,a1
-	and	~L1_CACHE_WAYDISP,a0		# make sure way stay on way 0
-	add	-1,d1
-	bne	mn10300_dcache_flush_range_loop
-
-mn10300_dcache_flush_range_end:
-	ret	[d2,d3],8
-
-###############################################################################
-#
-# void mn10300_dcache_flush_inv(void)
-# Flush the entire data cache and invalidate all entries
-#
-###############################################################################
-	ALIGN
-mn10300_dcache_flush_inv:
-	movhu	(CHCTR),d0
-	btst	CHCTR_DCEN,d0
-	beq	mn10300_dcache_flush_inv_end
-
-	# hit each line in the dcache with an unconditional purge
-	mov	DCACHE_PURGE(0,0),a1		# dcache purge request address
-	mov	L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1  # total number of entries
-
-mn10300_dcache_flush_inv_loop:
-	mov	(a1),d0				# unconditional purge
-
-	add	L1_CACHE_BYTES,a1
-	add	-1,d1
-	bne	mn10300_dcache_flush_inv_loop
-
-mn10300_dcache_flush_inv_end:
-	ret	[],0
-
-###############################################################################
-#
-# void mn10300_dcache_flush_inv_page(unsigned start)
-# void mn10300_dcache_flush_inv_range(unsigned start, unsigned end)
-# void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size)
-# Flush and invalidate a range of addresses on a page in the dcache
-#
-###############################################################################
-	ALIGN
-mn10300_dcache_flush_inv_page:
-	mov	PAGE_SIZE,d1
-mn10300_dcache_flush_inv_range2:
-	add	d0,d1
-mn10300_dcache_flush_inv_range:
-	movm	[d2,d3],(sp)
-	movhu	(CHCTR),d2
-	btst	CHCTR_DCEN,d2
-	beq	mn10300_dcache_flush_inv_range_end
-
-	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0	# round start
-								# addr down
-	mov	d0,a1
-
-	add	L1_CACHE_BYTES,d1			# round end addr up
-	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
-
-	# write a request to flush and invalidate all instances of an address
-	# from the cache
-	mov	DCACHE_PURGE(0,0),a0
-	mov	a1,d0
-	and	L1_CACHE_TAG_ENTRY,d0
-	add	d0,a0				# starting dcache purge control
-						# reg address
-
-	sub	a1,d1
-	lsr	L1_CACHE_SHIFT,d1		# total number of entries to
-						# examine
-
-mn10300_dcache_flush_inv_range_loop:
-	mov	a1,(L1_CACHE_WAYDISP*0,a0)	# conditionally purge this line
-						# in all ways
-
-	add	L1_CACHE_BYTES,a0
-	add	L1_CACHE_BYTES,a1
-	and	~L1_CACHE_WAYDISP,a0		# make sure we stay on way 0
-	add	-1,d1
-	bne	mn10300_dcache_flush_inv_range_loop
-
-mn10300_dcache_flush_inv_range_end:
-	ret	[d2,d3],8

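The flush loops in the file above walk the cache one line at a time: the start address is rounded down and the end address rounded up to whole cachelines, and each line is written to the purge register with the valid bit set so that only matching valid entries are actually flushed. A standalone C model of that arithmetic (the constants here are illustrative stand-ins, not the real <asm/cache.h> values):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the real values live in <asm/cache.h>. */
#define L1_CACHE_SHIFT		4
#define L1_CACHE_BYTES		(1u << L1_CACHE_SHIFT)
#define L1_CACHE_TAG_ENTRY	0x00000ff0u	/* entry index bits (assumed) */
#define L1_CACHE_TAG_ADDRESS	0xfffff000u	/* tag address bits (assumed) */
#define L1_CACHE_TAG_VALID	0x00000001u	/* entry valid bit (assumed) */
#define LINE_MASK		(L1_CACHE_TAG_ADDRESS | L1_CACHE_TAG_ENTRY)

/* Model of mn10300_dcache_flush_range(): one conditional purge per line. */
static void model_flush_range(uint32_t start, uint32_t end)
{
	uint32_t line = start & LINE_MASK;		    /* round start down */
	uint32_t stop = (end + L1_CACHE_BYTES) & LINE_MASK; /* round end up */

	for (; line != stop; line += L1_CACHE_BYTES)
		printf("purge <- %#010x\n", line | L1_CACHE_TAG_VALID);
}

int main(void)
{
	model_flush_range(0x80001005u, 0x80001043u);	/* flushes 5 lines */
	return 0;
}
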
+ 356 - 0
arch/mn10300/mm/cache-inv-by-reg.S

@@ -0,0 +1,356 @@
+/* MN10300 CPU cache invalidation routines, using automatic purge registers
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+#include <asm/cacheflush.h>
+
+#define mn10300_local_dcache_inv_range_intr_interval \
+	+((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
+
+#if mn10300_local_dcache_inv_range_intr_interval > 0xff
+#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
+#endif
+
+	.am33_2
+
+#ifndef CONFIG_SMP
+	.globl	mn10300_icache_inv
+	.globl	mn10300_icache_inv_page
+	.globl	mn10300_icache_inv_range
+	.globl	mn10300_icache_inv_range2
+	.globl	mn10300_dcache_inv
+	.globl	mn10300_dcache_inv_page
+	.globl	mn10300_dcache_inv_range
+	.globl	mn10300_dcache_inv_range2
+
+mn10300_icache_inv		= mn10300_local_icache_inv
+mn10300_icache_inv_page		= mn10300_local_icache_inv_page
+mn10300_icache_inv_range	= mn10300_local_icache_inv_range
+mn10300_icache_inv_range2	= mn10300_local_icache_inv_range2
+mn10300_dcache_inv		= mn10300_local_dcache_inv
+mn10300_dcache_inv_page		= mn10300_local_dcache_inv_page
+mn10300_dcache_inv_range	= mn10300_local_dcache_inv_range
+mn10300_dcache_inv_range2	= mn10300_local_dcache_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_icache_inv(void)
+# Invalidate the entire icache
+#
+###############################################################################
+	ALIGN
+	.globl	mn10300_local_icache_inv
+	.type	mn10300_local_icache_inv,@function
+mn10300_local_icache_inv:
+	mov	CHCTR,a0
+
+	movhu	(a0),d0
+	btst	CHCTR_ICEN,d0
+	beq	mn10300_local_icache_inv_end
+
+	# invalidate
+	or	CHCTR_ICINV,d0
+	movhu	d0,(a0)
+	movhu	(a0),d0
+
+mn10300_local_icache_inv_end:
+	ret	[],0
+	.size	mn10300_local_icache_inv,.-mn10300_local_icache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv(void)
+# Invalidate the entire dcache
+#
+###############################################################################
+	ALIGN
+	.globl	mn10300_local_dcache_inv
+	.type	mn10300_local_dcache_inv,@function
+mn10300_local_dcache_inv:
+	mov	CHCTR,a0
+
+	movhu	(a0),d0
+	btst	CHCTR_DCEN,d0
+	beq	mn10300_local_dcache_inv_end
+
+	# invalidate
+	or	CHCTR_DCINV,d0
+	movhu	d0,(a0)
+	movhu	(a0),d0
+
+mn10300_local_dcache_inv_end:
+	ret	[],0
+	.size	mn10300_local_dcache_inv,.-mn10300_local_dcache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size)
+# void mn10300_local_dcache_inv_page(unsigned long start)
+# Invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+	ALIGN
+	.globl	mn10300_local_dcache_inv_page
+	.globl	mn10300_local_dcache_inv_range
+	.globl	mn10300_local_dcache_inv_range2
+	.type	mn10300_local_dcache_inv_page,@function
+	.type	mn10300_local_dcache_inv_range,@function
+	.type	mn10300_local_dcache_inv_range2,@function
+mn10300_local_dcache_inv_page:
+	and	~(PAGE_SIZE-1),d0
+	mov	PAGE_SIZE,d1
+mn10300_local_dcache_inv_range2:
+	add	d0,d1
+mn10300_local_dcache_inv_range:
+	# If we are in writeback mode we check the start and end alignments,
+	# and if they're not cacheline-aligned, we must flush any bits outside
+	# the range that share cachelines with stuff inside the range
+#ifdef CONFIG_MN10300_CACHE_WBACK
+	btst	~(L1_CACHE_BYTES-1),d0
+	bne	1f
+	btst	~(L1_CACHE_BYTES-1),d1
+	beq	2f
+1:
+	bra	mn10300_local_dcache_flush_inv_range
+2:
+#endif /* CONFIG_MN10300_CACHE_WBACK */
+
+	movm	[d2,d3,a2],(sp)
+
+	mov	CHCTR,a0
+	movhu	(a0),d2
+	btst	CHCTR_DCEN,d2
+	beq	mn10300_local_dcache_inv_range_end
+
+	# round the addresses out to be full cachelines, unless we're in
+	# writeback mode, in which case we would be in flush and invalidate by
+	# now
+#ifndef CONFIG_MN10300_CACHE_WBACK
+	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0	# round start
+								# addr down
+
+	mov	L1_CACHE_BYTES-1,d2
+	add	d2,d1
+	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1	# round end addr up
+#endif /* !CONFIG_MN10300_CACHE_WBACK */
+
+	sub	d0,d1,d2		# calculate the total size
+	mov	d0,a2			# A2 = start address
+	mov	d1,a1			# A1 = end address
+
+	LOCAL_CLI_SAVE(d3)
+
+	mov	DCPGCR,a0		# make sure the purger isn't busy
+	setlb
+	mov	(a0),d0
+	btst	DCPGCR_DCPGBSY,d0
+	lne
+
+	# skip initial address alignment calculation if address is zero
+	mov	d2,d1
+	cmp	0,a2
+	beq	1f
+
+dcivloop:
+	/* calculate alignsize
+	 *
+	 * alignsize = L1_CACHE_BYTES;
+	 * while (!(start & alignsize)) {
+	 *	alignsize <<= 1;
+	 * }
+	 * d1 = alignsize;
+	 */
+	mov	L1_CACHE_BYTES,d1
+	lsr	1,d1
+	setlb
+	add	d1,d1
+	mov	d1,d0
+	and	a2,d0
+	leq
+
+1:
+	/* calculate invsize
+	 *
+	 * if (totalsize > alignsize) {
+	 *	invsize = alignsize;
+	 * } else {
+	 *	invsize = totalsize;
+	 *	tmp = 0x80000000;
+	 *	while (!(invsize & tmp)) {
+	 *		tmp >>= 1;
+	 *	}
+	 *	invsize = tmp;
+	 * }
+	 * d1 = invsize
+	 */
+	cmp	d2,d1
+	bns	2f
+	mov	d2,d1
+
+	mov	0x80000000,d0		# start with bit 31 set
+	setlb
+	lsr	1,d0
+	mov	d0,e0
+	and	d1,e0
+	leq
+	mov	d0,d1
+
+2:
+	/* set mask
+	 *
+	 * mask = ~(invsize-1);
+	 * DCPGMR = mask;
+	 */
+	mov	d1,d0
+	add	-1,d0
+	not	d0
+	mov	d0,(DCPGMR)
+
+	# invalidate area
+	mov	a2,d0
+	or	DCPGCR_DCI,d0
+	mov	d0,(a0)			# DCPGCR = (mask & start) | DCPGCR_DCI
+
+	setlb				# wait for the purge to complete
+	mov	(a0),d0
+	btst	DCPGCR_DCPGBSY,d0
+	lne
+
+	sub	d1,d2			# decrease size remaining
+	add	d1,a2			# increase next start address
+
+	/* check whether we have reached the end address
+	 *
+	 * a2 = a2 + invsize
+	 * if (a2 < end) {
+	 *     goto dcivloop;
+	 * } */
+	cmp	a1,a2
+	bns	dcivloop
+
+	LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_dcache_inv_range_end:
+	ret	[d2,d3,a2],12
+	.size	mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page
+	.size	mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range
+	.size	mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2
+
+###############################################################################
+#
+# void mn10300_local_icache_inv_page(unsigned long start)
+# void mn10300_local_icache_inv_range2(unsigned long start, unsigned long size)
+# void mn10300_local_icache_inv_range(unsigned long start, unsigned long end)
+# Invalidate a range of addresses on a page in the icache
+#
+###############################################################################
+	ALIGN
+	.globl	mn10300_local_icache_inv_page
+	.globl	mn10300_local_icache_inv_range
+	.globl	mn10300_local_icache_inv_range2
+	.type	mn10300_local_icache_inv_page,@function
+	.type	mn10300_local_icache_inv_range,@function
+	.type	mn10300_local_icache_inv_range2,@function
+mn10300_local_icache_inv_page:
+	and	~(PAGE_SIZE-1),d0
+	mov	PAGE_SIZE,d1
+mn10300_local_icache_inv_range2:
+	add	d0,d1
+mn10300_local_icache_inv_range:
+	movm	[d2,d3,a2],(sp)
+
+	mov	CHCTR,a0
+	movhu	(a0),d2
+	btst	CHCTR_ICEN,d2
+	beq	mn10300_local_icache_inv_range_reg_end
+
+	/* calculate alignsize
+	 *
+	 * alignsize = L1_CACHE_BYTES;
+	 * for (i = (end - start - 1) / L1_CACHE_BYTES; i > 0; i >>= 1) {
+	 *     alignsize <<= 1;
+	 * }
+	 * d2 = alignsize;
+	 */
+	mov	L1_CACHE_BYTES,d2
+	sub	d0,d1,d3
+	add	-1,d3
+	lsr	L1_CACHE_SHIFT,d3
+	beq	2f
+1:
+	add	d2,d2
+	lsr	1,d3
+	bne	1b
+2:
+
+	/* a1 = end */
+	mov	d1,a1
+
+	LOCAL_CLI_SAVE(d3)
+
+	mov	ICIVCR,a0
+	/* wait for the area-invalidation busy bit to clear */
+	setlb
+	mov	(a0),d1
+	btst	ICIVCR_ICIVBSY,d1
+	lne
+
+	/* set mask
+	 *
+	 * mask = ~(alignsize-1);
+	 * ICIVMR = mask;
+	 */
+	mov	d2,d1
+	add	-1,d1
+	not	d1
+	mov	d1,(ICIVMR)
+	/* a2 = mask & start */
+	and	d1,d0,a2
+
+icivloop:
+	/* area invalidate
+	 *
+	 * ICIVCR = (mask & start) | ICIVCR_ICI
+	 */
+	mov	a2,d0
+	or	ICIVCR_ICI,d0
+	mov	d0,(a0)
+
+	/* wait for the area-invalidation busy bit to clear */
+	setlb
+	mov	(a0),d1
+	btst	ICIVCR_ICIVBSY,d1
+	lne
+
+	/* check whether we have reached the end address
+	 *
+	 * a2 = a2 + alignsize
+	 * if (a2 < end) {
+	 *     goto icivloop;
+	 * } */
+	add	d2,a2
+	cmp	a1,a2
+	bns	icivloop
+
+	LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_icache_inv_range_reg_end:
+	ret	[d2,d3,a2],12
+	.size	mn10300_local_icache_inv_page,.-mn10300_local_icache_inv_page
+	.size	mn10300_local_icache_inv_range,.-mn10300_local_icache_inv_range
+	.size	mn10300_local_icache_inv_range2,.-mn10300_local_icache_inv_range2

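The automatic-purge path above drives the DCPGMR/DCPGCR register pair, which can only describe a naturally aligned power-of-two block per operation, so the pseudocode in the comments carves [start, end) into such blocks: each step uses the largest block the current start alignment allows, clamped to a power of two no bigger than what remains. A userspace sketch of that decomposition (illustrative line size; one printed mask/address pair stands in for one purge-register programming):

#include <stdint.h>
#include <stdio.h>

#define L1_CACHE_BYTES	16u	/* illustrative line size */

/* Largest power of two that keeps "start" aligned, at least one line. */
static uint32_t align_size(uint32_t start)
{
	uint32_t a = L1_CACHE_BYTES;

	if (start == 0)
		return 0x80000000u;	/* zero is aligned to anything */
	while (!(start & a))
		a <<= 1;
	return a;
}

/* Round "size" down to a power of two. */
static uint32_t pow2_floor(uint32_t size)
{
	uint32_t t = 0x80000000u;

	while (!(size & t))
		t >>= 1;
	return t;
}

int main(void)
{
	uint32_t start = 0x90001010u, end = 0x90001400u;

	while (start < end) {
		uint32_t inv = align_size(start);

		if (end - start < inv)
			inv = pow2_floor(end - start);
		/* model: DCPGMR = ~(inv - 1); DCPGCR = start | DCPGCR_DCI */
		printf("purge block %#010x..%#010x (mask %#010x)\n",
		       start, start + inv - 1, ~(inv - 1));
		start += inv;
	}
	return 0;
}
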
+ 348 - 0
arch/mn10300/mm/cache-inv-by-tag.S

@@ -0,0 +1,348 @@
+/* MN10300 CPU core caching routines
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+#include <asm/cacheflush.h>
+
+#define mn10300_local_dcache_inv_range_intr_interval \
+	+((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
+
+#if mn10300_local_dcache_inv_range_intr_interval > 0xff
+#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
+#endif
+
+	.am33_2
+
+	.globl	mn10300_local_icache_inv_page
+	.globl	mn10300_local_icache_inv_range
+	.globl	mn10300_local_icache_inv_range2
+
+mn10300_local_icache_inv_page	= mn10300_local_icache_inv
+mn10300_local_icache_inv_range	= mn10300_local_icache_inv
+mn10300_local_icache_inv_range2	= mn10300_local_icache_inv
+
+#ifndef CONFIG_SMP
+	.globl	mn10300_icache_inv
+	.globl	mn10300_icache_inv_page
+	.globl	mn10300_icache_inv_range
+	.globl	mn10300_icache_inv_range2
+	.globl	mn10300_dcache_inv
+	.globl	mn10300_dcache_inv_page
+	.globl	mn10300_dcache_inv_range
+	.globl	mn10300_dcache_inv_range2
+
+mn10300_icache_inv		= mn10300_local_icache_inv
+mn10300_icache_inv_page		= mn10300_local_icache_inv_page
+mn10300_icache_inv_range	= mn10300_local_icache_inv_range
+mn10300_icache_inv_range2	= mn10300_local_icache_inv_range2
+mn10300_dcache_inv		= mn10300_local_dcache_inv
+mn10300_dcache_inv_page		= mn10300_local_dcache_inv_page
+mn10300_dcache_inv_range	= mn10300_local_dcache_inv_range
+mn10300_dcache_inv_range2	= mn10300_local_dcache_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_icache_inv(void)
+# Invalidate the entire icache
+#
+###############################################################################
+	ALIGN
+	.globl	mn10300_local_icache_inv
+	.type	mn10300_local_icache_inv,@function
+mn10300_local_icache_inv:
+	mov	CHCTR,a0
+
+	movhu	(a0),d0
+	btst	CHCTR_ICEN,d0
+	beq	mn10300_local_icache_inv_end
+
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+	LOCAL_CLI_SAVE(d1)
+
+	# disable the icache
+	and	~CHCTR_ICEN,d0
+	movhu	d0,(a0)
+
+	# and wait for it to calm down
+	setlb
+	movhu	(a0),d0
+	btst	CHCTR_ICBUSY,d0
+	lne
+
+	# invalidate
+	or	CHCTR_ICINV,d0
+	movhu	d0,(a0)
+
+	# wait for the cache to finish
+	mov	CHCTR,a0
+	setlb
+	movhu	(a0),d0
+	btst	CHCTR_ICBUSY,d0
+	lne
+
+	# and reenable it
+	and	~CHCTR_ICINV,d0
+	or	CHCTR_ICEN,d0
+	movhu	d0,(a0)
+	movhu	(a0),d0
+
+	LOCAL_IRQ_RESTORE(d1)
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+	# invalidate
+	or	CHCTR_ICINV,d0
+	movhu	d0,(a0)
+	movhu	(a0),d0
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+
+mn10300_local_icache_inv_end:
+	ret	[],0
+	.size	mn10300_local_icache_inv,.-mn10300_local_icache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv(void)
+# Invalidate the entire dcache
+#
+###############################################################################
+	ALIGN
+	.globl	mn10300_local_dcache_inv
+	.type	mn10300_local_dcache_inv,@function
+mn10300_local_dcache_inv:
+	mov	CHCTR,a0
+
+	movhu	(a0),d0
+	btst	CHCTR_DCEN,d0
+	beq	mn10300_local_dcache_inv_end
+
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+	LOCAL_CLI_SAVE(d1)
+
+	# disable the dcache
+	and	~CHCTR_DCEN,d0
+	movhu	d0,(a0)
+
+	# and wait for it to calm down
+	setlb
+	movhu	(a0),d0
+	btst	CHCTR_DCBUSY,d0
+	lne
+
+	# invalidate
+	or	CHCTR_DCINV,d0
+	movhu	d0,(a0)
+
+	# wait for the cache to finish
+	mov	CHCTR,a0
+	setlb
+	movhu	(a0),d0
+	btst	CHCTR_DCBUSY,d0
+	lne
+
+	# and reenable it
+	and	~CHCTR_DCINV,d0
+	or	CHCTR_DCEN,d0
+	movhu	d0,(a0)
+	movhu	(a0),d0
+
+	LOCAL_IRQ_RESTORE(d1)
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+	# invalidate
+	or	CHCTR_DCINV,d0
+	movhu	d0,(a0)
+	movhu	(a0),d0
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+
+mn10300_local_dcache_inv_end:
+	ret	[],0
+	.size	mn10300_local_dcache_inv,.-mn10300_local_dcache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size)
+# void mn10300_local_dcache_inv_page(unsigned long start)
+# Invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+	ALIGN
+	.globl	mn10300_local_dcache_inv_page
+	.globl	mn10300_local_dcache_inv_range
+	.globl	mn10300_local_dcache_inv_range2
+	.type	mn10300_local_dcache_inv_page,@function
+	.type	mn10300_local_dcache_inv_range,@function
+	.type	mn10300_local_dcache_inv_range2,@function
+mn10300_local_dcache_inv_page:
+	and	~(PAGE_SIZE-1),d0
+	mov	PAGE_SIZE,d1
+mn10300_local_dcache_inv_range2:
+	add	d0,d1
+mn10300_local_dcache_inv_range:
+	# If we are in writeback mode we check the start and end alignments,
+	# and if they're not cacheline-aligned, we must flush any bits outside
+	# the range that share cachelines with stuff inside the range
+#ifdef CONFIG_MN10300_CACHE_WBACK
+	btst	~(L1_CACHE_BYTES-1),d0
+	bne	1f
+	btst	~(L1_CACHE_BYTES-1),d1
+	beq	2f
+1:
+	bra	mn10300_local_dcache_flush_inv_range
+2:
+#endif /* CONFIG_MN10300_CACHE_WBACK */
+
+	movm	[d2,d3,a2],(sp)
+
+	mov	CHCTR,a2
+	movhu	(a2),d2
+	btst	CHCTR_DCEN,d2
+	beq	mn10300_local_dcache_inv_range_end
+
+#ifndef CONFIG_MN10300_CACHE_WBACK
+	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0	# round start
+								# addr down
+
+	add	L1_CACHE_BYTES,d1		# round end addr up
+	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+#endif /* !CONFIG_MN10300_CACHE_WBACK */
+	mov	d0,a1
+
+	clr	d2				# we're going to clear tag RAM
+						# entries
+
+	# read the tags from the tag RAM, and if they indicate a valid dirty
+	# cache line then invalidate that line
+	mov	DCACHE_TAG(0,0),a0
+	mov	a1,d0
+	and	L1_CACHE_TAG_ENTRY,d0
+	add	d0,a0				# starting dcache tag RAM
+						# access address
+
+	sub	a1,d1
+	lsr	L1_CACHE_SHIFT,d1		# total number of entries to
+						# examine
+
+	and	~(L1_CACHE_DISPARITY-1),a1	# determine comparator base
+
+mn10300_local_dcache_inv_range_outer_loop:
+	LOCAL_CLI_SAVE(d3)
+
+	# disable the dcache
+	movhu	(a2),d0
+	and	~CHCTR_DCEN,d0
+	movhu	d0,(a2)
+
+	# and wait for it to calm down
+	setlb
+	movhu	(a2),d0
+	btst	CHCTR_DCBUSY,d0
+	lne
+
+mn10300_local_dcache_inv_range_loop:
+
+	# process the way 0 slot
+	mov	(L1_CACHE_WAYDISP*0,a0),d0	# read the tag in the way 0 slot
+	btst	L1_CACHE_TAG_VALID,d0
+	beq	mn10300_local_dcache_inv_range_skip_0	# jump if this cacheline
+						# is not valid
+
+	xor	a1,d0
+	lsr	12,d0
+	bne	mn10300_local_dcache_inv_range_skip_0	# jump if not this cacheline
+
+	mov	d2,(L1_CACHE_WAYDISP*0,a0)	# kill the tag
+
+mn10300_local_dcache_inv_range_skip_0:
+
+	# process the way 1 slot
+	mov	(L1_CACHE_WAYDISP*1,a0),d0	# read the tag in the way 1 slot
+	btst	L1_CACHE_TAG_VALID,d0
+	beq	mn10300_local_dcache_inv_range_skip_1	# jump if this cacheline
+						# is not valid
+
+	xor	a1,d0
+	lsr	12,d0
+	bne	mn10300_local_dcache_inv_range_skip_1	# jump if not this cacheline
+
+	mov	d2,(L1_CACHE_WAYDISP*1,a0)	# kill the tag
+
+mn10300_local_dcache_inv_range_skip_1:
+
+	# process the way 2 slot
+	mov	(L1_CACHE_WAYDISP*2,a0),d0	# read the tag in the way 2 slot
+	btst	L1_CACHE_TAG_VALID,d0
+	beq	mn10300_local_dcache_inv_range_skip_2	# jump if this cacheline
+						# is not valid
+
+	xor	a1,d0
+	lsr	12,d0
+	bne	mn10300_local_dcache_inv_range_skip_2	# jump if not this cacheline
+
+	mov	d2,(L1_CACHE_WAYDISP*2,a0)	# kill the tag
+
+mn10300_local_dcache_inv_range_skip_2:
+
+	# process the way 3 slot
+	mov	(L1_CACHE_WAYDISP*3,a0),d0	# read the tag in the way 3 slot
+	btst	L1_CACHE_TAG_VALID,d0
+	beq	mn10300_local_dcache_inv_range_skip_3	# jump if this cacheline
+						# is not valid
+
+	xor	a1,d0
+	lsr	12,d0
+	bne	mn10300_local_dcache_inv_range_skip_3	# jump if not this cacheline
+
+	mov	d2,(L1_CACHE_WAYDISP*3,a0)	# kill the tag
+
+mn10300_local_dcache_inv_range_skip_3:
+
+	# approx every N steps we re-enable the cache and see if there are any
+	# interrupts to be processed
+	# we also break out if we've reached the end of the loop
+	# (the bottom nibble of the count is zero in both cases)
+	add	L1_CACHE_BYTES,a0
+	add	L1_CACHE_BYTES,a1
+	and	~L1_CACHE_WAYDISP,a0
+	add	-1,d1
+	btst	mn10300_local_dcache_inv_range_intr_interval,d1
+	bne	mn10300_local_dcache_inv_range_loop
+
+	# wait for the cache to finish what it's doing
+	setlb
+	movhu	(a2),d0
+	btst	CHCTR_DCBUSY,d0
+	lne
+
+	# and reenable it
+	or	CHCTR_DCEN,d0
+	movhu	d0,(a2)
+	movhu	(a2),d0
+
+	# re-enable interrupts
+	# - we don't bother with delay NOPs as we'll have enough instructions
+	#   before we disable interrupts again to give the interrupts a chance
+	#   to happen
+	LOCAL_IRQ_RESTORE(d3)
+
+	# go around again if the counter hasn't yet reached zero
+	add	0,d1
+	bne	mn10300_local_dcache_inv_range_outer_loop
+
+mn10300_local_dcache_inv_range_end:
+	ret	[d2,d3,a2],12
+	.size	mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page
+	.size	mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range
+	.size	mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2

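In the by-tag variant above, a line is killed only if its tag has the valid bit set and its upper bits match the comparator base; the match is done by XORing the tag with the base and shifting the low twelve bits away. A small C model of that test (the bit layout here is an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

#define TAG_VALID	0x00000001u	/* assumed valid bit */

/* Model of the way-slot test: valid bit set AND bits 12..31 equal to
 * the comparator base means the line gets invalidated. */
static int tag_matches(uint32_t tag, uint32_t base)
{
	if (!(tag & TAG_VALID))
		return 0;			/* line not valid: skip */
	return ((tag ^ base) >> 12) == 0;	/* compare the upper bits */
}

int main(void)
{
	uint32_t base = 0x9000a000u;

	printf("%d\n", tag_matches(0x9000a001u, base));	/* 1: hit */
	printf("%d\n", tag_matches(0x9000b001u, base));	/* 0: other page */
	printf("%d\n", tag_matches(0x9000a000u, base));	/* 0: not valid */
	return 0;
}
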
+ 129 - 0
arch/mn10300/mm/cache-inv-icache.c

@@ -0,0 +1,129 @@
+/* Invalidate the icache when the dcache doesn't need invalidating, because
+ * the dcache is in write-through mode
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
+
+/**
+ * flush_icache_page_range - Invalidate the icache for part of a single page
+ * @start: The starting virtual address of the page part.
+ * @end: The ending virtual address of the page part.
+ *
+ * Invalidate the icache for part of a single page, as determined by the
+ * virtual addresses given.  The page must be in the paged area.  The dcache is
+ * not flushed as the cache must be in write-through mode to get here.
+ */
+static void flush_icache_page_range(unsigned long start, unsigned long end)
+{
+	unsigned long addr, size, off;
+	struct page *page;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *ppte, pte;
+
+	/* work out how much of the page to flush */
+	off = start & ~PAGE_MASK;
+	size = end - start;
+
+	/* get the physical address the page is mapped to from the page
+	 * tables */
+	pgd = pgd_offset(current->mm, start);
+	if (!pgd || !pgd_val(*pgd))
+		return;
+
+	pud = pud_offset(pgd, start);
+	if (!pud || !pud_val(*pud))
+		return;
+
+	pmd = pmd_offset(pud, start);
+	if (!pmd || !pmd_val(*pmd))
+		return;
+
+	ppte = pte_offset_map(pmd, start);
+	if (!ppte)
+		return;
+	pte = *ppte;
+	pte_unmap(ppte);
+
+	if (pte_none(pte))
+		return;
+
+	page = pte_page(pte);
+	if (!page)
+		return;
+
+	addr = page_to_phys(page);
+
+	/* invalidate the icache coverage on that region */
+	mn10300_local_icache_inv_range2(addr + off, size);
+	smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
+}
+
+/**
+ * flush_icache_range - Globally flush dcache and invalidate icache for region
+ * @start: The starting virtual address of the region.
+ * @end: The ending virtual address of the region.
+ *
+ * This is used by the kernel to globally flush some code it has just written
+ * from the dcache back to RAM and then to globally invalidate the icache over
+ * that region so that the code can be run on all CPUs in the system.
+ */
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+	unsigned long start_page, end_page;
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+
+	if (end > 0x80000000UL) {
+		/* addresses above 0xa0000000 do not go through the cache */
+		if (end > 0xa0000000UL) {
+			end = 0xa0000000UL;
+			if (start >= end)
+				goto done;
+		}
+
+		/* kernel addresses between 0x80000000 and 0x9fffffff do not
+		 * require page tables, so we just map such addresses
+		 * directly */
+		start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
+		mn10300_icache_inv_range(start_page, end);
+		smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
+		if (start_page == start)
+			goto done;
+		end = start_page;
+	}
+
+	start_page = start & PAGE_MASK;
+	end_page = (end - 1) & PAGE_MASK;
+
+	if (start_page == end_page) {
+		/* the first and last bytes are on the same page */
+		flush_icache_page_range(start, end);
+	} else if (start_page + 1 == end_page) {
+		/* split over two virtually contiguous pages */
+		flush_icache_page_range(start, end_page);
+		flush_icache_page_range(end_page, end);
+	} else {
+		/* more than 2 pages; just flush the entire cache */
+		mn10300_local_icache_inv();
+		smp_cache_call(SMP_ICACHE_INV, 0, 0);
+	}
+
+done:
+	smp_unlock_cache(flags);
+}
+EXPORT_SYMBOL(flush_icache_range);

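flush_icache_range() above splits the request by address region before touching any page tables. A simplified, runnable model of that split (it collapses the real function's two-adjacent-pages case into the whole-cache fallback):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Simplified model of the address-space split in flush_icache_range():
 * 0xa0000000 upwards is uncached, 0x80000000-0x9fffffff is directly
 * mapped (no page tables), anything below goes through the page tables. */
static void model_flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long base;

	if (end > 0x80000000UL) {
		if (end > 0xa0000000UL) {
			end = 0xa0000000UL;	/* uncached: nothing to do */
			if (start >= end)
				return;
		}
		base = (start >= 0x80000000UL) ? start : 0x80000000UL;
		printf("direct:  inv %#lx-%#lx\n", base, end);
		if (base == start)
			return;
		end = base;		/* fall through to the paged part */
	}
	if ((start & PAGE_MASK) == ((end - 1) & PAGE_MASK))
		printf("paged:   inv %#lx-%#lx (one partial page)\n",
		       start, end);
	else
		printf("paged:   range spans pages, invalidate whole icache\n");
}

int main(void)
{
	model_flush_icache_range(0x7ffff800UL, 0x80000100UL);
	return 0;
}
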
+ 0 - 289
arch/mn10300/mm/cache-mn10300.S

@@ -1,289 +0,0 @@
-/* MN10300 CPU core caching routines
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#include <linux/sys.h>
-#include <linux/linkage.h>
-#include <asm/smp.h>
-#include <asm/page.h>
-#include <asm/cache.h>
-
-#define mn10300_dcache_inv_range_intr_interval \
-	+((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
-
-#if mn10300_dcache_inv_range_intr_interval > 0xff
-#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
-#endif
-
-	.am33_2
-
-	.globl mn10300_icache_inv
-	.globl mn10300_dcache_inv
-	.globl mn10300_dcache_inv_range
-	.globl mn10300_dcache_inv_range2
-	.globl mn10300_dcache_inv_page
-
-###############################################################################
-#
-# void mn10300_icache_inv(void)
-# Invalidate the entire icache
-#
-###############################################################################
-	ALIGN
-mn10300_icache_inv:
-	mov	CHCTR,a0
-
-	movhu	(a0),d0
-	btst	CHCTR_ICEN,d0
-	beq	mn10300_icache_inv_end
-
-	mov	epsw,d1
-	and	~EPSW_IE,epsw
-	nop
-	nop
-
-	# disable the icache
-	and	~CHCTR_ICEN,d0
-	movhu	d0,(a0)
-
-	# and wait for it to calm down
-	setlb
-	movhu	(a0),d0
-	btst	CHCTR_ICBUSY,d0
-	lne
-
-	# invalidate
-	or	CHCTR_ICINV,d0
-	movhu	d0,(a0)
-
-	# wait for the cache to finish
-	mov	CHCTR,a0
-	setlb
-	movhu	(a0),d0
-	btst	CHCTR_ICBUSY,d0
-	lne
-
-	# and reenable it
-	and	~CHCTR_ICINV,d0
-	or	CHCTR_ICEN,d0
-	movhu	d0,(a0)
-	movhu	(a0),d0
-
-	mov	d1,epsw
-
-mn10300_icache_inv_end:
-	ret	[],0
-
-###############################################################################
-#
-# void mn10300_dcache_inv(void)
-# Invalidate the entire dcache
-#
-###############################################################################
-	ALIGN
-mn10300_dcache_inv:
-	mov	CHCTR,a0
-
-	movhu	(a0),d0
-	btst	CHCTR_DCEN,d0
-	beq	mn10300_dcache_inv_end
-
-	mov	epsw,d1
-	and	~EPSW_IE,epsw
-	nop
-	nop
-
-	# disable the dcache
-	and	~CHCTR_DCEN,d0
-	movhu	d0,(a0)
-
-	# and wait for it to calm down
-	setlb
-	movhu	(a0),d0
-	btst	CHCTR_DCBUSY,d0
-	lne
-
-	# invalidate
-	or	CHCTR_DCINV,d0
-	movhu	d0,(a0)
-
-	# wait for the cache to finish
-	mov	CHCTR,a0
-	setlb
-	movhu	(a0),d0
-	btst	CHCTR_DCBUSY,d0
-	lne
-
-	# and reenable it
-	and	~CHCTR_DCINV,d0
-	or	CHCTR_DCEN,d0
-	movhu	d0,(a0)
-	movhu	(a0),d0
-
-	mov	d1,epsw
-
-mn10300_dcache_inv_end:
-	ret	[],0
-
-###############################################################################
-#
-# void mn10300_dcache_inv_range(unsigned start, unsigned end)
-# void mn10300_dcache_inv_range2(unsigned start, unsigned size)
-# void mn10300_dcache_inv_page(unsigned start)
-# Invalidate a range of addresses on a page in the dcache
-#
-###############################################################################
-	ALIGN
-mn10300_dcache_inv_page:
-	mov	PAGE_SIZE,d1
-mn10300_dcache_inv_range2:
-	add	d0,d1
-mn10300_dcache_inv_range:
-	movm	[d2,d3,a2],(sp)
-	mov	CHCTR,a2
-
-	movhu	(a2),d2
-	btst	CHCTR_DCEN,d2
-	beq	mn10300_dcache_inv_range_end
-
-	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0	# round start
-								# addr down
-	mov	d0,a1
-
-	add	L1_CACHE_BYTES,d1			# round end addr up
-	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
-
-	clr	d2				# we're going to clear tag ram
-						# entries
-
-	# read the tags from the tag RAM, and if they indicate a valid dirty
-	# cache line then invalidate that line
-	mov	DCACHE_TAG(0,0),a0
-	mov	a1,d0
-	and	L1_CACHE_TAG_ENTRY,d0
-	add	d0,a0				# starting dcache tag RAM
-						# access address
-
-	sub	a1,d1
-	lsr	L1_CACHE_SHIFT,d1		# total number of entries to
-						# examine
-
-	and	~(L1_CACHE_DISPARITY-1),a1	# determine comparator base
-
-mn10300_dcache_inv_range_outer_loop:
-	# disable interrupts
-	mov	epsw,d3
-	and	~EPSW_IE,epsw
-	nop					# note that reading CHCTR and
-						# AND'ing D0 occupy two delay
-						# slots after disabling
-						# interrupts
-
-	# disable the dcache
-	movhu	(a2),d0
-	and	~CHCTR_DCEN,d0
-	movhu	d0,(a2)
-
-	# and wait for it to calm down
-	setlb
-	movhu	(a2),d0
-	btst	CHCTR_DCBUSY,d0
-	lne
-
-mn10300_dcache_inv_range_loop:
-
-	# process the way 0 slot
-	mov	(L1_CACHE_WAYDISP*0,a0),d0	# read the tag in the way 0 slot
-	btst	L1_CACHE_TAG_VALID,d0
-	beq	mn10300_dcache_inv_range_skip_0	# jump if this cacheline is not
-						# valid
-
-	xor	a1,d0
-	lsr	12,d0
-	bne	mn10300_dcache_inv_range_skip_0	# jump if not this cacheline
-
-	mov	d2,(a0)				# kill the tag
-
-mn10300_dcache_inv_range_skip_0:
-
-	# process the way 1 slot
-	mov	(L1_CACHE_WAYDISP*1,a0),d0	# read the tag in the way 1 slot
-	btst	L1_CACHE_TAG_VALID,d0
-	beq	mn10300_dcache_inv_range_skip_1	# jump if this cacheline is not
-						# valid
-
-	xor	a1,d0
-	lsr	12,d0
-	bne	mn10300_dcache_inv_range_skip_1	# jump if not this cacheline
-
-	mov	d2,(a0)				# kill the tag
-
-mn10300_dcache_inv_range_skip_1:
-
-	# process the way 2 slot
-	mov	(L1_CACHE_WAYDISP*2,a0),d0	# read the tag in the way 2 slot
-	btst	L1_CACHE_TAG_VALID,d0
-	beq	mn10300_dcache_inv_range_skip_2	# jump if this cacheline is not
-						# valid
-
-	xor	a1,d0
-	lsr	12,d0
-	bne	mn10300_dcache_inv_range_skip_2	# jump if not this cacheline
-
-	mov	d2,(a0)				# kill the tag
-
-mn10300_dcache_inv_range_skip_2:
-
-	# process the way 3 slot
-	mov	(L1_CACHE_WAYDISP*3,a0),d0	# read the tag in the way 3 slot
-	btst	L1_CACHE_TAG_VALID,d0
-	beq	mn10300_dcache_inv_range_skip_3	# jump if this cacheline is not
-						# valid
-
-	xor	a1,d0
-	lsr	12,d0
-	bne	mn10300_dcache_inv_range_skip_3	# jump if not this cacheline
-
-	mov	d2,(a0)				# kill the tag
-
-mn10300_dcache_inv_range_skip_3:
-
-	# approx every N steps we re-enable the cache and see if there are any
-	# interrupts to be processed
-	# we also break out if we've reached the end of the loop
-	# (the bottom nibble of the count is zero in both cases)
-	add	L1_CACHE_BYTES,a0
-	add	L1_CACHE_BYTES,a1
-	add	-1,d1
-	btst	mn10300_dcache_inv_range_intr_interval,d1
-	bne	mn10300_dcache_inv_range_loop
-
-	# wait for the cache to finish what it's doing
-	setlb
-	movhu	(a2),d0
-	btst	CHCTR_DCBUSY,d0
-	lne
-
-	# and reenable it
-	or	CHCTR_DCEN,d0
-	movhu	d0,(a2)
-	movhu	(a2),d0
-
-	# re-enable interrupts
-	# - we don't bother with delay NOPs as we'll have enough instructions
-	#   before we disable interrupts again to give the interrupts a chance
-	#   to happen
-	mov	d3,epsw
-
-	# go around again if the counter hasn't yet reached zero
-	add	0,d1
-	bne	mn10300_dcache_inv_range_outer_loop
-
-mn10300_dcache_inv_range_end:
-	ret	[d2,d3,a2],12

+ 156 - 0
arch/mn10300/mm/cache-smp-flush.c

@@ -0,0 +1,156 @@
+/* Functions for global dcache flushing when writeback caching under SMP
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include "cache-smp.h"
+
+/**
+ * mn10300_dcache_flush - Globally flush data cache
+ *
+ * Flush the data cache on all CPUs.
+ */
+void mn10300_dcache_flush(void)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_flush();
+	smp_cache_call(SMP_DCACHE_FLUSH, 0, 0);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_page - Globally flush a page of data cache
+ * @start: The address of the page of memory to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs covering
+ * the page that includes the given address.
+ */
+void mn10300_dcache_flush_page(unsigned long start)
+{
+	unsigned long flags;
+
+	start &= ~(PAGE_SIZE-1);
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_flush_page(start);
+	smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + PAGE_SIZE);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_range - Globally flush range of data cache
+ * @start: The start address of the region to be flushed.
+ * @end: The end address of the region to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs, between start and
+ * end-1 inclusive.
+ */
+void mn10300_dcache_flush_range(unsigned long start, unsigned long end)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_flush_range(start, end);
+	smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, end);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_range2 - Globally flush range of data cache
+ * @start: The start address of the region to be flushed.
+ * @size: The size of the region to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs, between start and
+ * start+size-1 inclusive.
+ */
+void mn10300_dcache_flush_range2(unsigned long start, unsigned long size)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_flush_range2(start, size);
+	smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + size);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv - Globally flush and invalidate data cache
+ *
+ * Flush and invalidate the data cache on all CPUs.
+ */
+void mn10300_dcache_flush_inv(void)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_flush_inv();
+	smp_cache_call(SMP_DCACHE_FLUSH_INV, 0, 0);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_page - Globally flush and invalidate a page of data
+ *	cache
+ * @start: The address of the page of memory to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs
+ * covering the page that includes the given address.
+ */
+void mn10300_dcache_flush_inv_page(unsigned long start)
+{
+	unsigned long flags;
+
+	start &= ~(PAGE_SIZE-1);
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_flush_inv_page(start);
+	smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + PAGE_SIZE);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_range - Globally flush and invalidate range of data
+ *	cache
+ * @start: The start address of the region to be flushed and invalidated.
+ * @end: The end address of the region to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs,
+ * between start and end-1 inclusive.
+ */
+void mn10300_dcache_flush_inv_range(unsigned long start, unsigned long end)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_flush_inv_range(start, end);
+	smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, end);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_range2 - Globally flush and invalidate range of data
+ *	cache
+ * @start: The start address of the region to be flushed and invalidated.
+ * @size: The size of the region to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs,
+ * between start and start+size-1 inclusive.
+ */
+void mn10300_dcache_flush_inv_range2(unsigned long start, unsigned long size)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_flush_inv_range2(start, size);
+	smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + size);
+	smp_unlock_cache(flags);
+}

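Every exported wrapper in this file follows the same template: take smp_cache_lock with IRQs disabled, perform the operation on the local CPU, broadcast the equivalent range operation to the other CPUs via smp_cache_call(), then unlock. A condensed sketch of that shape (the function name here is hypothetical; the helpers are the real ones from cache-smp.h):

/* Shared shape of the wrappers above; not an additional API. */
static void mn10300_dcache_global_op(void (*local_op)(unsigned long start,
						      unsigned long end),
				     unsigned long smp_op,
				     unsigned long start, unsigned long end)
{
	unsigned long flags;

	flags = smp_lock_cache();	/* smp_cache_lock + IRQs off */
	local_op(start, end);		/* this CPU first... */
	smp_cache_call(smp_op, start, end); /* ...then the others, and wait */
	smp_unlock_cache(flags);
}
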
+ 153 - 0
arch/mn10300/mm/cache-smp-inv.c

@@ -0,0 +1,153 @@
+/* Functions for global i/dcache invalidation when caching under SMP
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include "cache-smp.h"
+
+/**
+ * mn10300_icache_inv - Globally invalidate instruction cache
+ *
+ * Invalidate the instruction cache on all CPUs.
+ */
+void mn10300_icache_inv(void)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_icache_inv();
+	smp_cache_call(SMP_ICACHE_INV, 0, 0);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_page - Globally invalidate a page of instruction cache
+ * @start: The address of the page of memory to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs
+ * covering the page that includes the given address.
+ */
+void mn10300_icache_inv_page(unsigned long start)
+{
+	unsigned long flags;
+
+	start &= ~(PAGE_SIZE-1);
+
+	flags = smp_lock_cache();
+	mn10300_local_icache_inv_page(start);
+	smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + PAGE_SIZE);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_range - Globally invalidate range of instruction cache
+ * @start: The start address of the region to be invalidated.
+ * @end: The end address of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs,
+ * between start and end-1 inclusive.
+ */
+void mn10300_icache_inv_range(unsigned long start, unsigned long end)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_icache_inv_range(start, end);
+	smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_range2 - Globally invalidate range of instruction cache
+ * @start: The start address of the region to be invalidated.
+ * @size: The size of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs,
+ * between start and start+size-1 inclusive.
+ */
+void mn10300_icache_inv_range2(unsigned long start, unsigned long size)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_icache_inv_range2(start, size);
+	smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + size);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv - Globally invalidate data cache
+ *
+ * Invalidate the data cache on all CPUs.
+ */
+void mn10300_dcache_inv(void)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_inv();
+	smp_cache_call(SMP_DCACHE_INV, 0, 0);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_page - Globally invalidate a page of data cache
+ * @start: The address of the page of memory to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs covering the
+ * page that includes the given address.
+ */
+void mn10300_dcache_inv_page(unsigned long start)
+{
+	unsigned long flags;
+
+	start &= ~(PAGE_SIZE-1);
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_inv_page(start);
+	smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + PAGE_SIZE);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_range - Globally invalidate range of data cache
+ * @start: The start address of the region to be invalidated.
+ * @end: The end address of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs, between start
+ * and end-1 inclusive.
+ */
+void mn10300_dcache_inv_range(unsigned long start, unsigned long end)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_inv_range(start, end);
+	smp_cache_call(SMP_DCACHE_INV_RANGE, start, end);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_range2 - Globally invalidate range of data cache
+ * @start: The start address of the region to be invalidated.
+ * @size: The size of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs, between start
+ * and start+size-1 inclusive.
+ */
+void mn10300_dcache_inv_range2(unsigned long start, unsigned long size)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_inv_range2(start, size);
+	smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + size);
+	smp_unlock_cache(flags);
+}

+ 105 - 0
arch/mn10300/mm/cache-smp.c

@@ -0,0 +1,105 @@
+/* SMP global caching code
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/threads.h>
+#include <linux/interrupt.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
+
+DEFINE_SPINLOCK(smp_cache_lock);
+static unsigned long smp_cache_mask;
+static unsigned long smp_cache_start;
+static unsigned long smp_cache_end;
+static cpumask_t smp_cache_ipi_map;		/* Bitmask of CPUs with a cache IPI still pending */
+
+/**
+ * smp_cache_interrupt - Handle IPI request to flush caches.
+ *
+ * Handle a request delivered by IPI to flush the current CPU's
+ * caches.  The parameters are stored in smp_cache_*.
+ */
+void smp_cache_interrupt(void)
+{
+	unsigned long opr_mask = smp_cache_mask;
+
+	switch ((enum smp_dcache_ops)(opr_mask & SMP_DCACHE_OP_MASK)) {
+	case SMP_DCACHE_NOP:
+		break;
+	case SMP_DCACHE_INV:
+		mn10300_local_dcache_inv();
+		break;
+	case SMP_DCACHE_INV_RANGE:
+		mn10300_local_dcache_inv_range(smp_cache_start, smp_cache_end);
+		break;
+	case SMP_DCACHE_FLUSH:
+		mn10300_local_dcache_flush();
+		break;
+	case SMP_DCACHE_FLUSH_RANGE:
+		mn10300_local_dcache_flush_range(smp_cache_start,
+						 smp_cache_end);
+		break;
+	case SMP_DCACHE_FLUSH_INV:
+		mn10300_local_dcache_flush_inv();
+		break;
+	case SMP_DCACHE_FLUSH_INV_RANGE:
+		mn10300_local_dcache_flush_inv_range(smp_cache_start,
+						     smp_cache_end);
+		break;
+	}
+
+	switch ((enum smp_icache_ops)(opr_mask & SMP_ICACHE_OP_MASK)) {
+	case SMP_ICACHE_NOP:
+		break;
+	case SMP_ICACHE_INV:
+		mn10300_local_icache_inv();
+		break;
+	case SMP_ICACHE_INV_RANGE:
+		mn10300_local_icache_inv_range(smp_cache_start, smp_cache_end);
+		break;
+	}
+
+	cpu_clear(smp_processor_id(), smp_cache_ipi_map);
+}
+
+/**
+ * smp_cache_call - Issue an IPI to request the other CPUs flush caches
+ * @opr_mask: Cache operation flags
+ * @start: Start address of request
+ * @end: End address of request
+ *
+ * Send cache flush IPI to other CPUs.  This invokes smp_cache_interrupt()
+ * above on those other CPUs and then waits for them to finish.
+ *
+ * The caller must hold smp_cache_lock.
+ */
+void smp_cache_call(unsigned long opr_mask,
+		    unsigned long start, unsigned long end)
+{
+	smp_cache_mask = opr_mask;
+	smp_cache_start = start;
+	smp_cache_end = end;
+	smp_cache_ipi_map = cpu_online_map;
+	cpu_clear(smp_processor_id(), smp_cache_ipi_map);
+
+	send_IPI_allbutself(FLUSH_CACHE_IPI);
+
+	while (!cpus_empty(smp_cache_ipi_map))
+		/* nothing. lockup detection does not belong here */
+		mb();
+}

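The handshake in smp_cache_call() is: publish the operation and range in globals, set a bit for every other online CPU, raise the IPI, then spin until each handler has cleared its own bit. The same protocol can be modelled in userspace with C11 atomics (a sketch only, with the cpumask reduced to one bit per thread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

static atomic_ulong ipi_map;		/* stands in for smp_cache_ipi_map */
static atomic_ulong req_start, req_end;	/* stands in for smp_cache_start/end */

static void *responder(void *arg)
{
	unsigned long cpu = (unsigned long)arg;

	while (!(atomic_load(&ipi_map) & (1UL << cpu)))
		;			/* wait for the "IPI" */
	printf("cpu%lu: invalidate %#lx..%#lx\n", cpu,
	       atomic_load(&req_start), atomic_load(&req_end));
	atomic_fetch_and(&ipi_map, ~(1UL << cpu));	/* report done */
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];
	unsigned long cpu;

	for (cpu = 1; cpu < NCPUS; cpu++)	/* cpu0 is the requester */
		pthread_create(&t[cpu], NULL, responder, (void *)cpu);

	atomic_store(&req_start, 0x90000000UL);	/* publish the request... */
	atomic_store(&req_end, 0x90001000UL);
	atomic_store(&ipi_map, ((1UL << NCPUS) - 1) & ~1UL); /* ...all but self */

	while (atomic_load(&ipi_map))
		;				/* smp_cache_call()'s wait */

	for (cpu = 1; cpu < NCPUS; cpu++)
		pthread_join(t[cpu], NULL);
	return 0;
}

Compile with -pthread. Unlike the kernel code there is no lock here, so this model only supports a single requester at a time.
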
+ 69 - 0
arch/mn10300/mm/cache-smp.h

@@ -0,0 +1,69 @@
+/* SMP caching definitions
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+
+/*
+ * Operation requests for smp_cache_call().
+ *
+ * One of smp_icache_ops and one of smp_dcache_ops can be OR'd together.
+ */
+enum smp_icache_ops {
+	SMP_ICACHE_NOP			= 0x0000,
+	SMP_ICACHE_INV			= 0x0001,
+	SMP_ICACHE_INV_RANGE		= 0x0002,
+};
+#define SMP_ICACHE_OP_MASK		0x0003
+
+enum smp_dcache_ops {
+	SMP_DCACHE_NOP			= 0x0000,
+	SMP_DCACHE_INV			= 0x0004,
+	SMP_DCACHE_INV_RANGE		= 0x0008,
+	SMP_DCACHE_FLUSH		= 0x000c,
+	SMP_DCACHE_FLUSH_RANGE		= 0x0010,
+	SMP_DCACHE_FLUSH_INV		= 0x0014,
+	SMP_DCACHE_FLUSH_INV_RANGE	= 0x0018,
+};
+#define SMP_DCACHE_OP_MASK		0x001c
+
+#define SMP_IDCACHE_INV_FLUSH		(SMP_ICACHE_INV | SMP_DCACHE_FLUSH)
+#define SMP_IDCACHE_INV_FLUSH_RANGE	(SMP_ICACHE_INV_RANGE | SMP_DCACHE_FLUSH_RANGE)
+
+/*
+ * cache-smp.c
+ */
+#ifdef CONFIG_SMP
+extern spinlock_t smp_cache_lock;
+
+extern void smp_cache_call(unsigned long opr_mask,
+			   unsigned long addr, unsigned long end);
+
+static inline unsigned long smp_lock_cache(void)
+	__acquires(&smp_cache_lock)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&smp_cache_lock, flags);
+	return flags;
+}
+
+static inline void smp_unlock_cache(unsigned long flags)
+	__releases(&smp_cache_lock)
+{
+	spin_unlock_irqrestore(&smp_cache_lock, flags);
+}
+
+#else
+static inline unsigned long smp_lock_cache(void) { return 0; }
+static inline void smp_unlock_cache(unsigned long flags) {}
+static inline void smp_cache_call(unsigned long opr_mask,
+				  unsigned long addr, unsigned long end)
+{
+}
+#endif /* CONFIG_SMP */

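Since the icache codes occupy bits 0-1 and the dcache codes bits 2-4, one request word can carry one operation of each kind, and the handler decodes them independently with the two masks. A self-contained demonstration using the values defined above:

#include <stdio.h>

/* Values as defined in cache-smp.h above. */
#define SMP_ICACHE_INV_RANGE		0x0002
#define SMP_ICACHE_OP_MASK		0x0003
#define SMP_DCACHE_FLUSH_RANGE		0x0010
#define SMP_DCACHE_OP_MASK		0x001c
#define SMP_IDCACHE_INV_FLUSH_RANGE \
	(SMP_ICACHE_INV_RANGE | SMP_DCACHE_FLUSH_RANGE)

int main(void)
{
	unsigned long opr_mask = SMP_IDCACHE_INV_FLUSH_RANGE;

	/* what smp_cache_interrupt() does on the receiving CPU */
	printf("icache op %#06lx, dcache op %#06lx\n",
	       opr_mask & SMP_ICACHE_OP_MASK,
	       opr_mask & SMP_DCACHE_OP_MASK);
	return 0;	/* prints: icache op 0x0002, dcache op 0x0010 */
}
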
+ 5 - 90
arch/mn10300/mm/cache.c

@@ -18,8 +18,13 @@
 #include <asm/cacheflush.h>
 #include <asm/io.h>
 #include <asm/uaccess.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
 
 EXPORT_SYMBOL(mn10300_icache_inv);
+EXPORT_SYMBOL(mn10300_icache_inv_range);
+EXPORT_SYMBOL(mn10300_icache_inv_range2);
+EXPORT_SYMBOL(mn10300_icache_inv_page);
 EXPORT_SYMBOL(mn10300_dcache_inv);
 EXPORT_SYMBOL(mn10300_dcache_inv_range);
 EXPORT_SYMBOL(mn10300_dcache_inv_range2);
@@ -36,96 +41,6 @@ EXPORT_SYMBOL(mn10300_dcache_flush_range2);
 EXPORT_SYMBOL(mn10300_dcache_flush_page);
 #endif
 
-/*
- * write a page back from the dcache and invalidate the icache so that we can
- * run code from it that we've just written into it
- */
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
-	mn10300_dcache_flush_page(page_to_phys(page));
-	mn10300_icache_inv();
-}
-EXPORT_SYMBOL(flush_icache_page);
-
-/*
- * write some code we've just written back from the dcache and invalidate the
- * icache so that we can run that code
- */
-void flush_icache_range(unsigned long start, unsigned long end)
-{
-#ifdef CONFIG_MN10300_CACHE_WBACK
-	unsigned long addr, size, base, off;
-	struct page *page;
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *ppte, pte;
-
-	if (end > 0x80000000UL) {
-		/* addresses above 0xa0000000 do not go through the cache */
-		if (end > 0xa0000000UL) {
-			end = 0xa0000000UL;
-			if (start >= end)
-				return;
-		}
-
-		/* kernel addresses between 0x80000000 and 0x9fffffff do not
-		 * require page tables, so we just map such addresses directly */
-		base = (start >= 0x80000000UL) ? start : 0x80000000UL;
-		mn10300_dcache_flush_range(base, end);
-		if (base == start)
-			goto invalidate;
-		end = base;
-	}
-
-	for (; start < end; start += size) {
-		/* work out how much of the page to flush */
-		off = start & (PAGE_SIZE - 1);
-
-		size = end - start;
-		if (size > PAGE_SIZE - off)
-			size = PAGE_SIZE - off;
-
-		/* get the physical address the page is mapped to from the page
-		 * tables */
-		pgd = pgd_offset(current->mm, start);
-		if (!pgd || !pgd_val(*pgd))
-			continue;
-
-		pud = pud_offset(pgd, start);
-		if (!pud || !pud_val(*pud))
-			continue;
-
-		pmd = pmd_offset(pud, start);
-		if (!pmd || !pmd_val(*pmd))
-			continue;
-
-		ppte = pte_offset_map(pmd, start);
-		if (!ppte)
-			continue;
-		pte = *ppte;
-		pte_unmap(ppte);
-
-		if (pte_none(pte))
-			continue;
-
-		page = pte_page(pte);
-		if (!page)
-			continue;
-
-		addr = page_to_phys(page);
-
-		/* flush the dcache and invalidate the icache coverage on that
-		 * region */
-		mn10300_dcache_flush_range2(addr + off, size);
-	}
-#endif
-
-invalidate:
-	mn10300_icache_inv();
-}
-EXPORT_SYMBOL(flush_icache_range);
-
 /*
  * allow userspace to flush the instruction cache
  */

+ 4 - 13
arch/mn10300/mm/fault.c

@@ -39,10 +39,6 @@ void bust_spinlocks(int yes)
 {
 	if (yes) {
 		oops_in_progress = 1;
-#ifdef CONFIG_SMP
-		/* Many serial drivers do __global_cli() */
-		global_irq_lock = 0;
-#endif
 	} else {
 		int loglevel_save = console_loglevel;
 #ifdef CONFIG_VT
@@ -100,8 +96,6 @@ static void print_pagetable_entries(pgd_t *pgdir, unsigned long address)
 }
 #endif
 
-asmlinkage void monitor_signal(struct pt_regs *);
-
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -279,7 +273,6 @@ good_area:
  */
 bad_area:
 	up_read(&mm->mmap_sem);
-	monitor_signal(regs);
 
 	/* User mode accesses just cause a SIGSEGV */
 	if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
@@ -292,7 +285,6 @@ bad_area:
 	}
 
 no_context:
-	monitor_signal(regs);
 	/* Are we prepared to handle this kernel fault?  */
 	if (fixup_exception(regs))
 		return;
@@ -338,14 +330,13 @@ no_context:
  */
 out_of_memory:
 	up_read(&mm->mmap_sem);
-	if ((fault_code & MMUFCR_xFC_ACCESS) != MMUFCR_xFC_ACCESS_USR)
-		goto no_context;
-	pagefault_out_of_memory();
-	return;
+	printk(KERN_ALERT "VM: killing process %s\n", tsk->comm);
+	if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
+		do_exit(SIGKILL);
+	goto no_context;
 
 do_sigbus:
 	up_read(&mm->mmap_sem);
-	monitor_signal(regs);
 
 	/*
 	 * Send a sigbus, regardless of whether we were in kernel

+ 23 - 3
arch/mn10300/mm/init.c

@@ -41,6 +41,10 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 unsigned long highstart_pfn, highend_pfn;
 
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+static struct vm_struct user_iomap_vm;
+#endif
+
 /*
  * set up paging
  */
@@ -73,7 +77,24 @@ void __init paging_init(void)
 	/* pass the memory from the bootmem allocator to the main allocator */
 	free_area_init(zones_size);
 
-	__flush_tlb_all();
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+	/* The Atomic Operation Unit registers need to be mapped to userspace
+	 * for all processes.  The following uses vm_area_register_early() to
+	 * reserve the first page of the vmalloc area and sets the pte for that
+	 * page.
+	 *
+	 * glibc hardcodes this virtual mapping, so we're pretty much stuck with
+	 * it from now on.
+	 */
+	user_iomap_vm.flags = VM_USERMAP;
+	user_iomap_vm.size = 1 << PAGE_SHIFT;
+	vm_area_register_early(&user_iomap_vm, PAGE_SIZE);
+	ppte = kernel_vmalloc_ptes;
+	set_pte(ppte, pfn_pte(USER_ATOMIC_OPS_PAGE_ADDR >> PAGE_SHIFT,
+			      PAGE_USERIO));
+#endif
+
+	local_flush_tlb_all();
 }
 
 /*
@@ -84,8 +105,7 @@ void __init mem_init(void)
 	int codesize, reservedpages, datasize, initsize;
 	int tmp;
 
-	if (!mem_map)
-		BUG();
+	BUG_ON(!mem_map);
 
 #define START_PFN	(contig_page_data.bdata->node_min_pfn)
 #define MAX_LOW_PFN	(contig_page_data.bdata->node_low_pfn)

+ 1 - 2
arch/mn10300/mm/misalignment.c

@@ -449,8 +449,7 @@ found_opcode:
 	       regs->pc, opcode, pop->opcode, pop->params[0], pop->params[1]);
 
 	tmp = format_tbl[pop->format].opsz;
-	if (tmp > noc)
-		BUG(); /* match was less complete than it ought to have been */
+	BUG_ON(tmp > noc); /* match was less complete than it ought to have been */
 
 	if (tmp < noc) {
 		tmp = noc - tmp;

+ 11 - 30
arch/mn10300/mm/mmu-context.c

@@ -13,40 +13,15 @@
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
 /*
  * list of the MMU contexts last allocated on each CPU
  */
 unsigned long mmu_context_cache[NR_CPUS] = {
-	[0 ... NR_CPUS - 1] = MMU_CONTEXT_FIRST_VERSION * 2 - 1,
+	[0 ... NR_CPUS - 1] =
+	MMU_CONTEXT_FIRST_VERSION * 2 - (1 - MMU_CONTEXT_TLBPID_LOCK_NR),
 };
-
-/*
- * flush the specified TLB entry
- */
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
-{
-	unsigned long pteu, cnx, flags;
-
-	addr &= PAGE_MASK;
-
-	/* make sure the context doesn't migrate and defend against
-	 * interference from vmalloc'd regions */
-	local_irq_save(flags);
-
-	cnx = mm_context(vma->vm_mm);
-
-	if (cnx != MMU_NO_CONTEXT) {
-		pteu = addr | (cnx & 0x000000ffUL);
-		IPTEU = pteu;
-		DPTEU = pteu;
-		if (IPTEL & xPTEL_V)
-			IPTEL = 0;
-		if (DPTEL & xPTEL_V)
-			DPTEL = 0;
-	}
-
-	local_irq_restore(flags);
-}
+#endif /* CONFIG_MN10300_TLB_USE_PIDR */
 
 /*
  * preemptively set a TLB entry
@@ -63,10 +38,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *pte
 	 * interference from vmalloc'd regions */
 	local_irq_save(flags);
 
+	cnx = ~MMU_NO_CONTEXT;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
 	cnx = mm_context(vma->vm_mm);
+#endif
 
 	if (cnx != MMU_NO_CONTEXT) {
-		pteu = addr | (cnx & 0x000000ffUL);
+		pteu = addr;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+		pteu |= cnx & MMU_CONTEXT_TLBPID_MASK;
+#endif
 		if (!(pte_val(pte) & _PAGE_NX)) {
 			IPTEU = pteu;
 			if (IPTEL & xPTEL_V)

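With CONFIG_MN10300_TLB_USE_PIDR, the word written to IPTEU/DPTEU carries the page's virtual address in its upper bits plus the context's TLB PID in the low byte; without PIDR tagging the PID bits stay clear. A sketch of that composition (the mask value is assumed from the old hard-coded 0x000000ffUL, and the page masking is shown for completeness):

#include <stdio.h>

#define PAGE_MASK			0xfffff000UL
#define MMU_CONTEXT_TLBPID_MASK		0x000000ffUL	/* assumed, as before */

static unsigned long make_pteu(unsigned long addr, unsigned long cnx,
			       int use_pidr)
{
	unsigned long pteu = addr & PAGE_MASK;

	if (use_pidr)
		pteu |= cnx & MMU_CONTEXT_TLBPID_MASK;
	return pteu;
}

int main(void)
{
	printf("%#010lx\n", make_pteu(0x0040234cUL, 0x1000017UL, 1));
	/* -> 0x00402017: page 0x00402000 tagged with PID 0x17 */
	return 0;
}
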
+ 1 - 1
arch/mn10300/mm/pgtable.c

@@ -59,7 +59,7 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 	 * It's enough to flush this one mapping.
 	 * (PGE mappings get flushed as well)
 	 */
-	__flush_tlb_one(vaddr);
+	local_flush_tlb_one(vaddr);
 }
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)

+ 45 - 14
arch/mn10300/mm/tlb-mn10300.S

@@ -27,7 +27,6 @@
 ###############################################################################
 ###############################################################################
 	.type	itlb_miss,@function
 	.type	itlb_miss,@function
 ENTRY(itlb_miss)
 ENTRY(itlb_miss)
-	and	~EPSW_NMID,epsw
 #ifdef CONFIG_GDBSTUB
 #ifdef CONFIG_GDBSTUB
 	movm	[d2,d3,a2],(sp)
 	movm	[d2,d3,a2],(sp)
 #else
 #else
@@ -38,6 +37,12 @@ ENTRY(itlb_miss)
 	nop
 	nop
 #endif
 #endif
 
 
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+	mov	(MMUCTR),d2
+	mov	d2,(MMUCTR)
+#endif
+
+	and	~EPSW_NMID,epsw
 	mov	(IPTEU),d3
 	mov	(IPTEU),d3
 	mov	(PTBR),a2
 	mov	(PTBR),a2
 	mov	d3,d2
 	mov	d3,d2
@@ -56,10 +61,16 @@ ENTRY(itlb_miss)
 	btst	_PAGE_VALID,d2
 	btst	_PAGE_VALID,d2
 	beq	itlb_miss_fault		# jump if doesn't point to a page
 	beq	itlb_miss_fault		# jump if doesn't point to a page
 					# (might be a swap id)
 					# (might be a swap id)
+#if	((_PAGE_ACCESSED & 0xffffff00) == 0)
 	bset	_PAGE_ACCESSED,(0,a2)
 	bset	_PAGE_ACCESSED,(0,a2)
-	and	~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2
+#elif	((_PAGE_ACCESSED & 0xffff00ff) == 0)
+	bset	+(_PAGE_ACCESSED >> 8),(1,a2)
+#else
+#error	"_PAGE_ACCESSED value is out of range"
+#endif
+	and	~xPTEL2_UNUSED1,d2
 itlb_miss_set:
-	mov	d2,(IPTEL)		# change the TLB
+	mov	d2,(IPTEL2)		# change the TLB
 #ifdef CONFIG_GDBSTUB
 	movm	(sp),[d2,d3,a2]
 #endif
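
The #if/#elif ladder above is needed because bset with an immediate sets bits within a single byte in memory: if _PAGE_ACCESSED falls in bits 0-7 the handler targets byte 0 of the PTE, if it falls in bits 8-15 the constant is shifted right by eight and byte 1 is addressed, and any other placement fails the build. The same dispatch written out in C (a sketch; the _PAGE_ACCESSED value is assumed):

	#include <stdint.h>

	#define _PAGE_ACCESSED 0x00000020UL	/* assumed: falls in byte 0 */

	/* Set the accessed bit by poking only the byte that holds it,
	 * mirroring the bset on (0,a2) versus (1,a2) in the assembly. */
	static inline void pte_set_accessed(uint8_t *pte_bytes)
	{
	#if ((_PAGE_ACCESSED & 0xffffff00UL) == 0)
		pte_bytes[0] |= _PAGE_ACCESSED;		/* bit in byte 0 */
	#elif ((_PAGE_ACCESSED & 0xffff00ffUL) == 0)
		pte_bytes[1] |= _PAGE_ACCESSED >> 8;	/* bit in byte 1 */
	#else
	#error "_PAGE_ACCESSED value is out of range"
	#endif
	}
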
@@ -79,7 +90,6 @@ itlb_miss_fault:
 ###############################################################################
 	.type	dtlb_miss,@function
 ENTRY(dtlb_miss)
-	and	~EPSW_NMID,epsw
 #ifdef CONFIG_GDBSTUB
 	movm	[d2,d3,a2],(sp)
 #else
@@ -90,6 +100,12 @@ ENTRY(dtlb_miss)
 	nop
 #endif
 
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+	mov	(MMUCTR),d2
+	mov	d2,(MMUCTR)
+#endif
+
+	and	~EPSW_NMID,epsw
 	mov	(DPTEU),d3
 	mov	(PTBR),a2
 	mov	d3,d2
@@ -108,10 +124,16 @@ ENTRY(dtlb_miss)
 	btst	_PAGE_VALID,d2
 	beq	dtlb_miss_fault		# jump if doesn't point to a page
 					# (might be a swap id)
+#if	((_PAGE_ACCESSED & 0xffffff00) == 0)
 	bset	_PAGE_ACCESSED,(0,a2)
-	and	~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2
+#elif	((_PAGE_ACCESSED & 0xffff00ff) == 0)
+	bset	+(_PAGE_ACCESSED >> 8),(1,a2)
+#else
+#error	"_PAGE_ACCESSED value is out of range"
+#endif
+	and	~xPTEL2_UNUSED1,d2
 dtlb_miss_set:
-	mov	d2,(DPTEL)		# change the TLB
+	mov	d2,(DPTEL2)		# change the TLB
 #ifdef CONFIG_GDBSTUB
 	movm	(sp),[d2,d3,a2]
 #endif
@@ -130,9 +152,15 @@ dtlb_miss_fault:
 ###############################################################################
 	.type	itlb_aerror,@function
 ENTRY(itlb_aerror)
-	and	~EPSW_NMID,epsw
 	add	-4,sp
 	SAVE_ALL
+
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+	mov	(MMUCTR),d1
+	mov	d1,(MMUCTR)
+#endif
+
+	and	~EPSW_NMID,epsw
 	add	-4,sp				# need to pass three params
 
 	# calculate the fault code
@@ -140,15 +168,13 @@ ENTRY(itlb_aerror)
 	or	0x00010000,d1			# it's an instruction fetch
 
 	# determine the page address
-	mov	(IPTEU),a2
-	mov	a2,d0
+	mov	(IPTEU),d0
 	and	PAGE_MASK,d0
 	mov	d0,(12,sp)
 
 	clr	d0
-	mov	d0,(IPTEL)
+	mov	d0,(IPTEL2)
 
-	and	~EPSW_NMID,epsw
 	or	EPSW_IE,epsw
 	mov	fp,d0
 	call	do_page_fault[],0		# do_page_fault(regs,code,addr
@@ -163,10 +189,16 @@ ENTRY(itlb_aerror)
 ###############################################################################
 	.type	dtlb_aerror,@function
 ENTRY(dtlb_aerror)
-	and	~EPSW_NMID,epsw
 	add	-4,sp
 	SAVE_ALL
+
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+	mov	(MMUCTR),d1
+	mov	d1,(MMUCTR)
+#endif
+
 	add	-4,sp				# need to pass three params
+	and	~EPSW_NMID,epsw
 
 	# calculate the fault code
 	movhu	(MMUFCR_DFC),d1
@@ -178,9 +210,8 @@ ENTRY(dtlb_aerror)
 	mov	d0,(12,sp)
 
 	clr	d0
-	mov	d0,(DPTEL)
+	mov	d0,(DPTEL2)
 
-	and	~EPSW_NMID,epsw
 	or	EPSW_IE,epsw
 	mov	fp,d0
 	call	do_page_fault[],0		# do_page_fault(regs,code,addr

Some files were not shown because too many files changed in this diff