
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/vapier/blackfin

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/vapier/blackfin: (32 commits)
  Blackfin: ip0x: fix unused variable warning
  Blackfin: punt unused HDMA masks
  Blackfin: wire up new syscalls
  Blackfin/ipipe: restore pipeline bits in irqflags
  Blackfin/ipipe: fix deferred pipeline sync for the root stage
  Blackfin/ipipe: upgrade to I-pipe mainline
  Blackfin: cpufreq: fix typos
  Blackfin: enable GENERIC_HARDIRQS_NO_DEPRECATED
  Blackfin: SMP: convert to irq chip functions
  Blackfin: use accessor functions in show_interrupts()
  Blackfin: use proper wrapper functions for modifying irq status
  Blackfin: convert gpio irq_chip to new functions
  Blackfin: convert mac irq_chip to new functions
  Blackfin: convert error irq_chip to new functions
  Blackfin: convert internal irq_chip to new functions
  Blackfin: convert core irq_chip to new functions
  Blackfin: use proper wrappers for irq_desc
  Blackfin: optimize startup code
  Blackfin: SMP: work around anomaly 05000491
  Blackfin: SMP: implement cpu_freq support
  ...
Linus Torvalds 14 years ago
parent
commit
664322a447
52 changed files with 762 additions and 519 deletions
  1. arch/blackfin/Kconfig (+34 -8)
  2. arch/blackfin/configs/BF518F-EZBRD_defconfig (+1 -0)
  3. arch/blackfin/configs/BF526-EZBRD_defconfig (+1 -0)
  4. arch/blackfin/configs/BF527-EZKIT-V2_defconfig (+1 -0)
  5. arch/blackfin/configs/BF527-EZKIT_defconfig (+1 -0)
  6. arch/blackfin/configs/BF533-EZKIT_defconfig (+1 -0)
  7. arch/blackfin/configs/BF533-STAMP_defconfig (+1 -0)
  8. arch/blackfin/configs/BF537-STAMP_defconfig (+1 -0)
  9. arch/blackfin/configs/BF538-EZKIT_defconfig (+1 -0)
  10. arch/blackfin/configs/BF548-EZKIT_defconfig (+1 -0)
  11. arch/blackfin/configs/BF561-EZKIT-SMP_defconfig (+1 -0)
  12. arch/blackfin/configs/BF561-EZKIT_defconfig (+1 -0)
  13. arch/blackfin/include/asm/def_LPBlackfin.h (+14 -2)
  14. arch/blackfin/include/asm/dpmc.h (+3 -0)
  15. arch/blackfin/include/asm/ipipe.h (+32 -63)
  16. arch/blackfin/include/asm/ipipe_base.h (+9 -2)
  17. arch/blackfin/include/asm/irqflags.h (+77 -10)
  18. arch/blackfin/include/asm/smp.h (+7 -2)
  19. arch/blackfin/include/asm/unistd.h (+4 -1)
  20. arch/blackfin/kernel/bfin_dma_5xx.c (+19 -13)
  21. arch/blackfin/kernel/ipipe.c (+39 -45)
  22. arch/blackfin/kernel/irqchip.c (+6 -4)
  23. arch/blackfin/kernel/kgdb.c (+0 -4)
  24. arch/blackfin/kernel/setup.c (+37 -0)
  25. arch/blackfin/kernel/vmlinux.lds.S (+2 -1)
  26. arch/blackfin/mach-bf518/include/mach/defBF512.h (+0 -19)
  27. arch/blackfin/mach-bf527/include/mach/defBF522.h (+0 -19)
  28. arch/blackfin/mach-bf533/boards/ip0x.c (+0 -2)
  29. arch/blackfin/mach-bf537/boards/cm_bf537e.c (+1 -1)
  30. arch/blackfin/mach-bf537/boards/cm_bf537u.c (+1 -1)
  31. arch/blackfin/mach-bf537/boards/dnp5370.c (+0 -20)
  32. arch/blackfin/mach-bf537/boards/tcm_bf537.c (+1 -1)
  33. arch/blackfin/mach-bf537/include/mach/defBF534.h (+0 -18)
  34. arch/blackfin/mach-bf548/Kconfig (+59 -0)
  35. arch/blackfin/mach-bf548/boards/ezkit.c (+48 -7)
  36. arch/blackfin/mach-bf548/include/mach/defBF544.h (+0 -18)
  37. arch/blackfin/mach-bf548/include/mach/defBF547.h (+0 -19)
  38. arch/blackfin/mach-bf548/include/mach/dma.h (+24 -4)
  39. arch/blackfin/mach-bf548/include/mach/irq.h (+0 -4)
  40. arch/blackfin/mach-bf561/boards/cm_bf561.c (+1 -1)
  41. arch/blackfin/mach-bf561/hotplug.c (+9 -12)
  42. arch/blackfin/mach-bf561/secondary.S (+15 -20)
  43. arch/blackfin/mach-bf561/smp.c (+6 -9)
  44. arch/blackfin/mach-common/arch_checks.c (+1 -1)
  45. arch/blackfin/mach-common/cache.S (+32 -6)
  46. arch/blackfin/mach-common/cpufreq.c (+3 -5)
  47. arch/blackfin/mach-common/dpmc.c (+48 -2)
  48. arch/blackfin/mach-common/entry.S (+14 -0)
  49. arch/blackfin/mach-common/head.S (+41 -69)
  50. arch/blackfin/mach-common/interrupt.S (+3 -3)
  51. arch/blackfin/mach-common/ints-priority.c (+144 -92)
  52. arch/blackfin/mach-common/smp.c (+17 -11)

+ 34 - 8
arch/blackfin/Kconfig

@@ -33,6 +33,7 @@ config BLACKFIN
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_IRQ_PROBE
 	select IRQ_PER_CPU if SMP
+	select GENERIC_HARDIRQS_NO_DEPRECATED
 
 config GENERIC_CSUM
 	def_bool y
@@ -690,13 +691,13 @@ endmenu
 
 
 menu "Blackfin Kernel Optimizations"
-	depends on !SMP
 
 comment "Memory Optimizations"
 
 config I_ENTRY_L1
 	bool "Locate interrupt entry code in L1 Memory"
 	default y
+	depends on !SMP
 	help
 	  If enabled, interrupt entry code (STORE/RESTORE CONTEXT) is linked
 	  into L1 instruction memory. (less latency)
@@ -704,6 +705,7 @@ config I_ENTRY_L1
 config EXCPT_IRQ_SYSC_L1
 	bool "Locate entire ASM lowlevel exception / interrupt - Syscall and CPLB handler code in L1 Memory"
 	default y
+	depends on !SMP
 	help
 	  If enabled, the entire ASM lowlevel exception and interrupt entry code
 	  (STORE/RESTORE CONTEXT) is linked into L1 instruction memory.
@@ -712,6 +714,7 @@ config EXCPT_IRQ_SYSC_L1
 config DO_IRQ_L1
 	bool "Locate frequently called do_irq dispatcher function in L1 Memory"
 	default y
+	depends on !SMP
 	help
 	  If enabled, the frequently called do_irq dispatcher function is linked
 	  into L1 instruction memory. (less latency)
@@ -719,6 +722,7 @@ config DO_IRQ_L1
 config CORE_TIMER_IRQ_L1
 	bool "Locate frequently called timer_interrupt() function in L1 Memory"
 	default y
+	depends on !SMP
 	help
 	  If enabled, the frequently called timer_interrupt() function is linked
 	  into L1 instruction memory. (less latency)
@@ -726,6 +730,7 @@ config CORE_TIMER_IRQ_L1
 config IDLE_L1
 	bool "Locate frequently idle function in L1 Memory"
 	default y
+	depends on !SMP
 	help
 	  If enabled, the frequently called idle function is linked
 	  into L1 instruction memory. (less latency)
@@ -733,6 +738,7 @@ config IDLE_L1
 config SCHEDULE_L1
 	bool "Locate kernel schedule function in L1 Memory"
 	default y
+	depends on !SMP
 	help
 	  If enabled, the frequently called kernel schedule is linked
 	  into L1 instruction memory. (less latency)
@@ -740,6 +746,7 @@ config SCHEDULE_L1
 config ARITHMETIC_OPS_L1
 	bool "Locate kernel owned arithmetic functions in L1 Memory"
 	default y
+	depends on !SMP
 	help
 	  If enabled, arithmetic functions are linked
 	  into L1 instruction memory. (less latency)
@@ -747,6 +754,7 @@ config ARITHMETIC_OPS_L1
 config ACCESS_OK_L1
 	bool "Locate access_ok function in L1 Memory"
 	default y
+	depends on !SMP
 	help
 	  If enabled, the access_ok function is linked
 	  into L1 instruction memory. (less latency)
@@ -754,6 +762,7 @@ config ACCESS_OK_L1
 config MEMSET_L1
 	bool "Locate memset function in L1 Memory"
 	default y
+	depends on !SMP
 	help
 	  If enabled, the memset function is linked
 	  into L1 instruction memory. (less latency)
@@ -761,6 +770,7 @@ config MEMSET_L1
 config MEMCPY_L1
 	bool "Locate memcpy function in L1 Memory"
 	default y
+	depends on !SMP
 	help
 	  If enabled, the memcpy function is linked
 	  into L1 instruction memory. (less latency)
@@ -768,6 +778,7 @@ config MEMCPY_L1
 config STRCMP_L1
 	bool "locate strcmp function in L1 Memory"
 	default y
+	depends on !SMP
 	help
 	  If enabled, the strcmp function is linked
 	  into L1 instruction memory (less latency).
@@ -775,6 +786,7 @@ config STRCMP_L1
 config STRNCMP_L1
 	bool "locate strncmp function in L1 Memory"
 	default y
+	depends on !SMP
 	help
 	  If enabled, the strncmp function is linked
 	  into L1 instruction memory (less latency).
@@ -782,6 +794,7 @@ config STRNCMP_L1
 config STRCPY_L1
 	bool "locate strcpy function in L1 Memory"
 	default y
+	depends on !SMP
 	help
 	  If enabled, the strcpy function is linked
 	  into L1 instruction memory (less latency).
@@ -789,6 +802,7 @@ config STRCPY_L1
 config STRNCPY_L1
 	bool "locate strncpy function in L1 Memory"
 	default y
+	depends on !SMP
 	help
 	  If enabled, the strncpy function is linked
 	  into L1 instruction memory (less latency).
@@ -796,6 +810,7 @@ config STRNCPY_L1
 config SYS_BFIN_SPINLOCK_L1
 	bool "Locate sys_bfin_spinlock function in L1 Memory"
 	default y
+	depends on !SMP
 	help
 	  If enabled, sys_bfin_spinlock function is linked
 	  into L1 instruction memory. (less latency)
@@ -803,6 +818,7 @@ config SYS_BFIN_SPINLOCK_L1
 config IP_CHECKSUM_L1
 	bool "Locate IP Checksum function in L1 Memory"
 	default n
+	depends on !SMP
 	help
 	  If enabled, the IP Checksum function is linked
 	  into L1 instruction memory. (less latency)
@@ -811,7 +827,7 @@ config CACHELINE_ALIGNED_L1
 	bool "Locate cacheline_aligned data to L1 Data Memory"
 	default y if !BF54x
 	default n if BF54x
-	depends on !BF531
+	depends on !SMP && !BF531
 	help
 	  If enabled, cacheline_aligned data is linked
 	  into L1 data memory. (less latency)
@@ -819,7 +835,7 @@ config CACHELINE_ALIGNED_L1
 config SYSCALL_TAB_L1
 	bool "Locate Syscall Table L1 Data Memory"
 	default n
-	depends on !BF531
+	depends on !SMP && !BF531
 	help
 	  If enabled, the Syscall LUT is linked
 	  into L1 data memory. (less latency)
@@ -827,16 +843,16 @@ config SYSCALL_TAB_L1
 config CPLB_SWITCH_TAB_L1
 	bool "Locate CPLB Switch Tables L1 Data Memory"
 	default n
-	depends on !BF531
+	depends on !SMP && !BF531
 	help
 	  If enabled, the CPLB Switch Tables are linked
 	  into L1 data memory. (less latency)
 
-config CACHE_FLUSH_L1
-	bool "Locate cache flush funcs in L1 Inst Memory"
+config ICACHE_FLUSH_L1
+	bool "Locate icache flush funcs in L1 Inst Memory"
 	default y
 	help
-	  If enabled, the Blackfin cache flushing functions are linked
+	  If enabled, the Blackfin icache flushing functions are linked
 	  into L1 instruction memory.
 
 	  Note that this might be required to address anomalies, but
@@ -844,9 +860,18 @@ config CACHE_FLUSH_L1
 	  If you are using a processor affected by an anomaly, the build
 	  system will double check for you and prevent it.
 
+config DCACHE_FLUSH_L1
+	bool "Locate dcache flush funcs in L1 Inst Memory"
+	default y
+	depends on !SMP
+	help
+	  If enabled, the Blackfin dcache flushing functions are linked
+	  into L1 instruction memory.
+
 config APP_STACK_L1
 	bool "Support locating application stack in L1 Scratch Memory"
 	default y
+	depends on !SMP
 	help
 	  If enabled the application stack can be located in L1
 	  scratch memory (less latency).
@@ -856,7 +881,7 @@ config APP_STACK_L1
 config EXCEPTION_L1_SCRATCH
 	bool "Locate exception stack in L1 Scratch Memory"
 	default n
-	depends on !APP_STACK_L1
+	depends on !SMP && !APP_STACK_L1
 	help
 	  Whenever an exception occurs, use the L1 Scratch memory for
 	  stack storage.  You cannot place the stacks of FLAT binaries
@@ -868,6 +893,7 @@ comment "Speed Optimizations"
 config BFIN_INS_LOWOVERHEAD
 	bool "ins[bwl] low overhead, higher interrupt latency"
 	default y
+	depends on !SMP
 	help
 	  Reads on the Blackfin are speculative. In Blackfin terms, this means
 	  they can be interrupted at any time (even after they have been issued

+ 1 - 0
arch/blackfin/configs/BF518F-EZBRD_defconfig

@@ -115,6 +115,7 @@ CONFIG_DEBUG_DOUBLEFAULT=y
 CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
 CONFIG_EARLY_PRINTK=y
 CONFIG_CPLB_INFO=y
+CONFIG_BFIN_PSEUDODBG_INSNS=y
 CONFIG_CRYPTO=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRC_CCITT=m

+ 1 - 0
arch/blackfin/configs/BF526-EZBRD_defconfig

@@ -153,6 +153,7 @@ CONFIG_DEBUG_DOUBLEFAULT=y
 CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
 CONFIG_EARLY_PRINTK=y
 CONFIG_CPLB_INFO=y
+CONFIG_BFIN_PSEUDODBG_INSNS=y
 CONFIG_CRYPTO=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRC_CCITT=m

+ 1 - 0
arch/blackfin/configs/BF527-EZKIT-V2_defconfig

@@ -183,5 +183,6 @@ CONFIG_DEBUG_DOUBLEFAULT=y
 CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
 CONFIG_EARLY_PRINTK=y
 CONFIG_CPLB_INFO=y
+CONFIG_BFIN_PSEUDODBG_INSNS=y
 CONFIG_CRYPTO=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set

+ 1 - 0
arch/blackfin/configs/BF527-EZKIT_defconfig

@@ -175,5 +175,6 @@ CONFIG_DEBUG_DOUBLEFAULT=y
 CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
 CONFIG_EARLY_PRINTK=y
 CONFIG_CPLB_INFO=y
+CONFIG_BFIN_PSEUDODBG_INSNS=y
 CONFIG_CRYPTO=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set

+ 1 - 0
arch/blackfin/configs/BF533-EZKIT_defconfig

@@ -108,5 +108,6 @@ CONFIG_DEBUG_DOUBLEFAULT=y
 CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
 CONFIG_EARLY_PRINTK=y
 CONFIG_CPLB_INFO=y
+CONFIG_BFIN_PSEUDODBG_INSNS=y
 CONFIG_CRYPTO=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set

+ 1 - 0
arch/blackfin/configs/BF533-STAMP_defconfig

@@ -122,5 +122,6 @@ CONFIG_DEBUG_DOUBLEFAULT=y
 CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
 CONFIG_EARLY_PRINTK=y
 CONFIG_CPLB_INFO=y
+CONFIG_BFIN_PSEUDODBG_INSNS=y
 CONFIG_CRYPTO=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set

+ 1 - 0
arch/blackfin/configs/BF537-STAMP_defconfig

@@ -133,5 +133,6 @@ CONFIG_DEBUG_DOUBLEFAULT=y
 CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
 CONFIG_EARLY_PRINTK=y
 CONFIG_CPLB_INFO=y
+CONFIG_BFIN_PSEUDODBG_INSNS=y
 CONFIG_CRYPTO=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set

+ 1 - 0
arch/blackfin/configs/BF538-EZKIT_defconfig

@@ -131,5 +131,6 @@ CONFIG_DEBUG_DOUBLEFAULT=y
 CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
 CONFIG_EARLY_PRINTK=y
 CONFIG_CPLB_INFO=y
+CONFIG_BFIN_PSEUDODBG_INSNS=y
 CONFIG_CRYPTO=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set

+ 1 - 0
arch/blackfin/configs/BF548-EZKIT_defconfig

@@ -205,5 +205,6 @@ CONFIG_DEBUG_DOUBLEFAULT=y
 CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
 CONFIG_EARLY_PRINTK=y
 CONFIG_CPLB_INFO=y
+CONFIG_BFIN_PSEUDODBG_INSNS=y
 CONFIG_CRYPTO=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set

+ 1 - 0
arch/blackfin/configs/BF561-EZKIT-SMP_defconfig

@@ -109,5 +109,6 @@ CONFIG_DEBUG_DOUBLEFAULT=y
 CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
 CONFIG_EARLY_PRINTK=y
 CONFIG_CPLB_INFO=y
+CONFIG_BFIN_PSEUDODBG_INSNS=y
 CONFIG_CRYPTO=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set

+ 1 - 0
arch/blackfin/configs/BF561-EZKIT_defconfig

@@ -111,5 +111,6 @@ CONFIG_DEBUG_DOUBLEFAULT=y
 CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
 CONFIG_EARLY_PRINTK=y
 CONFIG_CPLB_INFO=y
+CONFIG_BFIN_PSEUDODBG_INSNS=y
 CONFIG_CRYPTO=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set

+ 14 - 2
arch/blackfin/include/asm/def_LPBlackfin.h

@@ -58,14 +58,26 @@
     ({ BUG(); 0; }); \
 })
 #define bfin_write(addr, val) \
-({ \
+do { \
 	switch (sizeof(*(addr))) { \
 	case 1: bfin_write8(addr, val);  break; \
 	case 2: bfin_write16(addr, val); break; \
 	case 4: bfin_write32(addr, val); break; \
 	default: BUG(); \
 	} \
-})
+} while (0)
+
+#define bfin_write_or(addr, bits) \
+do { \
+	void *__addr = (void *)(addr); \
+	bfin_write(__addr, bfin_read(__addr) | (bits)); \
+} while (0)
+
+#define bfin_write_and(addr, bits) \
+do { \
+	void *__addr = (void *)(addr); \
+	bfin_write(__addr, bfin_read(__addr) & (bits)); \
+} while (0)
 
 #endif /* __ASSEMBLY__ */
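
As a quick illustration of the read-modify-write semantics the new bfin_write_or()/bfin_write_and() helpers provide, here is a minimal, self-contained user-space sketch; the simplified 16-bit bfin_read()/bfin_write() stand-ins and the fake register variable are assumptions for the example only, not the kernel accessors above.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the size-dispatching kernel accessors (assumption: fixed
 * 16-bit access instead of the sizeof()-based dispatch shown above). */
#define bfin_read(addr)        (*(volatile uint16_t *)(addr))
#define bfin_write(addr, val)  (*(volatile uint16_t *)(addr) = (val))

/* Same shape as the new helpers: evaluate the address once, then
 * perform a plain read-modify-write. */
#define bfin_write_or(addr, bits) \
do { \
	volatile uint16_t *__addr = (addr); \
	bfin_write(__addr, bfin_read(__addr) | (bits)); \
} while (0)

#define bfin_write_and(addr, bits) \
do { \
	volatile uint16_t *__addr = (addr); \
	bfin_write(__addr, bfin_read(__addr) & (bits)); \
} while (0)

int main(void)
{
	uint16_t fake_mmr = 0x00F0;         /* hypothetical register image */

	bfin_write_or(&fake_mmr, 0x0001);   /* set bit 0   -> 0x00F1 */
	bfin_write_and(&fake_mmr, ~0x0010); /* clear bit 4 -> 0x00E1 */

	printf("MMR = 0x%04X\n", fake_mmr);
	return 0;
}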
 

+ 3 - 0
arch/blackfin/include/asm/dpmc.h

@@ -125,6 +125,9 @@ void unset_dram_srfs(void);
 
 #define VRPAIR(vlev, freq) (((vlev) << 16) | ((freq) >> 16))
 
+#ifdef CONFIG_CPU_FREQ
+#define CPUFREQ_CPU 0
+#endif
 struct bfin_dpmc_platform_data {
 	const unsigned int *tuple_tab;
 	unsigned short tabsize;

+ 32 - 63
arch/blackfin/include/asm/ipipe.h

@@ -34,11 +34,12 @@
 #include <asm/bitops.h>
 #include <asm/atomic.h>
 #include <asm/traps.h>
+#include <asm/bitsperlong.h>
 
-#define IPIPE_ARCH_STRING     "1.12-00"
+#define IPIPE_ARCH_STRING     "1.16-01"
 #define IPIPE_MAJOR_NUMBER    1
-#define IPIPE_MINOR_NUMBER    12
-#define IPIPE_PATCH_NUMBER    0
+#define IPIPE_MINOR_NUMBER    16
+#define IPIPE_PATCH_NUMBER    1
 
 #ifdef CONFIG_SMP
 #error "I-pipe/blackfin: SMP not implemented"
@@ -55,25 +56,19 @@ do {						\
 #define task_hijacked(p)						\
 	({								\
 		int __x__ = __ipipe_root_domain_p;			\
-		__clear_bit(IPIPE_SYNC_FLAG, &ipipe_root_cpudom_var(status)); \
 		if (__x__)						\
-			hard_local_irq_enable();				\
+			hard_local_irq_enable();			\
 		!__x__;							\
 	})
 
 struct ipipe_domain;
 
 struct ipipe_sysinfo {
-
-	int ncpus;		/* Number of CPUs on board */
-	u64 cpufreq;		/* CPU frequency (in Hz) */
-
-	/* Arch-dependent block */
-
-	struct {
-		unsigned tmirq;	/* Timer tick IRQ */
-		u64 tmfreq;	/* Timer frequency */
-	} archdep;
+	int sys_nr_cpus;	/* Number of CPUs on board */
+	int sys_hrtimer_irq;	/* hrtimer device IRQ */
+	u64 sys_hrtimer_freq;	/* hrtimer device frequency */
+	u64 sys_hrclock_freq;	/* hrclock device frequency */
+	u64 sys_cpu_freq;	/* CPU frequency (Hz) */
 };
 
 #define ipipe_read_tsc(t)					\
@@ -115,9 +110,19 @@ void __ipipe_enable_irqdesc(struct ipipe_domain *ipd,
 void __ipipe_disable_irqdesc(struct ipipe_domain *ipd,
 			     unsigned irq);
 
-#define __ipipe_enable_irq(irq)		(irq_desc[irq].chip->unmask(irq))
+#define __ipipe_enable_irq(irq)						\
+	do {								\
+		struct irq_desc *desc = irq_to_desc(irq);		\
+		struct irq_chip *chip = get_irq_desc_chip(desc);	\
+		chip->irq_unmask(&desc->irq_data);			\
+	} while (0)
 
-#define __ipipe_disable_irq(irq)	(irq_desc[irq].chip->mask(irq))
+#define __ipipe_disable_irq(irq)					\
+	do {								\
+		struct irq_desc *desc = irq_to_desc(irq);		\
+		struct irq_chip *chip = get_irq_desc_chip(desc);	\
+		chip->irq_mask(&desc->irq_data);			\
+	} while (0)
 
 static inline int __ipipe_check_tickdev(const char *devname)
 {
@@ -128,12 +133,11 @@ void __ipipe_enable_pipeline(void);
 
 #define __ipipe_hook_critical_ipi(ipd) do { } while (0)
 
-#define __ipipe_sync_pipeline  ___ipipe_sync_pipeline
-void ___ipipe_sync_pipeline(unsigned long syncmask);
+void ___ipipe_sync_pipeline(void);
 
 void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs);
 
-int __ipipe_get_irq_priority(unsigned irq);
+int __ipipe_get_irq_priority(unsigned int irq);
 
 void __ipipe_serial_debug(const char *fmt, ...);
 
@@ -152,7 +156,10 @@ static inline unsigned long __ipipe_ffnz(unsigned long ul)
 	return ffs(ul) - 1;
 }
 
-#define __ipipe_run_irqtail()  /* Must be a macro */			\
+#define __ipipe_do_root_xirq(ipd, irq)					\
+	((ipd)->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)))
+
+#define __ipipe_run_irqtail(irq)  /* Must be a macro */			\
 	do {								\
 		unsigned long __pending;				\
 		CSYNC();						\
@@ -164,42 +171,8 @@ static inline unsigned long __ipipe_ffnz(unsigned long ul)
 		}							\
 	} while (0)
 
-#define __ipipe_run_isr(ipd, irq)					\
-	do {								\
-		if (!__ipipe_pipeline_head_p(ipd))			\
-			hard_local_irq_enable();				\
-		if (ipd == ipipe_root_domain) {				\
-			if (unlikely(ipipe_virtual_irq_p(irq))) {	\
-				irq_enter();				\
-				ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
-				irq_exit();				\
-			} else 						\
-				ipd->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)); \
-		} else {						\
-			__clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
-			ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
-			/* Attempt to exit the outer interrupt level before \
-			 * starting the deferred IRQ processing. */	\
-			__ipipe_run_irqtail();				\
-			__set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
-		}							\
-		hard_local_irq_disable();					\
-	} while (0)
-
 #define __ipipe_syscall_watched_p(p, sc)	\
-	(((p)->flags & PF_EVNOTIFY) || (unsigned long)sc >= NR_syscalls)
-
-void ipipe_init_irq_threads(void);
-
-int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
-
-#ifdef CONFIG_TICKSOURCE_CORETMR
-#define IRQ_SYSTMR		IRQ_CORETMR
-#define IRQ_PRIOTMR		IRQ_CORETMR
-#else
-#define IRQ_SYSTMR		IRQ_TIMER0
-#define IRQ_PRIOTMR		CONFIG_IRQ_TIMER0
-#endif
+	(ipipe_notifier_enabled_p(p) || (unsigned long)sc >= NR_syscalls)
 
 #ifdef CONFIG_BF561
 #define bfin_write_TIMER_DISABLE(val)	bfin_write_TMRS8_DISABLE(val)
@@ -219,11 +192,11 @@ int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
 
 #define task_hijacked(p)		0
 #define ipipe_trap_notify(t, r)  	0
+#define __ipipe_root_tick_p(regs)	1
 
-#define ipipe_init_irq_threads()		do { } while (0)
-#define ipipe_start_irq_thread(irq, desc)	0
+#endif /* !CONFIG_IPIPE */
 
-#ifndef CONFIG_TICKSOURCE_GPTMR0
+#ifdef CONFIG_TICKSOURCE_CORETMR
 #define IRQ_SYSTMR		IRQ_CORETMR
 #define IRQ_PRIOTMR		IRQ_CORETMR
 #else
@@ -231,10 +204,6 @@ int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
 #define IRQ_PRIOTMR		CONFIG_IRQ_TIMER0
 #endif
 
-#define __ipipe_root_tick_p(regs)	1
-
-#endif /* !CONFIG_IPIPE */
-
 #define ipipe_update_tick_evtdev(evtdev)	do { } while (0)
 
 #endif	/* !__ASM_BLACKFIN_IPIPE_H */
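
The two macros above are the core of the irq_chip conversion in this merge: instead of reaching into irq_desc[irq].chip directly, callers go through irq_to_desc() and the chip's irq_unmask()/irq_mask() callbacks. A hedged sketch of the same pattern in plain function form (not a helper from this patch, and it assumes the IRQ is valid and mapped):

/* Unmask one IRQ via the generic irq_chip callbacks, mirroring
 * __ipipe_enable_irq() above. */
static void example_unmask_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_chip *chip = get_irq_desc_chip(desc);

	if (chip && chip->irq_unmask)
		chip->irq_unmask(&desc->irq_data);
}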

+ 9 - 2
arch/blackfin/include/asm/ipipe_base.h

@@ -24,8 +24,10 @@
 
 #ifdef CONFIG_IPIPE
 
+#include <asm/bitsperlong.h>
+#include <mach/irq.h>
+
 #define IPIPE_NR_XIRQS		NR_IRQS
-#define IPIPE_IRQ_ISHIFT	5	/* 2^5 for 32bits arch. */
 
 /* Blackfin-specific, per-cpu pipeline status */
 #define IPIPE_SYNCDEFER_FLAG	15
@@ -42,11 +44,14 @@
 #define IPIPE_EVENT_INIT	(IPIPE_FIRST_EVENT + 4)
 #define IPIPE_EVENT_EXIT	(IPIPE_FIRST_EVENT + 5)
 #define IPIPE_EVENT_CLEANUP	(IPIPE_FIRST_EVENT + 6)
-#define IPIPE_LAST_EVENT	IPIPE_EVENT_CLEANUP
+#define IPIPE_EVENT_RETURN	(IPIPE_FIRST_EVENT + 7)
+#define IPIPE_LAST_EVENT	IPIPE_EVENT_RETURN
 #define IPIPE_NR_EVENTS		(IPIPE_LAST_EVENT + 1)
 
 #define IPIPE_TIMER_IRQ		IRQ_CORETMR
 
+#define __IPIPE_FEATURE_SYSINFO_V2	1
+
 #ifndef __ASSEMBLY__
 
 extern unsigned long __ipipe_root_status; /* Alias to ipipe_root_cpudom_var(status) */
@@ -63,6 +68,8 @@ void __ipipe_unlock_root(void);
 
 #endif /* !__ASSEMBLY__ */
 
+#define __IPIPE_FEATURE_SYSINFO_V2	1
+
 #endif /* CONFIG_IPIPE */
 
 #endif /* !__ASM_BLACKFIN_IPIPE_BASE_H */

+ 77 - 10
arch/blackfin/include/asm/irqflags.h

@@ -89,15 +89,33 @@ static inline void __hard_local_irq_restore(unsigned long flags)
 #ifdef CONFIG_IPIPE
 
 #include <linux/compiler.h>
-#include <linux/ipipe_base.h>
 #include <linux/ipipe_trace.h>
+/*
+ * Way too many inter-deps between low-level headers in this port, so
+ * we redeclare the required bits we cannot pick from
+ * <asm/ipipe_base.h> to prevent circular dependencies.
+ */
+void __ipipe_stall_root(void);
+void __ipipe_unstall_root(void);
+unsigned long __ipipe_test_root(void);
+unsigned long __ipipe_test_and_stall_root(void);
+void __ipipe_restore_root(unsigned long flags);
+
+#ifdef CONFIG_IPIPE_DEBUG_CONTEXT
+struct ipipe_domain;
+extern struct ipipe_domain ipipe_root;
+void ipipe_check_context(struct ipipe_domain *ipd);
+#define __check_irqop_context(ipd)  ipipe_check_context(&ipipe_root)
+#else /* !CONFIG_IPIPE_DEBUG_CONTEXT */
+#define __check_irqop_context(ipd)  do { } while (0)
+#endif /* !CONFIG_IPIPE_DEBUG_CONTEXT */
 
 /*
  * Interrupt pipe interface to linux/irqflags.h.
  */
 static inline void arch_local_irq_disable(void)
 {
-	ipipe_check_context(ipipe_root_domain);
+	__check_irqop_context();
 	__ipipe_stall_root();
 	barrier();
 }
@@ -105,7 +123,7 @@ static inline void arch_local_irq_disable(void)
 static inline void arch_local_irq_enable(void)
 {
 	barrier();
-	ipipe_check_context(ipipe_root_domain);
+	__check_irqop_context();
 	__ipipe_unstall_root();
 }
 
@@ -119,16 +137,21 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 	return flags == bfin_no_irqs;
 }
 
-static inline void arch_local_irq_save_ptr(unsigned long *_flags)
+static inline unsigned long arch_local_irq_save(void)
 {
-	x = __ipipe_test_and_stall_root() ? bfin_no_irqs : bfin_irq_flags;
+	unsigned long flags;
+
+	__check_irqop_context();
+	flags = __ipipe_test_and_stall_root() ? bfin_no_irqs : bfin_irq_flags;
 	barrier();
+
+	return flags;
 }
 
-static inline unsigned long arch_local_irq_save(void)
+static inline void arch_local_irq_restore(unsigned long flags)
 {
-	ipipe_check_context(ipipe_root_domain);
-	return __hard_local_irq_save();
+	__check_irqop_context();
+	__ipipe_restore_root(flags == bfin_no_irqs);
 }
 
 static inline unsigned long arch_mangle_irq_bits(int virt, unsigned long real)
@@ -192,7 +215,10 @@ static inline void hard_local_irq_restore(unsigned long flags)
 # define hard_local_irq_restore(flags)	__hard_local_irq_restore(flags)
 #endif /* !CONFIG_IPIPE_TRACE_IRQSOFF */
 
-#else /* CONFIG_IPIPE */
+#define hard_local_irq_save_cond()		hard_local_irq_save()
+#define hard_local_irq_restore_cond(flags)	hard_local_irq_restore(flags)
+
+#else /* !CONFIG_IPIPE */
 
 /*
  * Direct interface to linux/irqflags.h.
@@ -212,7 +238,48 @@ static inline void hard_local_irq_restore(unsigned long flags)
 #define hard_local_irq_restore(flags)	__hard_local_irq_restore(flags)
 #define hard_local_irq_enable()		__hard_local_irq_enable()
 #define hard_local_irq_disable()	__hard_local_irq_disable()
-
+#define hard_local_irq_save_cond()		hard_local_save_flags()
+#define hard_local_irq_restore_cond(flags)	do { (void)(flags); } while (0)
 
 #endif /* !CONFIG_IPIPE */
+
+#ifdef CONFIG_SMP
+#define hard_local_irq_save_smp()		hard_local_irq_save()
+#define hard_local_irq_restore_smp(flags)	hard_local_irq_restore(flags)
+#else
+#define hard_local_irq_save_smp()		hard_local_save_flags()
+#define hard_local_irq_restore_smp(flags)	do { (void)(flags); } while (0)
+#endif
+
+/*
+ * Remap the arch-neutral IRQ state manipulation macros to the
+ * blackfin-specific hard_local_irq_* API.
+ */
+#define local_irq_save_hw(flags)			\
+	do {						\
+		(flags) = hard_local_irq_save();	\
+	} while (0)
+#define local_irq_restore_hw(flags)		\
+	do {					\
+		hard_local_irq_restore(flags);	\
+	} while (0)
+#define local_irq_disable_hw()			\
+	do {					\
+		hard_local_irq_disable();	\
+	} while (0)
+#define local_irq_enable_hw()			\
+	do {					\
+		hard_local_irq_enable();	\
+	} while (0)
+#define local_irq_save_hw_notrace(flags)		\
+	do {						\
+		(flags) = __hard_local_irq_save();	\
+	} while (0)
+#define local_irq_restore_hw_notrace(flags)		\
+	do {						\
+		__hard_local_irq_restore(flags);	\
+	} while (0)
+
+#define irqs_disabled_hw()	hard_irqs_disabled()
+
 #endif
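
The local_irq_*_hw() names above are thin remaps onto the hard_local_irq_* API, so they are used exactly like the generic irqflags helpers; a minimal usage sketch (the function itself is illustrative, not from this patch):

static void example_hw_critical_section(void)
{
	unsigned long flags;

	local_irq_save_hw(flags);    /* hard-disable, bypassing the I-pipe soft mask */
	/* ... touch state that must not be preempted by any pipeline domain ... */
	local_irq_restore_hw(flags);
}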

+ 7 - 2
arch/blackfin/include/asm/smp.h

@@ -17,7 +17,12 @@
 
 #define raw_smp_processor_id()  blackfin_core_id()
 
-extern char coreb_trampoline_start, coreb_trampoline_end;
+extern void bfin_relocate_coreb_l1_mem(void);
+
+#if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
+asmlinkage void blackfin_icache_flush_range_l1(unsigned long *ptr);
+extern unsigned long blackfin_iflush_l1_entry[NR_CPUS];
+#endif
 
 struct corelock_slot {
 	int lock;
@@ -34,7 +39,7 @@ extern unsigned long dcache_invld_count[NR_CPUS];
 void smp_icache_flush_range_others(unsigned long start,
 				   unsigned long end);
 #ifdef CONFIG_HOTPLUG_CPU
-void coreb_sleep(u32 sic_iwr0, u32 sic_iwr1, u32 sic_iwr2);
+void coreb_die(void);
 void cpu_die(void);
 void platform_cpu_die(void);
 int __cpu_disable(void);

+ 4 - 1
arch/blackfin/include/asm/unistd.h

@@ -393,8 +393,11 @@
 #define __NR_fanotify_mark	372
 #define __NR_prlimit64		373
 #define __NR_cacheflush		374
+#define __NR_name_to_handle_at	375
+#define __NR_open_by_handle_at	376
+#define __NR_clock_adjtime	377
 
-#define __NR_syscall		375
+#define __NR_syscall		378
 #define NR_syscalls		__NR_syscall
 
 /* Old optional stuff no one actually uses */
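
For illustration only, the newly wired numbers can be exercised from user space through the generic syscall(2) entry point; this assumes a Blackfin toolchain whose <asm/unistd.h> carries the numbers above and a kernel built from this merge:

#include <stdio.h>
#include <time.h>
#include <sys/timex.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	struct timex tx = { .modes = 0 };   /* read-only query of the clock */

	/* __NR_clock_adjtime is 377 on Blackfin after this change. */
	long ret = syscall(__NR_clock_adjtime, CLOCK_REALTIME, &tx);

	printf("clock_adjtime returned %ld\n", ret);
	return 0;
}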

+ 19 - 13
arch/blackfin/kernel/bfin_dma_5xx.c

@@ -84,6 +84,24 @@ static int __init proc_dma_init(void)
 late_initcall(proc_dma_init);
 #endif
 
+static void set_dma_peripheral_map(unsigned int channel, const char *device_id)
+{
+#ifdef CONFIG_BF54x
+	unsigned int per_map;
+
+	switch (channel) {
+		case CH_UART2_RX: per_map = 0xC << 12; break;
+		case CH_UART2_TX: per_map = 0xD << 12; break;
+		case CH_UART3_RX: per_map = 0xE << 12; break;
+		case CH_UART3_TX: per_map = 0xF << 12; break;
+		default:          return;
+	}
+
+	if (strncmp(device_id, "BFIN_UART", 9) == 0)
+		dma_ch[channel].regs->peripheral_map = per_map;
+#endif
+}
+
 /**
  *	request_dma - request a DMA channel
  *
@@ -111,19 +129,7 @@ int request_dma(unsigned int channel, const char *device_id)
 		return -EBUSY;
 	}
 
-#ifdef CONFIG_BF54x
-	if (channel >= CH_UART2_RX && channel <= CH_UART3_TX) {
-		unsigned int per_map;
-		per_map = dma_ch[channel].regs->peripheral_map & 0xFFF;
-		if (strncmp(device_id, "BFIN_UART", 9) == 0)
-			dma_ch[channel].regs->peripheral_map = per_map |
-				((channel - CH_UART2_RX + 0xC)<<12);
-		else
-			dma_ch[channel].regs->peripheral_map = per_map |
-				((channel - CH_UART2_RX + 0x6)<<12);
-	}
-#endif
-
+	set_dma_peripheral_map(channel, device_id);
 	dma_ch[channel].device_id = device_id;
 	dma_ch[channel].irq = 0;
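
For context, a hedged sketch of how a BF54x UART driver hits the new helper: calling request_dma() with a device_id beginning with "BFIN_UART" is what makes set_dma_peripheral_map() reroute the channel's peripheral_map (the id string and error handling here are illustrative only):

/* Illustrative only: claim the UART2 RX DMA channel; the helper above
 * then points its peripheral_map at the UART rather than SPORT2/EPPI. */
if (request_dma(CH_UART2_RX, "BFIN_UART_RX") < 0) {
	pr_err("unable to request UART2 RX DMA channel\n");
	return -EBUSY;
}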
 

+ 39 - 45
arch/blackfin/kernel/ipipe.c

@@ -154,7 +154,7 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
 	 * pending for it.
 	 */
 	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
-	    ipipe_head_cpudom_var(irqpend_himask) == 0)
+	    !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
 		goto out;
 
 	__ipipe_walk_pipeline(head);
@@ -185,25 +185,21 @@ void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
 }
 EXPORT_SYMBOL(__ipipe_disable_irqdesc);
 
-int __ipipe_syscall_root(struct pt_regs *regs)
+asmlinkage int __ipipe_syscall_root(struct pt_regs *regs)
 {
 	struct ipipe_percpu_domain_data *p;
-	unsigned long flags;
+	void (*hook)(void);
 	int ret;
 
+	WARN_ON_ONCE(irqs_disabled_hw());
+
 	/*
-	 * We need to run the IRQ tail hook whenever we don't
-	 * propagate a syscall to higher domains, because we know that
-	 * important operations might be pending there (e.g. Xenomai
-	 * deferred rescheduling).
+	 * We need to run the IRQ tail hook each time we intercept a
+	 * syscall, because we know that important operations might be
+	 * pending there (e.g. Xenomai deferred rescheduling).
 	 */
-
-	if (regs->orig_p0 < NR_syscalls) {
-		void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
-		hook();
-		if ((current->flags & PF_EVNOTIFY) == 0)
-			return 0;
-	}
+	hook = (__typeof__(hook))__ipipe_irq_tail_hook;
+	hook();
 
 	/*
 	 * This routine either returns:
@@ -214,51 +210,47 @@ int __ipipe_syscall_root(struct pt_regs *regs)
 	 * tail work has to be performed (for handling signals etc).
 	 */
 
-	if (!__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
+	if (!__ipipe_syscall_watched_p(current, regs->orig_p0) ||
+	    !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
 		return 0;
 
 	ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);
 
-	flags = hard_local_irq_save();
+	hard_local_irq_disable();
 
-	if (!__ipipe_root_domain_p) {
-		hard_local_irq_restore(flags);
-		return 1;
+	/*
+	 * This is the end of the syscall path, so we may
+	 * safely assume a valid Linux task stack here.
+	 */
+	if (current->ipipe_flags & PF_EVTRET) {
+		current->ipipe_flags &= ~PF_EVTRET;
+		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
 	}
 
-	p = ipipe_root_cpudom_ptr();
-	if ((p->irqpend_himask & IPIPE_IRQMASK_VIRT) != 0)
-		__ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
+	if (!__ipipe_root_domain_p)
+		ret = -1;
+	else {
+		p = ipipe_root_cpudom_ptr();
+		if (__ipipe_ipending_p(p))
+			__ipipe_sync_pipeline();
+	}
 
-	hard_local_irq_restore(flags);
+	hard_local_irq_enable();
 
 	return -ret;
 }
 
-unsigned long ipipe_critical_enter(void (*syncfn) (void))
-{
-	unsigned long flags;
-
-	flags = hard_local_irq_save();
-
-	return flags;
-}
-
-void ipipe_critical_exit(unsigned long flags)
-{
-	hard_local_irq_restore(flags);
-}
-
 static void __ipipe_no_irqtail(void)
 {
 }
 
 int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
 {
-	info->ncpus = num_online_cpus();
-	info->cpufreq = ipipe_cpu_freq();
-	info->archdep.tmirq = IPIPE_TIMER_IRQ;
-	info->archdep.tmfreq = info->cpufreq;
+	info->sys_nr_cpus = num_online_cpus();
+	info->sys_cpu_freq = ipipe_cpu_freq();
+	info->sys_hrtimer_irq = IPIPE_TIMER_IRQ;
+	info->sys_hrtimer_freq = __ipipe_core_clock;
+	info->sys_hrclock_freq = __ipipe_core_clock;
 
 	return 0;
 }
@@ -289,6 +281,7 @@ int ipipe_trigger_irq(unsigned irq)
 asmlinkage void __ipipe_sync_root(void)
 {
 	void (*irq_tail_hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
+	struct ipipe_percpu_domain_data *p;
 	unsigned long flags;
 
 	BUG_ON(irqs_disabled());
@@ -300,19 +293,20 @@ asmlinkage void __ipipe_sync_root(void)
 
 	clear_thread_flag(TIF_IRQ_SYNC);
 
-	if (ipipe_root_cpudom_var(irqpend_himask) != 0)
-		__ipipe_sync_pipeline(IPIPE_IRQMASK_ANY);
+	p = ipipe_root_cpudom_ptr();
+	if (__ipipe_ipending_p(p))
+		__ipipe_sync_pipeline();
 
 	hard_local_irq_restore(flags);
 }
 
-void ___ipipe_sync_pipeline(unsigned long syncmask)
+void ___ipipe_sync_pipeline(void)
 {
 	if (__ipipe_root_domain_p &&
 	    test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
 		return;
 
-	__ipipe_sync_stage(syncmask);
+	__ipipe_sync_stage();
 }
 
 void __ipipe_disable_root_irqs_hw(void)

+ 6 - 4
arch/blackfin/kernel/irqchip.c

@@ -39,21 +39,23 @@ int show_interrupts(struct seq_file *p, void *v)
 	unsigned long flags;
 
 	if (i < NR_IRQS) {
-		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
-		action = irq_desc[i].action;
+		struct irq_desc *desc = irq_to_desc(i);
+
+		raw_spin_lock_irqsave(&desc->lock, flags);
+		action = desc->action;
 		if (!action)
 			goto skip;
 		seq_printf(p, "%3d: ", i);
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-		seq_printf(p, " %8s", irq_desc[i].chip->name);
+		seq_printf(p, " %8s", get_irq_desc_chip(desc)->name);
 		seq_printf(p, "  %s", action->name);
 		for (action = action->next; action; action = action->next)
 			seq_printf(p, "  %s", action->name);
 
 		seq_putc(p, '\n');
  skip:
-		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_printf(p, "NMI: ");
 		for_each_online_cpu(j)

+ 0 - 4
arch/blackfin/kernel/kgdb.c

@@ -422,11 +422,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
 
 struct kgdb_arch arch_kgdb_ops = {
 	.gdb_bpt_instr = {0xa1},
-#ifdef CONFIG_SMP
-	.flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
-#else
 	.flags = KGDB_HW_BREAKPOINT,
-#endif
 	.set_hw_breakpoint = bfin_set_hw_break,
 	.remove_hw_breakpoint = bfin_remove_hw_break,
 	.disable_hw_break = bfin_disable_hw_debug,

+ 37 - 0
arch/blackfin/kernel/setup.c

@@ -215,11 +215,48 @@ void __init bfin_relocate_l1_mem(void)
 
 	early_dma_memcpy_done();
 
+#if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
+	blackfin_iflush_l1_entry[0] = (unsigned long)blackfin_icache_flush_range_l1;
+#endif
+
 	/* if necessary, copy L2 text/data to L2 SRAM */
 	if (L2_LENGTH && l2_len)
 		memcpy(_stext_l2, _l2_lma, l2_len);
 }
 
+#ifdef CONFIG_SMP
+void __init bfin_relocate_coreb_l1_mem(void)
+{
+	unsigned long text_l1_len = (unsigned long)_text_l1_len;
+	unsigned long data_l1_len = (unsigned long)_data_l1_len;
+	unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;
+
+	blackfin_dma_early_init();
+
+	/* if necessary, copy L1 text to L1 instruction SRAM */
+	if (L1_CODE_LENGTH && text_l1_len)
+		early_dma_memcpy((void *)COREB_L1_CODE_START, _text_l1_lma,
+				text_l1_len);
+
+	/* if necessary, copy L1 data to L1 data bank A SRAM */
+	if (L1_DATA_A_LENGTH && data_l1_len)
+		early_dma_memcpy((void *)COREB_L1_DATA_A_START, _data_l1_lma,
+				data_l1_len);
+
+	/* if necessary, copy L1 data B to L1 data bank B SRAM */
+	if (L1_DATA_B_LENGTH && data_b_l1_len)
+		early_dma_memcpy((void *)COREB_L1_DATA_B_START, _data_b_l1_lma,
+				data_b_l1_len);
+
+	early_dma_memcpy_done();
+
+#ifdef CONFIG_ICACHE_FLUSH_L1
+	blackfin_iflush_l1_entry[1] = (unsigned long)blackfin_icache_flush_range_l1 -
+			(unsigned long)_stext_l1 + COREB_L1_CODE_START;
+#endif
+}
+#endif
+
 #ifdef CONFIG_ROMKERNEL
 void __init bfin_relocate_xip_data(void)
 {

+ 2 - 1
arch/blackfin/kernel/vmlinux.lds.S

@@ -136,7 +136,7 @@ SECTIONS
 
 	. = ALIGN(16);
 	INIT_DATA_SECTION(16)
-	PERCPU(32, 4)
+	PERCPU(32, PAGE_SIZE)
 
 	.exit.data :
 	{
@@ -176,6 +176,7 @@ SECTIONS
 	{
 		. = ALIGN(4);
 		__stext_l1 = .;
+		*(.l1.text.head)
 		*(.l1.text)
 #ifdef CONFIG_SCHEDULE_L1
 		SCHED_TEXT

+ 0 - 19
arch/blackfin/mach-bf518/include/mach/defBF512.h

@@ -1201,25 +1201,6 @@
 #define	PGTE_PPI		0x0000			/* 		Enable PPI D15:13			*/
 #define	PGTE_SPORT		0x0800			/* 		Enable DT1PRI/TFS1/TSCLK1	*/
 
-
-/*  ******************  HANDSHAKE DMA (HDMA) MASKS  *********************/
-/* HDMAx_CTL Masks														*/
-#define	HMDMAEN		0x0001	/* Enable Handshake DMA 0/1					*/
-#define	REP			0x0002	/* HDMA Request Polarity					*/
-#define	UTE			0x0004	/* Urgency Threshold Enable					*/
-#define	OIE			0x0010	/* Overflow Interrupt Enable				*/
-#define	BDIE		0x0020	/* Block Done Interrupt Enable				*/
-#define	MBDI		0x0040	/* Mask Block Done IRQ If Pending ECNT		*/
-#define	DRQ			0x0300	/* HDMA Request Type						*/
-#define	DRQ_NONE	0x0000	/* 		No Request							*/
-#define	DRQ_SINGLE	0x0100	/* 		Channels Request Single				*/
-#define	DRQ_MULTI	0x0200	/* 		Channels Request Multi (Default)	*/
-#define	DRQ_URGENT	0x0300	/* 		Channels Request Multi Urgent		*/
-#define	RBC			0x1000	/* Reload BCNT With IBCNT					*/
-#define	PS			0x2000	/* HDMA Pin Status							*/
-#define	OI			0x4000	/* Overflow Interrupt Generated				*/
-#define	BDI			0x8000	/* Block Done Interrupt Generated			*/
-
 /* entry addresses of the user-callable Boot ROM functions */
 
 #define _BOOTROM_RESET 0xEF000000

+ 0 - 19
arch/blackfin/mach-bf527/include/mach/defBF522.h

@@ -1204,25 +1204,6 @@
 #define	PGTE_PPI		0x0000			/* 		Enable PPI D15:13			*/
 #define	PGTE_SPORT		0x0800			/* 		Enable DT1PRI/TFS1/TSCLK1	*/
 
-
-/*  ******************  HANDSHAKE DMA (HDMA) MASKS  *********************/
-/* HDMAx_CTL Masks														*/
-#define	HMDMAEN		0x0001	/* Enable Handshake DMA 0/1					*/
-#define	REP			0x0002	/* HDMA Request Polarity					*/
-#define	UTE			0x0004	/* Urgency Threshold Enable					*/
-#define	OIE			0x0010	/* Overflow Interrupt Enable				*/
-#define	BDIE		0x0020	/* Block Done Interrupt Enable				*/
-#define	MBDI		0x0040	/* Mask Block Done IRQ If Pending ECNT		*/
-#define	DRQ			0x0300	/* HDMA Request Type						*/
-#define	DRQ_NONE	0x0000	/* 		No Request							*/
-#define	DRQ_SINGLE	0x0100	/* 		Channels Request Single				*/
-#define	DRQ_MULTI	0x0200	/* 		Channels Request Multi (Default)	*/
-#define	DRQ_URGENT	0x0300	/* 		Channels Request Multi Urgent		*/
-#define	RBC			0x1000	/* Reload BCNT With IBCNT					*/
-#define	PS			0x2000	/* HDMA Pin Status							*/
-#define	OI			0x4000	/* Overflow Interrupt Generated				*/
-#define	BDI			0x8000	/* Block Done Interrupt Generated			*/
-
 /* entry addresses of the user-callable Boot ROM functions */
 
 #define _BOOTROM_RESET 0xEF000000

+ 0 - 2
arch/blackfin/mach-bf533/boards/ip0x.c

@@ -289,8 +289,6 @@ static struct platform_device *ip0x_devices[] __initdata = {
 
 static int __init ip0x_init(void)
 {
-	int i;
-
 	printk(KERN_INFO "%s(): registering device resources\n", __func__);
 	platform_add_devices(ip0x_devices, ARRAY_SIZE(ip0x_devices));
 

+ 1 - 1
arch/blackfin/mach-bf537/boards/cm_bf537e.c

@@ -775,7 +775,7 @@ static int __init cm_bf537e_init(void)
 #endif
 
 #if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
-	irq_desc[PATA_INT].status |= IRQ_NOAUTOEN;
+	irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
 #endif
 	return 0;
 }
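
The conversion above swaps direct irq_desc[] poking for the irq_set_status_flags() accessor; the intent is unchanged: keep PATA_INT masked at boot until the PATA driver has installed its handler. A hedged sketch of the two halves (the driver-side call is illustrative, not part of this hunk):

/* Board init: mark the IRQ as not auto-enabled, so request_irq()
 * in the PATA driver will not unmask it immediately. */
irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);

/* Driver side, later: unmask explicitly once the handler is ready. */
enable_irq(PATA_INT);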

+ 1 - 1
arch/blackfin/mach-bf537/boards/cm_bf537u.c

@@ -740,7 +740,7 @@ static int __init cm_bf537u_init(void)
 #endif
 
 #if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
-	irq_desc[PATA_INT].status |= IRQ_NOAUTOEN;
+	irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
 #endif
 	return 0;
 }

+ 0 - 20
arch/blackfin/mach-bf537/boards/dnp5370.c

@@ -128,30 +128,11 @@ static struct platform_device asmb_flash_device = {
 
 #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
 
-#define MMC_SPI_CARD_DETECT_INT IRQ_PF5
-
-static int bfin_mmc_spi_init(struct device *dev,
-	irqreturn_t (*detect_int)(int, void *), void *data)
-{
-	return request_irq(MMC_SPI_CARD_DETECT_INT, detect_int,
-		IRQF_TRIGGER_FALLING, "mmc-spi-detect", data);
-}
-
-static void bfin_mmc_spi_exit(struct device *dev, void *data)
-{
-	free_irq(MMC_SPI_CARD_DETECT_INT, data);
-}
-
 static struct bfin5xx_spi_chip mmc_spi_chip_info = {
 	.enable_dma    = 0,	 /* use no dma transfer with this chip*/
 	.bits_per_word = 8,
 };
 
-static struct mmc_spi_platform_data bfin_mmc_spi_pdata = {
-	.init = bfin_mmc_spi_init,
-	.exit = bfin_mmc_spi_exit,
-	.detect_delay = 100, /* msecs */
-};
 #endif
 
 #if defined(CONFIG_MTD_DATAFLASH) || defined(CONFIG_MTD_DATAFLASH_MODULE)
@@ -192,7 +173,6 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
 		.max_speed_hz    = 20000000,
 		.bus_num	 = 0,
 		.chip_select     = 1,
-		.platform_data   = &bfin_mmc_spi_pdata,
 		.controller_data = &mmc_spi_chip_info,
 		.mode	         = SPI_MODE_3,
 	},

+ 1 - 1
arch/blackfin/mach-bf537/boards/tcm_bf537.c

@@ -742,7 +742,7 @@ static int __init tcm_bf537_init(void)
 #endif
 
 #if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
-	irq_desc[PATA_INT].status |= IRQ_NOAUTOEN;
+	irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
 #endif
 	return 0;
 }

+ 0 - 18
arch/blackfin/mach-bf537/include/mach/defBF534.h

@@ -1520,24 +1520,6 @@
 #define	PGTE_PPI		0x0000	/*              Enable PPI D15:13                       */
 #define	PGTE_SPORT		0x0800	/*              Enable DT1PRI/TFS1/TSCLK1       */
 
-/*  ******************  HANDSHAKE DMA (HDMA) MASKS  *********************/
-/* HDMAx_CTL Masks														*/
-#define	HMDMAEN		0x0001	/* Enable Handshake DMA 0/1                                     */
-#define	REP			0x0002	/* HDMA Request Polarity                                        */
-#define	UTE			0x0004	/* Urgency Threshold Enable                                     */
-#define	OIE			0x0010	/* Overflow Interrupt Enable                            */
-#define	BDIE		0x0020	/* Block Done Interrupt Enable                          */
-#define	MBDI		0x0040	/* Mask Block Done IRQ If Pending ECNT          */
-#define	DRQ			0x0300	/* HDMA Request Type                                            */
-#define	DRQ_NONE	0x0000	/*              No Request                                                      */
-#define	DRQ_SINGLE	0x0100	/*              Channels Request Single                         */
-#define	DRQ_MULTI	0x0200	/*              Channels Request Multi (Default)        */
-#define	DRQ_URGENT	0x0300	/*              Channels Request Multi Urgent           */
-#define	RBC			0x1000	/* Reload BCNT With IBCNT                                       */
-#define	PS			0x2000	/* HDMA Pin Status                                                      */
-#define	OI			0x4000	/* Overflow Interrupt Generated                         */
-#define	BDI			0x8000	/* Block Done Interrupt Generated                       */
-
 /* entry addresses of the user-callable Boot ROM functions */
 
 #define _BOOTROM_RESET 0xEF000000 

+ 59 - 0
arch/blackfin/mach-bf548/Kconfig

@@ -42,6 +42,65 @@ config BF548_ATAPI_ALTERNATIVE_PORT
 	  async address or GPIO port F and G. Select y to route it
 	  to GPIO.
 
+choice
+	prompt "UART2 DMA channel selection"
+	depends on SERIAL_BFIN_UART2
+	default UART2_DMA_RX_ON_DMA18
+	help
+		UART2 DMA channel selection
+		RX -> DMA18
+		TX -> DMA19
+		or
+		RX -> DMA13
+		TX -> DMA14
+
+config UART2_DMA_RX_ON_DMA18
+	bool "UART2 DMA RX -> DMA18 TX -> DMA19"
+	help
+		UART2 DMA channel assignment
+		RX -> DMA18
+		TX -> DMA19
+		use SPORT2 default DMA channel
+
+config UART2_DMA_RX_ON_DMA13
+	bool "UART2 DMA RX -> DMA13 TX -> DMA14"
+	help
+		UART2 DMA channel assignment
+		RX -> DMA13
+		TX -> DMA14
+		use EPPI1 EPPI2 default DMA channel
+endchoice
+
+choice
+	prompt "UART3 DMA channel selection"
+	depends on SERIAL_BFIN_UART3
+	default UART3_DMA_RX_ON_DMA20
+	help
+		UART3 DMA channel selection
+		RX -> DMA20
+		TX -> DMA21
+		or
+		RX -> DMA15
+		TX -> DMA16
+
+config UART3_DMA_RX_ON_DMA20
+	bool "UART3 DMA RX -> DMA20 TX -> DMA21"
+	help
+		UART3 DMA channel assignment
+		RX -> DMA20
+		TX -> DMA21
+		use SPORT3 default DMA channel
+
+config UART3_DMA_RX_ON_DMA15
+	bool "UART3 DMA RX -> DMA15 TX -> DMA16"
+	help
+		UART3 DMA channel assignment
+		RX -> DMA15
+		TX -> DMA16
+		use PIXC default DMA channel
+
+endchoice
+
 comment "Interrupt Priority Assignment"
 menu "Priority"
 

+ 48 - 7
arch/blackfin/mach-bf548/boards/ezkit.c

@@ -778,11 +778,12 @@ static struct platform_device bfin_sport3_uart_device = {
 #endif
 
 #if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
-static unsigned short bfin_can_peripherals[] = {
+
+static unsigned short bfin_can0_peripherals[] = {
 	P_CAN0_RX, P_CAN0_TX, 0
 };
 
-static struct resource bfin_can_resources[] = {
+static struct resource bfin_can0_resources[] = {
 	{
 		.start = 0xFFC02A00,
 		.end = 0xFFC02FFF,
@@ -805,14 +806,53 @@ static struct resource bfin_can_resources[] = {
 	},
 };
 
-static struct platform_device bfin_can_device = {
+static struct platform_device bfin_can0_device = {
 	.name = "bfin_can",
-	.num_resources = ARRAY_SIZE(bfin_can_resources),
-	.resource = bfin_can_resources,
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_can0_resources),
+	.resource = bfin_can0_resources,
 	.dev = {
-		.platform_data = &bfin_can_peripherals, /* Passed to driver */
+		.platform_data = &bfin_can0_peripherals, /* Passed to driver */
 	},
 };
+
+static unsigned short bfin_can1_peripherals[] = {
+	P_CAN1_RX, P_CAN1_TX, 0
+};
+
+static struct resource bfin_can1_resources[] = {
+	{
+		.start = 0xFFC03200,
+		.end = 0xFFC037FF,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_CAN1_RX,
+		.end = IRQ_CAN1_RX,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = IRQ_CAN1_TX,
+		.end = IRQ_CAN1_TX,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = IRQ_CAN1_ERROR,
+		.end = IRQ_CAN1_ERROR,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device bfin_can1_device = {
+	.name = "bfin_can",
+	.id = 1,
+	.num_resources = ARRAY_SIZE(bfin_can1_resources),
+	.resource = bfin_can1_resources,
+	.dev = {
+		.platform_data = &bfin_can1_peripherals, /* Passed to driver */
+	},
+};
+
 #endif
 
 #if defined(CONFIG_PATA_BF54X) || defined(CONFIG_PATA_BF54X_MODULE)
@@ -1366,7 +1406,8 @@ static struct platform_device *ezkit_devices[] __initdata = {
 #endif
 
 #if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
-	&bfin_can_device,
+	&bfin_can0_device,
+	&bfin_can1_device,
 #endif
 
 #if defined(CONFIG_PATA_BF54X) || defined(CONFIG_PATA_BF54X_MODULE)

+ 0 - 18
arch/blackfin/mach-bf548/include/mach/defBF544.h

@@ -657,22 +657,4 @@
 
 /* Bit masks for EPPI0 are obtained from common base header for EPPIx (EPPI1 and EPPI2) */
 
-/* Bit masks for HMDMAx_CONTROL */
-
-#define                   HMDMAEN  0x1        /* Handshake MDMA Enable */
-#define                       REP  0x2        /* Handshake MDMA Request Polarity */
-#define                       UTE  0x8        /* Urgency Threshold Enable */
-#define                       OIE  0x10       /* Overflow Interrupt Enable */
-#define                      BDIE  0x20       /* Block Done Interrupt Enable */
-#define                      MBDI  0x40       /* Mask Block Done Interrupt */
-#define                       DRQ  0x300      /* Handshake MDMA Request Type */
-#define                       RBC  0x1000     /* Force Reload of BCOUNT */
-#define                        PS  0x2000     /* Pin Status */
-#define                        OI  0x4000     /* Overflow Interrupt Generated */
-#define                       BDI  0x8000     /* Block Done Interrupt Generated */
-
-/* ******************************************* */
-/*     MULTI BIT MACRO ENUMERATIONS            */
-/* ******************************************* */
-
 #endif /* _DEF_BF544_H */

+ 0 - 19
arch/blackfin/mach-bf548/include/mach/defBF547.h

@@ -1063,23 +1063,4 @@
 
 #define             DMA_COUNT_LOW  0xffff     /* Lower 16-bits of byte count of DMA transfer for DMA master channel */
 
-/* Bit masks for HMDMAx_CONTROL */
-
-#define                   HMDMAEN  0x1        /* Handshake MDMA Enable */
-#define                       REP  0x2        /* Handshake MDMA Request Polarity */
-#define                       UTE  0x8        /* Urgency Threshold Enable */
-#define                       OIE  0x10       /* Overflow Interrupt Enable */
-#define                      BDIE  0x20       /* Block Done Interrupt Enable */
-#define                      MBDI  0x40       /* Mask Block Done Interrupt */
-#define                       DRQ  0x300      /* Handshake MDMA Request Type */
-#define                       RBC  0x1000     /* Force Reload of BCOUNT */
-#define                        PS  0x2000     /* Pin Status */
-#define                        OI  0x4000     /* Overflow Interrupt Generated */
-#define                       BDI  0x8000     /* Block Done Interrupt Generated */
-
-/* ******************************************* */
-/*     MULTI BIT MACRO ENUMERATIONS            */
-/* ******************************************* */
-
-
 #endif /* _DEF_BF547_H */

+ 24 - 4
arch/blackfin/mach-bf548/include/mach/dma.h

@@ -27,17 +27,37 @@
 #define CH_PIXC_OVERLAY		16
 #define CH_PIXC_OUTPUT		17
 #define CH_SPORT2_RX		18
-#define CH_UART2_RX		18
 #define CH_SPORT2_TX		19
-#define CH_UART2_TX		19
 #define CH_SPORT3_RX		20
-#define CH_UART3_RX		20
 #define CH_SPORT3_TX		21
-#define CH_UART3_TX		21
 #define CH_SDH			22
 #define CH_NFC			22
 #define CH_SPI2			23
 
+#if defined(CONFIG_UART2_DMA_RX_ON_DMA13)
+#define CH_UART2_RX		13
+#define IRQ_UART2_RX		BFIN_IRQ(37)	/* UART2 RX USE EPP1 (DMA13) Interrupt */
+#define CH_UART2_TX		14
+#define IRQ_UART2_TX		BFIN_IRQ(38)	/* UART2 RX USE EPP1 (DMA14) Interrupt */
+#else						/* Default USE SPORT2's DMA Channel */
+#define CH_UART2_RX		18
+#define IRQ_UART2_RX		BFIN_IRQ(33)	/* UART2 RX (DMA18) Interrupt */
+#define CH_UART2_TX		19
+#define IRQ_UART2_TX		BFIN_IRQ(34)	/* UART2 TX (DMA19) Interrupt */
+#endif
+
+#if defined(CONFIG_UART3_DMA_RX_ON_DMA15)
+#define CH_UART3_RX		15
+#define IRQ_UART3_RX		BFIN_IRQ(64)	/* UART3 RX USE PIXC IN0 (DMA15) Interrupt */
+#define CH_UART3_TX		16
+#define IRQ_UART3_TX		BFIN_IRQ(65)	/* UART3 TX USE PIXC IN1 (DMA16) Interrupt */
+#else						/* Default USE SPORT3's DMA Channel */
+#define CH_UART3_RX		20
+#define IRQ_UART3_RX		BFIN_IRQ(35)	/* UART3 RX (DMA20) Interrupt */
+#define CH_UART3_TX		21
+#define IRQ_UART3_TX		BFIN_IRQ(36)	/* UART3 TX (DMA21) Interrupt */
+#endif
+
 #define CH_MEM_STREAM0_DEST	24
 #define CH_MEM_STREAM0_SRC	25
 #define CH_MEM_STREAM1_DEST	26
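
The conditional block above means UART drivers and board files keep using the symbolic CH_UART*/IRQ_UART* names and transparently pick up whichever DMA channels the new BF548 Kconfig choice selected. A hedged sketch of the consumer side (the resource table is illustrative, not copied from this merge):

/* Illustrative only: the same source builds against DMA18/19 or DMA13/14,
 * depending on CONFIG_UART2_DMA_RX_ON_DMA13, with no driver changes. */
static struct resource bfin_uart2_resources[] = {
	{
		.start = IRQ_UART2_RX,
		.end   = IRQ_UART2_RX,
		.flags = IORESOURCE_IRQ,
	},
	{
		.start = CH_UART2_RX,
		.end   = CH_UART2_RX,
		.flags = IORESOURCE_DMA,
	},
};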

+ 0 - 4
arch/blackfin/mach-bf548/include/mach/irq.h

@@ -74,13 +74,9 @@ Events         (highest priority)  EMU         0
 #define IRQ_UART2_ERROR		BFIN_IRQ(31)	/* UART2 Status (Error) Interrupt */
 #define IRQ_CAN0_ERROR		BFIN_IRQ(32)	/* CAN0 Status (Error) Interrupt */
 #define IRQ_SPORT2_RX		BFIN_IRQ(33)	/* SPORT2 RX (DMA18) Interrupt */
-#define IRQ_UART2_RX		BFIN_IRQ(33)	/* UART2 RX (DMA18) Interrupt */
 #define IRQ_SPORT2_TX		BFIN_IRQ(34)	/* SPORT2 TX (DMA19) Interrupt */
-#define IRQ_UART2_TX		BFIN_IRQ(34)	/* UART2 TX (DMA19) Interrupt */
 #define IRQ_SPORT3_RX		BFIN_IRQ(35)	/* SPORT3 RX (DMA20) Interrupt */
-#define IRQ_UART3_RX		BFIN_IRQ(35)	/* UART3 RX (DMA20) Interrupt */
 #define IRQ_SPORT3_TX		BFIN_IRQ(36)	/* SPORT3 TX (DMA21) Interrupt */
-#define IRQ_UART3_TX		BFIN_IRQ(36)	/* UART3 TX (DMA21) Interrupt */
 #define IRQ_EPPI1		BFIN_IRQ(37)	/* EPP1 (DMA13) Interrupt */
 #define IRQ_EPPI2		BFIN_IRQ(38)	/* EPP2 (DMA14) Interrupt */
 #define IRQ_SPI1		BFIN_IRQ(39)	/* SPI1 (DMA5) Interrupt */

+ 1 - 1
arch/blackfin/mach-bf561/boards/cm_bf561.c

@@ -541,7 +541,7 @@ static int __init cm_bf561_init(void)
 #endif
 
 #if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
-	irq_desc[PATA_INT].status |= IRQ_NOAUTOEN;
+	irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN);
 #endif
 	return 0;
 }

+ 9 - 12
arch/blackfin/mach-bf561/hotplug.c

@@ -5,30 +5,27 @@
  * Licensed under the GPL-2 or later.
  */
 
+#include <linux/smp.h>
 #include <asm/blackfin.h>
-#include <asm/irq.h>
-#include <asm/smp.h>
-
-#define SIC_SYSIRQ(irq)	(irq - (IRQ_CORETMR + 1))
+#include <mach/pll.h>
 
 int hotplug_coreb;
 
 void platform_cpu_die(void)
 {
-	unsigned long iwr[2] = {0, 0};
-	unsigned long bank = SIC_SYSIRQ(IRQ_SUPPLE_0) / 32;
-	unsigned long bit = 1 << (SIC_SYSIRQ(IRQ_SUPPLE_0) % 32);
-
+	unsigned long iwr;
 	hotplug_coreb = 1;
 
-	iwr[bank] = bit;
-
 	/* disable core timer */
 	bfin_write_TCNTL(0);
 
-	/* clear ipi interrupt IRQ_SUPPLE_0 */
+	/* clear ipi interrupt IRQ_SUPPLE_0 of CoreB */
 	bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (10 + 1)));
 	SSYNC();
 
-	coreb_sleep(iwr[0], iwr[1], 0);
+	/* set CoreB wakeup by ipi0, iwr will be discarded */
+	bfin_iwr_set_sup0(&iwr, &iwr, &iwr);
+	SSYNC();
+
+	coreb_die();
 }

+ 15 - 20
arch/blackfin/mach-bf561/secondary.S

@@ -13,7 +13,11 @@
 #include <asm/asm-offsets.h>
 #include <asm/trace.h>
 
-__INIT
+/*
+ * This code must come first as CoreB is hardcoded (in hardware)
+ * to start at the beginning of its L1 instruction memory.
+ */
+.section .l1.text.head
 
 /* Lay the initial stack into the L1 scratch area of Core B */
 #define INITIAL_STACK	(COREB_L1_SCRATCH_START + L1_SCRATCH_LENGTH - 12)
@@ -160,43 +164,34 @@ ENTRY(_coreb_trampoline_start)
 .LWAIT_HERE:
 	jump .LWAIT_HERE;
 ENDPROC(_coreb_trampoline_start)
-ENTRY(_coreb_trampoline_end)
 
+#ifdef CONFIG_HOTPLUG_CPU
 .section ".text"
-ENTRY(_set_sicb_iwr)
-	P0.H = hi(SICB_IWR0);
-	P0.L = lo(SICB_IWR0);
-	P1.H = hi(SICB_IWR1);
-	P1.L = lo(SICB_IWR1);
-	[P0] = R0;
-	[P1] = R1;
-	SSYNC;
-	RTS;
-ENDPROC(_set_sicb_iwr)
-
-ENTRY(_coreb_sleep)
+ENTRY(_coreb_die)
 	sp.l = lo(INITIAL_STACK);
 	sp.h = hi(INITIAL_STACK);
 	fp = sp;
 	usp = sp;
 
-	call _set_sicb_iwr;
-
 	CLI R2;
 	SSYNC;
 	IDLE;
 	STI R2;
 
 	R0 = IWR_DISABLE_ALL;
-	R1 = IWR_DISABLE_ALL;
-	call _set_sicb_iwr;
+	P0.H = hi(SYSMMR_BASE);
+	P0.L = lo(SYSMMR_BASE);
+	[P0 + (SICB_IWR0 - SYSMMR_BASE)] = R0;
+	[P0 + (SICB_IWR1 - SYSMMR_BASE)] = R0;
+	SSYNC;
 
 	p0.h = hi(COREB_L1_CODE_START);
 	p0.l = lo(COREB_L1_CODE_START);
 	jump (p0);
-ENDPROC(_coreb_sleep)
+ENDPROC(_coreb_die)
+#endif
 
-__CPUINIT
+__INIT
 ENTRY(_coreb_start)
 	[--sp] = reti;
 

+ 6 - 9
arch/blackfin/mach-bf561/smp.c

@@ -30,18 +30,11 @@ void __init platform_init_cpus(void)
 
 void __init platform_prepare_cpus(unsigned int max_cpus)
 {
-	int len;
-
-	len = &coreb_trampoline_end - &coreb_trampoline_start + 1;
-	BUG_ON(len > L1_CODE_LENGTH);
-
-	dma_memcpy((void *)COREB_L1_CODE_START, &coreb_trampoline_start, len);
+	bfin_relocate_coreb_l1_mem();
 
 	/* Both cores ought to be present on a bf561! */
 	cpu_set(0, cpu_present_map); /* CoreA */
 	cpu_set(1, cpu_present_map); /* CoreB */
-
-	printk(KERN_INFO "CoreB bootstrap code to SRAM %p via DMA.\n", (void *)COREB_L1_CODE_START);
 }
 
 int __init setup_profiling_timer(unsigned int multiplier) /* not supported */
@@ -161,9 +154,13 @@ void platform_clear_ipi(unsigned int cpu, int irq)
 void __cpuinit bfin_local_timer_setup(void)
 {
 #if defined(CONFIG_TICKSOURCE_CORETMR)
+	struct irq_chip *chip = get_irq_chip(IRQ_CORETMR);
+	struct irq_desc *desc = irq_to_desc(IRQ_CORETMR);
+
 	bfin_coretmr_init();
 	bfin_coretmr_clockevent_init();
-	get_irq_chip(IRQ_CORETMR)->unmask(IRQ_CORETMR);
+
+	chip->irq_unmask(&desc->irq_data);
 #else
 	/* Power down the core timer, just to play safe. */
 	bfin_write_TCNTL(0);

+ 1 - 1
arch/blackfin/mach-common/arch_checks.c

@@ -61,6 +61,6 @@
 # error "Anomaly 05000220 does not allow you to use Write Back cache with L2 or External Memory"
 #endif
 
-#if ANOMALY_05000491 && !defined(CONFIG_CACHE_FLUSH_L1)
+#if ANOMALY_05000491 && !defined(CONFIG_ICACHE_FLUSH_L1)
 # error You need IFLUSH in L1 inst while Anomaly 05000491 applies
 #endif

+ 32 - 6
arch/blackfin/mach-common/cache.S

@@ -11,12 +11,6 @@
 #include <asm/cache.h>
 #include <asm/page.h>
 
-#ifdef CONFIG_CACHE_FLUSH_L1
-.section .l1.text
-#else
-.text
-#endif
-
 /* 05000443 - IFLUSH cannot be last instruction in hardware loop */
 #if ANOMALY_05000443
 # define BROK_FLUSH_INST "IFLUSH"
@@ -68,11 +62,43 @@
 	RTS;
 .endm
 
+#ifdef CONFIG_ICACHE_FLUSH_L1
+.section .l1.text
+#else
+.text
+#endif
+
 /* Invalidate all instruction cache lines assocoiated with this memory area */
+#ifdef CONFIG_SMP
+# define _blackfin_icache_flush_range _blackfin_icache_flush_range_l1
+#endif
 ENTRY(_blackfin_icache_flush_range)
 	do_flush IFLUSH
 ENDPROC(_blackfin_icache_flush_range)
 
+#ifdef CONFIG_SMP
+.text
+# undef _blackfin_icache_flush_range
+ENTRY(_blackfin_icache_flush_range)
+	p0.L = LO(DSPID);
+	p0.H = HI(DSPID);
+	r3 = [p0];
+	r3 = r3.b (z);
+	p2 = r3;
+	p0.L = _blackfin_iflush_l1_entry;
+	p0.H = _blackfin_iflush_l1_entry;
+	p0 = p0 + (p2 << 2);
+	p1 = [p0];
+	jump (p1);
+ENDPROC(_blackfin_icache_flush_range)
+#endif
+
+#ifdef CONFIG_DCACHE_FLUSH_L1
+.section .l1.text
+#else
+.text
+#endif
+
 /* Throw away all D-cached data in specified region without any obligation to
  * write them back.  Since the Blackfin ISA does not have an "invalidate"
  * instruction, we use flush/invalidate.  Perhaps as a speed optimization we

+ 3 - 5
arch/blackfin/mach-common/cpufreq.c

@@ -1,7 +1,7 @@
 /*
  * Blackfin core clock scaling
  *
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2011 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -16,10 +16,8 @@
 #include <asm/time.h>
 #include <asm/dpmc.h>
 
-#define CPUFREQ_CPU 0
-
 /* this is the table of CCLK frequencies, in Hz */
-/* .index is the entry in the auxillary dpm_state_table[] */
+/* .index is the entry in the auxiliary dpm_state_table[] */
 static struct cpufreq_frequency_table bfin_freq_table[] = {
 	{
 		.frequency = CPUFREQ_TABLE_END,
@@ -46,7 +44,7 @@ static struct bfin_dpm_state {
 
 #if defined(CONFIG_CYCLES_CLOCKSOURCE)
 /*
- * normalized to maximum frequncy offset for CYCLES,
+ * normalized to maximum frequency offset for CYCLES,
  * used in time-ts cycles clock source, but could be used
  * somewhere also.
  */

+ 48 - 2
arch/blackfin/mach-common/dpmc.c

@@ -61,17 +61,63 @@ err_out:
 }
 
 #ifdef CONFIG_CPU_FREQ
+# ifdef CONFIG_SMP
+static void bfin_idle_this_cpu(void *info)
+{
+	unsigned long flags = 0;
+	unsigned long iwr0, iwr1, iwr2;
+	unsigned int cpu = smp_processor_id();
+
+	local_irq_save_hw(flags);
+	bfin_iwr_set_sup0(&iwr0, &iwr1, &iwr2);
+
+	platform_clear_ipi(cpu, IRQ_SUPPLE_0);
+	SSYNC();
+	asm("IDLE;");
+	bfin_iwr_restore(iwr0, iwr1, iwr2);
+
+	local_irq_restore_hw(flags);
+}
+
+static void bfin_idle_cpu(void)
+{
+	smp_call_function(bfin_idle_this_cpu, NULL, 0);
+}
+
+static void bfin_wakeup_cpu(void)
+{
+	unsigned int cpu;
+	unsigned int this_cpu = smp_processor_id();
+	cpumask_t mask = cpu_online_map;
+
+	cpu_clear(this_cpu, mask);
+	for_each_cpu_mask(cpu, mask)
+		platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
+}
+
+# else
+static void bfin_idle_cpu(void) {}
+static void bfin_wakeup_cpu(void) {}
+# endif
+
 static int
 vreg_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
 {
 	struct cpufreq_freqs *freq = data;
 
+	if (freq->cpu != CPUFREQ_CPU)
+		return 0;
+
 	if (val == CPUFREQ_PRECHANGE && freq->old < freq->new) {
+		bfin_idle_cpu();
 		bfin_set_vlev(bfin_get_vlev(freq->new));
 	udelay(pdata->vr_settling_time); /* Wait until voltage has settled */
-
-	} else if (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)
+		bfin_wakeup_cpu();
+	} else if (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) {
+		bfin_idle_cpu();
 		bfin_set_vlev(bfin_get_vlev(freq->new));
+		bfin_wakeup_cpu();
+	}
 
 	return 0;
 }

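The new bfin_idle_cpu()/bfin_wakeup_cpu() pair parks the other core in IDLE while the regulator ramps and kicks it with IRQ_SUPPLE_0 once the voltage is stable. vreg_cpufreq_notifier() itself is an ordinary cpufreq transition notifier; its registration is outside this hunk, but it follows the standard pattern sketched below (the notifier_block and init-function names are assumptions):

#include <linux/init.h>
#include <linux/cpufreq.h>

static struct notifier_block vreg_cpufreq_notifier_block = {
	.notifier_call = vreg_cpufreq_notifier,
};

static int __init vreg_cpufreq_notifier_init(void)
{
	/* Called back around every frequency transition (PRECHANGE/POSTCHANGE). */
	return cpufreq_register_notifier(&vreg_cpufreq_notifier_block,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
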
+ 14 - 0
arch/blackfin/mach-common/entry.S

@@ -952,8 +952,17 @@ ENDPROC(_evt_up_evt14)
 #ifdef CONFIG_IPIPE
 
 _resume_kernel_from_int:
+	r1 = LO(~0x8000) (Z);
+	r1 = r0 & r1;
+	r0 = 1;
+	r0 = r1 - r0;
+	r2 = r1 & r0;
+	cc = r2 == 0;
+	/* Sync the root stage only from the outer interrupt level. */
+	if !cc jump .Lnosync;
 	r0.l = ___ipipe_sync_root;
 	r0.h = ___ipipe_sync_root;
+	[--sp] = reti;
 	[--sp] = rets;
 	[--sp] = ( r7:4, p5:3 );
 	SP += -12;
@@ -961,6 +970,8 @@ _resume_kernel_from_int:
 	SP += 12;
 	( r7:4, p5:3 ) = [sp++];
 	rets = [sp++];
+	reti = [sp++];
+.Lnosync:
 	rts
 #elif defined(CONFIG_PREEMPT)
 
@@ -1738,6 +1749,9 @@ ENTRY(_sys_call_table)
 	.long _sys_fanotify_mark
 	.long _sys_prlimit64
 	.long _sys_cacheflush
+	.long _sys_name_to_handle_at	/* 375 */
+	.long _sys_open_by_handle_at
+	.long _sys_clock_adjtime
 
 	.rept NR_syscalls-(.-_sys_call_table)/4
 	.long _sys_ni_syscall

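The three new .long entries wire up name_to_handle_at, open_by_handle_at and clock_adjtime; the /* 375 */ marker pins the first slot and the .rept fill keeps the table padded out to NR_syscalls. The userspace-visible numbers live in asm/unistd.h (the "4 - 1" hunk of this merge, not shown here); inferred from the table order, they would look roughly like this sketch:

#define __NR_name_to_handle_at	375
#define __NR_open_by_handle_at	376
#define __NR_clock_adjtime	377
/* sentinel one past the last entry; exact name/value assumed */
#define __NR_syscall		378
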
+ 41 - 69
arch/blackfin/mach-common/head.S

@@ -31,6 +31,7 @@ ENDPROC(__init_clear_bss)
 ENTRY(__start)
 	/* R0: argument of command line string, passed from uboot, save it */
 	R7 = R0;
+
 	/* Enable Cycle Counter and Nesting Of Interrupts */
 #ifdef CONFIG_BFIN_SCRATCH_REG_CYCLES
 	R0 = SYSCFG_SNEN;
@@ -38,76 +39,49 @@ ENTRY(__start)
 	R0 = SYSCFG_SNEN | SYSCFG_CCEN;
 #endif
 	SYSCFG = R0;
-	R0 = 0;
-
-	/* Clear Out All the data and pointer Registers */
-	R1 = R0;
-	R2 = R0;
-	R3 = R0;
-	R4 = R0;
-	R5 = R0;
-	R6 = R0;
-
-	P0 = R0;
-	P1 = R0;
-	P2 = R0;
-	P3 = R0;
-	P4 = R0;
-	P5 = R0;
-
-	LC0 = r0;
-	LC1 = r0;
-	L0 = r0;
-	L1 = r0;
-	L2 = r0;
-	L3 = r0;
-
-	/* Clear Out All the DAG Registers */
-	B0 = r0;
-	B1 = r0;
-	B2 = r0;
-	B3 = r0;
-
-	I0 = r0;
-	I1 = r0;
-	I2 = r0;
-	I3 = r0;
-
-	M0 = r0;
-	M1 = r0;
-	M2 = r0;
-	M3 = r0;
+
+	/* Optimization register tricks: keep a base value in the
+	 * reserved P registers so we can use load/store with an
+	 * offset:  R0 = [P5 + <constant>];
+	 *   P5 - core MMR base
+	 *   R6 - 0
+	 */
+	r6 = 0;
+	p5.l = 0;
+	p5.h = hi(COREMMR_BASE);
+
+	/* Zero out registers required by Blackfin ABI */
+
+	/* Disable circular buffers */
+	L0 = r6;
+	L1 = r6;
+	L2 = r6;
+	L3 = r6;
+
+	/* Disable hardware loops in case we were started by 'go' */
+	LC0 = r6;
+	LC1 = r6;
 
 	/*
 	 * Clear ITEST_COMMAND and DTEST_COMMAND registers,
 	 * Leaving these as non-zero can confuse the emulator
 	 */
-	p0.L = LO(DTEST_COMMAND);
-	p0.H = HI(DTEST_COMMAND);
-	[p0] = R0;
-	[p0 + (ITEST_COMMAND - DTEST_COMMAND)] = R0;
+	[p5 + (DTEST_COMMAND - COREMMR_BASE)] = r6;
+	[p5 + (ITEST_COMMAND - COREMMR_BASE)] = r6;
 	CSYNC;
 
 	trace_buffer_init(p0,r0);
-	P0 = R1;
-	R0 = R1;
 
 	/* Turn off the icache */
-	p0.l = LO(IMEM_CONTROL);
-	p0.h = HI(IMEM_CONTROL);
-	R1 = [p0];
-	R0 = ~ENICPLB;
-	R0 = R0 & R1;
-	[p0] = R0;
+	r1 = [p5 + (IMEM_CONTROL - COREMMR_BASE)];
+	BITCLR (r1, ENICPLB_P);
+	[p5 + (IMEM_CONTROL - COREMMR_BASE)] = r1;
 	SSYNC;
 
 	/* Turn off the dcache */
-	p0.l = LO(DMEM_CONTROL);
-	p0.h = HI(DMEM_CONTROL);
-	R1 = [p0];
-	R0 = ~ENDCPLB;
-	R0 = R0 & R1;
-	[p0] = R0;
+	r1 = [p5 + (DMEM_CONTROL - COREMMR_BASE)];
+	BITCLR (r1, ENDCPLB_P);
+	[p5 + (DMEM_CONTROL - COREMMR_BASE)] = r1;
 	SSYNC;
 
 	/* in case of double faults, save a few things */
@@ -122,25 +96,25 @@ ENTRY(__start)
 	 * below
 	 */
 	GET_PDA(p0, r0);
-	r6 = [p0 + PDA_DF_RETX];
+	r5 = [p0 + PDA_DF_RETX];
 	p1.l = _init_saved_retx;
 	p1.h = _init_saved_retx;
-	[p1] = r6;
+	[p1] = r5;
 
-	r6 = [p0 + PDA_DF_DCPLB];
+	r5 = [p0 + PDA_DF_DCPLB];
 	p1.l = _init_saved_dcplb_fault_addr;
 	p1.h = _init_saved_dcplb_fault_addr;
-	[p1] = r6;
+	[p1] = r5;
 
-	r6 = [p0 + PDA_DF_ICPLB];
+	r5 = [p0 + PDA_DF_ICPLB];
 	p1.l = _init_saved_icplb_fault_addr;
 	p1.h = _init_saved_icplb_fault_addr;
-	[p1] = r6;
+	[p1] = r5;
 
-	r6 = [p0 + PDA_DF_SEQSTAT];
+	r5 = [p0 + PDA_DF_SEQSTAT];
 	p1.l = _init_saved_seqstat;
 	p1.h = _init_saved_seqstat;
-	[p1] = r6;
+	[p1] = r5;
 #endif
 
 	/* Initialize stack pointer */
@@ -155,7 +129,7 @@ ENTRY(__start)
 	sti r0;
 #endif
 
-	r0 = 0 (x);
+	r0 = r6;
 	/* Zero out all of the fun bss regions */
 #if L1_DATA_A_LENGTH > 0
 	r1.l = __sbss_l1;
@@ -210,11 +184,9 @@ ENTRY(__start)
 
 	/* EVT15 = _real_start */
 
-	p0.l = lo(EVT15);
-	p0.h = hi(EVT15);
 	p1.l = _real_start;
 	p1.h = _real_start;
-	[p0] = p1;
+	[p5 + (EVT15 - COREMMR_BASE)] = p1;
 	csync;
 
 #ifdef CONFIG_EARLY_PRINTK

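The rewritten startup path keeps two values live for the whole function: R6 holds zero and P5 holds the core-MMR base, so every MMR access becomes a single load/store with a constant offset instead of a two-instruction address load. The same base-plus-offset idea spelled out in C, with the base value written as an explicit assumption (core MMRs sit at 0xFFE00000 on Blackfin) and DTEST_COMMAND/ITEST_COMMAND taken from the usual MMR headers:

#include <asm/blackfin.h>	/* DTEST_COMMAND, ITEST_COMMAND -- assumed location */

#define EXAMPLE_COREMMR_BASE	0xFFE00000UL	/* assumed value of COREMMR_BASE */

/* Sketch: address core MMRs as fixed offsets from one base pointer,
 * mirroring the [P5 + (REG - COREMMR_BASE)] accesses above. */
static inline void example_clear_test_commands(void)
{
	volatile unsigned char *base = (volatile unsigned char *)EXAMPLE_COREMMR_BASE;

	*(volatile unsigned long *)(base + (DTEST_COMMAND - EXAMPLE_COREMMR_BASE)) = 0;
	*(volatile unsigned long *)(base + (ITEST_COMMAND - EXAMPLE_COREMMR_BASE)) = 0;
}
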
+ 3 - 3
arch/blackfin/mach-common/interrupt.S

@@ -274,16 +274,16 @@ ENDPROC(_evt_system_call)
  * level to EVT14 to prepare the caller for a normal interrupt
  * return through RTI.
  *
- * We currently use this facility in two occasions:
+ * We currently use this feature on two occasions:
  *
- * - to branch to __ipipe_irq_tail_hook as requested by a high
+ * - before branching to __ipipe_irq_tail_hook as requested by a high
  *   priority domain after the pipeline delivered an interrupt,
  *   e.g. such as Xenomai, in order to start its rescheduling
  *   procedure, since we may not switch tasks when IRQ levels are
  *   nested on the Blackfin, so we have to fake an interrupt return
  *   so that we may reschedule immediately.
  *
- * - to branch to sync_root_irqs, in order to play any interrupt
+ * - before branching to __ipipe_sync_root(), in order to play any interrupt
  *   pending for the root domain (i.e. the Linux kernel). This lowers
  *   the core priority level enough so that Linux IRQ handlers may
  *   never delay interrupts handled by high priority domains; we defer

+ 144 - 92
arch/blackfin/mach-common/ints-priority.c

@@ -15,6 +15,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
+#include <linux/sched.h>
 #ifdef CONFIG_IPIPE
 #include <linux/ipipe.h>
 #endif
@@ -124,21 +125,21 @@ static void __init search_IAR(void)
  * This is for core internal IRQs
  */
 
-static void bfin_ack_noop(unsigned int irq)
+static void bfin_ack_noop(struct irq_data *d)
 {
 	/* Dummy function.  */
 }
 
-static void bfin_core_mask_irq(unsigned int irq)
+static void bfin_core_mask_irq(struct irq_data *d)
 {
-	bfin_irq_flags &= ~(1 << irq);
+	bfin_irq_flags &= ~(1 << d->irq);
 	if (!hard_irqs_disabled())
 		hard_local_irq_enable();
 }
 
-static void bfin_core_unmask_irq(unsigned int irq)
+static void bfin_core_unmask_irq(struct irq_data *d)
 {
-	bfin_irq_flags |= 1 << irq;
+	bfin_irq_flags |= 1 << d->irq;
 	/*
 	 * If interrupts are enabled, IMASK must contain the same value
 	 * as bfin_irq_flags.  Make sure that invariant holds.  If interrupts
@@ -176,6 +177,11 @@ static void bfin_internal_mask_irq(unsigned int irq)
 	hard_local_irq_restore(flags);
 }
 
+static void bfin_internal_mask_irq_chip(struct irq_data *d)
+{
+	bfin_internal_mask_irq(d->irq);
+}
+
 #ifdef CONFIG_SMP
 static void bfin_internal_unmask_irq_affinity(unsigned int irq,
 		const struct cpumask *affinity)
@@ -211,19 +217,24 @@ static void bfin_internal_unmask_irq(unsigned int irq)
 }
 
 #ifdef CONFIG_SMP
-static void bfin_internal_unmask_irq(unsigned int irq)
+static void bfin_internal_unmask_irq_chip(struct irq_data *d)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-	bfin_internal_unmask_irq_affinity(irq, desc->affinity);
+	bfin_internal_unmask_irq_affinity(d->irq, d->affinity);
 }
 
-static int bfin_internal_set_affinity(unsigned int irq, const struct cpumask *mask)
+static int bfin_internal_set_affinity(struct irq_data *d,
+				      const struct cpumask *mask, bool force)
 {
-	bfin_internal_mask_irq(irq);
-	bfin_internal_unmask_irq_affinity(irq, mask);
+	bfin_internal_mask_irq(d->irq);
+	bfin_internal_unmask_irq_affinity(d->irq, mask);
 
 	return 0;
 }
+#else
+static void bfin_internal_unmask_irq_chip(struct irq_data *d)
+{
+	bfin_internal_unmask_irq(d->irq);
+}
 #endif
 
 #ifdef CONFIG_PM
@@ -279,28 +290,33 @@ int bfin_internal_set_wake(unsigned int irq, unsigned int state)
 
 	return 0;
 }
+
+static int bfin_internal_set_wake_chip(struct irq_data *d, unsigned int state)
+{
+	return bfin_internal_set_wake(d->irq, state);
+}
 #endif
 
 static struct irq_chip bfin_core_irqchip = {
 	.name = "CORE",
-	.ack = bfin_ack_noop,
-	.mask = bfin_core_mask_irq,
-	.unmask = bfin_core_unmask_irq,
+	.irq_ack = bfin_ack_noop,
+	.irq_mask = bfin_core_mask_irq,
+	.irq_unmask = bfin_core_unmask_irq,
 };
 
 static struct irq_chip bfin_internal_irqchip = {
 	.name = "INTN",
-	.ack = bfin_ack_noop,
-	.mask = bfin_internal_mask_irq,
-	.unmask = bfin_internal_unmask_irq,
-	.mask_ack = bfin_internal_mask_irq,
-	.disable = bfin_internal_mask_irq,
-	.enable = bfin_internal_unmask_irq,
+	.irq_ack = bfin_ack_noop,
+	.irq_mask = bfin_internal_mask_irq_chip,
+	.irq_unmask = bfin_internal_unmask_irq_chip,
+	.irq_mask_ack = bfin_internal_mask_irq_chip,
+	.irq_disable = bfin_internal_mask_irq_chip,
+	.irq_enable = bfin_internal_unmask_irq_chip,
 #ifdef CONFIG_SMP
-	.set_affinity = bfin_internal_set_affinity,
+	.irq_set_affinity = bfin_internal_set_affinity,
 #endif
 #ifdef CONFIG_PM
-	.set_wake = bfin_internal_set_wake,
+	.irq_set_wake = bfin_internal_set_wake_chip,
 #endif
 };
 
@@ -312,33 +328,32 @@ static void bfin_handle_irq(unsigned irq)
 	__ipipe_handle_irq(irq, &regs);
 	ipipe_trace_irq_exit(irq);
 #else /* !CONFIG_IPIPE */
-	struct irq_desc *desc = irq_desc + irq;
-	desc->handle_irq(irq, desc);
+	generic_handle_irq(irq);
 #endif  /* !CONFIG_IPIPE */
 }
 
 #ifdef BF537_GENERIC_ERROR_INT_DEMUX
 static int error_int_mask;
 
-static void bfin_generic_error_mask_irq(unsigned int irq)
+static void bfin_generic_error_mask_irq(struct irq_data *d)
 {
-	error_int_mask &= ~(1L << (irq - IRQ_PPI_ERROR));
+	error_int_mask &= ~(1L << (d->irq - IRQ_PPI_ERROR));
 	if (!error_int_mask)
 		bfin_internal_mask_irq(IRQ_GENERIC_ERROR);
 }
 
-static void bfin_generic_error_unmask_irq(unsigned int irq)
+static void bfin_generic_error_unmask_irq(struct irq_data *d)
 {
 	bfin_internal_unmask_irq(IRQ_GENERIC_ERROR);
-	error_int_mask |= 1L << (irq - IRQ_PPI_ERROR);
+	error_int_mask |= 1L << (d->irq - IRQ_PPI_ERROR);
 }
 
 static struct irq_chip bfin_generic_error_irqchip = {
 	.name = "ERROR",
-	.ack = bfin_ack_noop,
-	.mask_ack = bfin_generic_error_mask_irq,
-	.mask = bfin_generic_error_mask_irq,
-	.unmask = bfin_generic_error_unmask_irq,
+	.irq_ack = bfin_ack_noop,
+	.irq_mask_ack = bfin_generic_error_mask_irq,
+	.irq_mask = bfin_generic_error_mask_irq,
+	.irq_unmask = bfin_generic_error_unmask_irq,
 };
 
 static void bfin_demux_error_irq(unsigned int int_err_irq,
@@ -448,8 +463,10 @@ static void bfin_mac_status_ack_irq(unsigned int irq)
 	}
 }
 
-static void bfin_mac_status_mask_irq(unsigned int irq)
+static void bfin_mac_status_mask_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT));
 #ifdef BF537_GENERIC_ERROR_INT_DEMUX
 	switch (irq) {
@@ -466,8 +483,10 @@ static void bfin_mac_status_mask_irq(unsigned int irq)
 	bfin_mac_status_ack_irq(irq);
 }
 
-static void bfin_mac_status_unmask_irq(unsigned int irq)
+static void bfin_mac_status_unmask_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 #ifdef BF537_GENERIC_ERROR_INT_DEMUX
 	switch (irq) {
 	case IRQ_MAC_PHYINT:
@@ -484,7 +503,7 @@ static void bfin_mac_status_unmask_irq(unsigned int irq)
 }
 
 #ifdef CONFIG_PM
-int bfin_mac_status_set_wake(unsigned int irq, unsigned int state)
+int bfin_mac_status_set_wake(struct irq_data *d, unsigned int state)
 {
 #ifdef BF537_GENERIC_ERROR_INT_DEMUX
 	return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state);
@@ -496,12 +515,12 @@ int bfin_mac_status_set_wake(unsigned int irq, unsigned int state)
 
 static struct irq_chip bfin_mac_status_irqchip = {
 	.name = "MACST",
-	.ack = bfin_ack_noop,
-	.mask_ack = bfin_mac_status_mask_irq,
-	.mask = bfin_mac_status_mask_irq,
-	.unmask = bfin_mac_status_unmask_irq,
+	.irq_ack = bfin_ack_noop,
+	.irq_mask_ack = bfin_mac_status_mask_irq,
+	.irq_mask = bfin_mac_status_mask_irq,
+	.irq_unmask = bfin_mac_status_unmask_irq,
 #ifdef CONFIG_PM
-	.set_wake = bfin_mac_status_set_wake,
+	.irq_set_wake = bfin_mac_status_set_wake,
 #endif
 };
 
@@ -538,13 +557,9 @@ static void bfin_demux_mac_status_irq(unsigned int int_err_irq,
 static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
 {
 #ifdef CONFIG_IPIPE
-	_set_irq_handler(irq, handle_level_irq);
-#else
-	struct irq_desc *desc = irq_desc + irq;
-	/* May not call generic set_irq_handler() due to spinlock
-	   recursion. */
-	desc->handle_irq = handle;
+	handle = handle_level_irq;
 #endif
+	__set_irq_handler_unlocked(irq, handle);
 }
 
 static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
@@ -552,17 +567,18 @@ extern void bfin_gpio_irq_prepare(unsigned gpio);
 
 #if !defined(CONFIG_BF54x)
 
-static void bfin_gpio_ack_irq(unsigned int irq)
+static void bfin_gpio_ack_irq(struct irq_data *d)
 {
 	/* AFAIK ack_irq in case mask_ack is provided
 	 * gets only called for edge sense irqs
 	 */
-	set_gpio_data(irq_to_gpio(irq), 0);
+	set_gpio_data(irq_to_gpio(d->irq), 0);
 }
 
-static void bfin_gpio_mask_ack_irq(unsigned int irq)
+static void bfin_gpio_mask_ack_irq(struct irq_data *d)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	unsigned int irq = d->irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	u32 gpionr = irq_to_gpio(irq);
 
 	if (desc->handle_irq == handle_edge_irq)
@@ -571,39 +587,40 @@ static void bfin_gpio_mask_ack_irq(unsigned int irq)
 	set_gpio_maska(gpionr, 0);
 }
 
-static void bfin_gpio_mask_irq(unsigned int irq)
+static void bfin_gpio_mask_irq(struct irq_data *d)
 {
-	set_gpio_maska(irq_to_gpio(irq), 0);
+	set_gpio_maska(irq_to_gpio(d->irq), 0);
 }
 
-static void bfin_gpio_unmask_irq(unsigned int irq)
+static void bfin_gpio_unmask_irq(struct irq_data *d)
 {
-	set_gpio_maska(irq_to_gpio(irq), 1);
+	set_gpio_maska(irq_to_gpio(d->irq), 1);
 }
 
-static unsigned int bfin_gpio_irq_startup(unsigned int irq)
+static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
 {
-	u32 gpionr = irq_to_gpio(irq);
+	u32 gpionr = irq_to_gpio(d->irq);
 
 	if (__test_and_set_bit(gpionr, gpio_enabled))
 		bfin_gpio_irq_prepare(gpionr);
 
-	bfin_gpio_unmask_irq(irq);
+	bfin_gpio_unmask_irq(d);
 
 	return 0;
 }
 
-static void bfin_gpio_irq_shutdown(unsigned int irq)
+static void bfin_gpio_irq_shutdown(struct irq_data *d)
 {
-	u32 gpionr = irq_to_gpio(irq);
+	u32 gpionr = irq_to_gpio(d->irq);
 
-	bfin_gpio_mask_irq(irq);
+	bfin_gpio_mask_irq(d);
 	__clear_bit(gpionr, gpio_enabled);
 	bfin_gpio_irq_free(gpionr);
 }
 
-static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
+static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
 {
+	unsigned int irq = d->irq;
 	int ret;
 	char buf[16];
 	u32 gpionr = irq_to_gpio(irq);
@@ -664,9 +681,9 @@ static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
 }
 
 #ifdef CONFIG_PM
-int bfin_gpio_set_wake(unsigned int irq, unsigned int state)
+int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
 {
-	return gpio_pm_wakeup_ctrl(irq_to_gpio(irq), state);
+	return gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state);
 }
 #endif
 
@@ -818,10 +835,10 @@ void init_pint_lut(void)
 	}
 }
 
-static void bfin_gpio_ack_irq(unsigned int irq)
+static void bfin_gpio_ack_irq(struct irq_data *d)
 {
-	struct irq_desc *desc = irq_desc + irq;
-	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
+	struct irq_desc *desc = irq_to_desc(d->irq);
+	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
 	u32 pintbit = PINT_BIT(pint_val);
 	u32 bank = PINT_2_BANK(pint_val);
 
@@ -835,10 +852,10 @@ static void bfin_gpio_ack_irq(unsigned int irq)
 
 }
 
-static void bfin_gpio_mask_ack_irq(unsigned int irq)
+static void bfin_gpio_mask_ack_irq(struct irq_data *d)
 {
-	struct irq_desc *desc = irq_desc + irq;
-	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
+	struct irq_desc *desc = irq_to_desc(d->irq);
+	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
 	u32 pintbit = PINT_BIT(pint_val);
 	u32 bank = PINT_2_BANK(pint_val);
 
@@ -853,24 +870,25 @@ static void bfin_gpio_mask_ack_irq(unsigned int irq)
 	pint[bank]->mask_clear = pintbit;
 }
 
-static void bfin_gpio_mask_irq(unsigned int irq)
+static void bfin_gpio_mask_irq(struct irq_data *d)
 {
-	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
+	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
 
 	pint[PINT_2_BANK(pint_val)]->mask_clear = PINT_BIT(pint_val);
 }
 
-static void bfin_gpio_unmask_irq(unsigned int irq)
+static void bfin_gpio_unmask_irq(struct irq_data *d)
 {
-	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
+	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
 	u32 pintbit = PINT_BIT(pint_val);
 	u32 bank = PINT_2_BANK(pint_val);
 
 	pint[bank]->mask_set = pintbit;
 }
 
-static unsigned int bfin_gpio_irq_startup(unsigned int irq)
+static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	u32 gpionr = irq_to_gpio(irq);
 	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
 
@@ -884,22 +902,23 @@ static unsigned int bfin_gpio_irq_startup(unsigned int irq)
 	if (__test_and_set_bit(gpionr, gpio_enabled))
 		bfin_gpio_irq_prepare(gpionr);
 
-	bfin_gpio_unmask_irq(irq);
+	bfin_gpio_unmask_irq(d);
 
 	return 0;
 }
 
-static void bfin_gpio_irq_shutdown(unsigned int irq)
+static void bfin_gpio_irq_shutdown(struct irq_data *d)
 {
-	u32 gpionr = irq_to_gpio(irq);
+	u32 gpionr = irq_to_gpio(d->irq);
 
-	bfin_gpio_mask_irq(irq);
+	bfin_gpio_mask_irq(d);
 	__clear_bit(gpionr, gpio_enabled);
 	bfin_gpio_irq_free(gpionr);
 }
 
-static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
+static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
 {
+	unsigned int irq = d->irq;
 	int ret;
 	char buf[16];
 	u32 gpionr = irq_to_gpio(irq);
@@ -961,10 +980,10 @@ static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
 u32 pint_saved_masks[NR_PINT_SYS_IRQS];
 u32 pint_wakeup_masks[NR_PINT_SYS_IRQS];
 
-int bfin_gpio_set_wake(unsigned int irq, unsigned int state)
+int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
 {
 	u32 pint_irq;
-	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
+	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
 	u32 bank = PINT_2_BANK(pint_val);
 	u32 pintbit = PINT_BIT(pint_val);
 
@@ -1066,17 +1085,17 @@ static void bfin_demux_gpio_irq(unsigned int inta_irq,
 
 static struct irq_chip bfin_gpio_irqchip = {
 	.name = "GPIO",
-	.ack = bfin_gpio_ack_irq,
-	.mask = bfin_gpio_mask_irq,
-	.mask_ack = bfin_gpio_mask_ack_irq,
-	.unmask = bfin_gpio_unmask_irq,
-	.disable = bfin_gpio_mask_irq,
-	.enable = bfin_gpio_unmask_irq,
-	.set_type = bfin_gpio_irq_type,
-	.startup = bfin_gpio_irq_startup,
-	.shutdown = bfin_gpio_irq_shutdown,
+	.irq_ack = bfin_gpio_ack_irq,
+	.irq_mask = bfin_gpio_mask_irq,
+	.irq_mask_ack = bfin_gpio_mask_ack_irq,
+	.irq_unmask = bfin_gpio_unmask_irq,
+	.irq_disable = bfin_gpio_mask_irq,
+	.irq_enable = bfin_gpio_unmask_irq,
+	.irq_set_type = bfin_gpio_irq_type,
+	.irq_startup = bfin_gpio_irq_startup,
+	.irq_shutdown = bfin_gpio_irq_shutdown,
 #ifdef CONFIG_PM
-	.set_wake = bfin_gpio_set_wake,
+	.irq_set_wake = bfin_gpio_set_wake,
 #endif
 };
 
@@ -1373,7 +1392,7 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
 	struct ipipe_domain *this_domain = __ipipe_current_domain;
 	struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
 	struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
-	int irq, s;
+	int irq, s = 0;
 
 	if (likely(vec == EVT_IVTMR_P))
 		irq = IRQ_CORETMR;
@@ -1423,6 +1442,21 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
 			__raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
 	}
 
+	/*
+	 * We don't want Linux interrupt handlers to run at the
+	 * current core priority level (i.e. < EVT15), since this
+	 * might delay other interrupts handled by a high priority
+	 * domain. Here is what we do instead:
+	 *
+	 * - we raise the SYNCDEFER bit to prevent
+	 * __ipipe_handle_irq() from syncing the pipeline for the root
+	 * stage for the incoming interrupt. Upon return, that IRQ is
+	 * pending in the interrupt log.
+	 *
+	 * - we raise the TIF_IRQ_SYNC bit for the current thread, so
+	 * that _schedule_and_signal_from_int will eventually sync the
+	 * pipeline from EVT15.
+	 */
 	if (this_domain == ipipe_root_domain) {
 		s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
 		barrier();
@@ -1432,6 +1466,24 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
 	__ipipe_handle_irq(irq, regs);
 	ipipe_trace_irq_exit(irq);
 
+	if (user_mode(regs) &&
+	    !ipipe_test_foreign_stack() &&
+	    (current->ipipe_flags & PF_EVTRET) != 0) {
+		/*
+		 * Testing for user_mode() does NOT fully eliminate
+		 * foreign stack contexts, because of the forged
+		 * interrupt returns we do through
+		 * __ipipe_call_irqtail. In that case, we might have
+		 * preempted a foreign stack context in a high
+		 * priority domain, with a single interrupt level now
+		 * pending after the irqtail unwinding is done. In
+		 * which case user_mode() is now true, and the event
+		 * gets dispatched spuriously.
+		 */
+		current->ipipe_flags &= ~PF_EVTRET;
+		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
+	}
+
 	if (this_domain == ipipe_root_domain) {
 		set_thread_flag(TIF_IRQ_SYNC);
 		if (!s) {

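Most of this file follows one conversion recipe: keep the irq-number based helper wherever other code still calls it with a raw number (the error and MAC demuxers do), and add a thin *_chip wrapper that unpacks d->irq so the same logic can be plugged into the new struct irq_data based irq_chip fields. A stripped-down sketch of that shim pattern, with placeholder names:

#include <linux/irq.h>

static void example_mask_irq(unsigned int irq)
{
	/* ...mask the interrupt source identified by 'irq' in the SIC... */
}

/* Thin adapter: new-style irq_chip callbacks take struct irq_data. */
static void example_mask_irq_chip(struct irq_data *d)
{
	example_mask_irq(d->irq);
}

static struct irq_chip example_irqchip = {
	.name		= "EXAMPLE",
	.irq_mask	= example_mask_irq_chip,
};
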
+ 17 - 11
arch/blackfin/mach-common/smp.c

@@ -40,6 +40,10 @@
  */
 struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));
 
+#ifdef CONFIG_ICACHE_FLUSH_L1
+unsigned long blackfin_iflush_l1_entry[NR_CPUS];
+#endif
+
 void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
 	*init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
 	*init_saved_dcplb_fault_addr_coreb;
@@ -105,10 +109,10 @@ static void ipi_flush_icache(void *info)
 	struct blackfin_flush_data *fdata = info;
 
 	/* Invalidate the memory holding the bounds of the flushed region. */
-	blackfin_dcache_invalidate_range((unsigned long)fdata,
-					 (unsigned long)fdata + sizeof(*fdata));
+	invalidate_dcache_range((unsigned long)fdata,
+		(unsigned long)fdata + sizeof(*fdata));
 
-	blackfin_icache_flush_range(fdata->start, fdata->end);
+	flush_icache_range(fdata->start, fdata->end);
 }
 
 static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
@@ -244,12 +248,13 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
 {
 	cpumask_t callmap;
 
+	preempt_disable();
 	callmap = cpu_online_map;
 	cpu_clear(smp_processor_id(), callmap);
-	if (cpus_empty(callmap))
-		return 0;
+	if (!cpus_empty(callmap))
+		smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
 
-	smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
+	preempt_enable();
 
 	return 0;
 }
@@ -286,12 +291,13 @@ void smp_send_stop(void)
 {
 	cpumask_t callmap;
 
+	preempt_disable();
 	callmap = cpu_online_map;
 	cpu_clear(smp_processor_id(), callmap);
-	if (cpus_empty(callmap))
-		return;
+	if (!cpus_empty(callmap))
+		smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
 
-	smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
+	preempt_enable();
 
 	return;
 }
@@ -361,8 +367,6 @@ void __cpuinit secondary_start_kernel(void)
 	 */
 	init_exception_vectors();
 
-	bfin_setup_caches(cpu);
-
 	local_irq_disable();
 
 	/* Attach the new idle task to the global mm. */
@@ -381,6 +385,8 @@ void __cpuinit secondary_start_kernel(void)
 
 	local_irq_enable();
 
+	bfin_setup_caches(cpu);
+
 	/*
 	 * Calibrate loops per jiffy value.
 	 * IRQs need to be enabled here - D-cache can be invalidated