/*
 * Copyright 2007-2009 Analog Devices Inc.
 * Philippe Gerum <rpm@xenomai.org>
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/smp.h>
#include <asm/dma.h>
#include <asm/time.h>
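
/* Serializes CoreB bring-up between the boot CPU and the secondary CPU. */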
static DEFINE_SPINLOCK(boot_lock);

/*
 * platform_init_cpus() - Tell the world about how many cores we
 * have. This is called while setting up the architecture support
 * (setup_arch()), so don't be too demanding here with respect to
 * available kernel services.
 */
void __init platform_init_cpus(void)
{
	cpu_set(0, cpu_possible_map);	/* CoreA */
	cpu_set(1, cpu_possible_map);	/* CoreB */
}
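
/*
 * platform_prepare_cpus() - Copy the CoreB bootstrap trampoline into
 * CoreB's L1 instruction SRAM and mark both cores as present.
 */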
void __init platform_prepare_cpus(unsigned int max_cpus)
{
	int len;

	len = &coreb_trampoline_end - &coreb_trampoline_start + 1;
	BUG_ON(len > L1_CODE_LENGTH);

	dma_memcpy((void *)COREB_L1_CODE_START, &coreb_trampoline_start, len);

	/* Both cores ought to be present on a bf561! */
	cpu_set(0, cpu_present_map);	/* CoreA */
	cpu_set(1, cpu_present_map);	/* CoreB */

	printk(KERN_INFO "CoreB bootstrap code to SRAM %p via DMA.\n",
	       (void *)COREB_L1_CODE_START);
}

int __init setup_profiling_timer(unsigned int multiplier) /* not supported */
{
	return -EINVAL;
}
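
/*
 * platform_secondary_init() - Runs on CoreB during bring-up: mirror
 * CoreA's SIC interrupt mask and IAR setup into the SICB registers,
 * record per-CPU data, mark the CPU online, then wait on boot_lock
 * until the boot CPU has finished platform_boot_secondary().
 */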
void __cpuinit platform_secondary_init(unsigned int cpu)
{
	/* Clone setup for peripheral interrupt sources from CoreA. */
	bfin_write_SICB_IMASK0(bfin_read_SIC_IMASK0());
	bfin_write_SICB_IMASK1(bfin_read_SIC_IMASK1());
	SSYNC();

	/* Clone setup for IARs from CoreA. */
	bfin_write_SICB_IAR0(bfin_read_SIC_IAR0());
	bfin_write_SICB_IAR1(bfin_read_SIC_IAR1());
	bfin_write_SICB_IAR2(bfin_read_SIC_IAR2());
	bfin_write_SICB_IAR3(bfin_read_SIC_IAR3());
	bfin_write_SICB_IAR4(bfin_read_SIC_IAR4());
	bfin_write_SICB_IAR5(bfin_read_SIC_IAR5());
	bfin_write_SICB_IAR6(bfin_read_SIC_IAR6());
	bfin_write_SICB_IAR7(bfin_read_SIC_IAR7());
	bfin_write_SICB_IWR0(IWR_DISABLE_ALL);
	bfin_write_SICB_IWR1(IWR_DISABLE_ALL);
	SSYNC();

	/* Store CPU-private information to the cpu_data array. */
	bfin_setup_cpudata(cpu);

	/* We are done with local CPU inits, unblock the boot CPU. */
	set_cpu_online(cpu, true);
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}
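
/*
 * platform_boot_secondary() - Called on the boot CPU to start CoreB.
 * If CoreB has already left reset (COREB_SRAM_INIT clear in SYSCR) a
 * supplemental IPI is enough to wake it; otherwise COREB_SRAM_INIT is
 * cleared so CoreB begins executing the bootstrap code.  The boot CPU
 * then polls for up to one second for the new CPU to come online and
 * panics if it never does.
 */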
int __cpuinit platform_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	printk(KERN_INFO "Booting Core B.\n");

	spin_lock(&boot_lock);

	if ((bfin_read_SYSCR() & COREB_SRAM_INIT) == 0) {
		/* CoreB is already running, send an IPI to wake it up. */
		platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
	} else {
		/* Kick CoreB, which should start execution from CORE_SRAM_BASE. */
		bfin_write_SYSCR(bfin_read_SYSCR() & ~COREB_SRAM_INIT);
		SSYNC();
	}

	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;
		udelay(100);
		barrier();
	}

	if (cpu_online(cpu)) {
		/* Release the lock and let CoreB run. */
		spin_unlock(&boot_lock);
		return 0;
	} else
		panic("CPU%u: processor failed to boot\n", cpu);
}

static const char supple0[] = "IRQ_SUPPLE_0";
static const char supple1[] = "IRQ_SUPPLE_1";
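
/*
 * Register @handler for one of the two supplemental interrupts used as
 * IPIs (IRQ_SUPPLE_0 / IRQ_SUPPLE_1).
 */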
void __init platform_request_ipi(int irq, void *handler)
{
	int ret;
	const char *name = (irq == IRQ_SUPPLE_0) ? supple0 : supple1;

	ret = request_irq(irq, handler, IRQF_DISABLED | IRQF_PERCPU, name, handler);
	if (ret)
		panic("Cannot request %s for IPI service", name);
}
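
/*
 * Raise the supplemental IPI for every CPU in @callmap by setting the
 * matching per-CPU bit in SICB_SYSCR.
 */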
void platform_send_ipi(cpumask_t callmap, int irq)
{
	unsigned int cpu;
	int offset = (irq == IRQ_SUPPLE_0) ? 6 : 8;

	for_each_cpu_mask(cpu, callmap) {
		BUG_ON(cpu >= 2);
		SSYNC();
		bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu)));
		SSYNC();
	}
}
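
/* Same as platform_send_ipi(), but targets a single CPU. */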
void platform_send_ipi_cpu(unsigned int cpu, int irq)
{
	int offset = (irq == IRQ_SUPPLE_0) ? 6 : 8;

	BUG_ON(cpu >= 2);
	SSYNC();
	bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu)));
	SSYNC();
}
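
/*
 * Clear a pending supplemental IPI for @cpu by setting the
 * corresponding bit in SICB_SYSCR.
 */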
void platform_clear_ipi(unsigned int cpu, int irq)
{
	int offset = (irq == IRQ_SUPPLE_0) ? 10 : 12;

	BUG_ON(cpu >= 2);
	SSYNC();
	bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu)));
	SSYNC();
}

/*
 * Set up Core B's local core timer.
 * In SMP, the core timer is used as the clock event device.
 */
void __cpuinit bfin_local_timer_setup(void)
{
#if defined(CONFIG_TICKSOURCE_CORETMR)
	bfin_coretmr_init();
	bfin_coretmr_clockevent_init();
	get_irq_chip(IRQ_CORETMR)->unmask(IRQ_CORETMR);
#else
	/* Power down the core timer, just to play safe. */
	bfin_write_TCNTL(0);
#endif
}