/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

cpumask_t phys_cpu_present_map;         /* Bitmask of available CPUs */
volatile cpumask_t cpu_callin_map;      /* Bitmask of started secondaries */
cpumask_t cpu_online_map;               /* Bitmask of currently online CPUs */
int __cpu_number_map[NR_CPUS];          /* Map physical to logical */
int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */

EXPORT_SYMBOL(phys_cpu_present_map);
EXPORT_SYMBOL(cpu_online_map);

static void smp_tune_scheduling(void)
{
        struct cache_desc *cd = &current_cpu_data.scache;
        unsigned long cachesize;        /* bytes */
        unsigned long cpu_khz;

        /*
         * Crude estimate until we actually measure ...
         */
        cpu_khz = loops_per_jiffy * 2 * HZ / 1000;

        /*
         * Rough estimate for SMP scheduling: this is the number of
         * cycles it takes for a fully memory-limited process to flush
         * the SMP-local cache.
         *
         * (For a P5 this pretty much means we will choose another idle
         * CPU almost always at wakeup time (this is due to the small
         * L1 cache), on PIIs it's around 50-100 usecs, depending on
         * the cache size.)
         */
        if (!cpu_khz)
                return;

        cachesize = cd->linesz * cd->sets * cd->ways;
}

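/*
 * Worked example of the estimates above (added commentary, not part of
 * the original code): with loops_per_jiffy == 500000 and HZ == 100,
 *
 *      cpu_khz   = 500000 * 2 * 100 / 1000 = 100000   (a ~100 MHz core)
 *
 * and for a hypothetical L2 cache with 32-byte lines, 1024 sets and
 * 4 ways,
 *
 *      cachesize = 32 * 1024 * 4 = 131072 bytes (128 kB).
 */
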
extern void __init calibrate_delay(void);
extern ATTRIB_NORET void cpu_idle(void);

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
        unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
        /* Only do cpu_probe for first TC of CPU */
        if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
#endif /* CONFIG_MIPS_MT_SMTC */
        cpu_probe();
        cpu_report();
        per_cpu_trap_init();
        prom_init_secondary();

        /*
         * XXX parity protection should be folded in here when it's converted
         * to an option instead of something based on .cputype
         */
        calibrate_delay();
        preempt_disable();
        cpu = smp_processor_id();
        cpu_data[cpu].udelay_val = loops_per_jiffy;

        prom_smp_finish();

        cpu_set(cpu, cpu_callin_map);

        cpu_idle();
}

DEFINE_SPINLOCK(smp_call_lock);

struct call_data_struct *call_data;

/*
 * Run a function on all other CPUs.
 *  <func>      The function to run.  This must be fast and non-blocking.
 *  <info>      An arbitrary pointer to pass to the function.
 *  <retry>     If true, keep retrying until ready.
 *  <wait>      If true, wait until function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>,
 * are executing it, or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler:
 *
 * CPU A                               CPU B
 * Disable interrupts
 *                                     smp_call_function()
 *                                     Take call_lock
 *                                     Send IPIs
 *                                     Wait for all cpus to acknowledge IPI
 *                                     CPU A has not responded, spin waiting
 *                                     for cpu A to respond, holding call_lock
 * smp_call_function()
 * Spin waiting for call_lock
 * Deadlock                            Deadlock
 */
int smp_call_function(void (*func)(void *info), void *info, int retry,
                      int wait)
{
        struct call_data_struct data;
        int i, cpus = num_online_cpus() - 1;
        int cpu = smp_processor_id();

        /*
         * Can die spectacularly if this CPU isn't yet marked online
         */
        BUG_ON(!cpu_online(cpu));

        if (!cpus)
                return 0;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        spin_lock(&smp_call_lock);
        call_data = &data;
        mb();

        /* Send a message to all other CPUs and wait for them to respond */
        for_each_online_cpu(i)
                if (i != cpu)
                        core_send_ipi(i, SMP_CALL_FUNCTION);

        /* Wait for response */
        /* FIXME: lock-up detection, backtrace on lock-up */
        while (atomic_read(&data.started) != cpus)
                barrier();

        if (wait)
                while (atomic_read(&data.finished) != cpus)
                        barrier();
        call_data = NULL;
        spin_unlock(&smp_call_lock);

        return 0;
}

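/*
 * Usage sketch (added illustration, not part of the original code):
 * run a trivial callback on every other CPU and then locally.  The
 * callback and wrapper names are hypothetical.  The callback runs from
 * IPI context, so it must be fast and non-blocking, and the caller
 * must not have interrupts disabled (see the deadlock diagram above).
 */
#if 0
static void announce_cpu(void *info)
{
        printk(KERN_INFO "CPU %d handled the cross-call\n",
               smp_processor_id());
}

static void announce_all_cpus(void)
{
        preempt_disable();              /* pin us to one CPU */
        /* wait == 1: return only after every remote CPU has finished */
        smp_call_function(announce_cpu, NULL, 1, 1);
        announce_cpu(NULL);             /* the caller's own CPU is skipped */
        preempt_enable();
}
#endif
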
void smp_call_function_interrupt(void)
{
        void (*func)(void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;

        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function.
         */
        mb();
        atomic_inc(&call_data->started);

        /*
         * At this point the info structure may be out of scope unless wait==1.
         */
        irq_enter();
        (*func)(info);
        irq_exit();

        if (wait) {
                mb();
                atomic_inc(&call_data->finished);
        }
}

static void stop_this_cpu(void *dummy)
{
        /*
         * Remove this CPU:
         */
        cpu_clear(smp_processor_id(), cpu_online_map);
        local_irq_enable();     /* May need to service _machine_restart IPI */
        for (;;);               /* Wait if available. */
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 1, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        prom_cpus_done();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        init_new_context(current, &init_mm);
        current_thread_info()->cpu = 0;
        smp_tune_scheduling();
        plat_prepare_cpus(max_cpus);
#ifndef CONFIG_HOTPLUG_CPU
        cpu_present_map = cpu_possible_map;
#endif
}

/* preload SMP state for boot cpu */
void __devinit smp_prepare_boot_cpu(void)
{
        /*
         * This assumes that bootup is always handled by the processor
         * with the logical and physical number 0.
         */
        __cpu_number_map[0] = 0;
        __cpu_logical_map[0] = 0;
        cpu_set(0, phys_cpu_present_map);
        cpu_set(0, cpu_online_map);
        cpu_set(0, cpu_callin_map);
}

/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
 * physical, not logical.
 */
int __devinit __cpu_up(unsigned int cpu)
{
        struct task_struct *idle;

        /*
         * Processor goes to start_secondary(), sets online flag.
         * The following code is purely to make sure
         * Linux can schedule processes on this slave.
         */
        idle = fork_idle(cpu);
        if (IS_ERR(idle))
                panic("Fork failed for CPU %d", cpu);

        prom_boot_secondary(cpu, idle);

        /*
         * Trust is futile.  We should really have timeouts ...
         */
        while (!cpu_isset(cpu, cpu_callin_map))
                udelay(100);

        cpu_set(cpu, cpu_online_map);

        return 0;
}

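/*
 * Sketch of the timeout that the comment above asks for (added
 * illustration, not part of the original code): a bounded replacement
 * for the busy-wait loop in __cpu_up().  The one-second budget is an
 * arbitrary assumption; a real patch would pick a platform-appropriate
 * limit.
 */
#if 0
        int timeout = 10000;                    /* 10000 * 100us == 1s */

        while (!cpu_isset(cpu, cpu_callin_map)) {
                if (--timeout == 0)
                        panic("CPU %d failed to start", cpu);
                udelay(100);
        }
#endif
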
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing.  For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus.  For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
        } else {
                int i;
                for (i = 0; i < num_online_cpus(); i++)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_mm(mm);

        preempt_enable();
}

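/*
 * Background note (added commentary): cpu_context(i, mm) caches the
 * ASID that <mm> last used on CPU i.  Zeroing it guarantees an ASID
 * version mismatch in switch_mm(), which then calls
 * get_new_mmu_context() to allocate a fresh ASID, so the stale TLB
 * entries become unreachable without sending an IPI.
 */
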
struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = start;
                fd.addr2 = end;
                smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
        } else {
                int i;
                for (i = 0; i < num_online_cpus(); i++)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_range(vma, start, end);
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd;

        fd.addr1 = start;
        fd.addr2 = end;
        on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
            (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = page;
                smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
        } else {
                int i;
                for (i = 0; i < num_online_cpus(); i++)
                        if (smp_processor_id() != i)
                                cpu_context(i, vma->vm_mm) = 0;
        }
        local_flush_tlb_page(vma, page);
        preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
        unsigned long vaddr = (unsigned long)info;

        local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
        smp_call_function(flush_tlb_one_ipi, (void *)vaddr, 1, 1);
        local_flush_tlb_one(vaddr);
}

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static int __init topology_init(void)
{
        int cpu;
        int ret;

        for_each_present_cpu(cpu) {
                ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu);
                if (ret)
                        printk(KERN_WARNING "topology_init: register_cpu %d "
                               "failed (%d)\n", cpu, ret);
        }

        return 0;
}
subsys_initcall(topology_init);

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);