/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * structures for inter-processor calls
 * - A collection of single bit ipi messages.
 */
struct ipi_data {
        spinlock_t lock;
        unsigned long ipi_count;
        unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
        .lock   = SPIN_LOCK_UNLOCKED,
};

enum ipi_msg_type {
        IPI_TIMER,
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
};
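
/*
 * Transient 1:1 (identity) mappings let a secondary CPU enable its MMU
 * while still fetching instructions from physical addresses.  On ARM,
 * each Linux pgd entry spans 2MB, i.e. a pair of 1MB hardware sections,
 * which is why pmd[0] and pmd[1] are written as a pair below.
 */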
static inline void identity_mapping_add(pgd_t *pgd, unsigned long start,
        unsigned long end)
{
        unsigned long addr, prot;
        pmd_t *pmd;

        prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
        if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
                prot |= PMD_BIT4;

        for (addr = start & PGDIR_MASK; addr < end;) {
                pmd = pmd_offset(pgd + pgd_index(addr), addr);
                pmd[0] = __pmd(addr | prot);
                addr += SECTION_SIZE;
                pmd[1] = __pmd(addr | prot);
                addr += SECTION_SIZE;
                flush_pmd_entry(pmd);
                outer_clean_range(__pa(pmd), __pa(pmd + 1));
        }
}

static inline void identity_mapping_del(pgd_t *pgd, unsigned long start,
        unsigned long end)
{
        unsigned long addr;
        pmd_t *pmd;

        for (addr = start & PGDIR_MASK; addr < end; addr += PGDIR_SIZE) {
                pmd = pmd_offset(pgd + pgd_index(addr), addr);
                pmd[0] = __pmd(0);
                pmd[1] = __pmd(0);
                clean_pmd_entry(pmd);
                outer_clean_range(__pa(pmd), __pa(pmd + 1));
        }
}
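
/*
 * Boot a secondary CPU: give it an idle thread and a transient set of
 * page tables, start it via the platform's boot_secondary(), then wait
 * up to one second (jiffies + HZ) for it to mark itself online.
 */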
int __cpuinit __cpu_up(unsigned int cpu)
{
        struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
        struct task_struct *idle = ci->idle;
        pgd_t *pgd;
        int ret;

        /*
         * Spawn a new process manually, if not already done.
         * Grab a pointer to its task struct so we can mess with it
         */
        if (!idle) {
                idle = fork_idle(cpu);
                if (IS_ERR(idle)) {
                        printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
                        return PTR_ERR(idle);
                }
                ci->idle = idle;
        } else {
                /*
                 * Since this idle thread is being re-used, call
                 * init_idle() to reinitialize the thread structure.
                 */
                init_idle(idle, cpu);
        }

        /*
         * Allocate initial page tables to allow the new CPU to
         * enable the MMU safely.  This essentially means a set
         * of our "standard" page tables, with the addition of
         * a 1:1 mapping for the physical address of the kernel.
         */
        pgd = pgd_alloc(&init_mm);
        if (!pgd)
                return -ENOMEM;

        if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
                identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
#endif
                identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
                identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
        }

        /*
         * We need to tell the secondary core where to find
         * its stack and the page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
        secondary_data.pgdir = virt_to_phys(pgd);
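        /*
         * The incoming CPU starts with its MMU and caches off and reads
         * secondary_data straight from RAM, so the structure must be
         * cleaned out of both the L1 and the outer cache first.
         */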
        __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
        outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

        /*
         * Now bring the CPU into our world.
         */
        ret = boot_secondary(cpu, idle);
        if (ret == 0) {
                unsigned long timeout;

                /*
                 * CPU was successfully started, wait for it
                 * to come online or time out.
                 */
                timeout = jiffies + HZ;
                while (time_before(jiffies, timeout)) {
                        if (cpu_online(cpu))
                                break;

                        udelay(10);
                        barrier();
                }

                if (!cpu_online(cpu))
                        ret = -EIO;
        }

        secondary_data.stack = NULL;
        secondary_data.pgdir = 0;

        if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
                identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end));
#endif
                identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
                identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
        }

        pgd_free(&init_mm, pgd);

        if (ret) {
                printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);

                /*
                 * FIXME: We need to clean up the new idle thread. --rmk
                 */
        }

        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        struct task_struct *p;
        int ret;

        ret = platform_cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Stop the local timer for this CPU.
         */
        local_timer_stop();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         */
        flush_cache_all();
        local_flush_tlb_all();

        read_lock(&tasklist_lock);
        for_each_process(p) {
                if (p->mm)
                        cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
        }
        read_unlock(&tasklist_lock);

        return 0;
}

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
        if (!platform_cpu_kill(cpu))
                printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller.  This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
        unsigned int cpu = smp_processor_id();

        local_irq_disable();
        idle_task_exit();

        /*
         * actual CPU shutdown procedure is at least platform (if not
         * CPU) specific
         */
        platform_cpu_die(cpu);

        /*
         * Do not return to the idle loop - jump back to the secondary
         * cpu initialisation.  There's some initialisation which needs
         * to be repeated to undo the effects of taking the CPU offline.
         */
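        /*
         * (The stack pointer is reset to THREAD_SIZE - 8, the same
         * initial SVC stack top a freshly booted secondary is handed
         * via THREAD_START_SP in __cpu_up().)
         */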
        __asm__("mov    sp, %0\n"
        "       b       secondary_start_kernel"
                :
                : "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
        struct mm_struct *mm = &init_mm;
        unsigned int cpu = smp_processor_id();

        printk("CPU%u: Booted secondary processor\n", cpu);

        /*
         * All kernel threads share the same mm context; grab a
         * reference and switch to it.
         */
        atomic_inc(&mm->mm_count);
        current->active_mm = mm;
        cpumask_set_cpu(cpu, mm_cpumask(mm));
        cpu_switch_mm(mm->pgd, mm);
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();

        cpu_init();
        preempt_disable();

        /*
         * Give the platform a chance to do its own initialisation.
         */
        platform_secondary_init(cpu);

        /*
         * Enable local interrupts.
         */
        notify_cpu_starting(cpu);
        local_irq_enable();
        local_fiq_enable();

        /*
         * Setup the percpu timer for this CPU.
         */
        percpu_timer_setup();

        calibrate_delay();

        smp_store_cpu_info(cpu);

        /*
         * OK, now it's safe to let the boot CPU continue
         */
        set_cpu_online(cpu, true);

        /*
         * OK, it's off to the idle thread for us
         */
        cpu_idle();
}

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
        struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

        cpu_info->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for_each_online_cpu(cpu)
                bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
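
        /*
         * BogoMIPS = loops_per_jiffy * HZ / 500000, so dividing the
         * summed loops_per_jiffy by 500000/HZ yields the integer part
         * and by 5000/HZ (mod 100) the two decimal places.
         */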
        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        per_cpu(cpu_data, cpu).idle = current;
}
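
/*
 * Post one message bit into each target CPU's ipi_data, then kick the
 * platform's cross-call mechanism.  Interrupts are disabled locally so
 * that an incoming IPI cannot interleave with the update on this CPU.
 */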
static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
{
        unsigned long flags;
        unsigned int cpu;

        local_irq_save(flags);

        for_each_cpu(cpu, mask) {
                struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

                spin_lock(&ipi->lock);
                ipi->bits |= 1 << msg;
                spin_unlock(&ipi->lock);
        }

        /*
         * Call the platform specific cross-CPU call function.
         */
        smp_cross_call(mask);

        local_irq_restore(flags);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void show_ipi_list(struct seq_file *p)
{
        unsigned int cpu;

        seq_puts(p, "IPI:");

        for_each_present_cpu(cpu)
                seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);

        seq_putc(p, '\n');
}

void show_local_irqs(struct seq_file *p)
{
        unsigned int cpu;

        seq_printf(p, "LOC: ");

        for_each_present_cpu(cpu)
                seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);

        seq_putc(p, '\n');
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
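
/*
 * An IPI_TIMER tick arrives outside the normal IRQ bookkeeping, so the
 * clockevent handler is bracketed with irq_enter()/irq_exit() by hand.
 */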
static void ipi_timer(void)
{
        struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);

        irq_enter();
        evt->event_handler(evt);
        irq_exit();
}

#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void __exception do_local_timer(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        int cpu = smp_processor_id();

        if (local_timer_ack()) {
                irq_stat[cpu].local_timer_irqs++;
                ipi_timer();
        }

        set_irq_regs(old_regs);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
        send_ipi_message(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast     NULL
#endif

#ifndef CONFIG_LOCAL_TIMERS
static void broadcast_timer_set_mode(enum clock_event_mode mode,
        struct clock_event_device *evt)
{
}

static void local_timer_setup(struct clock_event_device *evt)
{
        evt->name       = "dummy_timer";
        evt->features   = CLOCK_EVT_FEAT_ONESHOT |
                          CLOCK_EVT_FEAT_PERIODIC |
                          CLOCK_EVT_FEAT_DUMMY;
        evt->rating     = 400;
        evt->mult       = 1;
        evt->set_mode   = broadcast_timer_set_mode;

        clockevents_register_device(evt);
}
#endif
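
/*
 * Without per-CPU timer hardware, each CPU registers the dummy
 * clockevent above and receives its ticks from the broadcast
 * mechanism via IPI_TIMER instead of a genuine local timer interrupt.
 */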
void __cpuinit percpu_timer_setup(void)
{
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

        evt->cpumask = cpumask_of(cpu);
        evt->broadcast = smp_timer_broadcast;

        local_timer_setup(evt);
}

static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
        if (system_state == SYSTEM_BOOTING ||
            system_state == SYSTEM_RUNNING) {
                spin_lock(&stop_lock);
                printk(KERN_CRIT "CPU%u: stopping\n", cpu);
                dump_stack();
                spin_unlock(&stop_lock);
        }

        set_cpu_online(cpu, false);

        local_fiq_disable();
        local_irq_disable();

        while (1)
                cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 *
 * For ARM, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 */
asmlinkage void __exception do_IPI(struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
        struct pt_regs *old_regs = set_irq_regs(regs);

        ipi->ipi_count++;

        for (;;) {
                unsigned long msgs;

                spin_lock(&ipi->lock);
                msgs = ipi->bits;
                ipi->bits = 0;
                spin_unlock(&ipi->lock);

                if (!msgs)
                        break;
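
                /*
                 * Dispatch the pending messages one at a time:
                 * msgs & -msgs isolates the lowest set bit, and
                 * ffz(~nextmsg) converts that single bit back into
                 * its message number.
                 */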
                do {
                        unsigned nextmsg;

                        nextmsg = msgs & -msgs;
                        msgs &= ~nextmsg;
                        nextmsg = ffz(~nextmsg);

                        switch (nextmsg) {
                        case IPI_TIMER:
                                ipi_timer();
                                break;

                        case IPI_RESCHEDULE:
                                /*
                                 * nothing more to do - everything is
                                 * done on the interrupt return path
                                 */
                                break;

                        case IPI_CALL_FUNC:
                                generic_smp_call_function_interrupt();
                                break;

                        case IPI_CALL_FUNC_SINGLE:
                                generic_smp_call_function_single_interrupt();
                                break;

                        case IPI_CPU_STOP:
                                ipi_cpu_stop(cpu);
                                break;

                        default:
                                printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
                                       cpu, nextmsg);
                                break;
                        }
                } while (msgs);
        }

        set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
        send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
        cpumask_t mask = cpu_online_map;
        cpu_clear(smp_processor_id(), mask);
        if (!cpus_empty(mask))
                send_ipi_message(&mask, IPI_CPU_STOP);
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}
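
/*
 * Run func on every CPU in mask including, unlike
 * smp_call_function_many(), the calling CPU when it is part of mask.
 * Preemption stays disabled so smp_processor_id() remains valid
 * across the local call.
 */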
static void
on_each_cpu_mask(void (*func)(void *), void *info, int wait,
        const struct cpumask *mask)
{
        preempt_disable();

        smp_call_function_many(mask, func, info, wait);
        if (cpumask_test_cpu(smp_processor_id(), mask))
                func(info);

        preempt_enable();
}

/**********************************************************************/

/*
 * TLB operations
 */
struct tlb_args {
        struct vm_area_struct *ta_vma;
        unsigned long ta_start;
        unsigned long ta_end;
};

static inline void ipi_flush_tlb_all(void *ignored)
{
        local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
        struct mm_struct *mm = (struct mm_struct *)arg;

        local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}
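
/*
 * The global flush primitives below only need cross-calls on cores
 * whose TLB maintenance operations are not broadcast in hardware;
 * tlb_ops_need_broadcast() makes that distinction at run time.
 */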
void flush_tlb_all(void)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu(ipi_flush_tlb_all, NULL, 1);
        else
                local_flush_tlb_all();
}

void flush_tlb_mm(struct mm_struct *mm)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
        else
                local_flush_tlb_mm(mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_vma = vma;
                ta.ta_start = uaddr;
                on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
        } else
                local_flush_tlb_page(vma, uaddr);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_start = kaddr;
                on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
        } else
                local_flush_tlb_kernel_page(kaddr);
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_vma = vma;
                ta.ta_start = start;
                ta.ta_end = end;
                on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
        } else
                local_flush_tlb_range(vma, start, end);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_start = start;
                ta.ta_end = end;
                on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
        } else
                local_flush_tlb_kernel_range(start, end);
}