voyager_smp.c
/* -*- mode: c; c-basic-offset: 8 -*- */

/* Copyright (C) 1999,2001
 *
 * Author: J.E.J.Bottomley@HansenPartnership.com
 *
 * This file provides all the same external entries as smp.c but uses
 * the voyager hal to provide the functionality
 */
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/completion.h>
#include <asm/desc.h>
#include <asm/voyager.h>
#include <asm/vic.h>
#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/arch_hooks.h>
#include <asm/trampoline.h>

/* TLB state -- visible externally, indexed physically */
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 };

/* CPU IRQ affinity -- set to all ones initially */
static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned =
	{ [0 ... NR_CPUS-1] = ~0UL };

/* per CPU data structure (for /proc/cpuinfo et al), visible externally
 * indexed physically */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* physical ID of the CPU used to boot the system */
unsigned char boot_cpu_id;

/* The memory line addresses for the Quad CPIs */
struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS] __cacheline_aligned;

/* The masks for the Extended VIC processors, filled in by cat_init */
__u32 voyager_extended_vic_processors = 0;

/* Masks for the extended Quad processors which cannot be VIC booted */
__u32 voyager_allowed_boot_processors = 0;

/* The mask for the Quad Processors (both extended and non-extended) */
__u32 voyager_quad_processors = 0;

/* Total count of live CPUs, used in process.c to display
 * the CPU information and in irq.c for the per CPU irq
 * activity count.  Finally exported by i386_ksyms.c */
static int voyager_extended_cpus = 1;

/* Used for the invalidate map that's also checked in the spinlock */
static volatile unsigned long smp_invalidate_needed;

/* Bitmask of CPUs present in the system - exported by i386_syms.c, used
 * by scheduler but indexed physically */
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;

/* The internal functions */
static void send_CPI(__u32 cpuset, __u8 cpi);
static void ack_CPI(__u8 cpi);
static int ack_QIC_CPI(__u8 cpi);
static void ack_special_QIC_CPI(__u8 cpi);
static void ack_VIC_CPI(__u8 cpi);
static void send_CPI_allbutself(__u8 cpi);
static void mask_vic_irq(unsigned int irq);
static void unmask_vic_irq(unsigned int irq);
static unsigned int startup_vic_irq(unsigned int irq);
static void enable_local_vic_irq(unsigned int irq);
static void disable_local_vic_irq(unsigned int irq);
static void before_handle_vic_irq(unsigned int irq);
static void after_handle_vic_irq(unsigned int irq);
static void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask);
static void ack_vic_irq(unsigned int irq);
static void vic_enable_cpi(void);
static void do_boot_cpu(__u8 cpuid);
static void do_quad_bootstrap(void);
static void initialize_secondary(void);

int hard_smp_processor_id(void);
int safe_smp_processor_id(void);

/* Inline functions */
static inline void send_one_QIC_CPI(__u8 cpu, __u8 cpi)
{
	voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi =
	    (smp_processor_id() << 16) + cpi;
}
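
/* For illustration (numbers are made up): with the encoding above, a
 * write from CPU 3 of CPI 2 stores 0x00030002 in the target's CPI
 * memory line -- the sender's id in the high half, the CPI number in
 * the low half. */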

static inline void send_QIC_CPI(__u32 cpuset, __u8 cpi)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpuset & (1 << cpu)) {
#ifdef VOYAGER_DEBUG
			if (!cpu_online(cpu))
				VDEBUG(("CPU%d sending cpi %d to CPU%d not in "
					"cpu_online_map\n",
					hard_smp_processor_id(), cpi, cpu));
#endif
			send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
		}
	}
}

static inline void wrapper_smp_local_timer_interrupt(void)
{
	irq_enter();
	smp_local_timer_interrupt();
	irq_exit();
}

static inline void send_one_CPI(__u8 cpu, __u8 cpi)
{
	if (voyager_quad_processors & (1 << cpu))
		send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
	else
		send_CPI(1 << cpu, cpi);
}

static inline void send_CPI_allbutself(__u8 cpi)
{
	__u8 cpu = smp_processor_id();
	__u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu);
	send_CPI(mask, cpi);
}

static inline int is_cpu_quad(void)
{
	__u8 cpumask = inb(VIC_PROC_WHO_AM_I);
	return ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER);
}

static inline int is_cpu_extended(void)
{
	__u8 cpu = hard_smp_processor_id();
	return (voyager_extended_vic_processors & (1 << cpu));
}

static inline int is_cpu_vic_boot(void)
{
	__u8 cpu = hard_smp_processor_id();
	return (voyager_extended_vic_processors
		& voyager_allowed_boot_processors & (1 << cpu));
}
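
/* Reading the helpers above together: voyager_quad_processors marks
 * CPUs on Quad cards, which take their CPIs through the QIC;
 * voyager_extended_vic_processors marks CPUs that participate in VIC
 * interrupt handling; and a CPU is VIC-bootable only when its bit is
 * set in both the extended and the allowed-boot masks -- quad CPUs
 * outside that intersection must be QIC-booted instead (see
 * do_boot_cpu()). */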

static inline void ack_CPI(__u8 cpi)
{
	switch (cpi) {
	case VIC_CPU_BOOT_CPI:
		if (is_cpu_quad() && !is_cpu_vic_boot())
			ack_QIC_CPI(cpi);
		else
			ack_VIC_CPI(cpi);
		break;
	case VIC_SYS_INT:
	case VIC_CMN_INT:
		/* These are slightly strange.  Even on the Quad card,
		 * they are vectored as VIC CPIs */
		if (is_cpu_quad())
			ack_special_QIC_CPI(cpi);
		else
			ack_VIC_CPI(cpi);
		break;
	default:
		printk("VOYAGER ERROR: CPI%d is in common CPI code\n", cpi);
		break;
	}
}

/* local variables */

/* The VIC IRQ descriptors -- these look almost identical to the
 * 8259 IRQs except that masks and things must be kept per processor
 */
static struct irq_chip vic_chip = {
	.name = "VIC",
	.startup = startup_vic_irq,
	.mask = mask_vic_irq,
	.unmask = unmask_vic_irq,
	.set_affinity = set_vic_irq_affinity,
};

/* used to count up as CPUs are brought on line (starts at 0) */
static int cpucount = 0;

/* The per cpu profile stuff - used in smp_local_timer_interrupt */
static DEFINE_PER_CPU(int, prof_multiplier) = 1;
static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
static DEFINE_PER_CPU(int, prof_counter) = 1;

/* the map used to check if a CPU has booted */
static __u32 cpu_booted_map;

/* the synchronize flag used to hold all secondary CPUs spinning in
 * a tight loop until the boot sequence is ready for them */
static cpumask_t smp_commenced_mask = CPU_MASK_NONE;

/* This is for the new dynamic CPU boot code */

/* The per processor IRQ masks (these are usually kept in sync) */
static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;

/* the list of IRQs to be enabled by the VIC_ENABLE_IRQ_CPI */
static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 };

/* Lock for enable/disable of VIC interrupts */
static __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock);

/* The boot processor is correctly set up in PC mode when it
 * comes up, but the secondaries need their master/slave 8259
 * pairs initializing correctly */

/* Interrupt counters (per cpu) and total - used to try to
 * even up the interrupt handling routines */
static long vic_intr_total = 0;
static long vic_intr_count[NR_CPUS] __cacheline_aligned = { 0 };
static unsigned long vic_tick[NR_CPUS] __cacheline_aligned = { 0 };

/* Since we can only use CPI0, we fake all the other CPIs */
static unsigned long vic_cpi_mailbox[NR_CPUS] __cacheline_aligned;

/* debugging routine to read the isr of the cpu's pic */
static inline __u16 vic_read_isr(void)
{
	__u16 isr;

	outb(0x0b, 0xa0);
	isr = inb(0xa0) << 8;
	outb(0x0b, 0x20);
	isr |= inb(0x20);

	return isr;
}
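
/* (0x0b is the 8259A OCW3 command "read ISR"; 0xa0 and 0x20 are the
 * slave and master PIC command ports, so the routine above assembles a
 * 16 bit in-service mask with the slave's byte on top.) */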

static __init void qic_setup(void)
{
	if (!is_cpu_quad()) {
		/* not a quad, no setup */
		return;
	}
	outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
	outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);

	if (is_cpu_extended()) {
		/* the QIC duplicate of the VIC base register */
		outb(VIC_DEFAULT_CPI_BASE, QIC_VIC_CPI_BASE_REGISTER);
		outb(QIC_DEFAULT_CPI_BASE, QIC_CPI_BASE_REGISTER);

		/* FIXME: should set up the QIC timer and memory parity
		 * error vectors here */
	}
}

static __init void vic_setup_pic(void)
{
	outb(1, VIC_REDIRECT_REGISTER_1);
	/* clear the claim registers for dynamic routing */
	outb(0, VIC_CLAIM_REGISTER_0);
	outb(0, VIC_CLAIM_REGISTER_1);

	outb(0, VIC_PRIORITY_REGISTER);
	/* Set the Primary and Secondary Microchannel vector
	 * bases to be the same as the ordinary interrupts
	 *
	 * FIXME: This would be more efficient using separate
	 * vectors. */
	outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
	outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);

	/* Now initialise the master PIC belonging to this CPU by
	 * sending the four ICWs */

	/* ICW1: level triggered, ICW4 needed */
	outb(0x19, 0x20);

	/* ICW2: vector base */
	outb(FIRST_EXTERNAL_VECTOR, 0x21);

	/* ICW3: slave at line 2 */
	outb(0x04, 0x21);

	/* ICW4: 8086 mode */
	outb(0x01, 0x21);

	/* now the same for the slave PIC */

	/* ICW1: level trigger, ICW4 needed */
	outb(0x19, 0xA0);

	/* ICW2: slave vector base */
	outb(FIRST_EXTERNAL_VECTOR + 8, 0xA1);

	/* ICW3: slave ID */
	outb(0x02, 0xA1);

	/* ICW4: 8086 mode */
	outb(0x01, 0xA1);
}

static void do_quad_bootstrap(void)
{
	if (is_cpu_quad() && is_cpu_vic_boot()) {
		int i;
		unsigned long flags;
		__u8 cpuid = hard_smp_processor_id();

		local_irq_save(flags);

		for (i = 0; i < 4; i++) {
			/* FIXME: this would be >>3 &0x7 on the 32 way */
			if (((cpuid >> 2) & 0x03) == i)
				/* don't lower our own mask! */
				continue;

			/* masquerade as local Quad CPU */
			outb(QIC_CPUID_ENABLE | i, QIC_PROCESSOR_ID);
			/* enable the startup CPI */
			outb(QIC_BOOT_CPI_MASK, QIC_MASK_REGISTER1);
			/* restore cpu id */
			outb(0, QIC_PROCESSOR_ID);
		}
		local_irq_restore(flags);
	}
}

void prefill_possible_map(void)
{
	/* This is empty on voyager because we need a much
	 * earlier detection which is done in find_smp_config */
}

/* Set up all the basic stuff: read the SMP config and make all the
 * SMP information reflect only the boot cpu.  All others will be
 * brought on-line later. */
void __init find_smp_config(void)
{
	int i;

	boot_cpu_id = hard_smp_processor_id();

	printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id);

	/* initialize the CPU structures (moved from smp_boot_cpus) */
	for (i = 0; i < nr_cpu_ids; i++)
		cpu_irq_affinity[i] = ~0;
	cpu_online_map = cpumask_of_cpu(boot_cpu_id);

	/* The boot CPU must be extended */
	voyager_extended_vic_processors = 1 << boot_cpu_id;
	/* initially, all of the first 8 CPUs can boot */
	voyager_allowed_boot_processors = 0xff;
	/* set up everything for just this CPU, we can alter
	 * this as we start the other CPUs later */
	/* now get the CPU disposition from the extended CMOS */
	cpus_addr(phys_cpu_present_map)[0] =
	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK);
	cpus_addr(phys_cpu_present_map)[0] |=
	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
	cpus_addr(phys_cpu_present_map)[0] |=
	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
				       2) << 16;
	cpus_addr(phys_cpu_present_map)[0] |=
	    voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
				       3) << 24;
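	/* (The four extended-CMOS bytes above assemble little-endian
	 * into one 32 bit present mask, one bit per physical CPU slot.) */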
	init_cpu_possible(&phys_cpu_present_map);

	printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n",
	       cpus_addr(phys_cpu_present_map)[0]);

	/* Here we set up the VIC to enable SMP */
	/* enable the CPIs by writing the base vector to their register */
	outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER);
	outb(1, VIC_REDIRECT_REGISTER_1);
	/* set the claim registers for static routing --- Boot CPU gets
	 * all interrupts until all other CPUs have started */
	outb(0xff, VIC_CLAIM_REGISTER_0);
	outb(0xff, VIC_CLAIM_REGISTER_1);
	/* Set the Primary and Secondary Microchannel vector
	 * bases to be the same as the ordinary interrupts
	 *
	 * FIXME: This would be more efficient using separate
	 * vectors. */
	outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
	outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);

	/* Finally tell the firmware that we're driving */
	outb(inb(VOYAGER_SUS_IN_CONTROL_PORT) | VOYAGER_IN_CONTROL_FLAG,
	     VOYAGER_SUS_IN_CONTROL_PORT);

	current_thread_info()->cpu = boot_cpu_id;
	percpu_write(cpu_number, boot_cpu_id);
}

/*
 * The bootstrap kernel entry code has set these up.  Save them
 * for a given CPU, id is physical */
void __init smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;

	identify_secondary_cpu(c);
}

/* Routine initially called when a non-boot CPU is brought online */
static void __init start_secondary(void *unused)
{
	__u8 cpuid = hard_smp_processor_id();

	cpu_init();

	/* OK, we're in the routine */
	ack_CPI(VIC_CPU_BOOT_CPI);

	/* setup the 8259 master slave pair belonging to this CPU ---
	 * we won't actually receive any until the boot CPU
	 * relinquishes its static routing mask */
	vic_setup_pic();

	qic_setup();

	if (is_cpu_quad() && !is_cpu_vic_boot()) {
		/* clear the boot CPI */
		__u8 dummy;

		dummy =
		    voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi;
		printk("read dummy %d\n", dummy);
	}

	/* lower the mask to receive CPIs */
	vic_enable_cpi();

	VDEBUG(("VOYAGER SMP: CPU%d, stack at about %p\n", cpuid, &cpuid));

	notify_cpu_starting(cpuid);

	/* enable interrupts */
	local_irq_enable();

	/* get our bogomips */
	calibrate_delay();

	/* save our processor parameters */
	smp_store_cpu_info(cpuid);

	/* if we're a quad, we may need to bootstrap other CPUs */
	do_quad_bootstrap();

	/* FIXME: this is rather a poor hack to prevent the CPU
	 * activating softirqs while it's supposed to be waiting for
	 * permission to proceed.  Without this, the new per CPU stuff
	 * in the softirqs will fail */
	local_irq_disable();
	cpu_set(cpuid, cpu_callin_map);

	/* signal that we're done */
	cpu_booted_map = 1;

	while (!cpu_isset(cpuid, smp_commenced_mask))
		rep_nop();
	local_irq_enable();

	local_flush_tlb();

	cpu_set(cpuid, cpu_online_map);
	wmb();
	cpu_idle();
}

/* Routine to kick start the given CPU and wait for it to report ready
 * (or timeout in startup).  When this routine returns, the requested
 * CPU is either fully running and configured or known to be dead.
 *
 * We call this routine sequentially 1 CPU at a time, so no need for
 * locking */
static void __init do_boot_cpu(__u8 cpu)
{
	struct task_struct *idle;
	int timeout;
	unsigned long flags;
	int quad_boot = (1 << cpu) & voyager_quad_processors
	    & ~(voyager_extended_vic_processors
		& voyager_allowed_boot_processors);

	/* This is the format of the CPI IDT gate (in real mode) which
	 * we're hijacking to boot the CPU */
	union IDTFormat {
		struct seg {
			__u16 Offset;
			__u16 Segment;
		} idt;
		__u32 val;
	} hijack_source;

	__u32 *hijack_vector;
	__u32 start_phys_address = setup_trampoline();

	/* There's a clever trick to this: The linux trampoline is
	 * compiled to begin at absolute location zero, so make the
	 * address zero but have the data segment selector compensate
	 * for the actual address */
	hijack_source.idt.Offset = start_phys_address & 0x000F;
	hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF;
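
	/* For example (illustrative numbers): a trampoline copied to
	 * physical 0x9F000 gives Segment = 0x9F00, Offset = 0x0000, so
	 * the hijacked gate starts the CPU at 9F00:0000 -- the same
	 * physical address, but with IP zero as the trampoline expects. */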

	cpucount++;
	alternatives_smp_switch(1);

	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic("failed fork for CPU%d", cpu);
	idle->thread.ip = (unsigned long)start_secondary;
	/* init_tasks (in sched.c) is indexed logically */
	stack_start.sp = (void *)idle->thread.sp;

	per_cpu(current_task, cpu) = idle;
	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
	irq_ctx_init(cpu);

	/* Note: Don't modify initial ss override */
	VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu,
		(unsigned long)hijack_source.val, hijack_source.idt.Segment,
		hijack_source.idt.Offset, stack_start.sp));

	/* init lowmem identity mapping */
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			min_t(unsigned long, KERNEL_PGD_PTRS,
			      KERNEL_PGD_BOUNDARY));
	flush_tlb_all();

	if (quad_boot) {
		printk("CPU %d: non extended Quad boot\n", cpu);
		hijack_vector =
		    (__u32 *)
		    phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE) * 4);
		*hijack_vector = hijack_source.val;
	} else {
		printk("CPU%d: extended VIC boot\n", cpu);
		hijack_vector =
		    (__u32 *)
		    phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE) * 4);
		*hijack_vector = hijack_source.val;
		/* VIC errata, may also receive interrupt at this address */
		hijack_vector =
		    (__u32 *)
		    phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI +
				  VIC_DEFAULT_CPI_BASE) * 4);
		*hijack_vector = hijack_source.val;
	}
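
	/* (Each real-mode IVT entry is 4 bytes at physical address
	 * vector * 4, which is why the boot CPI's slot is located with
	 * phys_to_virt((cpi + base) * 4) above.) */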

	/* All non-boot CPUs start with interrupts fully masked.  Need
	 * to lower the mask of the CPI we're about to send.  We do
	 * this in the VIC by masquerading as the processor we're
	 * about to boot and lowering its interrupt mask */
	local_irq_save(flags);
	if (quad_boot) {
		send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI);
	} else {
		outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
		/* here we're altering registers belonging to `cpu' */
		outb(VIC_BOOT_INTERRUPT_MASK, 0x21);
		/* now go back to our original identity */
		outb(boot_cpu_id, VIC_PROCESSOR_ID);

		/* and boot the CPU */
		send_CPI((1 << cpu), VIC_CPU_BOOT_CPI);
	}
	cpu_booted_map = 0;
	local_irq_restore(flags);

	/* now wait for it to become ready (or timeout) */
	for (timeout = 0; timeout < 50000; timeout++) {
		if (cpu_booted_map)
			break;
		udelay(100);
	}
	/* reset the page table */
	zap_low_mappings();

	if (cpu_booted_map) {
		VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n",
			cpu, smp_processor_id()));

		printk("CPU%d: ", cpu);
		print_cpu_info(&cpu_data(cpu));
		wmb();
		cpu_set(cpu, cpu_callout_map);
		cpu_set(cpu, cpu_present_map);
	} else {
		printk("CPU%d FAILED TO BOOT: ", cpu);
		if (*
		    ((volatile unsigned char *)phys_to_virt(start_phys_address))
		    == 0xA5)
			printk("Stuck.\n");
		else
			printk("Not responding.\n");

		cpucount--;
	}
}

void __init smp_boot_cpus(void)
{
	int i;

	/* CAT BUS initialisation must be done after the memory */
	/* FIXME: The L4 has a catbus too, it just needs to be
	 * accessed in a totally different way */
	if (voyager_level == 5) {
		voyager_cat_init();

		/* now that the cat has probed the Voyager System Bus,
		 * sanity check the cpu map */
		if (((voyager_quad_processors | voyager_extended_vic_processors)
		     & cpus_addr(phys_cpu_present_map)[0]) !=
		    cpus_addr(phys_cpu_present_map)[0]) {
			/* should panic */
			printk("\n\n***WARNING*** "
			       "Sanity check of CPU present map FAILED\n");
		}
	} else if (voyager_level == 4)
		voyager_extended_vic_processors =
		    cpus_addr(phys_cpu_present_map)[0];

	/* this sets up the idle task to run on the current cpu */
	voyager_extended_cpus = 1;
	/* Remove the global_irq_holder setting, it triggers a BUG() on
	 * schedule at the moment */
	//global_irq_holder = boot_cpu_id;

	/* FIXME: Need to do something about this but currently only works
	 * on CPUs with a tsc which none of mine have.
	 * smp_tune_scheduling();
	 */
	smp_store_cpu_info(boot_cpu_id);
	/* setup the jump vector */
	initial_code = (unsigned long)initialize_secondary;
	printk("CPU%d: ", boot_cpu_id);
	print_cpu_info(&cpu_data(boot_cpu_id));

	if (is_cpu_quad()) {
		/* booting on a Quad CPU */
		printk("VOYAGER SMP: Boot CPU is Quad\n");
		qic_setup();
		do_quad_bootstrap();
	}

	/* enable our own CPIs */
	vic_enable_cpi();

	cpu_set(boot_cpu_id, cpu_online_map);
	cpu_set(boot_cpu_id, cpu_callout_map);

	/* loop over all the extended VIC CPUs and boot them.  The
	 * Quad CPUs must be bootstrapped by their extended VIC cpu */
	for (i = 0; i < nr_cpu_ids; i++) {
		if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
			continue;
		do_boot_cpu(i);
		/* This udelay seems to be needed for the Quad boots --
		 * don't remove unless you know what you're doing */
		udelay(1000);
	}
	/* we could compute the total bogomips here, but why bother?
	 * Code added from smpboot.c */
	{
		unsigned long bogosum = 0;

		for_each_online_cpu(i)
			bogosum += cpu_data(i).loops_per_jiffy;
		printk(KERN_INFO "Total of %d processors activated "
		       "(%lu.%02lu BogoMIPS).\n",
		       cpucount + 1, bogosum / (500000 / HZ),
		       (bogosum / (5000 / HZ)) % 100);
	}
	voyager_extended_cpus = hweight32(voyager_extended_vic_processors);
	printk("VOYAGER: Extended (interrupt handling CPUs): "
	       "%d, non-extended: %d\n", voyager_extended_cpus,
	       num_booting_cpus() - voyager_extended_cpus);
	/* that's it, switch to symmetric mode */
	outb(0, VIC_PRIORITY_REGISTER);
	outb(0, VIC_CLAIM_REGISTER_0);
	outb(0, VIC_CLAIM_REGISTER_1);

	VDEBUG(("VOYAGER SMP: Booted with %d CPUs\n", num_booting_cpus()));
}

/* Reload the secondary CPUs task structure (this function does not
 * return) */
static void __init initialize_secondary(void)
{
#if 0
	// AC kernels only
	set_current(hard_get_current());
#endif

	/*
	 * We don't actually need to load the full TSS,
	 * basically just the stack pointer and the eip.
	 */
	asm volatile ("movl %0,%%esp\n\t"
		      "jmp *%1"::"r" (current->thread.sp),
		      "r"(current->thread.ip));
}
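
/* (The asm above just loads the idle task's saved stack pointer and
 * jumps to its saved ip -- which do_boot_cpu() pointed at
 * start_secondary() -- so the secondary enters the common startup
 * path on its own stack.) */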

/* handle a Voyager SYS_INT -- If we don't, the base board will
 * panic the system.
 *
 * System interrupts occur because some problem was detected on the
 * various busses.  To find out what you have to probe all the
 * hardware via the CAT bus.  FIXME: At the moment we do nothing. */
void smp_vic_sys_interrupt(struct pt_regs *regs)
{
	ack_CPI(VIC_SYS_INT);
	printk("Voyager SYSTEM INTERRUPT\n");
}

/* Handle a voyager CMN_INT; These interrupts occur either because of
 * a system status change or because a single bit memory error
 * occurred.  FIXME: At the moment, ignore all this. */
void smp_vic_cmn_interrupt(struct pt_regs *regs)
{
	static __u8 in_cmn_int = 0;
	static DEFINE_SPINLOCK(cmn_int_lock);

	/* common ints are broadcast, so make sure we only do this once */
	_raw_spin_lock(&cmn_int_lock);
	if (in_cmn_int)
		goto unlock_end;

	in_cmn_int++;
	_raw_spin_unlock(&cmn_int_lock);

	VDEBUG(("Voyager COMMON INTERRUPT\n"));

	if (voyager_level == 5)
		voyager_cat_do_common_interrupt();

	_raw_spin_lock(&cmn_int_lock);
	in_cmn_int = 0;
      unlock_end:
	_raw_spin_unlock(&cmn_int_lock);
	ack_CPI(VIC_CMN_INT);
}

/*
 * Reschedule call back.  Nothing to do, all the work is done
 * automatically when we return from the interrupt.  */
static void smp_reschedule_interrupt(void)
{
	/* do nothing */
}

static struct mm_struct *flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 *
 * We need to reload %cr3 since the page tables may be going
 * away from under us..
 */
static inline void voyager_leave_mm(unsigned long cpu)
{
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}

/*
 * Invalidate call-back
 */
static void smp_invalidate_interrupt(void)
{
	__u8 cpu = smp_processor_id();

	if (!test_bit(cpu, &smp_invalidate_needed))
		return;
	/* This will flood messages.  Don't uncomment unless you see
	 * problems with cross cpu invalidation
	VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n",
		smp_processor_id()));
	 */

	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
			if (flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			voyager_leave_mm(cpu);
	}
	smp_mb__before_clear_bit();
	clear_bit(cpu, &smp_invalidate_needed);
	smp_mb__after_clear_bit();
}

/* All the new flush operations for 2.4 */

/* This routine is called with a physical cpu mask */
static void
voyager_flush_tlb_others(unsigned long cpumask, struct mm_struct *mm,
			 unsigned long va)
{
	int stuck = 50000;

	if (!cpumask)
		BUG();
	if ((cpumask & cpus_addr(cpu_online_map)[0]) != cpumask)
		BUG();
	if (cpumask & (1 << smp_processor_id()))
		BUG();
	if (!mm)
		BUG();

	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
	atomic_set_mask(cpumask, &smp_invalidate_needed);
	/*
	 * We have to send the CPI only to
	 * CPUs affected.
	 */
	send_CPI(cpumask, VIC_INVALIDATE_CPI);

	while (smp_invalidate_needed) {
		mb();
		if (--stuck == 0) {
			printk("***WARNING*** Stuck doing invalidate CPI "
			       "(CPU%d)\n", smp_processor_id());
			break;
		}
	}

	/* Uncomment only to debug invalidation problems
	VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu));
	 */

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	unsigned long cpu_mask;

	preempt_disable();

	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
	local_flush_tlb();
	if (cpu_mask)
		voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

	preempt_enable();
}

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long cpu_mask;

	preempt_disable();

	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			voyager_leave_mm(smp_processor_id());
	}
	if (cpu_mask)
		voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long cpu_mask;

	preempt_disable();

	cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			voyager_leave_mm(smp_processor_id());
	}

	if (cpu_mask)
		voyager_flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

/* enable the requested IRQs */
static void smp_enable_irq_interrupt(void)
{
	__u8 irq;
	__u8 cpu = get_cpu();

	VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu,
		vic_irq_enable_mask[cpu]));

	spin_lock(&vic_irq_lock);
	for (irq = 0; irq < 16; irq++) {
		if (vic_irq_enable_mask[cpu] & (1 << irq))
			enable_local_vic_irq(irq);
	}
	vic_irq_enable_mask[cpu] = 0;
	spin_unlock(&vic_irq_lock);

	put_cpu_no_resched();
}

/*
 * CPU halt call-back
 */
static void smp_stop_cpu_function(void *dummy)
{
	VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id()));
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();
	for (;;)
		halt();
}

/* execute a thread on a new CPU.  The function to be called must be
 * previously set up.  This is used to schedule a function for
 * execution on all CPUs - set up the function then broadcast a
 * function_interrupt CPI to come here on each CPU */
static void smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	__get_cpu_var(irq_stat).irq_call_count++;
	irq_exit();
}

static void smp_call_function_single_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	__get_cpu_var(irq_stat).irq_call_count++;
	irq_exit();
}

/* Sorry about the name.  In an APIC based system, the APICs
 * themselves are programmed to send a timer interrupt.  This is used
 * by linux to reschedule the processor.  Voyager doesn't have this,
 * so we use the system clock to interrupt one processor, which in
 * turn, broadcasts a timer CPI to all the others --- we receive that
 * CPI here.  We don't actually use this for counting, so losing
 * ticks doesn't matter
 *
 * FIXME: For those CPUs which actually have a local APIC, we could
 * try to use it to trigger this interrupt instead of having to
 * broadcast the timer tick.  Unfortunately, all my pentium DYADs have
 * no local APIC, so I can't do this
 *
 * This function is currently a placeholder and is unused in the code */
void smp_apic_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	wrapper_smp_local_timer_interrupt();
	set_irq_regs(old_regs);
}

/* All of the QUAD interrupt GATES */
void smp_qic_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	ack_QIC_CPI(QIC_TIMER_CPI);
	wrapper_smp_local_timer_interrupt();
	set_irq_regs(old_regs);
}

void smp_qic_invalidate_interrupt(struct pt_regs *regs)
{
	ack_QIC_CPI(QIC_INVALIDATE_CPI);
	smp_invalidate_interrupt();
}

void smp_qic_reschedule_interrupt(struct pt_regs *regs)
{
	ack_QIC_CPI(QIC_RESCHEDULE_CPI);
	smp_reschedule_interrupt();
}

void smp_qic_enable_irq_interrupt(struct pt_regs *regs)
{
	ack_QIC_CPI(QIC_ENABLE_IRQ_CPI);
	smp_enable_irq_interrupt();
}

void smp_qic_call_function_interrupt(struct pt_regs *regs)
{
	ack_QIC_CPI(QIC_CALL_FUNCTION_CPI);
	smp_call_function_interrupt();
}

void smp_qic_call_function_single_interrupt(struct pt_regs *regs)
{
	ack_QIC_CPI(QIC_CALL_FUNCTION_SINGLE_CPI);
	smp_call_function_single_interrupt();
}

void smp_vic_cpi_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	__u8 cpu = smp_processor_id();

	if (is_cpu_quad())
		ack_QIC_CPI(VIC_CPI_LEVEL0);
	else
		ack_VIC_CPI(VIC_CPI_LEVEL0);
	if (test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu]))
		wrapper_smp_local_timer_interrupt();
	if (test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu]))
		smp_invalidate_interrupt();
	if (test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]))
		smp_reschedule_interrupt();
	if (test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu]))
		smp_enable_irq_interrupt();
	if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
		smp_call_function_interrupt();
	if (test_and_clear_bit(VIC_CALL_FUNCTION_SINGLE_CPI,
			       &vic_cpi_mailbox[cpu]))
		smp_call_function_single_interrupt();
	set_irq_regs(old_regs);
}

static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
		voyager_leave_mm(cpu);
}

/* flush the TLB of every active CPU in the system */
void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, 0, 1);
}

/* send a reschedule CPI to one CPU by physical CPU number */
static void voyager_smp_send_reschedule(int cpu)
{
	send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
}

int hard_smp_processor_id(void)
{
	__u8 i;
	__u8 cpumask = inb(VIC_PROC_WHO_AM_I);
	if ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER)
		return cpumask & 0x1F;

	for (i = 0; i < 8; i++) {
		if (cpumask & (1 << i))
			return i;
	}
	printk("** WARNING ** Illegal cpuid returned by VIC: %d", cpumask);
	return 0;
}

int safe_smp_processor_id(void)
{
	return hard_smp_processor_id();
}

/* broadcast a halt to all other CPUs */
static void voyager_smp_send_stop(void)
{
	smp_call_function(smp_stop_cpu_function, NULL, 1);
}

/* this function is triggered in time.c when a clock tick fires
 * we need to re-broadcast the tick to all CPUs */
void smp_vic_timer_interrupt(void)
{
	send_CPI_allbutself(VIC_TIMER_CPI);
	smp_local_timer_interrupt();
}

/* local (per CPU) timer interrupt.  It does both profiling and
 * process statistics/rescheduling.
 *
 * We do profiling in every local tick, statistics/rescheduling
 * happen only every 'profiling multiplier' ticks.  The default
 * multiplier is 1 and it can be changed by writing the new multiplier
 * value into /proc/profile.
 */
void smp_local_timer_interrupt(void)
{
	int cpu = smp_processor_id();
	long weight;

	profile_tick(CPU_PROFILING);
	if (--per_cpu(prof_counter, cpu) <= 0) {
		/*
		 * The multiplier may have changed since the last time we got
		 * to this point as a result of the user writing to
		 * /proc/profile.  In this case we need to adjust the APIC
		 * timer accordingly.
		 *
		 * Interrupts are already masked off at this point.
		 */
		per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
		if (per_cpu(prof_counter, cpu) !=
		    per_cpu(prof_old_multiplier, cpu)) {
			/* FIXME: need to update the vic timer tick here */
			per_cpu(prof_old_multiplier, cpu) =
			    per_cpu(prof_counter, cpu);
		}

		update_process_times(user_mode_vm(get_irq_regs()));
	}

	if (((1 << cpu) & voyager_extended_vic_processors) == 0)
		/* only extended VIC processors participate in
		 * interrupt distribution */
		return;

	/*
	 * We take the 'long' return path, and there every subsystem
	 * grabs the appropriate locks (kernel lock/ irq lock).
	 *
	 * we might want to decouple profiling from the 'long path',
	 * and do the profiling totally in assembly.
	 *
	 * Currently this isn't too much of an issue (performance wise),
	 * we can take more than 100K local irqs per second on a 100 MHz P5.
	 */

	if ((++vic_tick[cpu] & 0x7) != 0)
		return;
	/* get here every 16 ticks (about every 1/6 of a second) */

	/* Change our priority to give someone else a chance at getting
	 * the IRQ.  The algorithm goes like this:
	 *
	 * In the VIC, the dynamically routed interrupt is always
	 * handled by the lowest priority eligible (i.e. receiving
	 * interrupts) CPU.  If >1 eligible CPUs are equal lowest, the
	 * lowest processor number gets it.
	 *
	 * The priority of a CPU is controlled by a special per-CPU
	 * VIC priority register which is 3 bits wide, 0 being lowest
	 * and 7 highest priority.
	 *
	 * Therefore we subtract the average number of interrupts from
	 * the number we've fielded.  If this number is negative, we
	 * lower the activity count and if it is positive, we raise
	 * it.
	 *
	 * I'm afraid this still leads to odd looking interrupt counts:
	 * the totals are all roughly equal, but the individual ones
	 * look rather skewed.
	 *
	 * FIXME: This algorithm is total crap when mixed with SMP
	 * affinity code since we now try to even up the interrupt
	 * counts when an affinity binding is keeping them on a
	 * particular CPU */
	weight = (vic_intr_count[cpu] * voyager_extended_cpus
		  - vic_intr_total) >> 4;
	weight += 4;
	if (weight > 7)
		weight = 7;
	if (weight < 0)
		weight = 0;

	outb((__u8) weight, VIC_PRIORITY_REGISTER);
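
	/* Worked example (illustrative numbers): with 2 extended CPUs
	 * and vic_intr_total = 256, a CPU that fielded only 96 of them
	 * gets (96 * 2 - 256) >> 4 = -4, i.e. weight 0 (lowest
	 * priority, so it attracts interrupts), while one that fielded
	 * 160 gets (160 * 2 - 256) >> 4 = 4, clamped to weight 7. */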

#ifdef VOYAGER_DEBUG
	if ((vic_tick[cpu] & 0xFFF) == 0) {
		/* print this message roughly every 25 secs */
		printk("VOYAGER SMP: vic_tick[%d] = %lu, weight = %ld\n",
		       cpu, vic_tick[cpu], weight);
	}
#endif
}

/* setup the profiling timer */
int setup_profiling_timer(unsigned int multiplier)
{
	int i;

	if (!multiplier)
		return -EINVAL;

	/*
	 * Set the new multiplier for each CPU.  CPUs don't start using the
	 * new values until the next timer interrupt in which they do process
	 * accounting.
	 */
	for (i = 0; i < nr_cpu_ids; ++i)
		per_cpu(prof_multiplier, i) = multiplier;

	return 0;
}

/* This is a bit of a mess, but forced on us by the genirq changes
 * there's no genirq handler that really does what voyager wants
 * so hack it up with the simple IRQ handler */
static void handle_vic_irq(unsigned int irq, struct irq_desc *desc)
{
	before_handle_vic_irq(irq);
	handle_simple_irq(irq, desc);
	after_handle_vic_irq(irq);
}

/* The CPIs are handled in the per cpu 8259s, so they must be
 * enabled to be received: FIX: enabling the CPIs in the early
 * boot sequence interferes with bug checking; enable them later
 * on in smp_init */
#define VIC_SET_GATE(cpi, vector) \
	set_intr_gate((cpi) + VIC_DEFAULT_CPI_BASE, (vector))
#define QIC_SET_GATE(cpi, vector) \
	set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector))
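
/* (So VIC CPI n is delivered on IDT vector VIC_DEFAULT_CPI_BASE + n
 * and QIC CPI n on QIC_DEFAULT_CPI_BASE + n, matching the CPI base
 * vectors programmed into the VIC in find_smp_config() and into the
 * QIC in qic_setup().) */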

void __init voyager_smp_intr_init(void)
{
	int i;

	/* initialize the per cpu irq mask to all disabled */
	for (i = 0; i < nr_cpu_ids; i++)
		vic_irq_mask[i] = 0xFFFF;

	VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt);

	VIC_SET_GATE(VIC_SYS_INT, vic_sys_interrupt);
	VIC_SET_GATE(VIC_CMN_INT, vic_cmn_interrupt);

	QIC_SET_GATE(QIC_TIMER_CPI, qic_timer_interrupt);
	QIC_SET_GATE(QIC_INVALIDATE_CPI, qic_invalidate_interrupt);
	QIC_SET_GATE(QIC_RESCHEDULE_CPI, qic_reschedule_interrupt);
	QIC_SET_GATE(QIC_ENABLE_IRQ_CPI, qic_enable_irq_interrupt);
	QIC_SET_GATE(QIC_CALL_FUNCTION_CPI, qic_call_function_interrupt);

	/* now put the VIC descriptor into the first 48 IRQs
	 *
	 * This is for later: first 16 correspond to PC IRQs; next 16
	 * are Primary MC IRQs and final 16 are Secondary MC IRQs */
	for (i = 0; i < 48; i++)
		set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq);
}

/* send a CPI at level cpi to a set of cpus in cpuset (one bit set per
 * processor that should receive the CPI) */
static void send_CPI(__u32 cpuset, __u8 cpi)
{
	int cpu;
	__u32 quad_cpuset = (cpuset & voyager_quad_processors);

	if (cpi < VIC_START_FAKE_CPI) {
		/* fake CPI are only used for booting, so send to the
		 * extended quads as well---Quads must be VIC booted */
		outb((__u8) (cpuset), VIC_CPI_Registers[cpi]);
		return;
	}
	if (quad_cpuset)
		send_QIC_CPI(quad_cpuset, cpi);
	cpuset &= ~quad_cpuset;
	cpuset &= 0xff;		/* only first 8 CPUs valid for VIC CPI */
	if (cpuset == 0)
		return;
	for_each_online_cpu(cpu) {
		if (cpuset & (1 << cpu))
			set_bit(cpi, &vic_cpi_mailbox[cpu]);
	}
	if (cpuset)
		outb((__u8) cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]);
}
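
/* (The "faked" CPIs above work by setting the CPI's bit in each
 * target's vic_cpi_mailbox[] and then raising the one real interrupt,
 * CPI0; smp_vic_cpi_interrupt() demultiplexes the mailbox on the
 * receiving CPU.) */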

/* Acknowledge receipt of CPI in the QIC, clear in QIC hardware and
 * set the cache line to shared by reading it.
 *
 * DON'T make this inline otherwise the cache line read will be
 * optimised away
 */
static int ack_QIC_CPI(__u8 cpi)
{
	__u8 cpu = hard_smp_processor_id();

	cpi &= 7;

	outb(1 << cpi, QIC_INTERRUPT_CLEAR1);
	return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi;
}

static void ack_special_QIC_CPI(__u8 cpi)
{
	switch (cpi) {
	case VIC_CMN_INT:
		outb(QIC_CMN_INT, QIC_INTERRUPT_CLEAR0);
		break;
	case VIC_SYS_INT:
		outb(QIC_SYS_INT, QIC_INTERRUPT_CLEAR0);
		break;
	}
	/* also clear at the VIC, just in case (nop for non-extended proc) */
	ack_VIC_CPI(cpi);
}

/* Acknowledge receipt of CPI in the VIC (essentially an EOI) */
static void ack_VIC_CPI(__u8 cpi)
{
#ifdef VOYAGER_DEBUG
	unsigned long flags;
	__u16 isr;
	__u8 cpu = smp_processor_id();

	local_irq_save(flags);
	isr = vic_read_isr();
	if ((isr & (1 << (cpi & 7))) == 0) {
		printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi);
	}
#endif
	/* send specific EOI; the two system interrupts have
	 * bit 4 set for a separate vector but behave as the
	 * corresponding 3 bit intr */
	outb_p(0x60 | (cpi & 7), 0x20);

#ifdef VOYAGER_DEBUG
	if ((vic_read_isr() & (1 << (cpi & 7))) != 0) {
		printk("VOYAGER SMP: CPU%d still asserting CPI%d\n", cpu, cpi);
	}
	local_irq_restore(flags);
#endif
}

/* cribbed with thanks from irq.c */
#define __byte(x, y)	(((unsigned char *)&(y))[x])
#define cached_21(cpu)	(__byte(0, vic_irq_mask[cpu]))
#define cached_A1(cpu)	(__byte(1, vic_irq_mask[cpu]))
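
/* (These pick out the low and high bytes of the 16 bit per-CPU mask:
 * the low byte shadows the master 8259's mask at port 0x21, the high
 * byte the slave's at port 0xA1.) */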
  1151. static unsigned int startup_vic_irq(unsigned int irq)
  1152. {
  1153. unmask_vic_irq(irq);
  1154. return 0;
  1155. }
  1156. /* The enable and disable routines. This is where we run into
  1157. * conflicting architectural philosophy. Fundamentally, the voyager
  1158. * architecture does not expect to have to disable interrupts globally
  1159. * (the IRQ controllers belong to each CPU). The processor masquerade
  1160. * which is used to start the system shouldn't be used in a running OS
  1161. * since it will cause great confusion if two separate CPUs drive to
  1162. * the same IRQ controller (I know, I've tried it).
  1163. *
  1164. * The solution is a variant on the NCR lazy SPL design:
  1165. *
  1166. * 1) To disable an interrupt, do nothing (other than set the
  1167. * IRQ_DISABLED flag). This dares the interrupt actually to arrive.
  1168. *
  1169. * 2) If the interrupt dares to come in, raise the local mask against
  1170. * it (this will result in all the CPU masks being raised
  1171. * eventually).
  1172. *
  1173. * 3) To enable the interrupt, lower the mask on the local CPU and
  1174. * broadcast an Interrupt enable CPI which causes all other CPUs to
  1175. * adjust their masks accordingly. */
  1176. static void unmask_vic_irq(unsigned int irq)
  1177. {
  1178. /* linux doesn't to processor-irq affinity, so enable on
  1179. * all CPUs we know about */
  1180. int cpu = smp_processor_id(), real_cpu;
  1181. __u16 mask = (1 << irq);
  1182. __u32 processorList = 0;
  1183. unsigned long flags;
  1184. VDEBUG(("VOYAGER: unmask_vic_irq(%d) CPU%d affinity 0x%lx\n",
  1185. irq, cpu, cpu_irq_affinity[cpu]));
  1186. spin_lock_irqsave(&vic_irq_lock, flags);
  1187. for_each_online_cpu(real_cpu) {
  1188. if (!(voyager_extended_vic_processors & (1 << real_cpu)))
  1189. continue;
  1190. if (!(cpu_irq_affinity[real_cpu] & mask)) {
  1191. /* irq has no affinity for this CPU, ignore */
  1192. continue;
  1193. }
  1194. if (real_cpu == cpu) {
  1195. enable_local_vic_irq(irq);
  1196. } else if (vic_irq_mask[real_cpu] & mask) {
  1197. vic_irq_enable_mask[real_cpu] |= mask;
  1198. processorList |= (1 << real_cpu);
  1199. }
  1200. }
  1201. spin_unlock_irqrestore(&vic_irq_lock, flags);
  1202. if (processorList)
  1203. send_CPI(processorList, VIC_ENABLE_IRQ_CPI);
  1204. }
  1205. static void mask_vic_irq(unsigned int irq)
  1206. {
  1207. /* lazy disable, do nothing */
  1208. }
  1209. static void enable_local_vic_irq(unsigned int irq)
  1210. {
  1211. __u8 cpu = smp_processor_id();
  1212. __u16 mask = ~(1 << irq);
  1213. __u16 old_mask = vic_irq_mask[cpu];
  1214. vic_irq_mask[cpu] &= mask;
  1215. if (vic_irq_mask[cpu] == old_mask)
  1216. return;
  1217. VDEBUG(("VOYAGER DEBUG: Enabling irq %d in hardware on CPU %d\n",
  1218. irq, cpu));
  1219. if (irq & 8) {
  1220. outb_p(cached_A1(cpu), 0xA1);
  1221. (void)inb_p(0xA1);
  1222. } else {
  1223. outb_p(cached_21(cpu), 0x21);
  1224. (void)inb_p(0x21);
  1225. }
  1226. }
  1227. static void disable_local_vic_irq(unsigned int irq)
  1228. {
  1229. __u8 cpu = smp_processor_id();
  1230. __u16 mask = (1 << irq);
  1231. __u16 old_mask = vic_irq_mask[cpu];
  1232. if (irq == 7)
  1233. return;
  1234. vic_irq_mask[cpu] |= mask;
  1235. if (old_mask == vic_irq_mask[cpu])
  1236. return;
  1237. VDEBUG(("VOYAGER DEBUG: Disabling irq %d in hardware on CPU %d\n",
  1238. irq, cpu));
  1239. if (irq & 8) {
  1240. outb_p(cached_A1(cpu), 0xA1);
  1241. (void)inb_p(0xA1);
  1242. } else {
  1243. outb_p(cached_21(cpu), 0x21);
  1244. (void)inb_p(0x21);
  1245. }
  1246. }
  1247. /* The VIC is level triggered, so the ack can only be issued after the
  1248. * interrupt completes. However, we do Voyager lazy interrupt
  1249. * handling here: It is an extremely expensive operation to mask an
  1250. * interrupt in the vic, so we merely set a flag (IRQ_DISABLED). If
  1251. * this interrupt actually comes in, then we mask and ack here to push
  1252. * the interrupt off to another CPU */
  1253. static void before_handle_vic_irq(unsigned int irq)
  1254. {
  1255. irq_desc_t *desc = irq_to_desc(irq);
  1256. __u8 cpu = smp_processor_id();
  1257. _raw_spin_lock(&vic_irq_lock);
  1258. vic_intr_total++;
  1259. vic_intr_count[cpu]++;
  1260. if (!(cpu_irq_affinity[cpu] & (1 << irq))) {
  1261. /* The irq is not in our affinity mask, push it off
  1262. * onto another CPU */
  1263. VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d "
  1264. "on cpu %d\n", irq, cpu));
  1265. disable_local_vic_irq(irq);
  1266. /* set IRQ_INPROGRESS to prevent the handler in irq.c from
  1267. * actually calling the interrupt routine */
  1268. desc->status |= IRQ_REPLAY | IRQ_INPROGRESS;
  1269. } else if (desc->status & IRQ_DISABLED) {
  1270. /* Damn, the interrupt actually arrived, do the lazy
  1271. * disable thing. The interrupt routine in irq.c will
  1272. * not handle a IRQ_DISABLED interrupt, so nothing more
  1273. * need be done here */
  1274. VDEBUG(("VOYAGER DEBUG: lazy disable of irq %d on CPU %d\n",
  1275. irq, cpu));
  1276. disable_local_vic_irq(irq);
  1277. desc->status |= IRQ_REPLAY;
  1278. } else {
  1279. desc->status &= ~IRQ_REPLAY;
  1280. }
  1281. _raw_spin_unlock(&vic_irq_lock);
  1282. }
/* Finish the VIC interrupt: basically mask */
static void after_handle_vic_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_to_desc(irq);

	_raw_spin_lock(&vic_irq_lock);
	{
		unsigned int status = desc->status & ~IRQ_INPROGRESS;
#ifdef VOYAGER_DEBUG
		__u16 isr;
#endif

		desc->status = status;
		if ((status & IRQ_DISABLED))
			disable_local_vic_irq(irq);
#ifdef VOYAGER_DEBUG
		/* DEBUG: before we ack, check what's in progress.  If
		 * the in-service bit for this irq is clear and we did
		 * not lazily push the irq off (IRQ_REPLAY), the
		 * interrupt has gone astray: hunt for the CPU whose
		 * VIC still has it in service and ack it there */
		isr = vic_read_isr();
		if (!(isr & (1 << irq)) && !(status & IRQ_REPLAY)) {
			__u8 cpu = smp_processor_id();
			__u8 real_cpu;

			printk("VOYAGER SMP: CPU%d lost interrupt %d\n",
			       cpu, irq);
			for_each_possible_cpu(real_cpu) {
				/* masquerade as each CPU in turn so we
				 * can read its local VIC state */
				outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu,
				     VIC_PROCESSOR_ID);
				isr = vic_read_isr();
				if (isr & (1 << irq)) {
					printk("VOYAGER SMP: CPU%d ack irq %d\n",
					       real_cpu, irq);
					ack_vic_irq(irq);
				}
				outb(cpu, VIC_PROCESSOR_ID);
			}
		}
#endif /* VOYAGER_DEBUG */
		/* as soon as we ack, the interrupt is eligible for
		 * receipt by another CPU so everything must be in
		 * order here */
		ack_vic_irq(irq);
		if (status & IRQ_REPLAY) {
			/* replay is set if we disable the interrupt
			 * in the before_handle_vic_irq() routine, so
			 * clear the in progress bit here to allow the
			 * next CPU to handle this correctly */
			desc->status &= ~(IRQ_REPLAY | IRQ_INPROGRESS);
		}
#ifdef VOYAGER_DEBUG
		isr = vic_read_isr();
		if ((isr & (1 << irq)) != 0)
			printk("VOYAGER SMP: after_handle_vic_irq() after "
			       "ack irq=%d, isr=0x%x\n", irq, isr);
#endif /* VOYAGER_DEBUG */
	}
	_raw_spin_unlock(&vic_irq_lock);

	/* All code after this point is out of the main path - the IRQ
	 * may be intercepted by another CPU if reasserted */
}
/* Linux processor - interrupt affinity manipulations.
 *
 * For each processor, we maintain a 32 bit irq affinity mask.
 * Initially it is set to all 1's so every processor accepts every
 * interrupt.  In this call, we change the processor's affinity mask:
 *
 * Change from enable to disable:
 *
 * If the interrupt ever comes in to the processor, we will disable it
 * and ack it to push it off to another CPU, so just accept the mask here.
 *
 * Change from disable to enable:
 *
 * change the mask and then do an interrupt enable CPI to re-enable on
 * the selected processors */
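/*
 * Illustrative example (hypothetical values): with
 * voyager_extended_vic_processors == 0x0f, a request to bind irq 9 to
 * CPUs 1 and 2 yields real_mask == 0x06, so bit 9 is set in
 * cpu_irq_affinity[1] and cpu_irq_affinity[2] and cleared on every
 * other online CPU; the closing unmask_vic_irq() then re-enables the
 * line only where it is wanted.
 */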
void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask)
{
	/* Only extended processors handle interrupts */
	unsigned long real_mask;
	unsigned long irq_mask = 1 << irq;
	int cpu;

	real_mask = cpus_addr(*mask)[0] & voyager_extended_vic_processors;

	if (cpus_addr(*mask)[0] == 0)
		/* can't have no CPUs to accept the interrupt -- extremely
		 * bad things will happen */
		return;

	if (irq == 0)
		/* can't change the affinity of the timer IRQ.  This
		 * is due to the constraint in the voyager
		 * architecture that the CPI also comes in on an IRQ
		 * line and we have chosen IRQ0 for this.  If you
		 * raise the mask on this interrupt, the processor
		 * will no longer be able to accept VIC CPIs */
		return;

	if (irq >= 32)
		/* You can only have 32 interrupts in a voyager system
		 * (and 32 only if you have a secondary microchannel
		 * bus) */
		return;

	for_each_online_cpu(cpu) {
		unsigned long cpu_mask = 1 << cpu;

		if (cpu_mask & real_mask) {
			/* enable the interrupt for this cpu */
			cpu_irq_affinity[cpu] |= irq_mask;
		} else {
			/* disable the interrupt for this cpu */
			cpu_irq_affinity[cpu] &= ~irq_mask;
		}
	}
	/* this is magic, we now have the correct affinity maps, so
	 * enable the interrupt.  This will send an enable CPI to
	 * those CPUs who need to enable it in their local masks,
	 * causing them to correct for the new affinity.  If the
	 * interrupt is currently globally disabled, it will simply be
	 * disabled again as it comes in (voyager lazy disable).  If
	 * the affinity map is tightened to disable the interrupt on a
	 * cpu, it will be pushed off when it comes in */
	unmask_vic_irq(irq);
}
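/*
 * A specific EOI (OCW2 command 0x60 | level) clears exactly the named
 * in-service bit, unlike the non-specific EOI (0x20) which clears the
 * highest-priority one -- presumably why it is used below, since the
 * ack may be issued while masquerading on behalf of another CPU.
 */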
static void ack_vic_irq(unsigned int irq)
{
	if (irq & 8) {
		outb(0x62, 0x20);	/* Specific EOI to cascade (IRQ2) */
		outb(0x60 | (irq & 7), 0xA0);	/* Specific EOI to slave */
	} else {
		outb(0x60 | (irq & 7), 0x20);	/* Specific EOI to master */
	}
}
/* enable the CPIs.  In the VIC, the CPIs are delivered by the 8259
 * but are not vectored by it.  This means that the 8259 mask must be
 * lowered to receive them */
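/*
 * Note (our reading of the code below): the CPI levels are ordinary
 * lines as far as the 8259 mask is concerned, so "enabling" a CPI is
 * just clearing its bit in the per-CPU cached mask via
 * enable_local_vic_irq().
 */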
static __init void vic_enable_cpi(void)
{
	__u8 cpu = smp_processor_id();

	/* just take a copy of the current mask (nop for boot cpu) */
	vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id];

	enable_local_vic_irq(VIC_CPI_LEVEL0);
	enable_local_vic_irq(VIC_CPI_LEVEL1);
	/* for sys int and cmn int */
	enable_local_vic_irq(7);

	if (is_cpu_quad()) {
		outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
		outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);
		VDEBUG(("VOYAGER SMP: QIC ENABLE CPI: CPU%d: MASK 0x%x\n",
			cpu, QIC_CPI_ENABLE));
	}

	VDEBUG(("VOYAGER SMP: ENABLE CPI: CPU%d: MASK 0x%x\n",
		cpu, vic_irq_mask[cpu]));
}
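/*
 * The dump below uses the VIC masquerade facility: writing
 * VIC_CPU_MASQUERADE_ENABLE | cpu to VIC_PROCESSOR_ID redirects local
 * 8259 accesses so one processor can inspect (and ack on behalf of)
 * another -- the same trick the lost-interrupt debug path above
 * relies on.
 */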
void voyager_smp_dump(void)
{
	int old_cpu = smp_processor_id(), cpu;

	/* dump the interrupt masks of each processor */
	for_each_online_cpu(cpu) {
		__u16 imr, isr, irr;
		unsigned long flags;

		local_irq_save(flags);
		outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
		imr = (inb(0xa1) << 8) | inb(0x21);
		outb(0x0a, 0xa0);	/* OCW3: select IRR for reading */
		irr = inb(0xa0) << 8;
		outb(0x0a, 0x20);
		irr |= inb(0x20);
		outb(0x0b, 0xa0);	/* OCW3: select ISR for reading */
		isr = inb(0xa0) << 8;
		outb(0x0b, 0x20);
		isr |= inb(0x20);
		outb(old_cpu, VIC_PROCESSOR_ID);
		local_irq_restore(flags);
		printk("\tCPU%d: mask=0x%x, IMR=0x%x, IRR=0x%x, ISR=0x%x\n",
		       cpu, vic_irq_mask[cpu], imr, irr, isr);
#if 0
		/* These lines are put in to try to unstick an un-ack'd irq */
		if (isr != 0) {
			int irq;
			for (irq = 0; irq < 16; irq++) {
				if (isr & (1 << irq)) {
					printk("\tCPU%d: ack irq %d\n",
					       cpu, irq);
					local_irq_save(flags);
					outb(VIC_CPU_MASQUERADE_ENABLE | cpu,
					     VIC_PROCESSOR_ID);
					ack_vic_irq(irq);
					outb(old_cpu, VIC_PROCESSOR_ID);
					local_irq_restore(flags);
				}
			}
		}
#endif
	}
}
void smp_voyager_power_off(void *dummy)
{
	if (smp_processor_id() == boot_cpu_id)
		voyager_power_off();
	else
		smp_stop_cpu_function(NULL);
}
static void __init voyager_smp_prepare_cpus(unsigned int max_cpus)
{
	/* FIXME: ignore max_cpus for now */
	smp_boot_cpus();
}

static void __cpuinit voyager_smp_prepare_boot_cpu(void)
{
	switch_to_new_gdt();

	cpu_online_map = cpumask_of_cpu(smp_processor_id());
	cpu_callout_map = cpumask_of_cpu(smp_processor_id());
	cpu_callin_map = CPU_MASK_NONE;
	cpu_present_map = cpumask_of_cpu(smp_processor_id());
}
static int __cpuinit voyager_cpu_up(unsigned int cpu)
{
	/* This only works at boot for x86.  See "rewrite" above. */
	if (cpu_isset(cpu, smp_commenced_mask))
		return -ENOSYS;

	/* In case one didn't come up */
	if (!cpu_isset(cpu, cpu_callin_map))
		return -EIO;
	/* Unleash the CPU! */
	cpu_set(cpu, smp_commenced_mask);
	while (!cpu_online(cpu))
		mb();
	return 0;
}
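/*
 * Boot handshake, as we read the masks above (a sketch, not verified
 * against the AP startup path): smp_boot_cpus() starts the AP, which
 * sets its bit in cpu_callin_map and then parks; setting its bit in
 * smp_commenced_mask here releases it, and we spin until the AP marks
 * itself in cpu_online_map.
 */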
static void __init voyager_smp_cpus_done(unsigned int max_cpus)
{
	zap_low_mappings();
}

void __init smp_setup_processor_id(void)
{
	current_thread_info()->cpu = hard_smp_processor_id();
}

static void voyager_send_call_func(const struct cpumask *callmask)
{
	__u32 mask = cpus_addr(*callmask)[0] & ~(1 << smp_processor_id());
	send_CPI(mask, VIC_CALL_FUNCTION_CPI);
}

static void voyager_send_call_func_single(int cpu)
{
	send_CPI(1 << cpu, VIC_CALL_FUNCTION_SINGLE_CPI);
}
struct smp_ops smp_ops = {
	.smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
	.smp_prepare_cpus = voyager_smp_prepare_cpus,
	.cpu_up = voyager_cpu_up,
	.smp_cpus_done = voyager_smp_cpus_done,

	.smp_send_stop = voyager_smp_send_stop,
	.smp_send_reschedule = voyager_smp_send_reschedule,

	.send_call_func_ipi = voyager_send_call_func,
	.send_call_func_single_ipi = voyager_send_call_func_single,
};