apic_64.c 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844
  1. /*
  2. * Local APIC handling, local APIC timers
  3. *
  4. * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
  5. *
  6. * Fixes
  7. * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
  8. * thanks to Eric Gilmore
  9. * and Rolf G. Tews
  10. * for testing these extensively.
  11. * Maciej W. Rozycki : Various updates and fixes.
  12. * Mikael Pettersson : Power Management for UP-APIC.
  13. * Pavel Machek and
  14. * Mikael Pettersson : PM converted to driver model.
  15. */
  16. #include <linux/init.h>
  17. #include <linux/mm.h>
  18. #include <linux/delay.h>
  19. #include <linux/bootmem.h>
  20. #include <linux/interrupt.h>
  21. #include <linux/mc146818rtc.h>
  22. #include <linux/kernel_stat.h>
  23. #include <linux/sysdev.h>
  24. #include <linux/ioport.h>
  25. #include <linux/clockchips.h>
  26. #include <linux/acpi_pmtmr.h>
  27. #include <linux/module.h>
  28. #include <linux/dmar.h>
  29. #include <asm/atomic.h>
  30. #include <asm/smp.h>
  31. #include <asm/mtrr.h>
  32. #include <asm/mpspec.h>
  33. #include <asm/hpet.h>
  34. #include <asm/pgalloc.h>
  35. #include <asm/nmi.h>
  36. #include <asm/idle.h>
  37. #include <asm/proto.h>
  38. #include <asm/timex.h>
  39. #include <asm/apic.h>
  40. #include <asm/i8259.h>
  41. #include <mach_ipi.h>
  42. #include <mach_apic.h>
/* Disable local APIC timer from the kernel commandline or via dmi quirk */
static int disable_apic_timer __cpuinitdata;
/* Use the ACPI PM timer (instead of the TSC) for APIC timer calibration */
static int apic_calibrate_pmtmr __initdata;
int disable_apic;
int disable_x2apic;
int x2apic;
/* x2apic enabled before OS handover */
int x2apic_preenabled;

/* Local APIC timer works in C2 */
int local_apic_timer_c2_ok;
EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);

/*
 * Debug level, exported for io_apic.c
 */
unsigned int apic_verbosity;

/* Have we found an MP table */
int smp_found_config;

/* Resource describing the local APIC register window */
static struct resource lapic_resource = {
	.name = "Local APIC",
	.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
};

/* APIC timer ticks per jiffy, filled in by calibrate_APIC_clock() */
static unsigned int calibration_result;

static int lapic_next_event(unsigned long delta,
			    struct clock_event_device *evt);
static void lapic_timer_setup(enum clock_event_mode mode,
			      struct clock_event_device *evt);
static void lapic_timer_broadcast(cpumask_t mask);
static void apic_pm_activate(void);

/*
 * The local apic timer can be used for any function which is CPU local.
 */
static struct clock_event_device lapic_clockevent = {
	.name = "lapic",
	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
		| CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
	.shift = 32,
	.set_mode = lapic_timer_setup,
	.set_next_event = lapic_next_event,
	.broadcast = lapic_timer_broadcast,
	.rating = 100,
	.irq = -1,
};
/* Per-CPU clockevent, copied from lapic_clockevent in setup_APIC_timer() */
static DEFINE_PER_CPU(struct clock_event_device, lapic_events);

/* Physical base address of the local APIC (0 until it is mapped) */
static unsigned long apic_phys;

/* Local APIC address from the MP/ACPI tables */
unsigned long mp_lapic_addr;
  88. /*
  89. * Get the LAPIC version
  90. */
  91. static inline int lapic_get_version(void)
  92. {
  93. return GET_APIC_VERSION(apic_read(APIC_LVR));
  94. }
/*
 * Check, if the APIC is integrated or a separate chip
 *
 * On 64-bit the APIC is always integrated in the CPU; on 32-bit it may
 * be an external 82489DX, which is detected via the APIC version.
 */
static inline int lapic_is_integrated(void)
{
#ifdef CONFIG_X86_64
	return 1;
#else
	return APIC_INTEGRATED(lapic_get_version());
#endif
}
  106. /*
  107. * Check, whether this is a modern or a first generation APIC
  108. */
  109. static int modern_apic(void)
  110. {
  111. /* AMD systems use old APIC versions, so check the CPU */
  112. if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
  113. boot_cpu_data.x86 >= 0xf)
  114. return 1;
  115. return lapic_get_version() >= 0x14;
  116. }
/*
 * Paravirt kernels also might be using these below ops. So we still
 * use generic apic_read()/apic_write(), which might be pointing to different
 * ops in PARAVIRT case.
 */

/* Busy-wait until the ICR delivery-pending (busy) bit clears (xAPIC mode). */
void xapic_wait_icr_idle(void)
{
	while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}
  127. u32 safe_xapic_wait_icr_idle(void)
  128. {
  129. u32 send_status;
  130. int timeout;
  131. timeout = 0;
  132. do {
  133. send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
  134. if (!send_status)
  135. break;
  136. udelay(100);
  137. } while (timeout++ < 1000);
  138. return send_status;
  139. }
/*
 * Write the ICR: the destination goes into ICR2 first; writing the low
 * word into ICR then triggers the IPI.
 */
void xapic_icr_write(u32 low, u32 id)
{
	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
	apic_write(APIC_ICR, low);
}
  145. u64 xapic_icr_read(void)
  146. {
  147. u32 icr1, icr2;
  148. icr2 = apic_read(APIC_ICR2);
  149. icr1 = apic_read(APIC_ICR);
  150. return icr1 | ((u64)icr2 << 32);
  151. }
/* xAPIC (MMIO based) register access ops */
static struct apic_ops xapic_ops = {
	.read = native_apic_mem_read,
	.write = native_apic_mem_write,
	.icr_read = xapic_icr_read,
	.icr_write = xapic_icr_write,
	.wait_icr_idle = xapic_wait_icr_idle,
	.safe_wait_icr_idle = safe_xapic_wait_icr_idle,
};

/* Default to xAPIC ops; replaced with x2apic_ops when x2APIC is enabled */
struct apic_ops __read_mostly *apic_ops = &xapic_ops;
EXPORT_SYMBOL_GPL(apic_ops);
/*
 * In x2APIC mode the ICR has no delivery-pending (busy) bit, so both
 * wait variants are no-ops.
 */
static void x2apic_wait_icr_idle(void)
{
	/* no need to wait for icr idle in x2apic */
	return;
}

static u32 safe_x2apic_wait_icr_idle(void)
{
	/* no need to wait for icr idle in x2apic */
	return 0;
}
/* In x2APIC mode the ICR is a single 64bit MSR: destination id in the
 * high half, command in the low half. */
void x2apic_icr_write(u32 low, u32 id)
{
	wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
}

u64 x2apic_icr_read(void)
{
	unsigned long val;

	rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
	return val;
}
/* x2APIC (MSR based) register access ops */
static struct apic_ops x2apic_ops = {
	.read = native_apic_msr_read,
	.write = native_apic_msr_write,
	.icr_read = x2apic_icr_read,
	.icr_write = x2apic_icr_write,
	.wait_icr_idle = x2apic_wait_icr_idle,
	.safe_wait_icr_idle = safe_x2apic_wait_icr_idle,
};
  190. /**
  191. * enable_NMI_through_LVT0 - enable NMI through local vector table 0
  192. */
  193. void __cpuinit enable_NMI_through_LVT0(void)
  194. {
  195. unsigned int v;
  196. /* unmask and set to NMI */
  197. v = APIC_DM_NMI;
  198. /* Level triggered for 82489DX (32bit mode) */
  199. if (!lapic_is_integrated())
  200. v |= APIC_LVT_LEVEL_TRIGGER;
  201. apic_write(APIC_LVT0, v);
  202. }
  203. /**
  204. * lapic_get_maxlvt - get the maximum number of local vector table entries
  205. */
  206. int lapic_get_maxlvt(void)
  207. {
  208. unsigned int v;
  209. v = apic_read(APIC_LVR);
  210. /*
  211. * - we always have APIC integrated on 64bit mode
  212. * - 82489DXs do not report # of LVT entries
  213. */
  214. return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2;
  215. }
  216. /*
  217. * Local APIC timer
  218. */
  219. /* Clock divisor */
  220. #ifdef CONFG_X86_64
  221. #define APIC_DIVISOR 1
  222. #else
  223. #define APIC_DIVISOR 16
  224. #endif
/*
 * This function sets up the local APIC timer, with a timeout of
 * 'clocks' APIC bus clock. During calibration we actually call
 * this function twice on the boot CPU, once with a bogus timeout
 * value, second time for real. The other (noncalibrating) CPUs
 * call this function only once, with the real, calibrated value.
 *
 * We do reads before writes even if unnecessary, to get around the
 * P5 APIC double write bug.
 *
 * @clocks:  initial count for periodic mode (divided by APIC_DIVISOR)
 * @oneshot: non-zero selects one-shot instead of periodic mode
 * @irqen:   zero keeps the timer interrupt masked (used for calibration)
 */
static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
{
	unsigned int lvtt_value, tmp_value;

	lvtt_value = LOCAL_TIMER_VECTOR;
	if (!oneshot)
		lvtt_value |= APIC_LVT_TIMER_PERIODIC;
	/* A non-integrated 82489DX needs an explicit timer base */
	if (!lapic_is_integrated())
		lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
	if (!irqen)
		lvtt_value |= APIC_LVT_MASKED;

	apic_write(APIC_LVTT, lvtt_value);

	/*
	 * Divide PICLK by 16
	 */
	tmp_value = apic_read(APIC_TDCR);
	apic_write(APIC_TDCR,
		   (tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) |
		   APIC_TDR_DIV_16);

	/* In one-shot mode the count is programmed later by lapic_next_event() */
	if (!oneshot)
		apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
}
/*
 * Setup extended LVT, AMD specific (K8, family 10h)
 *
 * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
 * MCE interrupts are supported. Thus MCE offset must be set to 0.
 */
#define APIC_EILVT_LVTOFF_MCE 0
#define APIC_EILVT_LVTOFF_IBS 1

/* Program one extended LVT entry; entries are 0x10 apart from APIC_EILVT0. */
static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
{
	unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
	unsigned int v = (mask << 16) | (msg_type << 8) | vector;

	apic_write(reg, v);
}
  270. u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
  271. {
  272. setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
  273. return APIC_EILVT_LVTOFF_MCE;
  274. }
  275. u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
  276. {
  277. setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
  278. return APIC_EILVT_LVTOFF_IBS;
  279. }
/*
 * Program the next event, relative to now
 *
 * Clockevent callback: arms the one-shot timer by writing the initial
 * count register. Always reports success.
 */
static int lapic_next_event(unsigned long delta,
			    struct clock_event_device *evt)
{
	apic_write(APIC_TMICT, delta);
	return 0;
}
  289. /*
  290. * Setup the lapic timer in periodic or oneshot mode
  291. */
  292. static void lapic_timer_setup(enum clock_event_mode mode,
  293. struct clock_event_device *evt)
  294. {
  295. unsigned long flags;
  296. unsigned int v;
  297. /* Lapic used as dummy for broadcast ? */
  298. if (evt->features & CLOCK_EVT_FEAT_DUMMY)
  299. return;
  300. local_irq_save(flags);
  301. switch (mode) {
  302. case CLOCK_EVT_MODE_PERIODIC:
  303. case CLOCK_EVT_MODE_ONESHOT:
  304. __setup_APIC_LVTT(calibration_result,
  305. mode != CLOCK_EVT_MODE_PERIODIC, 1);
  306. break;
  307. case CLOCK_EVT_MODE_UNUSED:
  308. case CLOCK_EVT_MODE_SHUTDOWN:
  309. v = apic_read(APIC_LVTT);
  310. v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
  311. apic_write(APIC_LVTT, v);
  312. break;
  313. case CLOCK_EVT_MODE_RESUME:
  314. /* Nothing to do here */
  315. break;
  316. }
  317. local_irq_restore(flags);
  318. }
/*
 * Local APIC timer broadcast function
 *
 * Relays the timer tick to the CPUs in @mask via an IPI (SMP only).
 */
static void lapic_timer_broadcast(cpumask_t mask)
{
#ifdef CONFIG_SMP
	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
#endif
}
/*
 * Setup the local APIC timer for this CPU. Copy the initialized values
 * of the boot CPU and register the clock event in the framework.
 */
static void setup_APIC_timer(void)
{
	struct clock_event_device *levt = &__get_cpu_var(lapic_events);

	/* Start from the boot CPU's (possibly calibrated) template */
	memcpy(levt, &lapic_clockevent, sizeof(*levt));
	levt->cpumask = cpumask_of_cpu(smp_processor_id());

	clockevents_register_device(levt);
}
/*
 * In this function we calibrate APIC bus clocks to the external
 * timer. Unfortunately we cannot use jiffies and the timer irq
 * to calibrate, since some later bootup code depends on getting
 * the first irq? Ugh.
 *
 * We want to do the calibration only once since we
 * want to have local timer irqs syncron. CPUs connected
 * by the same APIC bus have the very same bus frequency.
 * And we want to have irqs off anyways, no accidental
 * APIC irq that way.
 */

/* Calibration window, in TSC cycles / APIC timer ticks */
#define TICK_COUNT 100000000

/* Returns 0 on success, -1 when the measured frequency is implausibly low. */
static int __init calibrate_APIC_clock(void)
{
	unsigned apic, apic_start;
	unsigned long tsc, tsc_start;
	int result;

	local_irq_disable();

	/*
	 * Put whatever arbitrary (but long enough) timeout
	 * value into the APIC clock, we just want to get the
	 * counter running for calibration.
	 *
	 * No interrupt enable !
	 */
	__setup_APIC_LVTT(250000000, 0, 0);

	apic_start = apic_read(APIC_TMCCT);
#ifdef CONFIG_X86_PM_TIMER
	/* PM-timer based calibration, if requested and available */
	if (apic_calibrate_pmtmr && pmtmr_ioport) {
		pmtimer_wait(5000); /* 5ms wait */
		apic = apic_read(APIC_TMCCT);
		result = (apic_start - apic) * 1000L / 5;
	} else
#endif
	{
		/* TSC based calibration: sample both counters until either
		 * has advanced by TICK_COUNT, then cross-multiply. */
		rdtscll(tsc_start);

		do {
			apic = apic_read(APIC_TMCCT);
			rdtscll(tsc);
		} while ((tsc - tsc_start) < TICK_COUNT &&
			 (apic_start - apic) < TICK_COUNT);

		result = (apic_start - apic) * 1000L * tsc_khz /
			 (tsc - tsc_start);
	}

	local_irq_enable();

	printk(KERN_DEBUG "APIC timer calibration result %d\n", result);

	printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
	       result / 1000 / 1000, result / 1000 % 1000);

	/* Calculate the scaled math multiplication factor */
	lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC,
				       lapic_clockevent.shift);
	lapic_clockevent.max_delta_ns =
		clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
	lapic_clockevent.min_delta_ns =
		clockevent_delta2ns(0xF, &lapic_clockevent);

	calibration_result = (result * APIC_DIVISOR) / HZ;

	/*
	 * Do a sanity check on the APIC calibration result
	 */
	if (calibration_result < (1000000 / HZ)) {
		printk(KERN_WARNING
		       "APIC frequency too slow, disabling apic timer\n");
		return -1;
	}

	return 0;
}
/*
 * Setup the boot APIC
 *
 * Calibrate and verify the result.
 */
void __init setup_boot_APIC_clock(void)
{
	/*
	 * The local apic timer can be disabled via the kernel
	 * commandline or from the CPU detection code. Register the lapic
	 * timer as a dummy clock event source on SMP systems, so the
	 * broadcast mechanism is used. On UP systems simply ignore it.
	 */
	if (disable_apic_timer) {
		printk(KERN_INFO "Disabling APIC timer\n");
		/* No broadcast on UP ! */
		if (num_possible_cpus() > 1) {
			lapic_clockevent.mult = 1;
			setup_APIC_timer();
		}
		return;
	}

	apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
		    "calibrating APIC timer ...\n");

	/* Calibration failed: keep the dummy lapic for broadcast on SMP */
	if (calibrate_APIC_clock()) {
		/* No broadcast on UP ! */
		if (num_possible_cpus() > 1)
			setup_APIC_timer();
		return;
	}

	/*
	 * If nmi_watchdog is set to IO_APIC, we need the
	 * PIT/HPET going. Otherwise register lapic as a dummy
	 * device.
	 */
	if (nmi_watchdog != NMI_IO_APIC)
		lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
	else
		printk(KERN_WARNING "APIC timer registered as dummy,"
		       " due to nmi_watchdog=%d!\n", nmi_watchdog);

	/* Setup the lapic or request the broadcast */
	setup_APIC_timer();
}
/* Secondary CPUs reuse the calibration already done by the boot CPU. */
void __cpuinit setup_secondary_APIC_clock(void)
{
	setup_APIC_timer();
}
/*
 * The guts of the apic timer interrupt
 */
static void local_apic_timer_interrupt(void)
{
	int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(lapic_events, cpu);

	/*
	 * Normally we should not be here till LAPIC has been initialized but
	 * in some cases like kdump, its possible that there is a pending LAPIC
	 * timer interrupt from previous kernel's context and is delivered in
	 * new kernel the moment interrupts are enabled.
	 *
	 * Interrupts are enabled early and LAPIC is setup much later, hence
	 * its possible that when we get here evt->event_handler is NULL.
	 * Check for event_handler being NULL and discard the interrupt as
	 * spurious.
	 */
	if (!evt->event_handler) {
		printk(KERN_WARNING
		       "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
		/* Switch it off */
		lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
		return;
	}

	/*
	 * the NMI deadlock-detector uses this.
	 */
#ifdef CONFIG_X86_64
	add_pda(apic_timer_irqs, 1);
#else
	per_cpu(irq_stat, cpu).apic_timer_irqs++;
#endif

	/* Hand off to the clockevents layer (tick handling etc.) */
	evt->event_handler(evt);
}
/*
 * Local APIC timer interrupt. This is the most natural way for doing
 * local interrupts, but local timer interrupts can be emulated by
 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
 *
 * [ if a single-CPU system runs an SMP kernel then we call the local
 * interrupt as well. Thus we cannot inline the local irq ... ]
 */
void smp_apic_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/*
	 * NOTE! We'd better ACK the irq immediately,
	 * because timer handling can be slow.
	 */
	ack_APIC_irq();
	/*
	 * update_process_times() expects us to have done irq_enter().
	 * Besides, if we don't, timer interrupts ignore the global
	 * interrupt lock, which is the WrongThing (tm) to do.
	 */
	exit_idle();
	irq_enter();
	local_apic_timer_interrupt();
	irq_exit();

	set_irq_regs(old_regs);
}
/* Per-CPU profiling multipliers are not supported with the lapic timer. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
  519. /*
  520. * Local APIC start and shutdown
  521. */
  522. /**
  523. * clear_local_APIC - shutdown the local APIC
  524. *
  525. * This is called, when a CPU is disabled and before rebooting, so the state of
  526. * the local APIC has no dangling leftovers. Also used to cleanout any BIOS
  527. * leftovers during boot.
  528. */
  529. void clear_local_APIC(void)
  530. {
  531. int maxlvt;
  532. u32 v;
  533. /* APIC hasn't been mapped yet */
  534. if (!apic_phys)
  535. return;
  536. maxlvt = lapic_get_maxlvt();
  537. /*
  538. * Masking an LVT entry can trigger a local APIC error
  539. * if the vector is zero. Mask LVTERR first to prevent this.
  540. */
  541. if (maxlvt >= 3) {
  542. v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
  543. apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
  544. }
  545. /*
  546. * Careful: we have to set masks only first to deassert
  547. * any level-triggered sources.
  548. */
  549. v = apic_read(APIC_LVTT);
  550. apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
  551. v = apic_read(APIC_LVT0);
  552. apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
  553. v = apic_read(APIC_LVT1);
  554. apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
  555. if (maxlvt >= 4) {
  556. v = apic_read(APIC_LVTPC);
  557. apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
  558. }
  559. /* lets not touch this if we didn't frob it */
  560. #if defined(CONFIG_X86_MCE_P4THERMAL) || defined(X86_MCE_INTEL)
  561. if (maxlvt >= 5) {
  562. v = apic_read(APIC_LVTTHMR);
  563. apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
  564. }
  565. #endif
  566. /*
  567. * Clean APIC state for other OSs:
  568. */
  569. apic_write(APIC_LVTT, APIC_LVT_MASKED);
  570. apic_write(APIC_LVT0, APIC_LVT_MASKED);
  571. apic_write(APIC_LVT1, APIC_LVT_MASKED);
  572. if (maxlvt >= 3)
  573. apic_write(APIC_LVTERR, APIC_LVT_MASKED);
  574. if (maxlvt >= 4)
  575. apic_write(APIC_LVTPC, APIC_LVT_MASKED);
  576. /* Integrated APIC (!82489DX) ? */
  577. if (lapic_is_integrated()) {
  578. if (maxlvt > 3)
  579. /* Clear ESR due to Pentium errata 3AP and 11AP */
  580. apic_write(APIC_ESR, 0);
  581. apic_read(APIC_ESR);
  582. }
  583. }
/**
 * disable_local_APIC - clear and disable the local APIC
 */
void disable_local_APIC(void)
{
	unsigned int value;

	clear_local_APIC();

	/*
	 * Disable APIC (implies clearing of registers
	 * for 82489DX!).
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_SPIV_APIC_ENABLED;
	apic_write(APIC_SPIV, value);

#ifdef CONFIG_X86_32
	/*
	 * When LAPIC was disabled by the BIOS and enabled by the kernel,
	 * restore the disabled state.
	 */
	if (enabled_via_apicbase) {
		unsigned int l, h;

		rdmsr(MSR_IA32_APICBASE, l, h);
		l &= ~MSR_IA32_APICBASE_ENABLE;
		wrmsr(MSR_IA32_APICBASE, l, h);
	}
#endif
}
/*
 * If Linux enabled the LAPIC against the BIOS default disable it down before
 * re-entering the BIOS on shutdown. Otherwise the BIOS may get confused and
 * not power-off. Additionally clear all LVT entries before disable_local_APIC
 * for the case where Linux didn't enable the LAPIC.
 */
void lapic_shutdown(void)
{
	unsigned long flags;

	if (!cpu_has_apic)
		return;

	local_irq_save(flags);

#ifdef CONFIG_X86_32
	/* BIOS-enabled LAPIC: only clear LVTs, leave the APIC enabled */
	if (!enabled_via_apicbase)
		clear_local_APIC();
	else
#endif
		disable_local_APIC();

	local_irq_restore(flags);
}
/*
 * This is to verify that we're looking at a real local APIC.
 * Check these against your board if the CPUs aren't getting
 * started for no apparent reason.
 *
 * Returns 1 when the registers behave like a real APIC, 0 otherwise.
 */
int __init verify_local_APIC(void)
{
	unsigned int reg0, reg1;

	/*
	 * The version register is read-only in a real APIC.
	 */
	reg0 = apic_read(APIC_LVR);
	apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
	apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
	reg1 = apic_read(APIC_LVR);
	apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);

	/*
	 * The two version reads above should print the same
	 * numbers. If the second one is different, then we
	 * poke at a non-APIC.
	 */
	if (reg1 != reg0)
		return 0;

	/*
	 * Check if the version looks reasonably.
	 */
	reg1 = GET_APIC_VERSION(reg0);
	if (reg1 == 0x00 || reg1 == 0xff)
		return 0;
	reg1 = lapic_get_maxlvt();
	if (reg1 < 0x02 || reg1 == 0xff)
		return 0;

	/*
	 * The ID register is read/write in a real APIC.
	 */
	reg0 = apic_read(APIC_ID);
	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
	apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
	reg1 = apic_read(APIC_ID);
	apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
	apic_write(APIC_ID, reg0);
	if (reg1 != (reg0 ^ APIC_ID_MASK))
		return 0;

	/*
	 * The next two are just to see if we have sane values.
	 * They're only really relevant if we're in Virtual Wire
	 * compatibility mode, but most boxes no longer are.
	 */
	reg0 = apic_read(APIC_LVT0);
	apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
	reg1 = apic_read(APIC_LVT1);
	apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);

	return 1;
}
/**
 * sync_Arb_IDs - synchronize APIC bus arbitration IDs
 */
void __init sync_Arb_IDs(void)
{
	/*
	 * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 And not
	 * needed on AMD.
	 */
	if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return;

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
	/* Broadcast a level-triggered INIT to resync arbitration IDs */
	apic_write(APIC_ICR, APIC_DEST_ALLINC |
		   APIC_INT_LEVELTRIG | APIC_DM_INIT);
}
/*
 * An initial setup of the virtual wire mode.
 */
void __init init_bsp_APIC(void)
{
	unsigned int value;

	/*
	 * Don't do the setup now if we have a SMP BIOS as the
	 * through-I/O-APIC virtual wire mode might be active.
	 */
	if (smp_found_config || !cpu_has_apic)
		return;

	/*
	 * Do not trust the local APIC being empty at bootup.
	 */
	clear_local_APIC();

	/*
	 * Enable APIC.
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	value |= APIC_SPIV_APIC_ENABLED;

#ifdef CONFIG_X86_32
	/* This bit is reserved on P4/Xeon and should be cleared */
	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
	    (boot_cpu_data.x86 == 15))
		value &= ~APIC_SPIV_FOCUS_DISABLED;
	else
#endif
		value |= APIC_SPIV_FOCUS_DISABLED;
	value |= SPURIOUS_APIC_VECTOR;
	apic_write(APIC_SPIV, value);

	/*
	 * Set up the virtual wire mode.
	 */
	apic_write(APIC_LVT0, APIC_DM_EXTINT);
	value = APIC_DM_NMI;
	if (!lapic_is_integrated())		/* 82489DX */
		value |= APIC_LVT_LEVEL_TRIGGER;
	apic_write(APIC_LVT1, value);
}
  745. static void __cpuinit lapic_setup_esr(void)
  746. {
  747. unsigned long oldvalue, value, maxlvt;
  748. if (lapic_is_integrated() && !esr_disable) {
  749. if (esr_disable) {
  750. /*
  751. * Something untraceable is creating bad interrupts on
  752. * secondary quads ... for the moment, just leave the
  753. * ESR disabled - we can't do anything useful with the
  754. * errors anyway - mbligh
  755. */
  756. printk(KERN_INFO "Leaving ESR disabled.\n");
  757. return;
  758. }
  759. /* !82489DX */
  760. maxlvt = lapic_get_maxlvt();
  761. if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
  762. apic_write(APIC_ESR, 0);
  763. oldvalue = apic_read(APIC_ESR);
  764. /* enables sending errors */
  765. value = ERROR_APIC_VECTOR;
  766. apic_write(APIC_LVTERR, value);
  767. /*
  768. * spec says clear errors after enabling vector.
  769. */
  770. if (maxlvt > 3)
  771. apic_write(APIC_ESR, 0);
  772. value = apic_read(APIC_ESR);
  773. if (value != oldvalue)
  774. apic_printk(APIC_VERBOSE, "ESR value before enabling "
  775. "vector: 0x%08lx after: 0x%08lx\n",
  776. oldvalue, value);
  777. } else {
  778. printk(KERN_INFO "No ESR for 82489DX.\n");
  779. }
  780. }
  781. /**
  782. * setup_local_APIC - setup the local APIC
  783. */
void __cpuinit setup_local_APIC(void)
{
	unsigned int value;
	int i, j;

	/* No CPU migration while we poke this CPU's own APIC registers. */
	preempt_disable();
	value = apic_read(APIC_LVR);

	/* Hardware requires the spurious vector's low 4 bits to be all-ones. */
	BUILD_BUG_ON((SPURIOUS_APIC_VECTOR & 0x0f) != 0x0f);

	/*
	 * Double-check whether this APIC is really registered.
	 * This is meaningless in clustered apic mode, so we skip it.
	 */
	if (!apic_id_registered())
		BUG();

	/*
	 * Intel recommends to set DFR, LDR and TPR before enabling
	 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
	 * document number 292116). So here it goes...
	 */
	init_apic_ldr();

	/*
	 * Set Task Priority to 'accept all'. We never change this
	 * later on.
	 */
	value = apic_read(APIC_TASKPRI);
	value &= ~APIC_TPRI_MASK;
	apic_write(APIC_TASKPRI, value);

	/*
	 * After a crash, we no longer service the interrupts and a pending
	 * interrupt from previous kernel might still have ISR bit set.
	 *
	 * Most probably by now CPU has serviced that pending interrupt and
	 * it might not have done the ack_APIC_irq() because it thought,
	 * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
	 * does not clear the ISR bit and cpu thinks it has already serivced
	 * the interrupt. Hence a vector might get locked. It was noticed
	 * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
	 */
	/* Walk all 8 ISR words, one EOI per set bit found. */
	for (i = APIC_ISR_NR - 1; i >= 0; i--) {
		value = apic_read(APIC_ISR + i*0x10);
		for (j = 31; j >= 0; j--) {
			if (value & (1<<j))
				ack_APIC_irq();
		}
	}

	/*
	 * Now that we are all set up, enable the APIC
	 */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	/*
	 * Enable APIC
	 */
	value |= APIC_SPIV_APIC_ENABLED;
	/* We always use processor focus */
	/*
	 * Set spurious IRQ vector
	 */
	value |= SPURIOUS_APIC_VECTOR;
	apic_write(APIC_SPIV, value);

	/*
	 * Set up LVT0, LVT1:
	 *
	 * set up through-local-APIC on the BP's LINT0. This is not
	 * strictly necessary in pure symmetric-IO mode, but sometimes
	 * we delegate interrupts to the 8259A.
	 */
	/*
	 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
	 */
	/* Only the mask bit of the previous LVT0 value is of interest here. */
	value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
	if (!smp_processor_id() && !value) {
		value = APIC_DM_EXTINT;
		apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n",
			    smp_processor_id());
	} else {
		value = APIC_DM_EXTINT | APIC_LVT_MASKED;
		apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n",
			    smp_processor_id());
	}
	apic_write(APIC_LVT0, value);

	/*
	 * only the BP should see the LINT1 NMI signal, obviously.
	 */
	if (!smp_processor_id())
		value = APIC_DM_NMI;
	else
		value = APIC_DM_NMI | APIC_LVT_MASKED;
	apic_write(APIC_LVT1, value);

	preempt_enable();
}
  874. void __cpuinit end_local_APIC_setup(void)
  875. {
  876. lapic_setup_esr();
  877. #ifdef CONFIG_X86_32
  878. {
  879. unsigned int value;
  880. /* Disable the local apic timer */
  881. value = apic_read(APIC_LVTT);
  882. value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
  883. apic_write(APIC_LVTT, value);
  884. }
  885. #endif
  886. setup_apic_nmi_watchdog(NULL);
  887. apic_pm_activate();
  888. }
  889. void check_x2apic(void)
  890. {
  891. int msr, msr2;
  892. rdmsr(MSR_IA32_APICBASE, msr, msr2);
  893. if (msr & X2APIC_ENABLE) {
  894. printk("x2apic enabled by BIOS, switching to x2apic ops\n");
  895. x2apic_preenabled = x2apic = 1;
  896. apic_ops = &x2apic_ops;
  897. }
  898. }
  899. void enable_x2apic(void)
  900. {
  901. int msr, msr2;
  902. rdmsr(MSR_IA32_APICBASE, msr, msr2);
  903. if (!(msr & X2APIC_ENABLE)) {
  904. printk("Enabling x2apic\n");
  905. wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
  906. }
  907. }
/*
 * Enable interrupt remapping and (when possible) x2apic mode.
 *
 * With CONFIG_INTR_REMAP: masks the 8259 and IO-APIC entries, enables
 * IR via enable_intr_remapping(1), then switches to x2apic ops; on
 * failure it rolls the IO-APIC state back.  Panics if the BIOS already
 * enabled x2apic but IR cannot be brought up, since x2apic cannot work
 * without remapping.  Without CONFIG_INTR_REMAP: only sanity-checks
 * that the BIOS did not hand over in x2apic mode.
 */
void enable_IR_x2apic(void)
{
#ifdef CONFIG_INTR_REMAP
	int ret;
	unsigned long flags;

	if (!cpu_has_x2apic)
		return;

	/* "nox2apic" on the command line and BIOS left x2apic off: skip. */
	if (!x2apic_preenabled && disable_x2apic) {
		printk(KERN_INFO
		       "Skipped enabling x2apic and Interrupt-remapping "
		       "because of nox2apic\n");
		return;
	}

	/* Can't honor "nox2apic" once the BIOS has switched modes. */
	if (x2apic_preenabled && disable_x2apic)
		panic("Bios already enabled x2apic, can't enforce nox2apic");

	if (!x2apic_preenabled && skip_ioapic_setup) {
		printk(KERN_INFO
		       "Skipped enabling x2apic and Interrupt-remapping "
		       "because of skipping io-apic setup\n");
		return;
	}

	/* IR needs the DMAR ACPI tables. */
	ret = dmar_table_init();
	if (ret) {
		printk(KERN_INFO
		       "dmar_table_init() failed with %d:\n", ret);

		if (x2apic_preenabled)
			panic("x2apic enabled by bios. But IR enabling failed");
		else
			printk(KERN_INFO
			       "Not enabling x2apic,Intr-remapping\n");
		return;
	}

	/* Quiesce all legacy and IO-APIC interrupt sources while switching. */
	local_irq_save(flags);
	mask_8259A();
	save_mask_IO_APIC_setup();

	ret = enable_intr_remapping(1);

	if (ret && x2apic_preenabled) {
		local_irq_restore(flags);
		panic("x2apic enabled by bios. But IR enabling failed");
	}

	if (ret)
		goto end;

	if (!x2apic) {
		x2apic = 1;
		apic_ops = &x2apic_ops;
		enable_x2apic();
	}
end:
	if (ret)
		/*
		 * IR enabling failed
		 */
		restore_IO_APIC_setup();
	else
		reinit_intr_remapped_IO_APIC(x2apic_preenabled);

	unmask_8259A();
	local_irq_restore(flags);

	if (!ret) {
		if (!x2apic_preenabled)
			printk(KERN_INFO
			       "Enabled x2apic and interrupt-remapping\n");
		else
			printk(KERN_INFO
			       "Enabled Interrupt-remapping\n");
	} else
		printk(KERN_ERR
		       "Failed to enable Interrupt-remapping and x2apic\n");
#else
	if (!cpu_has_x2apic)
		return;

	if (x2apic_preenabled)
		panic("x2apic enabled prior OS handover,"
		      " enable CONFIG_INTR_REMAP");

	printk(KERN_INFO "Enable CONFIG_INTR_REMAP for enabling intr-remapping "
	       " and x2apic\n");
#endif

	return;
}
  986. /*
  987. * Detect and enable local APICs on non-SMP boards.
  988. * Original code written by Keir Fraser.
  989. * On AMD64 we trust the BIOS - if it says no APIC it is likely
  990. * not correctly set up (usually the APIC timer won't work etc.)
  991. */
  992. static int __init detect_init_APIC(void)
  993. {
  994. if (!cpu_has_apic) {
  995. printk(KERN_INFO "No local APIC present\n");
  996. return -1;
  997. }
  998. mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
  999. boot_cpu_physical_apicid = 0;
  1000. return 0;
  1001. }
  1002. void __init early_init_lapic_mapping(void)
  1003. {
  1004. unsigned long phys_addr;
  1005. /*
  1006. * If no local APIC can be found then go out
  1007. * : it means there is no mpatable and MADT
  1008. */
  1009. if (!smp_found_config)
  1010. return;
  1011. phys_addr = mp_lapic_addr;
  1012. set_fixmap_nocache(FIX_APIC_BASE, phys_addr);
  1013. apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
  1014. APIC_BASE, phys_addr);
  1015. /*
  1016. * Fetch the APIC ID of the BSP in case we have a
  1017. * default configuration (or the MP table is broken).
  1018. */
  1019. boot_cpu_physical_apicid = read_apic_id();
  1020. }
  1021. /**
  1022. * init_apic_mappings - initialize APIC mappings
  1023. */
void __init init_apic_mappings(void)
{
	/* x2apic uses MSR accesses, so no MMIO mapping is needed. */
	if (x2apic) {
		boot_cpu_physical_apicid = read_apic_id();
		return;
	}

	/*
	 * If no local APIC can be found then set up a fake all
	 * zeroes page to simulate the local APIC and another
	 * one for the IO-APIC.
	 */
	if (!smp_found_config && detect_init_APIC()) {
		/* No APIC detected: back the fixmap with a zeroed page. */
		apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
		apic_phys = __pa(apic_phys);
	} else
		apic_phys = mp_lapic_addr;

	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
	apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
		    APIC_BASE, apic_phys);

	/*
	 * Fetch the APIC ID of the BSP in case we have a
	 * default configuration (or the MP table is broken).
	 */
	boot_cpu_physical_apicid = read_apic_id();
}
  1049. /*
  1050. * This initializes the IO-APIC and APIC hardware if this is
  1051. * a UP kernel.
  1052. */
/* APIC version per APIC ID, filled in by generic_processor_info(). */
int apic_version[MAX_APICS];
/*
 * Bring up the local APIC (and IO-APICs, if present) on a UP kernel.
 * Returns 0 on success, -1 when the APIC is disabled or absent.
 * The call order below is significant: clear_IO_APIC must run (via
 * enable_IO_APIC) before vectors are enabled on the BP.
 */
int __init APIC_init_uniprocessor(void)
{
	if (disable_apic) {
		printk(KERN_INFO "Apic disabled\n");
		return -1;
	}
	if (!cpu_has_apic) {
		disable_apic = 1;
		printk(KERN_INFO "Apic disabled by BIOS\n");
		return -1;
	}

	enable_IR_x2apic();
	setup_apic_routing();

	verify_local_APIC();

	connect_bsp_APIC();

	physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
	apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));

	setup_local_APIC();

	/*
	 * Now enable IO-APICs, actually call clear_IO_APIC
	 * We need clear_IO_APIC before enabling vector on BP
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		enable_IO_APIC();

	/* Without IO-APICs the NMI watchdog must run on the local APIC. */
	if (!smp_found_config || skip_ioapic_setup || !nr_ioapics)
		localise_nmi_watchdog();
	end_local_APIC_setup();

	if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
		setup_IO_APIC();
	else
		nr_ioapics = 0;
	setup_boot_APIC_clock();
	check_nmi_watchdog();
	return 0;
}
  1089. /*
  1090. * Local APIC interrupts
  1091. */
  1092. /*
  1093. * This interrupt should _never_ happen with our APIC/SMP architecture
  1094. */
asmlinkage void smp_spurious_interrupt(void)
{
	unsigned int v;

	exit_idle();
	irq_enter();
	/*
	 * Check if this really is a spurious interrupt and ACK it
	 * if it is a vectored one. Just in case...
	 * Spurious interrupts should not be ACKed.
	 */
	/* Index the right 32-bit ISR word: (vector / 32) * 0x10 offset. */
	v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
	if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
		ack_APIC_irq();

	add_pda(irq_spurious_count, 1);
	irq_exit();
}
  1111. /*
  1112. * This interrupt should never happen with our APIC/SMP architecture
  1113. */
asmlinkage void smp_error_interrupt(void)
{
	unsigned int v, v1;

	exit_idle();
	irq_enter();
	/* First tickle the hardware, only then report what went on. -- REW */
	/* Reading/writing the ESR latches the pending error bits. */
	v = apic_read(APIC_ESR);
	apic_write(APIC_ESR, 0);
	v1 = apic_read(APIC_ESR);
	ack_APIC_irq();
	atomic_inc(&irq_err_count);

	/* Here is what the APIC error bits mean:
	   0: Send CS error
	   1: Receive CS error
	   2: Send accept error
	   3: Receive accept error
	   4: Reserved
	   5: Send illegal vector
	   6: Received illegal vector
	   7: Illegal register address
	*/
	printk(KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
	       smp_processor_id(), v , v1);
	irq_exit();
}
  1139. /**
  1140. * connect_bsp_APIC - attach the APIC to the interrupt system
  1141. */
void __init connect_bsp_APIC(void)
{
#ifdef CONFIG_X86_32
	if (pic_mode) {
		/*
		 * Do not trust the local APIC being empty at bootup.
		 */
		clear_local_APIC();
		/*
		 * PIC mode, enable APIC mode in the IMCR, i.e. connect BSP's
		 * local APIC to INT and NMI lines.
		 */
		apic_printk(APIC_VERBOSE, "leaving PIC mode, "
			    "enabling APIC mode.\n");
		/* IMCR: select register 0x70 via port 0x22, write via 0x23. */
		outb(0x70, 0x22);
		outb(0x01, 0x23);
	}
#endif
	enable_apic_mode();
}
  1162. /**
  1163. * disconnect_bsp_APIC - detach the APIC from the interrupt system
  1164. * @virt_wire_setup: indicates, whether virtual wire mode is selected
  1165. *
  1166. * Virtual wire mode is necessary to deliver legacy interrupts even when the
  1167. * APIC is disabled.
  1168. */
void disconnect_bsp_APIC(int virt_wire_setup)
{
	unsigned int value;

#ifdef CONFIG_X86_32
	if (pic_mode) {
		/*
		 * Put the board back into PIC mode (has an effect only on
		 * certain older boards). Note that APIC interrupts, including
		 * IPIs, won't work beyond this point! The only exception are
		 * INIT IPIs.
		 */
		apic_printk(APIC_VERBOSE, "disabling APIC mode, "
			    "entering PIC mode.\n");
		/* IMCR write: route interrupts back to the PIC. */
		outb(0x70, 0x22);
		outb(0x00, 0x23);
		return;
	}
#endif

	/* Go back to Virtual Wire compatibility mode */

	/* For the spurious interrupt use vector F, and enable it */
	value = apic_read(APIC_SPIV);
	value &= ~APIC_VECTOR_MASK;
	value |= APIC_SPIV_APIC_ENABLED;
	value |= 0xf;
	apic_write(APIC_SPIV, value);

	if (!virt_wire_setup) {
		/*
		 * For LVT0 make it edge triggered, active high,
		 * external and enabled
		 */
		value = apic_read(APIC_LVT0);
		value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
			   APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
			   APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
		value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
		value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
		apic_write(APIC_LVT0, value);
	} else {
		/* Disable LVT0 */
		apic_write(APIC_LVT0, APIC_LVT_MASKED);
	}

	/*
	 * For LVT1 make it edge triggered, active high,
	 * nmi and enabled
	 */
	value = apic_read(APIC_LVT1);
	value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
		   APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
		   APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
	value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
	value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
	apic_write(APIC_LVT1, value);
}
  1222. void __cpuinit generic_processor_info(int apicid, int version)
  1223. {
  1224. int cpu;
  1225. cpumask_t tmp_map;
  1226. /*
  1227. * Validate version
  1228. */
  1229. if (version == 0x0) {
  1230. printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
  1231. "fixing up to 0x10. (tell your hw vendor)\n",
  1232. version);
  1233. version = 0x10;
  1234. }
  1235. apic_version[apicid] = version;
  1236. if (num_processors >= NR_CPUS) {
  1237. printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
  1238. " Processor ignored.\n", NR_CPUS);
  1239. return;
  1240. }
  1241. num_processors++;
  1242. cpus_complement(tmp_map, cpu_present_map);
  1243. cpu = first_cpu(tmp_map);
  1244. physid_set(apicid, phys_cpu_present_map);
  1245. if (apicid == boot_cpu_physical_apicid) {
  1246. /*
  1247. * x86_bios_cpu_apicid is required to have processors listed
  1248. * in same order as logical cpu numbers. Hence the first
  1249. * entry is BSP, and so on.
  1250. */
  1251. cpu = 0;
  1252. }
  1253. if (apicid > max_physical_apicid)
  1254. max_physical_apicid = apicid;
  1255. #ifdef CONFIG_X86_32
  1256. /*
  1257. * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
  1258. * but we need to work other dependencies like SMP_SUSPEND etc
  1259. * before this can be done without some confusion.
  1260. * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
  1261. * - Ashok Raj <ashok.raj@intel.com>
  1262. */
  1263. if (max_physical_apicid >= 8) {
  1264. switch (boot_cpu_data.x86_vendor) {
  1265. case X86_VENDOR_INTEL:
  1266. if (!APIC_XAPIC(version)) {
  1267. def_to_bigsmp = 0;
  1268. break;
  1269. }
  1270. /* If P4 and above fall through */
  1271. case X86_VENDOR_AMD:
  1272. def_to_bigsmp = 1;
  1273. }
  1274. }
  1275. #endif
  1276. #if defined(CONFIG_X86_SMP) || defined(CONFIG_X86_64)
  1277. /* are we being called early in kernel startup? */
  1278. if (early_per_cpu_ptr(x86_cpu_to_apicid)) {
  1279. u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
  1280. u16 *bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
  1281. cpu_to_apicid[cpu] = apicid;
  1282. bios_cpu_apicid[cpu] = apicid;
  1283. } else {
  1284. per_cpu(x86_cpu_to_apicid, cpu) = apicid;
  1285. per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
  1286. }
  1287. #endif
  1288. cpu_set(cpu, cpu_possible_map);
  1289. cpu_set(cpu, cpu_present_map);
  1290. }
/* Return this CPU's physical APIC ID, read from the hardware. */
int hard_smp_processor_id(void)
{
	return read_apic_id();
}
  1295. /*
  1296. * Power management
  1297. */
  1298. #ifdef CONFIG_PM
/* Saved local APIC register state across suspend/resume. */
static struct {
	/*
	 * 'active' is true if the local APIC was enabled by us and
	 * not the BIOS; this signifies that we are also responsible
	 * for disabling it before entering apm/acpi suspend
	 */
	int active;
	/* r/w apic fields */
	unsigned int apic_id;
	unsigned int apic_taskpri;
	unsigned int apic_ldr;
	unsigned int apic_dfr;
	unsigned int apic_spiv;
	unsigned int apic_lvtt;
	unsigned int apic_lvtpc;
	unsigned int apic_lvt0;
	unsigned int apic_lvt1;
	unsigned int apic_lvterr;
	unsigned int apic_tmict;
	unsigned int apic_tdcr;
	unsigned int apic_thmr;	/* thermal LVT, only saved when maxlvt >= 5 */
} apic_pm_state;
/*
 * Save all r/w local APIC registers into apic_pm_state and disable the
 * APIC, ahead of a suspend transition.  No-op (returns 0) unless we
 * enabled the APIC ourselves.
 */
static int lapic_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;
	int maxlvt;

	if (!apic_pm_state.active)
		return 0;

	maxlvt = lapic_get_maxlvt();

	apic_pm_state.apic_id = apic_read(APIC_ID);
	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
	apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
	apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
	if (maxlvt >= 4)
		/* LVTPC only exists with at least 5 LVT entries */
		apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
	apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
	apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
	apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
	apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
	if (maxlvt >= 5)
		apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
#endif

	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
	return 0;
}
/*
 * Restore the local APIC state saved by lapic_suspend().  Re-enables
 * the APIC at the MSR level first (x2apic or xapic base), then replays
 * the saved registers; LVTERR is masked during the restore and the ESR
 * is cleared twice per the enable-then-clear convention.
 */
static int lapic_resume(struct sys_device *dev)
{
	unsigned int l, h;
	unsigned long flags;
	int maxlvt;

	if (!apic_pm_state.active)
		return 0;

	maxlvt = lapic_get_maxlvt();

	local_irq_save(flags);

#ifdef CONFIG_X86_64
	if (x2apic)
		enable_x2apic();
	else
#endif
	{
		/*
		 * Make sure the APICBASE points to the right address
		 *
		 * FIXME! This will be wrong if we ever support suspend on
		 * SMP! We'll need to do this as part of the CPU restore!
		 */
		rdmsr(MSR_IA32_APICBASE, l, h);
		l &= ~MSR_IA32_APICBASE_BASE;
		l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
		wrmsr(MSR_IA32_APICBASE, l, h);
	}

	/* Keep error interrupts masked until everything is restored. */
	apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
	apic_write(APIC_ID, apic_pm_state.apic_id);
	apic_write(APIC_DFR, apic_pm_state.apic_dfr);
	apic_write(APIC_LDR, apic_pm_state.apic_ldr);
	apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
	apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
	apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
	apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
	if (maxlvt >= 5)
		apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
#endif
	if (maxlvt >= 4)
		apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
	apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
	apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
	apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
	apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);

	local_irq_restore(flags);

	return 0;
}
  1401. /*
  1402. * This device has no shutdown method - fully functioning local APICs
  1403. * are needed on every CPU up until machine_halt/restart/poweroff.
  1404. */
/* Sysdev class wiring the lapic suspend/resume hooks into the PM core. */
static struct sysdev_class lapic_sysclass = {
	.name		= "lapic",
	.resume		= lapic_resume,
	.suspend	= lapic_suspend,
};

/* The single device instance registered for the local APIC. */
static struct sys_device device_lapic = {
	.id	= 0,
	.cls	= &lapic_sysclass,
};
/* Mark that we (not the BIOS) enabled the APIC, so suspend handles it. */
static void __cpuinit apic_pm_activate(void)
{
	apic_pm_state.active = 1;
}
  1418. static int __init init_lapic_sysfs(void)
  1419. {
  1420. int error;
  1421. if (!cpu_has_apic)
  1422. return 0;
  1423. /* XXX: remove suspend/resume procs if !apic_pm_state.active? */
  1424. error = sysdev_class_register(&lapic_sysclass);
  1425. if (!error)
  1426. error = sysdev_register(&device_lapic);
  1427. return error;
  1428. }
  1429. device_initcall(init_lapic_sysfs);
  1430. #else /* CONFIG_PM */
/* No-op stub when power management is compiled out. */
static void apic_pm_activate(void) { }
  1432. #endif /* CONFIG_PM */
  1433. /*
  1434. * apic_is_clustered_box() -- Check if we can expect good TSC
  1435. *
  1436. * Thus far, the major user of this is IBM's Summit2 series:
  1437. *
  1438. * Clustered boxes may have unsynced TSC problems if they are
  1439. * multi-chassis. Use available data to take a good guess.
  1440. * If in doubt, go HPET.
  1441. */
__cpuinit int apic_is_clustered_box(void)
{
	int i, clusters, zeros;
	unsigned id;
	u16 *bios_cpu_apicid;
	DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);

	/*
	 * there is not this kind of box with AMD CPU yet.
	 * Some AMD box with quadcore cpu and 8 sockets apicid
	 * will be [4, 0x23] or [8, 0x27] could be thought to
	 * vsmp box still need checking...
	 */
	if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box())
		return 0;

	bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
	bitmap_zero(clustermap, NUM_APIC_CLUSTERS);

	/* Mark every APIC cluster that has at least one known CPU in it. */
	for (i = 0; i < NR_CPUS; i++) {
		/* are we being called early in kernel startup? */
		if (bios_cpu_apicid) {
			id = bios_cpu_apicid[i];
		}
		else if (i < nr_cpu_ids) {
			if (cpu_present(i))
				id = per_cpu(x86_bios_cpu_apicid, i);
			else
				continue;
		}
		else
			break;

		if (id != BAD_APICID)
			__set_bit(APIC_CLUSTERID(id), clustermap);
	}

	/* Problem:  Partially populated chassis may not have CPUs in some of
	 * the APIC clusters they have been allocated.  Only present CPUs have
	 * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
	 * Since clusters are allocated sequentially, count zeros only if
	 * they are bounded by ones.
	 */
	clusters = 0;
	zeros = 0;
	for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
		if (test_bit(i, clustermap)) {
			clusters += 1 + zeros;
			zeros = 0;
		} else
			++zeros;
	}

	/* ScaleMP vSMPowered boxes have one cluster per board and TSCs are
	 * not guaranteed to be synced between boards
	 */
	if (is_vsmp_box() && clusters > 1)
		return 1;

	/*
	 * If clusters > 2, then should be multi-chassis.
	 * May have to revisit this when multi-core + hyperthreaded CPUs come
	 * out, but AFAIK this will work even for them.
	 */
	return (clusters > 2);
}
  1501. static __init int setup_nox2apic(char *str)
  1502. {
  1503. disable_x2apic = 1;
  1504. clear_cpu_cap(&boot_cpu_data, X86_FEATURE_X2APIC);
  1505. return 0;
  1506. }
  1507. early_param("nox2apic", setup_nox2apic);
  1508. /*
  1509. * APIC command line parameters
  1510. */
  1511. static int __init setup_disableapic(char *arg)
  1512. {
  1513. disable_apic = 1;
  1514. setup_clear_cpu_cap(X86_FEATURE_APIC);
  1515. return 0;
  1516. }
  1517. early_param("disableapic", setup_disableapic);
  1518. /* same as disableapic, for compatibility */
/* same as disableapic, for compatibility */
static int __init setup_nolapic(char *arg)
{
	return setup_disableapic(arg);
}
early_param("nolapic", setup_nolapic);
/* Command-line "lapic_timer_c2_ok": trust the APIC timer across C2. */
static int __init parse_lapic_timer_c2_ok(char *arg)
{
	local_apic_timer_c2_ok = 1;
	return 0;
}
early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
/* Command-line "noapictimer": do not use the local APIC timer. */
static int __init parse_disable_apic_timer(char *arg)
{
	disable_apic_timer = 1;
	return 0;
}
early_param("noapictimer", parse_disable_apic_timer);
/* Command-line "nolapic_timer": alias for "noapictimer". */
static int __init parse_nolapic_timer(char *arg)
{
	disable_apic_timer = 1;
	return 0;
}
early_param("nolapic_timer", parse_nolapic_timer);
/* Command-line "apicpmtimer": calibrate the APIC with the PM timer
 * (implies disabling the TSC clocksource via notsc_setup). */
static __init int setup_apicpmtimer(char *s)
{
	apic_calibrate_pmtmr = 1;
	notsc_setup(NULL);
	return 0;
}
__setup("apicpmtimer", setup_apicpmtimer);
  1549. static int __init apic_set_verbosity(char *arg)
  1550. {
  1551. if (!arg) {
  1552. #ifdef CONFIG_X86_64
  1553. skip_ioapic_setup = 0;
  1554. ioapic_force = 1;
  1555. return 0;
  1556. #endif
  1557. return -EINVAL;
  1558. }
  1559. if (strcmp("debug", arg) == 0)
  1560. apic_verbosity = APIC_DEBUG;
  1561. else if (strcmp("verbose", arg) == 0)
  1562. apic_verbosity = APIC_VERBOSE;
  1563. else {
  1564. printk(KERN_WARNING "APIC Verbosity level %s not recognised"
  1565. " use apic=verbose or apic=debug\n", arg);
  1566. return -EINVAL;
  1567. }
  1568. return 0;
  1569. }
  1570. early_param("apic", apic_set_verbosity);
/*
 * Reserve the local APIC MMIO page in the iomem resource tree.
 * Returns -1 when no APIC was mapped (apic_phys unset).
 */
static int __init lapic_insert_resource(void)
{
	if (!apic_phys)
		return -1;

	/* Put local APIC into the resource map. */
	lapic_resource.start = apic_phys;
	lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
	insert_resource(&iomem_resource, &lapic_resource);

	return 0;
}

/*
 * need call insert after e820_reserve_resources()
 * that is using request_resource
 */
late_initcall(lapic_insert_resource);