io_apic.c

/*
 *	Intel IO-APIC support for multi-Pentium hosts.
 *
 *	Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
 *
 *	Many thanks to Stig Venaas for trying out countless experimental
 *	patches and reporting/debugging problems patiently!
 *
 *	(c) 1999, Multiple IO-APIC support, developed by
 *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 *	Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 *	further tested and cleaned up by Zach Brown <zab@redhat.com>
 *	and Ingo Molnar <mingo@redhat.com>
 *
 *	Fixes
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					and Rolf G. Tews
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>	/* time_after() */
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif
#include <linux/bootmem.h>
#include <linux/dmar.h>
#include <linux/hpet.h>

#include <asm/idle.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/acpi.h>
#include <asm/dma.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/nmi.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>
#include <asm/hpet.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_irq.h>

#include <mach_ipi.h>
#include <mach_apic.h>
#include <mach_apicdef.h>

#define __apicdebuginit(type)	static type __init

/*
 * Is the SiS APIC rmw bug present?
 * -1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;

static DEFINE_SPINLOCK(ioapic_lock);
static DEFINE_SPINLOCK(vector_lock);

/*
 * # of IRQ routing registers
 */
int nr_ioapic_registers[MAX_IO_APICS];

/* I/O APIC entries */
struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
int nr_ioapics;

/* MP IRQ source entries */
struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

int skip_ioapic_setup;

static int __init parse_noapic(char *str)
{
	/* disable IO-APIC */
	disable_ioapic_setup();
	return 0;
}
early_param("noapic", parse_noapic);
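/* Booting with "noapic" thus falls back to legacy 8259A PIC routing. */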
struct irq_pin_list;

/*
 * This is performance-critical, we want to do it O(1)
 *
 * the indexing order of this array favors 1:1 mappings
 * between pins and IRQs.
 */
struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};

static struct irq_pin_list *get_one_free_irq_2_pin(int cpu)
{
	struct irq_pin_list *pin;
	int node;

	node = cpu_to_node(cpu);

	pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node);
	printk(KERN_DEBUG " alloc irq_2_pin on cpu %d node %d\n", cpu, node);

	return pin;
}

struct irq_cfg {
	struct irq_pin_list *irq_2_pin;
	cpumask_t domain;
	cpumask_t old_domain;
	unsigned move_cleanup_count;
	u8 vector;
	u8 move_in_progress : 1;
#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
	u8 move_desc_pending : 1;
#endif
};

/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg irq_cfgx[] = {
#else
static struct irq_cfg irq_cfgx[NR_IRQS] = {
#endif
	[0]  = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
	[1]  = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
	[2]  = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
	[3]  = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR,  },
	[4]  = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR,  },
	[5]  = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR,  },
	[6]  = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR,  },
	[7]  = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR,  },
	[8]  = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR,  },
	[9]  = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR,  },
	[10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
	[11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
	[12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
	[13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
	[14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
	[15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
};
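/*
 * Note: the ISA IRQs (0-15) above are statically bound to their
 * boot-time vectors, so the legacy devices work before the vector
 * allocator in __assign_irq_vector() has ever run.
 */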
void __init arch_early_irq_init(void)
{
	struct irq_cfg *cfg;
	struct irq_desc *desc;
	int count;
	int i;

	cfg = irq_cfgx;
	count = ARRAY_SIZE(irq_cfgx);

	for (i = 0; i < count; i++) {
		desc = irq_to_desc(i);
		desc->chip_data = &cfg[i];
	}
}

#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg *irq_cfg(unsigned int irq)
{
	struct irq_cfg *cfg = NULL;
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	if (desc)
		cfg = desc->chip_data;

	return cfg;
}

static struct irq_cfg *get_one_free_irq_cfg(int cpu)
{
	struct irq_cfg *cfg;
	int node;

	node = cpu_to_node(cpu);

	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
	printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node);

	return cfg;
}

void arch_init_chip_data(struct irq_desc *desc, int cpu)
{
	struct irq_cfg *cfg;

	cfg = desc->chip_data;
	if (!cfg) {
		desc->chip_data = get_one_free_irq_cfg(cpu);
		if (!desc->chip_data) {
			printk(KERN_ERR "can not alloc irq_cfg\n");
			BUG_ON(1);
		}
	}
}

#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
static void
init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int cpu)
{
	struct irq_pin_list *old_entry, *head, *tail, *entry;

	cfg->irq_2_pin = NULL;
	old_entry = old_cfg->irq_2_pin;
	if (!old_entry)
		return;

	entry = get_one_free_irq_2_pin(cpu);
	if (!entry)
		return;

	entry->apic = old_entry->apic;
	entry->pin = old_entry->pin;
	head = entry;
	tail = entry;
	old_entry = old_entry->next;
	while (old_entry) {
		entry = get_one_free_irq_2_pin(cpu);
		if (!entry) {
			entry = head;
			while (entry) {
				head = entry->next;
				kfree(entry);
				entry = head;
			}
			/* still use the old one */
			return;
		}
		entry->apic = old_entry->apic;
		entry->pin = old_entry->pin;
		tail->next = entry;
		tail = entry;
		old_entry = old_entry->next;
	}

	tail->next = NULL;
	cfg->irq_2_pin = head;
}

static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg)
{
	struct irq_pin_list *entry, *next;

	if (old_cfg->irq_2_pin == cfg->irq_2_pin)
		return;

	entry = old_cfg->irq_2_pin;

	while (entry) {
		next = entry->next;
		kfree(entry);
		entry = next;
	}
	old_cfg->irq_2_pin = NULL;
}

void arch_init_copy_chip_data(struct irq_desc *old_desc,
			      struct irq_desc *desc, int cpu)
{
	struct irq_cfg *cfg;
	struct irq_cfg *old_cfg;

	cfg = get_one_free_irq_cfg(cpu);
	if (!cfg)
		return;

	desc->chip_data = cfg;

	old_cfg = old_desc->chip_data;

	memcpy(cfg, old_cfg, sizeof(struct irq_cfg));

	init_copy_irq_2_pin(old_cfg, cfg, cpu);
}

static void free_irq_cfg(struct irq_cfg *old_cfg)
{
	kfree(old_cfg);
}

void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
{
	struct irq_cfg *old_cfg, *cfg;

	old_cfg = old_desc->chip_data;
	cfg = desc->chip_data;

	if (old_cfg == cfg)
		return;

	if (old_cfg) {
		free_irq_2_pin(old_cfg, cfg);
		free_irq_cfg(old_cfg);
		old_desc->chip_data = NULL;
	}
}

static void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask)
{
	struct irq_cfg *cfg = desc->chip_data;

	if (!cfg->move_in_progress) {
		/* it means that domain is not changed */
		if (!cpus_intersects(desc->affinity, mask))
			cfg->move_desc_pending = 1;
	}
}
#endif

#else
static struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irq < nr_irqs ? irq_cfgx + irq : NULL;
}

#endif

#ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC
static inline void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask)
{
}
#endif
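/*
 * Note: the I/O APIC is programmed indirectly: a write to the index
 * register at offset 0x00 selects an internal register, which is then
 * read or written through the data window at offset 0x10. The
 * unused[3] padding below spans the gap between the two.
 */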
struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
};

static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
}

static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}

static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 *
 * Older SiS APIC requires we rewrite the index register
 */
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	if (sis_apic_bug)
		writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	entry = cfg->irq_2_pin;
	for (;;) {
		unsigned int reg;
		int pin;

		if (!entry)
			break;
		pin = entry->pin;
		reg = io_apic_read(entry->apic, 0x10 + pin*2);
		/* Is the remote IRR bit set? */
		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
			spin_unlock_irqrestore(&ioapic_lock, flags);
			return true;
		}
		if (!entry->next)
			break;
		entry = entry->next;
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return false;
}
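/*
 * Note: a redirection table entry (RTE) is 64 bits wide and is
 * accessed as two 32-bit registers: 0x10 + 2*pin holds the low word
 * (vector, delivery/destination mode, polarity, trigger, mask) and
 * 0x11 + 2*pin holds the high word, whose top byte is the destination
 * APIC ID. The union below lets the same entry be handled either way.
 */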
union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};

static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return eu.entry;
}

/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu;

	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

#ifdef CONFIG_SMP
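/*
 * Retarget every pin feeding this IRQ: write the new destination into
 * the high RTE word, then splice the (possibly new) vector into the
 * low word. Callers are expected to hold ioapic_lock (see
 * set_ioapic_affinity_irq_desc() below).
 */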
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
{
	int apic, pin;
	struct irq_pin_list *entry;
	u8 vector = cfg->vector;

	entry = cfg->irq_2_pin;
	for (;;) {
		unsigned int reg;

		if (!entry)
			break;

		apic = entry->apic;
		pin = entry->pin;
#ifdef CONFIG_INTR_REMAP
		/*
		 * With interrupt-remapping, destination information comes
		 * from interrupt-remapping table entry.
		 */
		if (!irq_remapped(irq))
			io_apic_write(apic, 0x11 + pin*2, dest);
#else
		io_apic_write(apic, 0x11 + pin*2, dest);
#endif
		reg = io_apic_read(apic, 0x10 + pin*2);
		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
		reg |= vector;
		io_apic_modify(apic, 0x10 + pin*2, reg);
		if (!entry->next)
			break;
		entry = entry->next;
	}
}

static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask);

static void set_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
{
	struct irq_cfg *cfg;
	unsigned long flags;
	unsigned int dest;
	cpumask_t tmp;
	unsigned int irq;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	irq = desc->irq;
	cfg = desc->chip_data;
	if (assign_irq_vector(irq, cfg, mask))
		return;

	set_extra_move_desc(desc, mask);

	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);
	/*
	 * Only the high 8 bits are valid.
	 */
	dest = SET_APIC_LOGICAL_ID(dest);

	spin_lock_irqsave(&ioapic_lock, flags);
	__target_IO_APIC_irq(irq, dest, cfg);
	desc->affinity = mask;
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);

	set_ioapic_affinity_irq_desc(desc, mask);
}
#endif /* CONFIG_SMP */

/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static void add_pin_to_irq_cpu(struct irq_cfg *cfg, int cpu, int apic, int pin)
{
	struct irq_pin_list *entry;

	entry = cfg->irq_2_pin;
	if (!entry) {
		entry = get_one_free_irq_2_pin(cpu);
		if (!entry) {
			printk(KERN_ERR "can not alloc irq_2_pin to add %d - %d\n",
					apic, pin);
			return;
		}
		cfg->irq_2_pin = entry;
		entry->apic = apic;
		entry->pin = pin;
		return;
	}

	while (entry->next) {
		/* not again, please */
		if (entry->apic == apic && entry->pin == pin)
			return;

		entry = entry->next;
	}

	entry->next = get_one_free_irq_2_pin(cpu);
	entry = entry->next;
	entry->apic = apic;
	entry->pin = pin;
}

/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq_cpu(struct irq_cfg *cfg, int cpu,
					  int oldapic, int oldpin,
					  int newapic, int newpin)
{
	struct irq_pin_list *entry = cfg->irq_2_pin;
	int replaced = 0;

	while (entry) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
			replaced = 1;
			/* every one is different, right? */
			break;
		}
		entry = entry->next;
	}

	/* why? call replace before add? */
	if (!replaced)
		add_pin_to_irq_cpu(cfg, cpu, newapic, newpin);
}
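/*
 * Read-modify-write the low RTE word of every pin on this IRQ:
 * reg = (reg & mask_and) | mask_or. If @final is non-NULL it is
 * called for each pin after the write, e.g. io_apic_sync() to flush
 * the write with a dummy read.
 */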
static inline void io_apic_modify_irq(struct irq_cfg *cfg,
				      int mask_and, int mask_or,
				      void (*final)(struct irq_pin_list *entry))
{
	int pin;
	struct irq_pin_list *entry;

	for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) {
		unsigned int reg;
		pin = entry->pin;
		reg = io_apic_read(entry->apic, 0x10 + pin * 2);
		reg &= mask_and;
		reg |= mask_or;
		io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
		if (final)
			final(entry);
	}
}

static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
}

#ifdef CONFIG_X86_64
void io_apic_sync(struct irq_pin_list *entry)
{
	/*
	 * Synchronize the IO-APIC and the CPU by doing
	 * a dummy read from the IO-APIC
	 */
	struct io_apic __iomem *io_apic;
	io_apic = io_apic_base(entry->apic);
	readl(&io_apic->data);
}

static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
}
#else /* CONFIG_X86_32 */
static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, NULL);
}

static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_LEVEL_TRIGGER,
			   IO_APIC_REDIR_MASKED, NULL);
}

static void __unmask_and_level_IO_APIC_irq(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED,
			   IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
}
#endif /* CONFIG_X86_32 */

static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
{
	struct irq_cfg *cfg = desc->chip_data;
	unsigned long flags;

	BUG_ON(!cfg);

	spin_lock_irqsave(&ioapic_lock, flags);
	__mask_IO_APIC_irq(cfg);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
{
	struct irq_cfg *cfg = desc->chip_data;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_IO_APIC_irq(cfg);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void mask_IO_APIC_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	mask_IO_APIC_irq_desc(desc);
}

static void unmask_IO_APIC_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unmask_IO_APIC_irq_desc(desc);
}

static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;
	/*
	 * Disable it in the IO-APIC irq-routing table:
	 */
	ioapic_mask_entry(apic, pin);
}

static void clear_IO_APIC(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			clear_IO_APIC_pin(apic, pin);
}

#if !defined(CONFIG_SMP) && defined(CONFIG_X86_32)
void send_IPI_self(int vector)
{
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();
	cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write(APIC_ICR, cfg);
}
#endif /* !CONFIG_SMP && CONFIG_X86_32 */

#ifdef CONFIG_X86_32
/*
 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
 * specific CPU-side IRQs.
 */
#define MAX_PIRQS 8
static int pirq_entries[MAX_PIRQS];
static int pirqs_enabled;

static int __init ioapic_pirq_setup(char *str)
{
	int i, max;
	int ints[MAX_PIRQS+1];

	get_options(str, ARRAY_SIZE(ints), ints);

	for (i = 0; i < MAX_PIRQS; i++)
		pirq_entries[i] = -1;

	pirqs_enabled = 1;
	apic_printk(APIC_VERBOSE, KERN_INFO
			"PIRQ redirection, working around broken MP-BIOS.\n");
	max = MAX_PIRQS;
	if (ints[0] < MAX_PIRQS)
		max = ints[0];

	for (i = 0; i < max; i++) {
		apic_printk(APIC_VERBOSE, KERN_DEBUG
				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
		/*
		 * PIRQs are mapped upside down, usually.
		 */
		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
	}
	return 1;
}

__setup("pirq=", ioapic_pirq_setup);
#endif /* CONFIG_X86_32 */

#ifdef CONFIG_INTR_REMAP
/* I/O APIC RTE contents at the OS boot up */
static struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];

/*
 * Saves and masks all the unmasked IO-APIC RTE's
 */
int save_mask_IO_APIC_setup(void)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;
	int apic, pin;

	/*
	 * The number of IO-APIC IRQ registers (== #pins):
	 */
	for (apic = 0; apic < nr_ioapics; apic++) {
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_01.raw = io_apic_read(apic, 1);
		spin_unlock_irqrestore(&ioapic_lock, flags);
		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
	}

	for (apic = 0; apic < nr_ioapics; apic++) {
		early_ioapic_entries[apic] =
			kzalloc(sizeof(struct IO_APIC_route_entry) *
				nr_ioapic_registers[apic], GFP_KERNEL);
		if (!early_ioapic_entries[apic])
			goto nomem;
	}

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;

			entry = early_ioapic_entries[apic][pin] =
				ioapic_read_entry(apic, pin);
			if (!entry.mask) {
				entry.mask = 1;
				ioapic_write_entry(apic, pin, entry);
			}
		}

	return 0;

nomem:
	while (apic >= 0)
		kfree(early_ioapic_entries[apic--]);
	memset(early_ioapic_entries, 0, sizeof(early_ioapic_entries));

	return -ENOMEM;
}

void restore_IO_APIC_setup(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!early_ioapic_entries[apic])
			break;
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			ioapic_write_entry(apic, pin,
					   early_ioapic_entries[apic][pin]);
		kfree(early_ioapic_entries[apic]);
		early_ioapic_entries[apic] = NULL;
	}
}

void reinit_intr_remapped_IO_APIC(int intr_remapping)
{
	/*
	 * for now plain restore of previous settings.
	 * TBD: In the case of OS enabling interrupt-remapping,
	 * IO-APIC RTE's need to be setup to point to interrupt-remapping
	 * table entries. for now, do a plain restore, and wait for
	 * the setup_IO_APIC_irqs() to do proper initialization.
	 */
	restore_IO_APIC_setup();
}
#endif

/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].mp_irqtype == type &&
		    (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
		     mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
		    mp_irqs[i].mp_dstirq == pin)
			return i;

	return -1;
}

/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].mp_irqtype == type) &&
		    (mp_irqs[i].mp_srcbusirq == irq))
			return mp_irqs[i].mp_dstirq;
	}
	return -1;
}

static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].mp_irqtype == type) &&
		    (mp_irqs[i].mp_srcbusirq == irq))
			break;
	}
	if (i < mp_irq_entries) {
		int apic;
		for (apic = 0; apic < nr_ioapics; apic++) {
			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
				return apic;
		}
	}

	return -1;
}

/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
static int pin_2_irq(int idx, int apic, int pin);
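/*
 * Note: for PCI sources the MP table packs the device number and the
 * INTx pin into mp_srcbusirq: bits [6:2] hold the slot and bits [1:0]
 * select INTA..INTD, which is what the shifting and masking below
 * decode.
 */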
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
{
	int apic, i, best_guess = -1;

	apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		    bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}
	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		for (apic = 0; apic < nr_ioapics; apic++)
			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
			    mp_irqs[i].mp_dstapic == MP_APIC_ALL)
				break;

		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].mp_irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i, apic, mp_irqs[i].mp_dstirq);

			if (!(apic || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].mp_srcbusirq & 3))
				return irq;
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0)
				best_guess = irq;
		}
	}
	return best_guess;
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);

#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
/*
 * EISA Edge/Level control register, ELCR
 */
static int EISA_ELCR(unsigned int irq)
{
	if (irq < NR_IRQS_LEGACY) {
		unsigned int port = 0x4d0 + (irq >> 3);
		return (inb(port) >> (irq & 7)) & 1;
	}
	apic_printk(APIC_VERBOSE, KERN_INFO
			"Broken MPtable reports ISA irq %d\n", irq);
	return 0;
}
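/*
 * Note: the ELCR occupies ports 0x4d0 (IRQ 0-7) and 0x4d1 (IRQ 8-15),
 * one bit per line; a set bit marks the IRQ as level triggered.
 */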
#endif

/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */
#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/* EISA interrupts are always polarity zero and can be edge or level
 * trigger depending on the ELCR value. If an interrupt is listed as
 * EISA conforming in the MP table, that means its trigger type must
 * be read in from the ELCR */
#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].mp_srcbusirq))
#define default_EISA_polarity(idx)	default_ISA_polarity(idx)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */
#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)

/* MCA interrupts are always polarity zero level triggered,
 * when listed as conforming in the MP table. */
#define default_MCA_trigger(idx)	(1)
#define default_MCA_polarity(idx)	default_ISA_polarity(idx)
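/*
 * Note: mp_irqflag encodes polarity in bits [1:0] and trigger mode in
 * bits [3:2], each as: 0 = conforms to bus, 1 = active high/edge,
 * 2 = reserved, 3 = active low/level. The two helpers below decode it.
 */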
static int MPBIOS_polarity(int idx)
{
	int bus = mp_irqs[idx].mp_srcbus;
	int polarity;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
	switch (mp_irqs[idx].mp_irqflag & 3)
	{
		case 0: /* conforms, ie. bus-type dependent polarity */
			if (test_bit(bus, mp_bus_not_pci))
				polarity = default_ISA_polarity(idx);
			else
				polarity = default_PCI_polarity(idx);
			break;
		case 1: /* high active */
		{
			polarity = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
		case 3: /* low active */
		{
			polarity = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
	}
	return polarity;
}

static int MPBIOS_trigger(int idx)
{
	int bus = mp_irqs[idx].mp_srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].mp_irqflag>>2) & 3)
	{
		case 0: /* conforms, ie. bus-type dependent */
			if (test_bit(bus, mp_bus_not_pci))
				trigger = default_ISA_trigger(idx);
			else
				trigger = default_PCI_trigger(idx);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
			switch (mp_bus_id_to_type[bus]) {
				case MP_BUS_ISA: /* ISA pin */
				{
					/* set before the switch */
					break;
				}
				case MP_BUS_EISA: /* EISA pin */
				{
					trigger = default_EISA_trigger(idx);
					break;
				}
				case MP_BUS_PCI: /* PCI pin */
				{
					/* set before the switch */
					break;
				}
				case MP_BUS_MCA: /* MCA pin */
				{
					trigger = default_MCA_trigger(idx);
					break;
				}
				default:
				{
					printk(KERN_WARNING "broken BIOS!!\n");
					trigger = 1;
					break;
				}
			}
#endif
			break;
		case 1: /* edge */
		{
			trigger = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 1;
			break;
		}
		case 3: /* level */
		{
			trigger = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 0;
			break;
		}
	}
	return trigger;
}

static inline int irq_polarity(int idx)
{
	return MPBIOS_polarity(idx);
}

static inline int irq_trigger(int idx)
{
	return MPBIOS_trigger(idx);
}

int (*ioapic_renumber_irq)(int ioapic, int irq);

static int pin_2_irq(int idx, int apic, int pin)
{
	int irq, i;
	int bus = mp_irqs[idx].mp_srcbus;

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].mp_dstirq != pin)
		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].mp_srcbusirq;
	} else {
		/*
		 * PCI IRQs are mapped in order
		 */
		i = irq = 0;
		while (i < apic)
			irq += nr_ioapic_registers[i++];
		irq += pin;
		/*
		 * For MPS mode, so far only needed by ES7000 platform
		 */
		if (ioapic_renumber_irq)
			irq = ioapic_renumber_irq(apic, irq);
	}

#ifdef CONFIG_X86_32
	/*
	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
	 */
	if ((pin >= 16) && (pin <= 23)) {
		if (pirq_entries[pin-16] != -1) {
			if (!pirq_entries[pin-16]) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"disabling PIRQ%d\n", pin-16);
			} else {
				irq = pirq_entries[pin-16];
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"using PIRQ%d -> IRQ %d\n",
						pin-16, irq);
			}
		}
	}
#endif

	return irq;
}

void lock_vector_lock(void)
{
	/* Held so that the set of online cpus does not change
	 * during assign_irq_vector.
	 */
	spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	spin_unlock(&vector_lock);
}

static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
	unsigned int old_vector;
	int cpu;

	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
		return -EBUSY;

	/* Only try and allocate irqs on cpus that are present */
	cpus_and(mask, mask, cpu_online_map);

	old_vector = cfg->vector;
	if (old_vector) {
		cpumask_t tmp;
		cpus_and(tmp, cfg->domain, mask);
		if (!cpus_empty(tmp))
			return 0;
	}

	for_each_cpu_mask_nr(cpu, mask) {
		cpumask_t domain, new_mask;
		int new_cpu;
		int vector, offset;

		domain = vector_allocation_domain(cpu);
		cpus_and(new_mask, domain, cpu_online_map);

		vector = current_vector;
		offset = current_offset;
next:
		vector += 8;
		if (vector >= first_system_vector) {
			/* If we run out of vectors on large boxen, must share them. */
			offset = (offset + 1) % 8;
			vector = FIRST_DEVICE_VECTOR + offset;
		}
		if (unlikely(current_vector == vector))
			continue;
#ifdef CONFIG_X86_64
		if (vector == IA32_SYSCALL_VECTOR)
			goto next;
#else
		if (vector == SYSCALL_VECTOR)
			goto next;
#endif
		for_each_cpu_mask_nr(new_cpu, new_mask)
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (old_vector) {
			cfg->move_in_progress = 1;
			cfg->old_domain = cfg->domain;
		}
		for_each_cpu_mask_nr(new_cpu, new_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cfg->domain = domain;
		return 0;
	}
	return -ENOSPC;
}
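/*
 * Note on the search above: the priority class of a vector is
 * (vector >> 4), 16 vectors wide, so stepping by 8 places at most two
 * vectors in each class per pass; rotating the offset lets later
 * passes fall back to the slots that were skipped.
 */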
  1106. static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
  1107. {
  1108. int err;
  1109. unsigned long flags;
  1110. spin_lock_irqsave(&vector_lock, flags);
  1111. err = __assign_irq_vector(irq, cfg, mask);
  1112. spin_unlock_irqrestore(&vector_lock, flags);
  1113. return err;
  1114. }
  1115. static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
  1116. {
  1117. cpumask_t mask;
  1118. int cpu, vector;
  1119. BUG_ON(!cfg->vector);
  1120. vector = cfg->vector;
  1121. cpus_and(mask, cfg->domain, cpu_online_map);
  1122. for_each_cpu_mask_nr(cpu, mask)
  1123. per_cpu(vector_irq, cpu)[vector] = -1;
  1124. cfg->vector = 0;
  1125. cpus_clear(cfg->domain);
  1126. if (likely(!cfg->move_in_progress))
  1127. return;
  1128. cpus_and(mask, cfg->old_domain, cpu_online_map);
  1129. for_each_cpu_mask_nr(cpu, mask) {
  1130. for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
  1131. vector++) {
  1132. if (per_cpu(vector_irq, cpu)[vector] != irq)
  1133. continue;
  1134. per_cpu(vector_irq, cpu)[vector] = -1;
  1135. break;
  1136. }
  1137. }
  1138. cfg->move_in_progress = 0;
  1139. }
  1140. void __setup_vector_irq(int cpu)
  1141. {
  1142. /* Initialize vector_irq on a new cpu */
  1143. /* This function must be called with vector_lock held */
  1144. int irq, vector;
  1145. struct irq_cfg *cfg;
  1146. struct irq_desc *desc;
  1147. /* Mark the inuse vectors */
  1148. for_each_irq_desc(irq, desc) {
  1149. if (!desc)
  1150. continue;
  1151. cfg = desc->chip_data;
  1152. if (!cpu_isset(cpu, cfg->domain))
  1153. continue;
  1154. vector = cfg->vector;
  1155. per_cpu(vector_irq, cpu)[vector] = irq;
  1156. }
  1157. /* Mark the free vectors */
  1158. for (vector = 0; vector < NR_VECTORS; ++vector) {
  1159. irq = per_cpu(vector_irq, cpu)[vector];
  1160. if (irq < 0)
  1161. continue;
  1162. cfg = irq_cfg(irq);
  1163. if (!cpu_isset(cpu, cfg->domain))
  1164. per_cpu(vector_irq, cpu)[vector] = -1;
  1165. }
  1166. }
  1167. static struct irq_chip ioapic_chip;
  1168. #ifdef CONFIG_INTR_REMAP
  1169. static struct irq_chip ir_ioapic_chip;
  1170. #endif
  1171. #define IOAPIC_AUTO -1
  1172. #define IOAPIC_EDGE 0
  1173. #define IOAPIC_LEVEL 1
  1174. #ifdef CONFIG_X86_32
  1175. static inline int IO_APIC_irq_trigger(int irq)
  1176. {
  1177. int apic, idx, pin;
  1178. for (apic = 0; apic < nr_ioapics; apic++) {
  1179. for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
  1180. idx = find_irq_entry(apic, pin, mp_INT);
  1181. if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
  1182. return irq_trigger(idx);
  1183. }
  1184. }
  1185. /*
  1186. * nonexistent IRQs are edge default
  1187. */
  1188. return 0;
  1189. }
  1190. #else
  1191. static inline int IO_APIC_irq_trigger(int irq)
  1192. {
  1193. return 1;
  1194. }
  1195. #endif
  1196. static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger)
  1197. {
  1198. if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
  1199. trigger == IOAPIC_LEVEL)
  1200. desc->status |= IRQ_LEVEL;
  1201. else
  1202. desc->status &= ~IRQ_LEVEL;
  1203. #ifdef CONFIG_INTR_REMAP
  1204. if (irq_remapped(irq)) {
  1205. desc->status |= IRQ_MOVE_PCNTXT;
  1206. if (trigger)
  1207. set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
  1208. handle_fasteoi_irq,
  1209. "fasteoi");
  1210. else
  1211. set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
  1212. handle_edge_irq, "edge");
  1213. return;
  1214. }
  1215. #endif
  1216. if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
  1217. trigger == IOAPIC_LEVEL)
  1218. set_irq_chip_and_handler_name(irq, &ioapic_chip,
  1219. handle_fasteoi_irq,
  1220. "fasteoi");
  1221. else
  1222. set_irq_chip_and_handler_name(irq, &ioapic_chip,
  1223. handle_edge_irq, "edge");
  1224. }
  1225. static int setup_ioapic_entry(int apic, int irq,
  1226. struct IO_APIC_route_entry *entry,
  1227. unsigned int destination, int trigger,
  1228. int polarity, int vector)
  1229. {
  1230. /*
  1231. * add it to the IO-APIC irq-routing table:
  1232. */
  1233. memset(entry,0,sizeof(*entry));
  1234. #ifdef CONFIG_INTR_REMAP
  1235. if (intr_remapping_enabled) {
  1236. struct intel_iommu *iommu = map_ioapic_to_ir(apic);
  1237. struct irte irte;
  1238. struct IR_IO_APIC_route_entry *ir_entry =
  1239. (struct IR_IO_APIC_route_entry *) entry;
  1240. int index;
  1241. if (!iommu)
  1242. panic("No mapping iommu for ioapic %d\n", apic);
  1243. index = alloc_irte(iommu, irq, 1);
  1244. if (index < 0)
  1245. panic("Failed to allocate IRTE for ioapic %d\n", apic);
  1246. memset(&irte, 0, sizeof(irte));
  1247. irte.present = 1;
  1248. irte.dst_mode = INT_DEST_MODE;
  1249. irte.trigger_mode = trigger;
  1250. irte.dlvry_mode = INT_DELIVERY_MODE;
  1251. irte.vector = vector;
  1252. irte.dest_id = IRTE_DEST(destination);
  1253. modify_irte(irq, &irte);
  1254. ir_entry->index2 = (index >> 15) & 0x1;
  1255. ir_entry->zero = 0;
  1256. ir_entry->format = 1;
  1257. ir_entry->index = (index & 0x7fff);
  1258. } else
  1259. #endif
  1260. {
  1261. entry->delivery_mode = INT_DELIVERY_MODE;
  1262. entry->dest_mode = INT_DEST_MODE;
  1263. entry->dest = destination;
  1264. }
  1265. entry->mask = 0; /* enable IRQ */
  1266. entry->trigger = trigger;
  1267. entry->polarity = polarity;
  1268. entry->vector = vector;
  1269. /* Mask level triggered irqs.
  1270. * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
  1271. */
  1272. if (trigger)
  1273. entry->mask = 1;
  1274. return 0;
  1275. }
static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_desc *desc,
                              int trigger, int polarity)
{
    struct irq_cfg *cfg;
    struct IO_APIC_route_entry entry;
    cpumask_t mask;

    if (!IO_APIC_IRQ(irq))
        return;

    cfg = desc->chip_data;

    mask = TARGET_CPUS;
    if (assign_irq_vector(irq, cfg, mask))
        return;

    cpus_and(mask, cfg->domain, mask);

    apic_printk(APIC_VERBOSE, KERN_DEBUG
                "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
                "IRQ %d Mode:%i Active:%i)\n",
                apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
                irq, trigger, polarity);

    if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
                           cpu_mask_to_apicid(mask), trigger, polarity,
                           cfg->vector)) {
        printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
               mp_ioapics[apic].mp_apicid, pin);
        __clear_irq_vector(irq, cfg);
        return;
    }

    ioapic_register_intr(irq, desc, trigger);
    if (irq < NR_IRQS_LEGACY)
        disable_8259A_irq(irq);

    ioapic_write_entry(apic, pin, entry);
}
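/*
 * The ordering above matters: the vector is assigned and the handler
 * registered before the RTE is written, so by the time the pin goes
 * live the descriptor can already dispatch it; the matching legacy
 * 8259 line is masked first so the same interrupt is not delivered
 * through both paths.
 */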
static void __init setup_IO_APIC_irqs(void)
{
    int apic, pin, idx, irq;
    int notcon = 0;
    struct irq_desc *desc;
    struct irq_cfg *cfg;
    int cpu = boot_cpu_id;

    apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

    for (apic = 0; apic < nr_ioapics; apic++) {
        for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {

            idx = find_irq_entry(apic, pin, mp_INT);
            if (idx == -1) {
                if (!notcon) {
                    notcon = 1;
                    apic_printk(APIC_VERBOSE,
                                KERN_DEBUG " %d-%d",
                                mp_ioapics[apic].mp_apicid,
                                pin);
                } else
                    apic_printk(APIC_VERBOSE, " %d-%d",
                                mp_ioapics[apic].mp_apicid,
                                pin);
                continue;
            }
            if (notcon) {
                apic_printk(APIC_VERBOSE,
                            " (apicid-pin) not connected\n");
                notcon = 0;
            }

            irq = pin_2_irq(idx, apic, pin);
#ifdef CONFIG_X86_32
            if (multi_timer_check(apic, irq))
                continue;
#endif
            desc = irq_to_desc_alloc_cpu(irq, cpu);
            if (!desc) {
                printk(KERN_INFO "can not get irq_desc for %d\n", irq);
                continue;
            }
            cfg = desc->chip_data;
            add_pin_to_irq_cpu(cfg, cpu, apic, pin);

            setup_IO_APIC_irq(apic, pin, irq, desc,
                              irq_trigger(idx), irq_polarity(idx));
        }
    }

    if (notcon)
        apic_printk(APIC_VERBOSE,
                    " (apicid-pin) not connected\n");
}
/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
                                        int vector)
{
    struct IO_APIC_route_entry entry;

#ifdef CONFIG_INTR_REMAP
    if (intr_remapping_enabled)
        return;
#endif
    memset(&entry, 0, sizeof(entry));

    /*
     * We use logical delivery to get the timer IRQ
     * to the first CPU.
     */
    entry.dest_mode = INT_DEST_MODE;
    entry.mask = 1;    /* mask IRQ now */
    entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
    entry.delivery_mode = INT_DELIVERY_MODE;
    entry.polarity = 0;
    entry.trigger = 0;
    entry.vector = vector;

    /*
     * The timer IRQ doesn't have to know that behind the
     * scene we may have an 8259A-master in AEOI mode ...
     */
    set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

    /*
     * Add it to the IO-APIC irq-routing table:
     */
    ioapic_write_entry(apic, pin, entry);
}
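/*
 * Note that the entry above is written with mask = 1: the pin stays
 * masked until check_timer() unmasks it via unmask_IO_APIC_irq_desc()
 * once it has decided which routing of IRQ0 actually works.
 */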
__apicdebuginit(void) print_IO_APIC(void)
{
    int apic, i;
    union IO_APIC_reg_00 reg_00;
    union IO_APIC_reg_01 reg_01;
    union IO_APIC_reg_02 reg_02;
    union IO_APIC_reg_03 reg_03;
    unsigned long flags;
    struct irq_cfg *cfg;
    struct irq_desc *desc;
    unsigned int irq;

    if (apic_verbosity == APIC_QUIET)
        return;

    printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
    for (i = 0; i < nr_ioapics; i++)
        printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
               mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);

    /*
     * We are a bit conservative about what we expect. We have to
     * know about every hardware change ASAP.
     */
    printk(KERN_INFO "testing the IO APIC.......................\n");

    for (apic = 0; apic < nr_ioapics; apic++) {

        spin_lock_irqsave(&ioapic_lock, flags);
        reg_00.raw = io_apic_read(apic, 0);
        reg_01.raw = io_apic_read(apic, 1);
        if (reg_01.bits.version >= 0x10)
            reg_02.raw = io_apic_read(apic, 2);
        if (reg_01.bits.version >= 0x20)
            reg_03.raw = io_apic_read(apic, 3);
        spin_unlock_irqrestore(&ioapic_lock, flags);

        printk("\n");
        printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
        printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
        printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
        printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
        printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);

        printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
        printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
        printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
        printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);

        /*
         * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
         * but the value of reg_02 is read as the previous read register
         * value, so ignore it if reg_02 == reg_01.
         */
        if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
            printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
            printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
        }

        /*
         * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
         * or reg_03, but the value of reg_0[23] is read as the previous read
         * register value, so ignore it if reg_03 == reg_0[12].
         */
        if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
            reg_03.raw != reg_01.raw) {
            printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
            printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
        }

        printk(KERN_DEBUG ".... IRQ redirection table:\n");
        printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
                          " Stat Dmod Deli Vect: \n");

        for (i = 0; i <= reg_01.bits.entries; i++) {
            struct IO_APIC_route_entry entry;

            entry = ioapic_read_entry(apic, i);

            printk(KERN_DEBUG " %02x %03X ",
                   i,
                   entry.dest
            );

            printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
                   entry.mask,
                   entry.trigger,
                   entry.irr,
                   entry.polarity,
                   entry.delivery_status,
                   entry.dest_mode,
                   entry.delivery_mode,
                   entry.vector
            );
        }
    }
    printk(KERN_DEBUG "IRQ to pin mappings:\n");
    for_each_irq_desc(irq, desc) {
        struct irq_pin_list *entry;

        if (!desc)
            continue;
        cfg = desc->chip_data;
        entry = cfg->irq_2_pin;
        if (!entry)
            continue;
        printk(KERN_DEBUG "IRQ%d ", irq);
        for (;;) {
            printk("-> %d:%d", entry->apic, entry->pin);
            if (!entry->next)
                break;
            entry = entry->next;
        }
        printk("\n");
    }

    printk(KERN_INFO ".................................... done.\n");

    return;
}
__apicdebuginit(void) print_APIC_bitfield(int base)
{
    unsigned int v;
    int i, j;

    if (apic_verbosity == APIC_QUIET)
        return;

    printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
    for (i = 0; i < 8; i++) {
        v = apic_read(base + i*0x10);
        for (j = 0; j < 32; j++) {
            if (v & (1<<j))
                printk("1");
            else
                printk("0");
        }
        printk("\n");
    }
}
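/*
 * The ISR/TMR/IRR bitfields dumped above are 256 bits wide, split
 * across eight 32-bit registers spaced 0x10 apart; register i covers
 * vectors i*32 .. i*32+31, so the eight printed rows together show
 * one bit per possible vector.
 */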
__apicdebuginit(void) print_local_APIC(void *dummy)
{
    unsigned int v, ver, maxlvt;
    u64 icr;

    if (apic_verbosity == APIC_QUIET)
        return;

    printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
           smp_processor_id(), hard_smp_processor_id());
    v = apic_read(APIC_ID);
    printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
    v = apic_read(APIC_LVR);
    printk(KERN_INFO "... APIC VERSION: %08x\n", v);
    ver = GET_APIC_VERSION(v);
    maxlvt = lapic_get_maxlvt();

    v = apic_read(APIC_TASKPRI);
    printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

    if (APIC_INTEGRATED(ver)) {    /* !82489DX */
        if (!APIC_XAPIC(ver)) {
            v = apic_read(APIC_ARBPRI);
            printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
                   v & APIC_ARBPRI_MASK);
        }
        v = apic_read(APIC_PROCPRI);
        printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
    }

    /*
     * Remote read supported only in the 82489DX and local APIC for
     * Pentium processors.
     */
    if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
        v = apic_read(APIC_RRR);
        printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
    }

    v = apic_read(APIC_LDR);
    printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
    if (!x2apic_enabled()) {
        v = apic_read(APIC_DFR);
        printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
    }
    v = apic_read(APIC_SPIV);
    printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

    printk(KERN_DEBUG "... APIC ISR field:\n");
    print_APIC_bitfield(APIC_ISR);
    printk(KERN_DEBUG "... APIC TMR field:\n");
    print_APIC_bitfield(APIC_TMR);
    printk(KERN_DEBUG "... APIC IRR field:\n");
    print_APIC_bitfield(APIC_IRR);

    if (APIC_INTEGRATED(ver)) {    /* !82489DX */
        if (maxlvt > 3)    /* Due to the Pentium erratum 3AP. */
            apic_write(APIC_ESR, 0);

        v = apic_read(APIC_ESR);
        printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
    }

    icr = apic_icr_read();
    printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
    printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

    v = apic_read(APIC_LVTT);
    printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

    if (maxlvt > 3) {    /* PC is LVT#4. */
        v = apic_read(APIC_LVTPC);
        printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
    }
    v = apic_read(APIC_LVT0);
    printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
    v = apic_read(APIC_LVT1);
    printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

    if (maxlvt > 2) {    /* ERR is LVT#3. */
        v = apic_read(APIC_LVTERR);
        printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
    }

    v = apic_read(APIC_TMICT);
    printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
    v = apic_read(APIC_TMCCT);
    printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
    v = apic_read(APIC_TDCR);
    printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
    printk("\n");
}
__apicdebuginit(void) print_all_local_APICs(void)
{
    int cpu;

    preempt_disable();
    for_each_online_cpu(cpu)
        smp_call_function_single(cpu, print_local_APIC, NULL, 1);
    preempt_enable();
}
__apicdebuginit(void) print_PIC(void)
{
    unsigned int v;
    unsigned long flags;

    if (apic_verbosity == APIC_QUIET)
        return;

    printk(KERN_DEBUG "\nprinting PIC contents\n");

    spin_lock_irqsave(&i8259A_lock, flags);

    v = inb(0xa1) << 8 | inb(0x21);
    printk(KERN_DEBUG "... PIC IMR: %04x\n", v);

    v = inb(0xa0) << 8 | inb(0x20);
    printk(KERN_DEBUG "... PIC IRR: %04x\n", v);

    outb(0x0b, 0xa0);
    outb(0x0b, 0x20);
    v = inb(0xa0) << 8 | inb(0x20);
    outb(0x0a, 0xa0);
    outb(0x0a, 0x20);

    spin_unlock_irqrestore(&i8259A_lock, flags);

    printk(KERN_DEBUG "... PIC ISR: %04x\n", v);

    v = inb(0x4d1) << 8 | inb(0x4d0);
    printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}
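/*
 * The ISR read above uses the 8259's OCW3 mechanism: writing 0x0b to
 * a PIC command port (0x20 master, 0xa0 slave) selects the in-service
 * register for the next read, and writing 0x0a switches read-back to
 * the request register again. Ports 0x4d0/0x4d1 are the ELCR, whose
 * bits record which lines the chipset treats as level-triggered.
 */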
__apicdebuginit(int) print_all_ICs(void)
{
    print_PIC();
    print_all_local_APICs();
    print_IO_APIC();

    return 0;
}

fs_initcall(print_all_ICs);
/* Where, if anywhere, the i8259 is connected in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

void __init enable_IO_APIC(void)
{
    union IO_APIC_reg_01 reg_01;
    int i8259_apic, i8259_pin;
    int apic;
    unsigned long flags;

#ifdef CONFIG_X86_32
    int i;
    if (!pirqs_enabled)
        for (i = 0; i < MAX_PIRQS; i++)
            pirq_entries[i] = -1;
#endif

    /*
     * The number of IO-APIC IRQ registers (== #pins):
     */
    for (apic = 0; apic < nr_ioapics; apic++) {
        spin_lock_irqsave(&ioapic_lock, flags);
        reg_01.raw = io_apic_read(apic, 1);
        spin_unlock_irqrestore(&ioapic_lock, flags);
        nr_ioapic_registers[apic] = reg_01.bits.entries+1;
    }
    for (apic = 0; apic < nr_ioapics; apic++) {
        int pin;
        /* See if any of the pins is in ExtINT mode */
        for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
            struct IO_APIC_route_entry entry;
            entry = ioapic_read_entry(apic, pin);

            /* If the interrupt line is enabled and in ExtInt mode,
             * we have found the pin where the i8259 is connected.
             */
            if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
                ioapic_i8259.apic = apic;
                ioapic_i8259.pin = pin;
                goto found_i8259;
            }
        }
    }
found_i8259:
    /* Look to see if the MP table has reported the ExtINT */
    /* If we could not find the appropriate pin by looking at the ioapic,
     * the i8259 is probably not connected to the ioapic, but give the
     * mptable a chance anyway.
     */
    i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
    i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
    /* Trust the MP table if nothing is setup in the hardware */
    if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
        printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
        ioapic_i8259.pin = i8259_pin;
        ioapic_i8259.apic = i8259_apic;
    }
    /* Complain if the MP table and the hardware disagree */
    if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
        (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
    {
        printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
    }

    /*
     * Do not trust the IO-APIC being empty at bootup
     */
    clear_IO_APIC();
}
/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
    /*
     * Clear the IO-APIC before rebooting:
     */
    clear_IO_APIC();

    /*
     * If the i8259 is routed through an IOAPIC, put that IOAPIC in
     * virtual wire mode so legacy interrupts can be delivered.
     */
    if (ioapic_i8259.pin != -1) {
        struct IO_APIC_route_entry entry;

        memset(&entry, 0, sizeof(entry));
        entry.mask = 0;                     /* Enabled */
        entry.trigger = 0;                  /* Edge */
        entry.irr = 0;
        entry.polarity = 0;                 /* High */
        entry.delivery_status = 0;
        entry.dest_mode = 0;                /* Physical */
        entry.delivery_mode = dest_ExtINT;  /* ExtInt */
        entry.vector = 0;
        entry.dest = read_apic_id();

        /*
         * Add it to the IO-APIC irq-routing table:
         */
        ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
    }

    disconnect_bsp_APIC(ioapic_i8259.pin != -1);
}
#ifdef CONFIG_X86_32
/*
 * function to set the IO-APIC physical IDs based on the
 * values stored in the MPC table.
 *
 * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
 */
static void __init setup_ioapic_ids_from_mpc(void)
{
    union IO_APIC_reg_00 reg_00;
    physid_mask_t phys_id_present_map;
    int apic;
    int i;
    unsigned char old_id;
    unsigned long flags;

    if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids())
        return;

    /*
     * Don't check I/O APIC IDs for xAPIC systems. They have
     * no meaning without the serial APIC bus.
     */
    if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
        || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
        return;
    /*
     * This is broken; anything with a real cpu count has to
     * circumvent this idiocy regardless.
     */
    phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);

    /*
     * Set the IOAPIC ID to the value stored in the MPC table.
     */
    for (apic = 0; apic < nr_ioapics; apic++) {

        /* Read the register 0 value */
        spin_lock_irqsave(&ioapic_lock, flags);
        reg_00.raw = io_apic_read(apic, 0);
        spin_unlock_irqrestore(&ioapic_lock, flags);

        old_id = mp_ioapics[apic].mp_apicid;

        if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) {
            printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
                   apic, mp_ioapics[apic].mp_apicid);
            printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
                   reg_00.bits.ID);
            mp_ioapics[apic].mp_apicid = reg_00.bits.ID;
        }

        /*
         * Sanity check, is the ID really free? Every APIC in a
         * system must have a unique ID or we get lots of nice
         * 'stuck on smp_invalidate_needed IPI wait' messages.
         */
        if (check_apicid_used(phys_id_present_map,
                              mp_ioapics[apic].mp_apicid)) {
            printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
                   apic, mp_ioapics[apic].mp_apicid);
            for (i = 0; i < get_physical_broadcast(); i++)
                if (!physid_isset(i, phys_id_present_map))
                    break;
            if (i >= get_physical_broadcast())
                panic("Max APIC ID exceeded!\n");
            printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
                   i);
            physid_set(i, phys_id_present_map);
            mp_ioapics[apic].mp_apicid = i;
        } else {
            physid_mask_t tmp;
            tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid);
            apic_printk(APIC_VERBOSE, "Setting %d in the "
                        "phys_id_present_map\n",
                        mp_ioapics[apic].mp_apicid);
            physids_or(phys_id_present_map, phys_id_present_map, tmp);
        }

        /*
         * We need to adjust the IRQ routing table
         * if the ID changed.
         */
        if (old_id != mp_ioapics[apic].mp_apicid)
            for (i = 0; i < mp_irq_entries; i++)
                if (mp_irqs[i].mp_dstapic == old_id)
                    mp_irqs[i].mp_dstapic
                        = mp_ioapics[apic].mp_apicid;

        /*
         * Read the right value from the MPC table and
         * write it into the ID register.
         */
        apic_printk(APIC_VERBOSE, KERN_INFO
                    "...changing IO-APIC physical APIC ID to %d ...",
                    mp_ioapics[apic].mp_apicid);

        reg_00.bits.ID = mp_ioapics[apic].mp_apicid;
        spin_lock_irqsave(&ioapic_lock, flags);
        io_apic_write(apic, 0, reg_00.raw);
        spin_unlock_irqrestore(&ioapic_lock, flags);

        /*
         * Sanity check
         */
        spin_lock_irqsave(&ioapic_lock, flags);
        reg_00.raw = io_apic_read(apic, 0);
        spin_unlock_irqrestore(&ioapic_lock, flags);
        if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid)
            printk("could not set ID!\n");
        else
            apic_printk(APIC_VERBOSE, " ok.\n");
    }
}
#endif
int no_timer_check __initdata;

static int __init notimercheck(char *s)
{
    no_timer_check = 1;
    return 1;
}
__setup("no_timer_check", notimercheck);
/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 * - timer IRQ defaults to IO-APIC IRQ
 * - if this function detects that timer IRQs are defunct, then we fall
 *   back to ISA timer IRQs
 */
static int __init timer_irq_works(void)
{
    unsigned long t1 = jiffies;
    unsigned long flags;

    if (no_timer_check)
        return 1;

    local_save_flags(flags);
    local_irq_enable();
    /* Let ten ticks pass... */
    mdelay((10 * 1000) / HZ);
    local_irq_restore(flags);

    /*
     * Expect a few ticks at least, to be sure some possible
     * glue logic does not lock up after the first one or two
     * ticks in a non-ExtINT mode. Also the local APIC
     * might have cached one ExtINT interrupt. Finally, at
     * least one tick may be lost due to delays.
     */

    /* jiffies wrap? */
    if (time_after(jiffies, t1 + 4))
        return 1;
    return 0;
}
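/*
 * The arithmetic above: mdelay((10 * 1000) / HZ) busy-waits ten timer
 * periods expressed in milliseconds (10 ms at HZ=1000, 40 ms at
 * HZ=250), so if the timer interrupt is being delivered at all,
 * jiffies should have advanced by roughly ten. Requiring only
 * t1 + 4 leaves slack for ticks lost to glue logic or a cached
 * ExtINT, per the comment above.
 */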
/*
 * In the SMP+IOAPIC case it might happen that there are an unspecified
 * number of pending IRQ events unhandled. These cases are very rare,
 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 * better to do it this way as thus we do not have to be aware of
 * 'pending' interrupts in the IRQ path, except at this point.
 */
/*
 * Edge triggered needs to resend any interrupt
 * that was delayed but this is now handled in the device
 * independent code.
 */

/*
 * Starting up an edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need to
 * return 1 to indicate that it was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 */
static unsigned int startup_ioapic_irq(unsigned int irq)
{
    int was_pending = 0;
    unsigned long flags;
    struct irq_cfg *cfg;

    spin_lock_irqsave(&ioapic_lock, flags);
    if (irq < NR_IRQS_LEGACY) {
        disable_8259A_irq(irq);
        if (i8259A_irq_pending(irq))
            was_pending = 1;
    }
    cfg = irq_cfg(irq);
    __unmask_IO_APIC_irq(cfg);
    spin_unlock_irqrestore(&ioapic_lock, flags);

    return was_pending;
}
#ifdef CONFIG_X86_64
static int ioapic_retrigger_irq(unsigned int irq)
{
    struct irq_cfg *cfg = irq_cfg(irq);
    unsigned long flags;

    spin_lock_irqsave(&vector_lock, flags);
    send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
    spin_unlock_irqrestore(&vector_lock, flags);

    return 1;
}
#else
static int ioapic_retrigger_irq(unsigned int irq)
{
    send_IPI_self(irq_cfg(irq)->vector);

    return 1;
}
#endif
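/*
 * Retriggering is a pure software replay: the IRQ's currently
 * assigned vector is sent as an IPI (to the first CPU in the vector
 * domain on 64-bit, to the local CPU on 32-bit), so the interrupt
 * re-enters through exactly the same vector path as a hardware event.
 */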
/*
 * Level and edge triggered IO-APIC interrupts need different handling,
 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
 * handled with the level-triggered descriptor, but that one has slightly
 * more overhead. Level-triggered interrupts cannot be handled with the
 * edge-triggered handler, without risking IRQ storms and other ugly
 * races.
 */

#ifdef CONFIG_SMP

#ifdef CONFIG_INTR_REMAP
static void ir_irq_migration(struct work_struct *work);

static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For edge triggered, irq migration is a simple atomic update (of vector
 * and cpu destination) of the IRTE and a flush of the hardware cache.
 *
 * For level triggered, we need to modify the io-apic RTE as well with the
 * updated vector information, along with modifying the IRTE with vector and
 * destination. So irq migration for level triggered is a little bit more
 * complex compared to edge triggered migration. But the good news is, we use
 * the same algorithm for level triggered migration as we have today, the only
 * difference being, we now initiate the irq migration from process context
 * instead of the interrupt context.
 *
 * In future, when we do a directed EOI (combined with cpu EOI broadcast
 * suppression) to the IO-APIC, level triggered irq migration will also be
 * as simple as edge triggered migration and we can do the irq migration
 * with a simple atomic update to the IO-APIC RTE.
 */
static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
{
    struct irq_cfg *cfg;
    cpumask_t tmp, cleanup_mask;
    struct irte irte;
    int modify_ioapic_rte;
    unsigned int dest;
    unsigned long flags;
    unsigned int irq;

    cpus_and(tmp, mask, cpu_online_map);
    if (cpus_empty(tmp))
        return;

    irq = desc->irq;
    if (get_irte(irq, &irte))
        return;

    cfg = desc->chip_data;
    if (assign_irq_vector(irq, cfg, mask))
        return;

    set_extra_move_desc(desc, mask);

    cpus_and(tmp, cfg->domain, mask);
    dest = cpu_mask_to_apicid(tmp);

    modify_ioapic_rte = desc->status & IRQ_LEVEL;
    if (modify_ioapic_rte) {
        spin_lock_irqsave(&ioapic_lock, flags);
        __target_IO_APIC_irq(irq, dest, cfg);
        spin_unlock_irqrestore(&ioapic_lock, flags);
    }

    irte.vector = cfg->vector;
    irte.dest_id = IRTE_DEST(dest);

    /*
     * Modify the IRTE and flush the interrupt entry cache.
     */
    modify_irte(irq, &irte);

    if (cfg->move_in_progress) {
        cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
        cfg->move_cleanup_count = cpus_weight(cleanup_mask);
        send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
        cfg->move_in_progress = 0;
    }

    desc->affinity = mask;
}
static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
{
    int ret = -1;
    struct irq_cfg *cfg = desc->chip_data;

    mask_IO_APIC_irq_desc(desc);

    if (io_apic_level_ack_pending(cfg)) {
        /*
         * Interrupt in progress. Migrating irq now will change the
         * vector information in the IO-APIC RTE and that will confuse
         * the EOI broadcast performed by cpu.
         * So, delay the irq migration to the next instance.
         */
        schedule_delayed_work(&ir_migration_work, 1);
        goto unmask;
    }

    /* everything is clear. we have right of way */
    migrate_ioapic_irq_desc(desc, desc->pending_mask);

    ret = 0;
    desc->status &= ~IRQ_MOVE_PENDING;
    cpus_clear(desc->pending_mask);

unmask:
    unmask_IO_APIC_irq_desc(desc);

    return ret;
}
static void ir_irq_migration(struct work_struct *work)
{
    unsigned int irq;
    struct irq_desc *desc;

    for_each_irq_desc(irq, desc) {
        if (!desc)
            continue;

        if (desc->status & IRQ_MOVE_PENDING) {
            unsigned long flags;

            spin_lock_irqsave(&desc->lock, flags);
            if (!desc->chip->set_affinity ||
                !(desc->status & IRQ_MOVE_PENDING)) {
                desc->status &= ~IRQ_MOVE_PENDING;
                spin_unlock_irqrestore(&desc->lock, flags);
                continue;
            }

            desc->chip->set_affinity(irq, desc->pending_mask);
            spin_unlock_irqrestore(&desc->lock, flags);
        }
    }
}
/*
 * Migrates the IRQ destination in the process context.
 */
static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc, cpumask_t mask)
{
    if (desc->status & IRQ_LEVEL) {
        desc->status |= IRQ_MOVE_PENDING;
        desc->pending_mask = mask;
        migrate_irq_remapped_level_desc(desc);
        return;
    }

    migrate_ioapic_irq_desc(desc, mask);
}

static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
{
    struct irq_desc *desc = irq_to_desc(irq);

    set_ir_ioapic_affinity_irq_desc(desc, mask);
}
#endif
asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
    unsigned vector, me;

    ack_APIC_irq();
    exit_idle();
    irq_enter();

    me = smp_processor_id();
    for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
        unsigned int irq;
        struct irq_desc *desc;
        struct irq_cfg *cfg;

        irq = __get_cpu_var(vector_irq)[vector];
        if (irq == -1)
            continue;

        desc = irq_to_desc(irq);
        if (!desc)
            continue;

        cfg = irq_cfg(irq);
        spin_lock(&desc->lock);
        if (!cfg->move_cleanup_count)
            goto unlock;

        if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
            goto unlock;

        __get_cpu_var(vector_irq)[vector] = -1;
        cfg->move_cleanup_count--;
unlock:
        spin_unlock(&desc->lock);
    }

    irq_exit();
}
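/*
 * Cleanup works per CPU: each CPU that receives
 * IRQ_MOVE_CLEANUP_VECTOR walks its own vector_irq[] table and
 * releases vector slots whose irq has moved away, skipping any slot
 * that is still the irq's live vector on this CPU. move_cleanup_count
 * reaches zero once every CPU in the old domain has done this.
 */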
static void irq_complete_move(struct irq_desc **descp)
{
    struct irq_desc *desc = *descp;
    struct irq_cfg *cfg = desc->chip_data;
    unsigned vector, me;

    if (likely(!cfg->move_in_progress)) {
#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
        if (likely(!cfg->move_desc_pending))
            return;

        /* domain has not changed, but affinity did */
        me = smp_processor_id();
        if (cpu_isset(me, desc->affinity)) {
            *descp = desc = move_irq_desc(desc, me);
            /* get the new one */
            cfg = desc->chip_data;
            cfg->move_desc_pending = 0;
        }
#endif
        return;
    }

    vector = ~get_irq_regs()->orig_ax;
    me = smp_processor_id();
    if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
        cpumask_t cleanup_mask;

#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
        *descp = desc = move_irq_desc(desc, me);
        /* get the new one */
        cfg = desc->chip_data;
#endif

        cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
        cfg->move_cleanup_count = cpus_weight(cleanup_mask);
        send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
        cfg->move_in_progress = 0;
    }
}
#else
static inline void irq_complete_move(struct irq_desc **descp) {}
#endif
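/*
 * The vector recovery above relies on the x86 interrupt entry stubs
 * pushing the one's complement of the vector number into
 * regs->orig_ax, so ~get_irq_regs()->orig_ax yields the vector this
 * interrupt actually arrived on; the move is considered finished only
 * once that matches the newly assigned vector on a CPU in the new
 * domain.
 */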
#ifdef CONFIG_INTR_REMAP
static void ack_x2apic_level(unsigned int irq)
{
    ack_x2APIC_irq();
}

static void ack_x2apic_edge(unsigned int irq)
{
    ack_x2APIC_irq();
}
#endif

static void ack_apic_edge(unsigned int irq)
{
    struct irq_desc *desc = irq_to_desc(irq);

    irq_complete_move(&desc);
    move_native_irq(irq);
    ack_APIC_irq();
}
atomic_t irq_mis_count;

static void ack_apic_level(unsigned int irq)
{
    struct irq_desc *desc = irq_to_desc(irq);

#ifdef CONFIG_X86_32
    unsigned long v;
    int i;
#endif
    struct irq_cfg *cfg;
    int do_unmask_irq = 0;

    irq_complete_move(&desc);
#ifdef CONFIG_GENERIC_PENDING_IRQ
    /* If we are moving the irq we need to mask it */
    if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
        do_unmask_irq = 1;
        mask_IO_APIC_irq_desc(desc);
    }
#endif

#ifdef CONFIG_X86_32
    /*
     * It appears there is an erratum which affects at least version 0x11
     * of I/O APIC (that's the 82093AA and cores integrated into various
     * chipsets). Under certain conditions a level-triggered interrupt is
     * erroneously delivered as an edge-triggered one but the respective
     * IRR bit gets set nevertheless. As a result the I/O unit expects an
     * EOI message but it will never arrive and further interrupts are
     * blocked from the source. The exact reason is so far unknown, but
     * the phenomenon was observed when two consecutive interrupt requests
     * from a given source get delivered to the same CPU and the source is
     * temporarily disabled in between.
     *
     * A workaround is to simulate an EOI message manually. We achieve it
     * by setting the trigger mode to edge and then to level when the edge
     * trigger mode gets detected in the TMR of a local APIC for a
     * level-triggered interrupt. We mask the source for the time of the
     * operation to prevent an edge-triggered interrupt escaping meanwhile.
     * The idea is from Manfred Spraul. --macro
     */
    cfg = desc->chip_data;
    i = cfg->vector;

    v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
#endif

    /*
     * We must acknowledge the irq before we move it or the acknowledge will
     * not propagate properly.
     */
    ack_APIC_irq();

    /* Now we can move and re-enable the irq */
    if (unlikely(do_unmask_irq)) {
        /* Only migrate the irq if the ack has been received.
         *
         * On rare occasions the broadcast level triggered ack gets
         * delayed going to ioapics, and if we reprogram the
         * vector while Remote IRR is still set the irq will never
         * fire again.
         *
         * To prevent this scenario we read the Remote IRR bit
         * of the ioapic. This has two effects.
         * - On any sane system the read of the ioapic will
         *   flush writes (and acks) going to the ioapic from
         *   this cpu.
         * - We get to see if the ACK has actually been delivered.
         *
         * Based on failed experiments of reprogramming the
         * ioapic entry from outside of irq context starting
         * with masking the ioapic entry and then polling until
         * Remote IRR was clear before reprogramming the
         * ioapic I don't trust the Remote IRR bit to be
         * completely accurate.
         *
         * However there appears to be no other way to plug
         * this race, so if the Remote IRR bit is not
         * accurate and is causing problems then it is a hardware bug
         * and you can go talk to the chipset vendor about it.
         */
        cfg = desc->chip_data;
        if (!io_apic_level_ack_pending(cfg))
            move_masked_irq(irq);
        unmask_IO_APIC_irq_desc(desc);
    }

#ifdef CONFIG_X86_32
    if (!(v & (1 << (i & 0x1f)))) {
        atomic_inc(&irq_mis_count);
        spin_lock(&ioapic_lock);
        __mask_and_edge_IO_APIC_irq(cfg);
        __unmask_and_level_IO_APIC_irq(cfg);
        spin_unlock(&ioapic_lock);
    }
#endif
}
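/*
 * The TMR lookup in the 32-bit path above: APIC bitfield registers
 * are 0x10 apart and hold 32 vectors each, so the register for
 * vector i is APIC_TMR + (i / 32) * 0x10, which is exactly
 * ((i & ~0x1f) >> 1); bit (i & 0x1f) then tells whether the local
 * APIC latched the interrupt as level. A clear bit means the erratum
 * hit, and the edge-then-level rewrite at the end of ack_apic_level()
 * fakes the missing EOI and bumps irq_mis_count.
 */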
static struct irq_chip ioapic_chip __read_mostly = {
    .name         = "IO-APIC",
    .startup      = startup_ioapic_irq,
    .mask         = mask_IO_APIC_irq,
    .unmask       = unmask_IO_APIC_irq,
    .ack          = ack_apic_edge,
    .eoi          = ack_apic_level,
#ifdef CONFIG_SMP
    .set_affinity = set_ioapic_affinity_irq,
#endif
    .retrigger    = ioapic_retrigger_irq,
};

#ifdef CONFIG_INTR_REMAP
static struct irq_chip ir_ioapic_chip __read_mostly = {
    .name         = "IR-IO-APIC",
    .startup      = startup_ioapic_irq,
    .mask         = mask_IO_APIC_irq,
    .unmask       = unmask_IO_APIC_irq,
    .ack          = ack_x2apic_edge,
    .eoi          = ack_x2apic_level,
#ifdef CONFIG_SMP
    .set_affinity = set_ir_ioapic_affinity_irq,
#endif
    .retrigger    = ioapic_retrigger_irq,
};
#endif
static inline void init_IO_APIC_traps(void)
{
    int irq;
    struct irq_desc *desc;
    struct irq_cfg *cfg;

    /*
     * NOTE! The local APIC isn't very good at handling
     * multiple interrupts at the same interrupt level.
     * As the interrupt level is determined by taking the
     * vector number and shifting that right by 4, we
     * want to spread these out a bit so that they don't
     * all fall in the same interrupt level.
     *
     * Also, we've got to be careful not to trash gate
     * 0x80, because int 0x80 is hm, kind of importantish. ;)
     */
    for_each_irq_desc(irq, desc) {
        if (!desc)
            continue;

        cfg = desc->chip_data;
        if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
            /*
             * Hmm.. We don't have an entry for this,
             * so default to an old-fashioned 8259
             * interrupt if we can..
             */
            if (irq < NR_IRQS_LEGACY)
                make_8259A_irq(irq);
            else
                /* Strange. Oh, well.. */
                desc->chip = &no_irq_chip;
        }
    }
}
/*
 * The local APIC irq-chip implementation:
 */

static void mask_lapic_irq(unsigned int irq)
{
    unsigned long v;

    v = apic_read(APIC_LVT0);
    apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}

static void unmask_lapic_irq(unsigned int irq)
{
    unsigned long v;

    v = apic_read(APIC_LVT0);
    apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}

static void ack_lapic_irq(unsigned int irq)
{
    ack_APIC_irq();
}

static struct irq_chip lapic_chip __read_mostly = {
    .name   = "local-APIC",
    .mask   = mask_lapic_irq,
    .unmask = unmask_lapic_irq,
    .ack    = ack_lapic_irq,
};

static void lapic_register_intr(int irq, struct irq_desc *desc)
{
    desc->status &= ~IRQ_LEVEL;
    set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
                                  "edge");
}
static void __init setup_nmi(void)
{
    /*
     * Dirty trick to enable the NMI watchdog ...
     * We put the 8259A master into AEOI mode and
     * unmask on all local APICs LVT0 as NMI.
     *
     * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
     * is from Maciej W. Rozycki - so we do not have to EOI from
     * the NMI handler or the timer interrupt.
     */
    apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");

    enable_NMI_through_LVT0();

    apic_printk(APIC_VERBOSE, " done.\n");
}
/*
 * This looks a bit hackish but it's about the only one way of sending
 * a few INTA cycles to 8259As and any associated glue logic. ICR does
 * not support the ExtINT mode, unfortunately. We need to send these
 * cycles as some i82489DX-based boards have glue logic that keeps the
 * 8259A interrupt line asserted until INTA. --macro
 */
static inline void __init unlock_ExtINT_logic(void)
{
    int apic, pin, i;
    struct IO_APIC_route_entry entry0, entry1;
    unsigned char save_control, save_freq_select;

    pin = find_isa_irq_pin(8, mp_INT);
    if (pin == -1) {
        WARN_ON_ONCE(1);
        return;
    }
    apic = find_isa_irq_apic(8, mp_INT);
    if (apic == -1) {
        WARN_ON_ONCE(1);
        return;
    }

    entry0 = ioapic_read_entry(apic, pin);
    clear_IO_APIC_pin(apic, pin);

    memset(&entry1, 0, sizeof(entry1));

    entry1.dest_mode = 0;    /* physical delivery */
    entry1.mask = 0;         /* unmask IRQ now */
    entry1.dest = hard_smp_processor_id();
    entry1.delivery_mode = dest_ExtINT;
    entry1.polarity = entry0.polarity;
    entry1.trigger = 0;
    entry1.vector = 0;

    ioapic_write_entry(apic, pin, entry1);

    save_control = CMOS_READ(RTC_CONTROL);
    save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
    CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
               RTC_FREQ_SELECT);
    CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);

    i = 100;
    while (i-- > 0) {
        mdelay(10);
        if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
            i -= 10;
    }

    CMOS_WRITE(save_control, RTC_CONTROL);
    CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
    clear_IO_APIC_pin(apic, pin);

    ioapic_write_entry(apic, pin, entry0);
}
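/*
 * The RTC programming above is what generates the INTA cycles: rate
 * select 0x6 in RTC_FREQ_SELECT is the standard 1024 Hz periodic
 * rate, and setting RTC_PIE makes the RTC raise IRQ8 at that rate
 * through the temporary ExtINT entry. The loop polls RTC_INTR_FLAGS
 * for RTC_PF and bails out early (i -= 10) once periodic interrupts
 * are observed, after which the original RTE and CMOS state are
 * restored.
 */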
static int disable_timer_pin_1 __initdata;
/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
static int __init disable_timer_pin_setup(char *arg)
{
    disable_timer_pin_1 = 1;
    return 0;
}
early_param("disable_timer_pin_1", disable_timer_pin_setup);

int timer_through_8259 __initdata;

/*
 * This code may look a bit paranoid, but it's supposed to cooperate with
 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
 * fanatically on his truly buggy board.
 *
 * FIXME: really need to revamp this for all platforms.
 */
static inline void __init check_timer(void)
{
    struct irq_desc *desc = irq_to_desc(0);
    struct irq_cfg *cfg = desc->chip_data;
    int cpu = boot_cpu_id;
    int apic1, pin1, apic2, pin2;
    unsigned long flags;
    unsigned int ver;
    int no_pin1 = 0;

    local_irq_save(flags);

    ver = apic_read(APIC_LVR);
    ver = GET_APIC_VERSION(ver);

    /*
     * get/set the timer IRQ vector:
     */
    disable_8259A_irq(0);
    assign_irq_vector(0, cfg, TARGET_CPUS);

    /*
     * As IRQ0 is to be enabled in the 8259A, the virtual
     * wire has to be disabled in the local APIC. Also
     * timer interrupts need to be acknowledged manually in
     * the 8259A for the i82489DX when using the NMI
     * watchdog as that APIC treats NMIs as level-triggered.
     * The AEOI mode will finish them in the 8259A
     * automatically.
     */
    apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
    init_8259A(1);
#ifdef CONFIG_X86_32
    timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
#endif

    pin1 = find_isa_irq_pin(0, mp_INT);
    apic1 = find_isa_irq_apic(0, mp_INT);
    pin2 = ioapic_i8259.pin;
    apic2 = ioapic_i8259.apic;

    apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
                "apic1=%d pin1=%d apic2=%d pin2=%d\n",
                cfg->vector, apic1, pin1, apic2, pin2);

    /*
     * Some BIOS writers are clueless and report the ExtINTA
     * I/O APIC input from the cascaded 8259A as the timer
     * interrupt input. So just in case, if only one pin
     * was found above, try it both directly and through the
     * 8259A.
     */
    if (pin1 == -1) {
#ifdef CONFIG_INTR_REMAP
        if (intr_remapping_enabled)
            panic("BIOS bug: timer not connected to IO-APIC");
#endif
        pin1 = pin2;
        apic1 = apic2;
        no_pin1 = 1;
    } else if (pin2 == -1) {
        pin2 = pin1;
        apic2 = apic1;
    }

    if (pin1 != -1) {
        /*
         * Ok, does IRQ0 through the IOAPIC work?
         */
        if (no_pin1) {
            add_pin_to_irq_cpu(cfg, cpu, apic1, pin1);
            setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
        }
        unmask_IO_APIC_irq_desc(desc);
        if (timer_irq_works()) {
            if (nmi_watchdog == NMI_IO_APIC) {
                setup_nmi();
                enable_8259A_irq(0);
            }
            if (disable_timer_pin_1 > 0)
                clear_IO_APIC_pin(0, pin1);
            goto out;
        }
#ifdef CONFIG_INTR_REMAP
        if (intr_remapping_enabled)
            panic("timer doesn't work through Interrupt-remapped IO-APIC");
#endif
        clear_IO_APIC_pin(apic1, pin1);
        if (!no_pin1)
            apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
                        "8254 timer not connected to IO-APIC\n");

        apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
                    "(IRQ0) through the 8259A ...\n");
        apic_printk(APIC_QUIET, KERN_INFO
                    "..... (found apic %d pin %d) ...\n", apic2, pin2);
        /*
         * legacy devices should be connected to IO APIC #0
         */
        replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2);
        setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
        unmask_IO_APIC_irq_desc(desc);
        enable_8259A_irq(0);
        if (timer_irq_works()) {
            apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
            timer_through_8259 = 1;
            if (nmi_watchdog == NMI_IO_APIC) {
                disable_8259A_irq(0);
                setup_nmi();
                enable_8259A_irq(0);
            }
            goto out;
        }
        /*
         * Cleanup, just in case ...
         */
        disable_8259A_irq(0);
        clear_IO_APIC_pin(apic2, pin2);
        apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
    }

    if (nmi_watchdog == NMI_IO_APIC) {
        apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
                    "through the IO-APIC - disabling NMI Watchdog!\n");
        nmi_watchdog = NMI_NONE;
    }
#ifdef CONFIG_X86_32
    timer_ack = 0;
#endif

    apic_printk(APIC_QUIET, KERN_INFO
                "...trying to set up timer as Virtual Wire IRQ...\n");

    lapic_register_intr(0, desc);
    apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);    /* Fixed mode */
    enable_8259A_irq(0);

    if (timer_irq_works()) {
        apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
        goto out;
    }
    disable_8259A_irq(0);
    apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
    apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");

    apic_printk(APIC_QUIET, KERN_INFO
                "...trying to set up timer as ExtINT IRQ...\n");

    init_8259A(0);
    make_8259A_irq(0);
    apic_write(APIC_LVT0, APIC_DM_EXTINT);

    unlock_ExtINT_logic();

    if (timer_irq_works()) {
        apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
        goto out;
    }
    apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
    panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
          "report. Then try booting with the 'noapic' option.\n");
out:
    local_irq_restore(flags);
}
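/*
 * Summary of the fallback ladder above: try IRQ0 on the IO-APIC pin
 * reported for the timer; failing that, route it through the 8259A to
 * the ExtINT pin; failing that, program the local APIC LVT0 as a
 * fixed-vector "virtual wire"; failing that, fall back to plain 8259A
 * ExtINT delivery; and if nothing ticks, panic with a request for an
 * apic=debug report.
 */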
/*
 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
 * to devices. However there may be an I/O APIC pin available for
 * this interrupt regardless. The pin may be left unconnected, but
 * typically it will be reused as an ExtINT cascade interrupt for
 * the master 8259A. In the MPS case such a pin will normally be
 * reported as an ExtINT interrupt in the MP table. With ACPI
 * there is no provision for ExtINT interrupts, and in the absence
 * of an override it would be treated as an ordinary ISA I/O APIC
 * interrupt, that is edge-triggered and unmasked by default. We
 * used to do this, but it caused problems on some systems because
 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
 * the same ExtINT cascade interrupt to drive the local APIC of the
 * bootstrap processor. Therefore we refrain from routing IRQ2 to
 * the I/O APIC in all cases now. No actual device should request
 * it anyway. --macro
 */

#define PIC_IRQS (1 << PIC_CASCADE_IR)
void __init setup_IO_APIC(void)
{
#ifdef CONFIG_X86_32
    enable_IO_APIC();
#else
    /*
     * calling enable_IO_APIC() is moved to setup_local_APIC for BP
     */
#endif

    io_apic_irqs = ~PIC_IRQS;

    apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
    /*
     * Set up IO-APIC IRQ routing.
     */
#ifdef CONFIG_X86_32
    if (!acpi_ioapic)
        setup_ioapic_ids_from_mpc();
#endif
    sync_Arb_IDs();
    setup_IO_APIC_irqs();
    init_IO_APIC_traps();
    check_timer();
}
/*
 * Called after all the initialization is done. If we didn't find any
 * APIC bugs then we can allow the modify fast path
 */
static int __init io_apic_bug_finalize(void)
{
    if (sis_apic_bug == -1)
        sis_apic_bug = 0;
    return 0;
}

late_initcall(io_apic_bug_finalize);
struct sysfs_ioapic_data {
    struct sys_device dev;
    struct IO_APIC_route_entry entry[0];
};
static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];

static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
{
    struct IO_APIC_route_entry *entry;
    struct sysfs_ioapic_data *data;
    int i;

    data = container_of(dev, struct sysfs_ioapic_data, dev);
    entry = data->entry;
    for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
        *entry = ioapic_read_entry(dev->id, i);

    return 0;
}

static int ioapic_resume(struct sys_device *dev)
{
    struct IO_APIC_route_entry *entry;
    struct sysfs_ioapic_data *data;
    unsigned long flags;
    union IO_APIC_reg_00 reg_00;
    int i;

    data = container_of(dev, struct sysfs_ioapic_data, dev);
    entry = data->entry;

    spin_lock_irqsave(&ioapic_lock, flags);
    reg_00.raw = io_apic_read(dev->id, 0);
    if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
        reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
        io_apic_write(dev->id, 0, reg_00.raw);
    }
    spin_unlock_irqrestore(&ioapic_lock, flags);
    for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
        ioapic_write_entry(dev->id, i, entry[i]);

    return 0;
}

static struct sysdev_class ioapic_sysdev_class = {
    .name    = "ioapic",
    .suspend = ioapic_suspend,
    .resume  = ioapic_resume,
};
static int __init ioapic_init_sysfs(void)
{
    struct sys_device *dev;
    int i, size, error;

    error = sysdev_class_register(&ioapic_sysdev_class);
    if (error)
        return error;

    for (i = 0; i < nr_ioapics; i++) {
        size = sizeof(struct sys_device) + nr_ioapic_registers[i]
            * sizeof(struct IO_APIC_route_entry);
        mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
        if (!mp_ioapic_data[i]) {
            printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
            continue;
        }
        dev = &mp_ioapic_data[i]->dev;
        dev->id = i;
        dev->cls = &ioapic_sysdev_class;
        error = sysdev_register(dev);
        if (error) {
            kfree(mp_ioapic_data[i]);
            mp_ioapic_data[i] = NULL;
            printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
            continue;
        }
    }

    return 0;
}

device_initcall(ioapic_init_sysfs);
/*
 * Dynamic irq allocation and deallocation
 */
unsigned int create_irq_nr(unsigned int irq_want)
{
    /* Allocate an unused irq */
    unsigned int irq;
    unsigned int new;
    unsigned long flags;
    struct irq_cfg *cfg_new = NULL;
    int cpu = boot_cpu_id;
    struct irq_desc *desc_new = NULL;

    irq = 0;
    spin_lock_irqsave(&vector_lock, flags);
    for (new = irq_want; new < NR_IRQS; new++) {
        if (platform_legacy_irq(new))
            continue;

        desc_new = irq_to_desc_alloc_cpu(new, cpu);
        if (!desc_new) {
            printk(KERN_INFO "can not get irq_desc for %d\n", new);
            continue;
        }
        cfg_new = desc_new->chip_data;

        if (cfg_new->vector != 0)
            continue;
        if (__assign_irq_vector(new, cfg_new, TARGET_CPUS) == 0)
            irq = new;
        break;
    }
    spin_unlock_irqrestore(&vector_lock, flags);

    if (irq > 0) {
        dynamic_irq_init(irq);
        /* restore it, in case dynamic_irq_init clears it */
        if (desc_new)
            desc_new->chip_data = cfg_new;
    }
    return irq;
}
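/*
 * Note the return convention: create_irq_nr() hands back 0 on
 * failure, which is safe to use as a sentinel because IRQ 0 is the
 * timer and is never handed out dynamically; create_irq() below maps
 * that 0 onto -1 for callers that expect a negative error value.
 */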
static int nr_irqs_gsi = NR_IRQS_LEGACY;
int create_irq(void)
{
    unsigned int irq_want;
    int irq;

    irq_want = nr_irqs_gsi;
    irq = create_irq_nr(irq_want);

    if (irq == 0)
        irq = -1;

    return irq;
}
void destroy_irq(unsigned int irq)
{
    unsigned long flags;
    struct irq_cfg *cfg;
    struct irq_desc *desc;

    /* store it, in case dynamic_irq_cleanup clears it */
    desc = irq_to_desc(irq);
    cfg = desc->chip_data;
    dynamic_irq_cleanup(irq);
    /* connect back irq_cfg */
    if (desc)
        desc->chip_data = cfg;

#ifdef CONFIG_INTR_REMAP
    free_irte(irq);
#endif
    spin_lock_irqsave(&vector_lock, flags);
    __clear_irq_vector(irq, cfg);
    spin_unlock_irqrestore(&vector_lock, flags);
}
/*
 * MSI message composition
 */
#ifdef CONFIG_PCI_MSI
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
    struct irq_cfg *cfg;
    int err;
    unsigned dest;
    cpumask_t tmp;

    cfg = irq_cfg(irq);
    tmp = TARGET_CPUS;
    err = assign_irq_vector(irq, cfg, tmp);
    if (err)
        return err;

    cpus_and(tmp, cfg->domain, tmp);
    dest = cpu_mask_to_apicid(tmp);

#ifdef CONFIG_INTR_REMAP
    if (irq_remapped(irq)) {
        struct irte irte;
        int ir_index;
        u16 sub_handle;

        ir_index = map_irq_to_irte_handle(irq, &sub_handle);
        BUG_ON(ir_index == -1);

        memset(&irte, 0, sizeof(irte));

        irte.present = 1;
        irte.dst_mode = INT_DEST_MODE;
        irte.trigger_mode = 0;    /* edge */
        irte.dlvry_mode = INT_DELIVERY_MODE;
        irte.vector = cfg->vector;
        irte.dest_id = IRTE_DEST(dest);

        modify_irte(irq, &irte);

        msg->address_hi = MSI_ADDR_BASE_HI;
        msg->data = sub_handle;
        msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
                          MSI_ADDR_IR_SHV |
                          MSI_ADDR_IR_INDEX1(ir_index) |
                          MSI_ADDR_IR_INDEX2(ir_index);
    } else
#endif
    {
        msg->address_hi = MSI_ADDR_BASE_HI;
        msg->address_lo =
            MSI_ADDR_BASE_LO |
            ((INT_DEST_MODE == 0) ?
                MSI_ADDR_DEST_MODE_PHYSICAL :
                MSI_ADDR_DEST_MODE_LOGICAL) |
            ((INT_DELIVERY_MODE != dest_LowestPrio) ?
                MSI_ADDR_REDIRECTION_CPU :
                MSI_ADDR_REDIRECTION_LOWPRI) |
            MSI_ADDR_DEST_ID(dest);

        msg->data =
            MSI_DATA_TRIGGER_EDGE |
            MSI_DATA_LEVEL_ASSERT |
            ((INT_DELIVERY_MODE != dest_LowestPrio) ?
                MSI_DATA_DELIVERY_FIXED :
                MSI_DATA_DELIVERY_LOWPRI) |
            MSI_DATA_VECTOR(cfg->vector);
    }
    return err;
}
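/*
 * In the non-remapped branch above, the MSI message is the standard
 * x86 encoding: address_lo carries the destination mode, redirection
 * hint and destination APIC id, while data carries the trigger mode
 * (edge), assertion level, delivery mode and vector. In the remapped
 * branch the address instead encodes an IRTE index (split across
 * MSI_ADDR_IR_INDEX1/2 with SHV set) and the subhandle goes in data,
 * so the real routing lives in the interrupt-remapping table.
 */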
#ifdef CONFIG_SMP
static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
    struct irq_desc *desc = irq_to_desc(irq);
    struct irq_cfg *cfg;
    struct msi_msg msg;
    unsigned int dest;
    cpumask_t tmp;

    cpus_and(tmp, mask, cpu_online_map);
    if (cpus_empty(tmp))
        return;

    cfg = desc->chip_data;
    if (assign_irq_vector(irq, cfg, mask))
        return;

    set_extra_move_desc(desc, mask);

    cpus_and(tmp, cfg->domain, mask);
    dest = cpu_mask_to_apicid(tmp);

    read_msi_msg_desc(desc, &msg);

    msg.data &= ~MSI_DATA_VECTOR_MASK;
    msg.data |= MSI_DATA_VECTOR(cfg->vector);
    msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
    msg.address_lo |= MSI_ADDR_DEST_ID(dest);

    write_msi_msg_desc(desc, &msg);
    desc->affinity = mask;
}
#ifdef CONFIG_INTR_REMAP
/*
 * Migrate the MSI irq to another cpumask. This migration is
 * done in the process context using interrupt-remapping hardware.
 */
static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
    struct irq_desc *desc = irq_to_desc(irq);
    struct irq_cfg *cfg;
    unsigned int dest;
    cpumask_t tmp, cleanup_mask;
    struct irte irte;

    cpus_and(tmp, mask, cpu_online_map);
    if (cpus_empty(tmp))
        return;

    if (get_irte(irq, &irte))
        return;

    cfg = desc->chip_data;
    if (assign_irq_vector(irq, cfg, mask))
        return;

    set_extra_move_desc(desc, mask);

    cpus_and(tmp, cfg->domain, mask);
    dest = cpu_mask_to_apicid(tmp);

    irte.vector = cfg->vector;
    irte.dest_id = IRTE_DEST(dest);

    /*
     * atomically update the IRTE with the new destination and vector.
     */
    modify_irte(irq, &irte);

    /*
     * After this point, all the interrupts will start arriving
     * at the new destination. So, time to cleanup the previous
     * vector allocation.
     */
    if (cfg->move_in_progress) {
        cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
        cfg->move_cleanup_count = cpus_weight(cleanup_mask);
        send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
        cfg->move_in_progress = 0;
    }

    desc->affinity = mask;
}
#endif
#endif /* CONFIG_SMP */
/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI or MSI-X Capability Structure.
 */
static struct irq_chip msi_chip = {
	.name		= "PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
#ifdef CONFIG_INTR_REMAP
static struct irq_chip msi_ir_chip = {
	.name		= "IR-PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_x2apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= ir_set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		printk(KERN_ERR
		       "Unable to map PCI %s to iommu\n", pci_name(dev));
		return -ENOENT;
	}

	index = alloc_irte(iommu, irq, nvec);
	if (index < 0) {
		printk(KERN_ERR
		       "Unable to allocate %d IRTEs for PCI %s\n", nvec,
		       pci_name(dev));
		return -ENOSPC;
	}
	return index;
}
#endif
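/*
 * Program one MSI vector and attach the matching irq_chip: msi_ir_chip
 * for remapped interrupts (migratable in process context), msi_chip
 * otherwise.
 */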
static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(dev, irq, &msg);
	if (ret < 0)
		return ret;

	set_irq_msi(irq, msidesc);
	write_msi_msg(irq, &msg);

#ifdef CONFIG_INTR_REMAP
	if (irq_remapped(irq)) {
		struct irq_desc *desc = irq_to_desc(irq);
		/*
		 * irq migration in process context
		 */
		desc->status |= IRQ_MOVE_PCNTXT;
		set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
	} else
#endif
		set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");

	dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);

	return 0;
}
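/*
 * Arch hook behind pci_enable_msi(): allocate an irq number above the
 * GSI range, optionally back it with an IRTE, then program the device.
 * A driver reaches this path via something like (illustrative only;
 * my_handler/"mydev"/dev are placeholders):
 *
 *	if (!pci_enable_msi(pdev))
 *		err = request_irq(pdev->irq, my_handler, 0, "mydev", dev);
 */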
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc)
{
	unsigned int irq;
	int ret;
	unsigned int irq_want;

	irq_want = nr_irqs_gsi;
	irq = create_irq_nr(irq_want);
	if (irq == 0)
		return -1;

#ifdef CONFIG_INTR_REMAP
	if (!intr_remapping_enabled)
		goto no_ir;

	ret = msi_alloc_irte(dev, irq, 1);
	if (ret < 0)
		goto error;
no_ir:
#endif
	ret = setup_msi_irq(dev, msidesc, irq);
	if (ret < 0) {
		destroy_irq(irq);
		return ret;
	}
	return 0;

#ifdef CONFIG_INTR_REMAP
error:
	destroy_irq(irq);
	return ret;
#endif
}
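/*
 * Multi-vector variant: with remapping enabled, one consecutive block
 * of 'nvec' IRTEs is allocated for the first entry and later entries
 * reuse it via their sub_handle; without remapping, each entry is set
 * up independently.
 */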
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	unsigned int irq;
	int ret, sub_handle;
	struct msi_desc *msidesc;
	unsigned int irq_want;

#ifdef CONFIG_INTR_REMAP
	struct intel_iommu *iommu = NULL;
	int index = 0;
#endif

	irq_want = nr_irqs_gsi;
	sub_handle = 0;
	list_for_each_entry(msidesc, &dev->msi_list, list) {
		irq = create_irq_nr(irq_want);
		irq_want++;
		if (irq == 0)
			return -1;
#ifdef CONFIG_INTR_REMAP
		if (!intr_remapping_enabled)
			goto no_ir;

		if (!sub_handle) {
			/*
			 * allocate the consecutive block of IRTEs
			 * for 'nvec'
			 */
			index = msi_alloc_irte(dev, irq, nvec);
			if (index < 0) {
				ret = index;
				goto error;
			}
		} else {
			iommu = map_dev_to_ir(dev);
			if (!iommu) {
				ret = -ENOENT;
				goto error;
			}
			/*
			 * setup the mapping between the irq and the IRTE
			 * base index, the sub_handle pointing to the
			 * appropriate interrupt remap table entry.
			 */
			set_irte_irq(irq, iommu, index, sub_handle);
		}
no_ir:
#endif
		ret = setup_msi_irq(dev, msidesc, irq);
		if (ret < 0)
			goto error;
		sub_handle++;
	}
	return 0;

error:
	destroy_irq(irq);
	return ret;
}
void arch_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}

#ifdef CONFIG_DMAR
#ifdef CONFIG_SMP
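/*
 * Same retargeting sequence as set_msi_irq_affinity(), but the message
 * lives in the IOMMU's fault-event registers, hence the
 * dmar_msi_read()/dmar_msi_write() accessors.
 */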
static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;
	cpumask_t tmp;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	cfg = desc->chip_data;
	if (assign_irq_vector(irq, cfg, mask))
		return;

	set_extra_move_desc(desc, mask);

	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	dmar_msi_write(irq, &msg);
	desc->affinity = mask;
}
#endif /* CONFIG_SMP */

struct irq_chip dmar_msi_type = {
	.name		= "DMAR_MSI",
	.unmask		= dmar_msi_unmask,
	.mask		= dmar_msi_mask,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= dmar_msi_set_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
int arch_setup_dmar_msi(unsigned int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(NULL, irq, &msg);
	if (ret < 0)
		return ret;
	dmar_msi_write(irq, &msg);
	set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
				      "edge");
	return 0;
}
#endif
#ifdef CONFIG_HPET_TIMER

#ifdef CONFIG_SMP
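/*
 * HPET comparators that support FSB delivery carry the same MSI-style
 * address/data pair in their FSB interrupt route register, so they are
 * retargeted exactly like a PCI MSI.
 */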
static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;
	cpumask_t tmp;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	cfg = desc->chip_data;
	if (assign_irq_vector(irq, cfg, mask))
		return;

	set_extra_move_desc(desc, mask);

	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	hpet_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	hpet_msi_write(irq, &msg);
	desc->affinity = mask;
}
#endif /* CONFIG_SMP */

struct irq_chip hpet_msi_type = {
	.name		= "HPET_MSI",
	.unmask		= hpet_msi_unmask,
	.mask		= hpet_msi_mask,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= hpet_msi_set_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
int arch_setup_hpet_msi(unsigned int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(NULL, irq, &msg);
	if (ret < 0)
		return ret;
	hpet_msi_write(irq, &msg);
	set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq,
				      "edge");
	return 0;
}
#endif

#endif /* CONFIG_PCI_MSI */
/*
 * Hypertransport interrupt support
 */
#ifdef CONFIG_HT_IRQ

#ifdef CONFIG_SMP
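/*
 * Rewrite only the vector and destination-ID fields of an already
 * composed HyperTransport interrupt message.
 */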
static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	struct ht_irq_msg msg;

	fetch_ht_irq_msg(irq, &msg);

	msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);

	msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);

	write_ht_irq_msg(irq, &msg);
}
static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg;
	unsigned int dest;
	cpumask_t tmp;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	cfg = desc->chip_data;
	if (assign_irq_vector(irq, cfg, mask))
		return;

	set_extra_move_desc(desc, mask);

	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	target_ht_irq(irq, dest, cfg->vector);
	desc->affinity = mask;
}
#endif
static struct irq_chip ht_irq_chip = {
	.name		= "PCI-HT",
	.mask		= mask_ht_irq,
	.unmask		= unmask_ht_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_ht_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
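/*
 * Compose and write the HT interrupt message for @irq. Note that the
 * message is written with HT_IRQ_LOW_IRQ_MASKED set, so the interrupt
 * stays masked until the driver unmasks it.
 */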
int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
	struct irq_cfg *cfg;
	int err;
	cpumask_t tmp;

	cfg = irq_cfg(irq);
	tmp = TARGET_CPUS;
	err = assign_irq_vector(irq, cfg, tmp);
	if (!err) {
		struct ht_irq_msg msg;
		unsigned dest;

		cpus_and(tmp, cfg->domain, tmp);
		dest = cpu_mask_to_apicid(tmp);

		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);

		msg.address_lo =
			HT_IRQ_LOW_BASE |
			HT_IRQ_LOW_DEST_ID(dest) |
			HT_IRQ_LOW_VECTOR(cfg->vector) |
			((INT_DEST_MODE == 0) ?
				HT_IRQ_LOW_DM_PHYSICAL :
				HT_IRQ_LOW_DM_LOGICAL) |
			HT_IRQ_LOW_RQEOI_EDGE |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				HT_IRQ_LOW_MT_FIXED :
				HT_IRQ_LOW_MT_ARBITRATED) |
			HT_IRQ_LOW_IRQ_MASKED;

		write_ht_irq_msg(irq, &msg);

		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
					      handle_edge_irq, "edge");

		dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
	}
	return err;
}
#endif /* CONFIG_HT_IRQ */
#ifdef CONFIG_X86_64
/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 */
int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
		       unsigned long mmr_offset)
{
	const cpumask_t *eligible_cpu = get_cpu_mask(cpu);
	struct irq_cfg *cfg;
	int mmr_pnode;
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	unsigned long flags;
	int err;

	cfg = irq_cfg(irq);

	err = assign_irq_vector(irq, cfg, *eligible_cpu);
	if (err != 0)
		return err;

	spin_lock_irqsave(&vector_lock, flags);
	set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
				      irq_name);
	spin_unlock_irqrestore(&vector_lock, flags);

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));

	entry->vector = cfg->vector;
	entry->delivery_mode = INT_DELIVERY_MODE;
	entry->dest_mode = INT_DEST_MODE;
	entry->polarity = 0;
	entry->trigger = 0;
	entry->mask = 0;
	entry->dest = cpu_mask_to_apicid(*eligible_cpu);

	mmr_pnode = uv_blade_to_pnode(mmr_blade);
	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	return irq;
}
/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
void arch_disable_uv_irq(int mmr_blade, unsigned long mmr_offset)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	int mmr_pnode;

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));

	entry->mask = 1;

	mmr_pnode = uv_blade_to_pnode(mmr_blade);
	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}
#endif /* CONFIG_X86_64 */
int __init io_apic_get_redir_entries(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.entries;
}
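/*
 * Size the GSI space by summing the redirection-table entries of all
 * IO-APICs; create_irq_nr() hands out dynamic irqs (e.g. for MSI)
 * above this mark.
 */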
void __init probe_nr_irqs_gsi(void)
{
	int idx;
	int nr = 0;

	for (idx = 0; idx < nr_ioapics; idx++)
		nr += io_apic_get_redir_entries(idx) + 1;

	if (nr > nr_irqs_gsi)
		nr_irqs_gsi = nr;
}
/* --------------------------------------------------------------------------
			ACPI-based IOAPIC Configuration
   -------------------------------------------------------------------------- */
#ifdef CONFIG_ACPI

#ifdef CONFIG_X86_32
int __init io_apic_get_unique_id(int ioapic, int apic_id)
{
	union IO_APIC_reg_00 reg_00;
	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
	physid_mask_t tmp;
	unsigned long flags;
	int i = 0;

	/*
	 * The P4 platform supports up to 256 APIC IDs on two separate APIC
	 * buses (one for LAPICs, one for IOAPICs), where its predecessors
	 * only support up to 16 on one shared APIC bus.
	 *
	 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
	 * advantage of new APIC bus architecture.
	 */
	if (physids_empty(apic_id_map))
		apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(ioapic, 0);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	if (apic_id >= get_physical_broadcast()) {
		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
		       "%d\n", ioapic, apic_id, reg_00.bits.ID);
		apic_id = reg_00.bits.ID;
	}

	/*
	 * Every APIC in a system must have a unique ID or we get lots of nice
	 * 'stuck on smp_invalidate_needed IPI wait' messages.
	 */
	if (check_apicid_used(apic_id_map, apic_id)) {
		for (i = 0; i < get_physical_broadcast(); i++) {
			if (!check_apicid_used(apic_id_map, i))
				break;
		}

		if (i == get_physical_broadcast())
			panic("Max apic_id exceeded!\n");

		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
		       "trying %d\n", ioapic, apic_id, i);
		apic_id = i;
	}

	tmp = apicid_to_cpu_present(apic_id);
	physids_or(apic_id_map, apic_id_map, tmp);

	if (reg_00.bits.ID != apic_id) {
		reg_00.bits.ID = apic_id;

		spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(ioapic, 0, reg_00.raw);
		reg_00.raw = io_apic_read(ioapic, 0);
		spin_unlock_irqrestore(&ioapic_lock, flags);

		/* Sanity check */
		if (reg_00.bits.ID != apic_id) {
			printk(KERN_ERR "IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
			return -1;
		}
	}

	apic_printk(APIC_VERBOSE, KERN_INFO
		    "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);

	return apic_id;
}
int __init io_apic_get_version(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.version;
}
#endif
int io_apic_set_pci_routing(int ioapic, int pin, int irq, int triggering, int polarity)
{
	struct irq_desc *desc;
	struct irq_cfg *cfg;
	int cpu = boot_cpu_id;

	if (!IO_APIC_IRQ(irq)) {
		apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
			    ioapic);
		return -EINVAL;
	}

	desc = irq_to_desc_alloc_cpu(irq, cpu);
	if (!desc) {
		printk(KERN_INFO "cannot get irq_desc %d\n", irq);
		return 0;
	}

	/*
	 * IRQs < 16 are already in the irq_2_pin[] map
	 */
	if (irq >= NR_IRQS_LEGACY) {
		cfg = desc->chip_data;
		add_pin_to_irq_cpu(cfg, cpu, ioapic, pin);
	}

	setup_IO_APIC_irq(ioapic, pin, irq, desc, triggering, polarity);

	return 0;
}
int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
{
	int i;

	if (skip_ioapic_setup)
		return -1;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].mp_irqtype == mp_INT &&
		    mp_irqs[i].mp_srcbusirq == bus_irq)
			break;
	if (i >= mp_irq_entries)
		return -1;

	*trigger = irq_trigger(i);
	*polarity = irq_polarity(i);
	return 0;
}

#endif /* CONFIG_ACPI */
/*
 * This function is currently only a helper for the i386 SMP boot process,
 * where we need to reprogram the ioredtbls to cater for the CPUs which
 * have come online, so the mask in all cases should simply be TARGET_CPUS.
 */
#ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
{
	int pin, ioapic, irq, irq_entry;
	struct irq_desc *desc;
	struct irq_cfg *cfg;
	cpumask_t mask;

	if (skip_ioapic_setup == 1)
		return;

	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
			if (irq_entry == -1)
				continue;
			irq = pin_2_irq(irq_entry, ioapic, pin);

			/* setup_IO_APIC_irqs could fail to get a vector for
			 * some device when you have too many devices, because
			 * at that time only the boot cpu is online.
			 */
			desc = irq_to_desc(irq);
			cfg = desc->chip_data;
			if (!cfg->vector) {
				setup_IO_APIC_irq(ioapic, pin, irq, desc,
						  irq_trigger(irq_entry),
						  irq_polarity(irq_entry));
				continue;
			}

			/*
			 * Honour affinities which have been set in early boot
			 */
			if (desc->status &
			    (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
				mask = desc->affinity;
			else
				mask = TARGET_CPUS;

#ifdef CONFIG_INTR_REMAP
			if (intr_remapping_enabled)
				set_ir_ioapic_affinity_irq_desc(desc, mask);
			else
#endif
				set_ioapic_affinity_irq_desc(desc, mask);
		}
	}
}
#endif
#define IOAPIC_RESOURCE_NAME_SIZE 11

static struct resource *ioapic_resources;
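/*
 * Carve a single bootmem allocation into an array of struct resource
 * followed by the "IOAPIC %u" name strings the entries point at.
 */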
static struct resource * __init ioapic_setup_resources(void)
{
	unsigned long n;
	struct resource *res;
	char *mem;
	int i;

	if (nr_ioapics <= 0)
		return NULL;

	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
	n *= nr_ioapics;

	mem = alloc_bootmem(n);
	res = (void *)mem;

	if (mem != NULL) {
		mem += sizeof(struct resource) * nr_ioapics;

		for (i = 0; i < nr_ioapics; i++) {
			res[i].name = mem;
			res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			sprintf(mem, "IOAPIC %u", i);
			mem += IOAPIC_RESOURCE_NAME_SIZE;
		}
	}

	ioapic_resources = res;

	return res;
}
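/*
 * Map each IO-APIC's registers through a fixmap slot. If the MP table
 * is missing (or, on 32-bit, advertises a bogus zero address), point
 * the slot at a freshly allocated dummy page instead.
 */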
void __init ioapic_init_mappings(void)
{
	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
	struct resource *ioapic_res;
	int i;

	ioapic_res = ioapic_setup_resources();
	for (i = 0; i < nr_ioapics; i++) {
		if (smp_found_config) {
			ioapic_phys = mp_ioapics[i].mp_apicaddr;
#ifdef CONFIG_X86_32
			if (!ioapic_phys) {
				printk(KERN_ERR
				       "WARNING: bogus zero IO-APIC "
				       "address found in MPTABLE, "
				       "disabling IO/APIC support!\n");
				smp_found_config = 0;
				skip_ioapic_setup = 1;
				goto fake_ioapic_page;
			}
#endif
		} else {
#ifdef CONFIG_X86_32
fake_ioapic_page:
#endif
			ioapic_phys = (unsigned long)
				alloc_bootmem_pages(PAGE_SIZE);
			ioapic_phys = __pa(ioapic_phys);
		}
		set_fixmap_nocache(idx, ioapic_phys);
		apic_printk(APIC_VERBOSE,
			    "mapped IOAPIC to %08lx (%08lx)\n",
			    __fix_to_virt(idx), ioapic_phys);
		idx++;

		if (ioapic_res != NULL) {
			ioapic_res->start = ioapic_phys;
			ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
			ioapic_res++;
		}
	}
}
static int __init ioapic_insert_resources(void)
{
	int i;
	struct resource *r = ioapic_resources;

	if (!r) {
		printk(KERN_ERR
		       "IO APIC resources could not be allocated.\n");
		return -1;
	}

	for (i = 0; i < nr_ioapics; i++) {
		insert_resource(&iomem_resource, r);
		r++;
	}
	return 0;
}
/* Insert the IO APIC resources after PCI initialization has occurred to
 * handle IO APICs that are mapped in on a BAR in PCI space. */
late_initcall(ioapic_insert_resources);