irq_64.c

/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/irq.h>
#include <linux/kmemleak.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

#include "entry.h"
#include "cpumap.h"
#include "kstack.h"
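
/* ivector_table[] is indexed by "sysino": on sun4u the IGN|INO value
 * read from a device's IMAP register (see build_irq() below), on sun4v
 * the sysino returned by the hypervisor.  IMAP_INR masks the full
 * IGN|INO field, so NUM_IVECS buckets cover the whole space.
 */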
#define NUM_IVECS	(IMAP_INR + 1)

struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
        unsigned long ret;

        __asm__ __volatile__("ldxa	[%1] %2, %0"
                             : "=&r" (ret)
                             : "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __irq_chain_pa)),
                               "i" (ASI_PHYS_USE_EC));

        return ret;
}

static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
        __asm__ __volatile__("stxa	%%g0, [%0] %1"
                             : /* no outputs */
                             : "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __irq_chain_pa)),
                               "i" (ASI_PHYS_USE_EC));
}

static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
{
        unsigned int ret;

        __asm__ __volatile__("lduwa	[%1] %2, %0"
                             : "=&r" (ret)
                             : "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __virt_irq)),
                               "i" (ASI_PHYS_USE_EC));

        return ret;
}

static void bucket_set_virt_irq(unsigned long bucket_pa,
                                unsigned int virt_irq)
{
        __asm__ __volatile__("stwa	%0, [%1] %2"
                             : /* no outputs */
                             : "r" (virt_irq),
                               "r" (bucket_pa +
                                    offsetof(struct ino_bucket,
                                             __virt_irq)),
                               "i" (ASI_PHYS_USE_EC));
}
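
/* Per-cpu list head of pending ino_buckets, kept as a physical address
 * in the cpu's trap_block[] entry.  The low-level vector trap code
 * appends buckets to this list; handler_irq() below snapshots and walks
 * the chain using the bypass accessors above.
 */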
#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

static struct {
        unsigned int dev_handle;
        unsigned int dev_ino;
        unsigned int in_use;
} virt_irq_table[NR_IRQS];
static DEFINE_SPINLOCK(virt_irq_alloc_lock);
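
/* Allocate a virtual IRQ number for a device handle/ino pair.  Entry 0
 * is reserved to mean "no virtual IRQ", which is why the scan starts at
 * 1 and why exhaustion returns 0.  Virtual IRQs must fit in the
 * unsigned char return type, hence the BUILD_BUG_ON(NR_IRQS >= 256).
 */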
unsigned char virt_irq_alloc(unsigned int dev_handle,
                             unsigned int dev_ino)
{
        unsigned long flags;
        unsigned char ent;

        BUILD_BUG_ON(NR_IRQS >= 256);

        spin_lock_irqsave(&virt_irq_alloc_lock, flags);

        for (ent = 1; ent < NR_IRQS; ent++) {
                if (!virt_irq_table[ent].in_use)
                        break;
        }
        if (ent >= NR_IRQS) {
                printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
                ent = 0;
        } else {
                virt_irq_table[ent].dev_handle = dev_handle;
                virt_irq_table[ent].dev_ino = dev_ino;
                virt_irq_table[ent].in_use = 1;
        }

        spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);

        return ent;
}

#ifdef CONFIG_PCI_MSI
void virt_irq_free(unsigned int virt_irq)
{
        unsigned long flags;

        if (virt_irq >= NR_IRQS)
                return;

        spin_lock_irqsave(&virt_irq_alloc_lock, flags);

        virt_irq_table[virt_irq].in_use = 0;

        spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
}
#endif

/*
 * /proc/interrupts printing:
 */
int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        struct irqaction *action;
        unsigned long flags;

        if (i == 0) {
                seq_printf(p, "           ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%d       ", j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto skip;
                seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
                seq_printf(p, " %9s", irq_desc[i].chip->name);
                seq_printf(p, "  %s", action->name);

                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);

                seq_putc(p, '\n');
skip:
                raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        } else if (i == NR_IRQS) {
                seq_printf(p, "NMI: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", cpu_data(j).__nmi_count);
                seq_printf(p, "     Non-maskable interrupts\n");
        }
        return 0;
}
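
/* Translate a cpu number into the interrupt target ID (TID) layout the
 * IMAP register expects for this machine's interconnect: Starfire boards
 * go through a firmware translation, Jalapeno/Serrano parts use the JBUS
 * encoding, other cheetah-class parts use the Safari agent/node ID pair,
 * and everything else uses the plain UPA TID field.
 */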
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
        unsigned int tid;

        if (this_is_starfire) {
                tid = starfire_translate(imap, cpuid);
                tid <<= IMAP_TID_SHIFT;
                tid &= IMAP_TID_UPA;
        } else {
                if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                        unsigned long ver;

                        __asm__ ("rdpr %%ver, %0" : "=r" (ver));
                        if ((ver >> 32UL) == __JALAPENO_ID ||
                            (ver >> 32UL) == __SERRANO_ID) {
                                tid = cpuid << IMAP_TID_SHIFT;
                                tid &= IMAP_TID_JBUS;
                        } else {
                                unsigned int a = cpuid & 0x1f;
                                unsigned int n = (cpuid >> 5) & 0x1f;

                                tid = ((a << IMAP_AID_SHIFT) |
                                       (n << IMAP_NID_SHIFT));
                                tid &= (IMAP_AID_SAFARI |
                                        IMAP_NID_SAFARI);
                        }
                } else {
                        tid = cpuid << IMAP_TID_SHIFT;
                        tid &= IMAP_TID_UPA;
                }
        }

        return tid;
}

struct irq_handler_data {
        unsigned long   iclr;
        unsigned long   imap;

        void            (*pre_handler)(unsigned int, void *, void *);
        void            *arg1;
        void            *arg2;
};

#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq, const struct cpumask *affinity)
{
        cpumask_t mask;
        int cpuid;

        cpumask_copy(&mask, affinity);
        if (cpus_equal(mask, cpu_online_map)) {
                cpuid = map_to_cpu(virt_irq);
        } else {
                cpumask_t tmp;

                cpus_and(tmp, cpu_online_map, mask);
                cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq) : first_cpu(tmp);
        }

        return cpuid;
}
#else
#define irq_choose_cpu(virt_irq, affinity)	\
	real_hard_smp_processor_id()
#endif

static void sun4u_irq_enable(unsigned int virt_irq)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);

        if (likely(data)) {
                unsigned long cpuid, imap, val;
                unsigned int tid;

                cpuid = irq_choose_cpu(virt_irq,
                                       irq_desc[virt_irq].affinity);
                imap = data->imap;

                tid = sun4u_compute_tid(imap, cpuid);

                val = upa_readq(imap);
                val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
                         IMAP_AID_SAFARI | IMAP_NID_SAFARI);
                val |= tid | IMAP_VALID;
                upa_writeq(val, imap);
                upa_writeq(ICLR_IDLE, data->iclr);
        }
}

static int sun4u_set_affinity(unsigned int virt_irq,
                              const struct cpumask *mask)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);

        if (likely(data)) {
                unsigned long cpuid, imap, val;
                unsigned int tid;

                cpuid = irq_choose_cpu(virt_irq, mask);
                imap = data->imap;

                tid = sun4u_compute_tid(imap, cpuid);

                val = upa_readq(imap);
                val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
                         IMAP_AID_SAFARI | IMAP_NID_SAFARI);
                val |= tid | IMAP_VALID;
                upa_writeq(val, imap);
                upa_writeq(ICLR_IDLE, data->iclr);
        }

        return 0;
}

/* Don't do anything.  The desc->status check for IRQ_DISABLED in
 * handler_irq() will skip the handler call and that will leave the
 * interrupt in the sent state.  The next ->enable() call will hit the
 * ICLR register to reset the state machine.
 *
 * This scheme is necessary, instead of clearing the Valid bit in the
 * IMAP register, to handle the case of IMAP registers being shared by
 * multiple INOs (and thus ICLR registers).  Since we use a different
 * virtual IRQ for each shared IMAP instance, the generic code thinks
 * there is only one user so it prematurely calls ->disable() on
 * free_irq().
 *
 * We have to provide an explicit ->disable() method instead of using
 * NULL to get the default.  The reason is that if the generic code
 * sees that, it also hooks up a default ->shutdown method which
 * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
 */
static void sun4u_irq_disable(unsigned int virt_irq)
{
}

static void sun4u_irq_eoi(unsigned int virt_irq)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);
        struct irq_desc *desc = irq_desc + virt_irq;

        if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
                return;

        if (likely(data))
                upa_writeq(ICLR_IDLE, data->iclr);
}

static void sun4v_irq_enable(unsigned int virt_irq)
{
        unsigned int ino = virt_irq_table[virt_irq].dev_ino;
        unsigned long cpuid = irq_choose_cpu(virt_irq,
                                             irq_desc[virt_irq].affinity);
        int err;

        err = sun4v_intr_settarget(ino, cpuid);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
                       "err(%d)\n", ino, cpuid, err);
        err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_setstate(%x): "
                       "err(%d)\n", ino, err);
        err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
                       ino, err);
}

static int sun4v_set_affinity(unsigned int virt_irq,
                              const struct cpumask *mask)
{
        unsigned int ino = virt_irq_table[virt_irq].dev_ino;
        unsigned long cpuid = irq_choose_cpu(virt_irq, mask);
        int err;

        err = sun4v_intr_settarget(ino, cpuid);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
                       "err(%d)\n", ino, cpuid, err);

        return 0;
}

static void sun4v_irq_disable(unsigned int virt_irq)
{
        unsigned int ino = virt_irq_table[virt_irq].dev_ino;
        int err;

        err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_setenabled(%x): "
                       "err(%d)\n", ino, err);
}

static void sun4v_irq_eoi(unsigned int virt_irq)
{
        unsigned int ino = virt_irq_table[virt_irq].dev_ino;
        struct irq_desc *desc = irq_desc + virt_irq;
        int err;

        if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
                return;

        err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_intr_setstate(%x): "
                       "err(%d)\n", ino, err);
}

static void sun4v_virq_enable(unsigned int virt_irq)
{
        unsigned long cpuid, dev_handle, dev_ino;
        int err;

        cpuid = irq_choose_cpu(virt_irq, irq_desc[virt_irq].affinity);

        dev_handle = virt_irq_table[virt_irq].dev_handle;
        dev_ino = virt_irq_table[virt_irq].dev_ino;

        err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
                       "err(%d)\n",
                       dev_handle, dev_ino, cpuid, err);
        err = sun4v_vintr_set_state(dev_handle, dev_ino,
                                    HV_INTR_STATE_IDLE);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
                       "HV_INTR_STATE_IDLE): err(%d)\n",
                       dev_handle, dev_ino, err);
        err = sun4v_vintr_set_valid(dev_handle, dev_ino,
                                    HV_INTR_ENABLED);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
                       "HV_INTR_ENABLED): err(%d)\n",
                       dev_handle, dev_ino, err);
}

static int sun4v_virt_set_affinity(unsigned int virt_irq,
                                   const struct cpumask *mask)
{
        unsigned long cpuid, dev_handle, dev_ino;
        int err;

        cpuid = irq_choose_cpu(virt_irq, mask);

        dev_handle = virt_irq_table[virt_irq].dev_handle;
        dev_ino = virt_irq_table[virt_irq].dev_ino;

        err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
                       "err(%d)\n",
                       dev_handle, dev_ino, cpuid, err);

        return 0;
}

static void sun4v_virq_disable(unsigned int virt_irq)
{
        unsigned long dev_handle, dev_ino;
        int err;

        dev_handle = virt_irq_table[virt_irq].dev_handle;
        dev_ino = virt_irq_table[virt_irq].dev_ino;

        err = sun4v_vintr_set_valid(dev_handle, dev_ino,
                                    HV_INTR_DISABLED);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
                       "HV_INTR_DISABLED): err(%d)\n",
                       dev_handle, dev_ino, err);
}

static void sun4v_virq_eoi(unsigned int virt_irq)
{
        struct irq_desc *desc = irq_desc + virt_irq;
        unsigned long dev_handle, dev_ino;
        int err;

        if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
                return;

        dev_handle = virt_irq_table[virt_irq].dev_handle;
        dev_ino = virt_irq_table[virt_irq].dev_ino;

        err = sun4v_vintr_set_state(dev_handle, dev_ino,
                                    HV_INTR_STATE_IDLE);
        if (err != HV_EOK)
                printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
                       "HV_INTR_STATE_IDLE): err(%d)\n",
                       dev_handle, dev_ino, err);
}

static struct irq_chip sun4u_irq = {
        .name           = "sun4u",
        .enable         = sun4u_irq_enable,
        .disable        = sun4u_irq_disable,
        .eoi            = sun4u_irq_eoi,
        .set_affinity   = sun4u_set_affinity,
};

static struct irq_chip sun4v_irq = {
        .name           = "sun4v",
        .enable         = sun4v_irq_enable,
        .disable        = sun4v_irq_disable,
        .eoi            = sun4v_irq_eoi,
        .set_affinity   = sun4v_set_affinity,
};

static struct irq_chip sun4v_virq = {
        .name           = "vsun4v",
        .enable         = sun4v_virq_enable,
        .disable        = sun4v_virq_disable,
        .eoi            = sun4v_virq_eoi,
        .set_affinity   = sun4v_virt_set_affinity,
};
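
/* When a driver installs a pre-handler via irq_install_pre_handler()
 * below, the descriptor's flow handler is swapped for this wrapper,
 * which runs the pre-handler with its two saved arguments and then
 * falls through to the normal fasteoi flow.
 */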
static void pre_flow_handler(unsigned int virt_irq,
                             struct irq_desc *desc)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);
        unsigned int ino = virt_irq_table[virt_irq].dev_ino;

        data->pre_handler(ino, data->arg1, data->arg2);

        handle_fasteoi_irq(virt_irq, desc);
}

void irq_install_pre_handler(int virt_irq,
                             void (*func)(unsigned int, void *, void *),
                             void *arg1, void *arg2)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);
        struct irq_desc *desc = irq_desc + virt_irq;

        data->pre_handler = func;
        data->arg1 = arg1;
        data->arg2 = arg2;

        desc->handle_irq = pre_flow_handler;
}

unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
        struct ino_bucket *bucket;
        struct irq_handler_data *data;
        unsigned int virt_irq;
        int ino;

        BUG_ON(tlb_type == hypervisor);

        ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
        bucket = &ivector_table[ino];
        virt_irq = bucket_get_virt_irq(__pa(bucket));
        if (!virt_irq) {
                virt_irq = virt_irq_alloc(0, ino);
                bucket_set_virt_irq(__pa(bucket), virt_irq);
                set_irq_chip_and_handler_name(virt_irq,
                                              &sun4u_irq,
                                              handle_fasteoi_irq,
                                              "IVEC");
        }

        data = get_irq_chip_data(virt_irq);
        if (unlikely(data))
                goto out;

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data)) {
                prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                prom_halt();
        }
        set_irq_chip_data(virt_irq, data);

        data->imap = imap;
        data->iclr = iclr;

out:
        return virt_irq;
}

static unsigned int sun4v_build_common(unsigned long sysino,
                                       struct irq_chip *chip)
{
        struct ino_bucket *bucket;
        struct irq_handler_data *data;
        unsigned int virt_irq;

        BUG_ON(tlb_type != hypervisor);

        bucket = &ivector_table[sysino];
        virt_irq = bucket_get_virt_irq(__pa(bucket));
        if (!virt_irq) {
                virt_irq = virt_irq_alloc(0, sysino);
                bucket_set_virt_irq(__pa(bucket), virt_irq);
                set_irq_chip_and_handler_name(virt_irq, chip,
                                              handle_fasteoi_irq,
                                              "IVEC");
        }

        data = get_irq_chip_data(virt_irq);
        if (unlikely(data))
                goto out;

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data)) {
                prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                prom_halt();
        }
        set_irq_chip_data(virt_irq, data);

        /* Catch accidental accesses to these things.  IMAP/ICLR handling
         * is done by hypervisor calls on sun4v platforms, not by direct
         * register accesses.
         */
        data->imap = ~0UL;
        data->iclr = ~0UL;

out:
        return virt_irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
        unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

        return sun4v_build_common(sysino, &sun4v_irq);
}

unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
        struct irq_handler_data *data;
        unsigned long hv_err, cookie;
        struct ino_bucket *bucket;
        struct irq_desc *desc;
        unsigned int virt_irq;

        bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
        if (unlikely(!bucket))
                return 0;

        /* The only reference we store to the IRQ bucket is
         * by physical address which kmemleak can't see, tell
         * it that this object explicitly is not a leak and
         * should be scanned.
         */
        kmemleak_not_leak(bucket);
        __flush_dcache_range((unsigned long) bucket,
                             ((unsigned long) bucket +
                              sizeof(struct ino_bucket)));

        virt_irq = virt_irq_alloc(devhandle, devino);
        bucket_set_virt_irq(__pa(bucket), virt_irq);

        set_irq_chip_and_handler_name(virt_irq, &sun4v_virq,
                                      handle_fasteoi_irq,
                                      "IVEC");

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data))
                return 0;

        /* In order to make the LDC channel startup sequence easier,
         * especially wrt. locking, we do not let request_irq() enable
         * the interrupt.
         */
        desc = irq_desc + virt_irq;
        desc->status |= IRQ_NOAUTOEN;

        set_irq_chip_data(virt_irq, data);

        /* Catch accidental accesses to these things.  IMAP/ICLR handling
         * is done by hypervisor calls on sun4v platforms, not by direct
         * register accesses.
         */
        data->imap = ~0UL;
        data->iclr = ~0UL;
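
        /* A note on the cookie (an inference; the consuming trap code is
         * not in this file): handing the hypervisor ~__pa(bucket) yields a
         * value with its top bits set, so the dev-mondo handler can tell a
         * cookie-based vintr apart from a plain sysino and recover the
         * bucket's physical address with another bitwise complement.
         */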
        cookie = ~__pa(bucket);
        hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
        if (hv_err) {
                prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
                            "err=%lu\n", devhandle, devino, hv_err);
                prom_halt();
        }

        return virt_irq;
}

void ack_bad_irq(unsigned int virt_irq)
{
        unsigned int ino = virt_irq_table[virt_irq].dev_ino;

        if (!ino)
                ino = 0xdeadbeef;

        printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
               ino, virt_irq);
}

void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];
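
/* Top-level device interrupt entry.  With interrupts disabled (PSTATE_IE
 * cleared around the ldx/stx pair), atomically swap this cpu's pending
 * bucket chain with the empty list, then walk the snapshot on the
 * separate hard-irq stack, dispatching each bucket's virtual IRQ through
 * its flow handler unless that IRQ has been disabled.
 */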
void __irq_entry handler_irq(int irq, struct pt_regs *regs)
{
        unsigned long pstate, bucket_pa;
        struct pt_regs *old_regs;
        void *orig_sp;

        clear_softint(1 << irq);

        old_regs = set_irq_regs(regs);
        irq_enter();

        /* Grab an atomic snapshot of the pending IVECs. */
        __asm__ __volatile__("rdpr	%%pstate, %0\n\t"
                             "wrpr	%0, %3, %%pstate\n\t"
                             "ldx	[%2], %1\n\t"
                             "stx	%%g0, [%2]\n\t"
                             "wrpr	%0, 0x0, %%pstate\n\t"
                             : "=&r" (pstate), "=&r" (bucket_pa)
                             : "r" (irq_work_pa(smp_processor_id())),
                               "i" (PSTATE_IE)
                             : "memory");

        orig_sp = set_hardirq_stack();

        while (bucket_pa) {
                struct irq_desc *desc;
                unsigned long next_pa;
                unsigned int virt_irq;

                next_pa = bucket_get_chain_pa(bucket_pa);
                virt_irq = bucket_get_virt_irq(bucket_pa);
                bucket_clear_chain_pa(bucket_pa);

                desc = irq_desc + virt_irq;

                if (!(desc->status & IRQ_DISABLED))
                        desc->handle_irq(virt_irq, desc);

                bucket_pa = next_pa;
        }

        restore_hardirq_stack(orig_sp);

        irq_exit();
        set_irq_regs(old_regs);
}
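
/* Run pending softirqs on this cpu's dedicated softirq stack.  The
 * inline asm swaps %sp to the top of that stack (THREAD_SIZE minus 192
 * bytes for a minimal sparc64 stack frame, adjusted by STACK_BIAS)
 * around the __do_softirq() call, then restores the original pointer.
 */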
void do_softirq(void)
{
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending()) {
                void *orig_sp, *sp = softirq_stack[smp_processor_id()];

                sp += THREAD_SIZE - 192 - STACK_BIAS;

                __asm__ __volatile__("mov %%sp, %0\n\t"
                                     "mov %1, %%sp"
                                     : "=&r" (orig_sp)
                                     : "r" (sp));
                __do_softirq();
                __asm__ __volatile__("mov %0, %%sp"
                                     : : "r" (orig_sp));
        }

        local_irq_restore(flags);
}

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
        unsigned int irq;

        for (irq = 0; irq < NR_IRQS; irq++) {
                unsigned long flags;

                raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
                if (irq_desc[irq].action &&
                    !(irq_desc[irq].status & IRQ_PER_CPU)) {
                        if (irq_desc[irq].chip->set_affinity)
                                irq_desc[irq].chip->set_affinity(irq,
                                        irq_desc[irq].affinity);
                }
                raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
        }

        tick_ops->disable_irq();
}
#endif

struct sun5_timer {
        u64     count0;
        u64     limit0;
        u64     count1;
        u64     limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
        struct device_node *dp;
        const unsigned int *addr;

        /* PROM timer node hangs out in the top level of device siblings... */
        dp = of_find_node_by_path("/");
        dp = dp->child;
        while (dp) {
                if (!strcmp(dp->name, "counter-timer"))
                        break;
                dp = dp->sibling;
        }

        /* Assume that if the node is not present, the PROM uses a
         * different tick mechanism which we need not care about.
         */
        if (!dp) {
                prom_timers = (struct sun5_timer *) 0;
                return;
        }

        /* If the PROM is really using this, it must have mapped it. */
        addr = of_get_property(dp, "address", NULL);
        if (!addr) {
                prom_printf("PROM does not have timer mapped, trying to continue.\n");
                prom_timers = (struct sun5_timer *) 0;
                return;
        }
        prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
        if (!prom_timers)
                return;

        /* Save them away for later. */
        prom_limit0 = prom_timers->limit0;
        prom_limit1 = prom_timers->limit1;

        /* Just as in sun4c/sun4m, the PROM uses a timer which ticks at
         * IRQ 14.  We turn both off here just to be paranoid.
         */
        prom_timers->limit0 = 0;
        prom_timers->limit1 = 0;

        /* Wheee, eat the interrupt packet too... */
        __asm__ __volatile__(
"       mov     0x40, %%g2\n"
"       ldxa    [%%g0] %0, %%g1\n"
"       ldxa    [%%g2] %1, %%g1\n"
"       stxa    %%g0, [%%g0] %0\n"
"       membar  #Sync\n"
        : /* no outputs */
        : "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
        : "g1", "g2");
}

void notrace init_irqwork_curcpu(void)
{
        int cpu = hard_smp_processor_id();

        trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
        unsigned long num_entries = (qmask + 1) / 64;
        unsigned long status;

        status = sun4v_cpu_qconf(type, paddr, num_entries);
        if (status != HV_EOK) {
                prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
                            "err %lu\n", type, paddr, num_entries, status);
                prom_halt();
        }
}

void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu)
{
        struct trap_per_cpu *tb = &trap_block[this_cpu];

        register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
                           tb->cpu_mondo_qmask);
        register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
                           tb->dev_mondo_qmask);
        register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
                           tb->resum_qmask);
        register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
                           tb->nonresum_qmask);
}

/* Each queue region must be a power of 2 multiple of 64 bytes in
 * size.  The base real address must be aligned to the size of the
 * region.  Thus, an 8KB queue must be 8KB aligned, for example.
 */
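
/* __get_free_pages() hands back buddy-allocator blocks that are
 * naturally aligned to their own power-of-two, page-multiple size, so
 * rounding the queue size up to a page order below satisfies the
 * alignment rule stated above.
 */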
static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
{
        unsigned long size = PAGE_ALIGN(qmask + 1);
        unsigned long order = get_order(size);
        unsigned long p;

        p = __get_free_pages(GFP_KERNEL, order);
        if (!p) {
                prom_printf("SUN4V: Error, cannot allocate queue.\n");
                prom_halt();
        }

        *pa_ptr = __pa(p);
}

static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
        unsigned long page;

        BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

        page = get_zeroed_page(GFP_KERNEL);
        if (!page) {
                prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
                prom_halt();
        }

        tb->cpu_mondo_block_pa = __pa(page);
        tb->cpu_list_pa = __pa(page + 64);
#endif
}

/* Allocate mondo and error queues for all possible cpus. */
static void __init sun4v_init_mondo_queues(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct trap_per_cpu *tb = &trap_block[cpu];

                alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
                alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
                alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
                alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
                alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
                alloc_one_queue(&tb->nonresum_kernel_buf_pa,
                                tb->nonresum_qmask);
        }
}

static void __init init_send_mondo_info(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct trap_per_cpu *tb = &trap_block[cpu];

                init_cpu_send_mondo_info(tb);
        }
}

static struct irqaction timer_irq_action = {
        .name = "timer",
};

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
        unsigned long size;

        map_prom_timers();
        kill_prom_timer();

        size = sizeof(struct ino_bucket) * NUM_IVECS;
        ivector_table = kzalloc(size, GFP_KERNEL);
        if (!ivector_table) {
                prom_printf("Fatal error, cannot allocate ivector_table\n");
                prom_halt();
        }
        __flush_dcache_range((unsigned long) ivector_table,
                             ((unsigned long) ivector_table) + size);

        ivector_table_pa = __pa(ivector_table);

        if (tlb_type == hypervisor)
                sun4v_init_mondo_queues();

        init_send_mondo_info();

        if (tlb_type == hypervisor) {
                /* Load up the boot cpu's entries. */
                sun4v_register_mondo_queues(hard_smp_processor_id());
        }

        /* We need to clear any IRQs pending in the soft interrupt
         * registers, since a spurious one could be left around from the
         * PROM timer which we just disabled.
         */
        clear_softint(get_softint());

        /* Now that the ivector table is initialized, it is safe
         * to receive IRQ vector traps.  We will normally take
         * one or two right now, in case some device PROM used
         * to boot us wants to speak to us.  We just ignore them.
         */
        __asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
                             "or	%%g1, %0, %%g1\n\t"
                             "wrpr	%%g1, 0x0, %%pstate"
                             : /* No outputs */
                             : "i" (PSTATE_IE)
                             : "g1");

        irq_desc[0].action = &timer_irq_action;
}
  856. }