/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/irq.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>

/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 *
 * If you make changes to ino_bucket, please update hand coded assembler
 * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
 */
struct ino_bucket {
/*0x00*/unsigned long irq_chain_pa;

        /* Virtual interrupt number assigned to this INO.  */
/*0x08*/unsigned int virt_irq;
/*0x0c*/unsigned int __pad;
};

#define NUM_IVECS (IMAP_INR + 1)
struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
unsigned long ivector_table_pa;

#define __irq_ino(irq) \
        (((struct ino_bucket *)(irq)) - &ivector_table[0])
#define __bucket(irq) ((struct ino_bucket *)(irq))
#define __irq(bucket) ((unsigned long)(bucket))

#define irq_work_pa(__cpu) &(trap_block[(__cpu)].irq_worklist_pa)

static struct {
        unsigned long irq;
        unsigned int dev_handle;
        unsigned int dev_ino;
} virt_to_real_irq_table[NR_IRQS];
static DEFINE_SPINLOCK(virt_irq_alloc_lock);
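
/* Allocate an unused virtual IRQ slot and bind it to the given real
 * (bucket address) IRQ.  Slot 0 is reserved as the "no IRQ" value, so
 * a return of 0 means the table is full.
 */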
unsigned char virt_irq_alloc(unsigned long real_irq)
{
        unsigned long flags;
        unsigned char ent;

        BUILD_BUG_ON(NR_IRQS >= 256);

        spin_lock_irqsave(&virt_irq_alloc_lock, flags);

        for (ent = 1; ent < NR_IRQS; ent++) {
                if (!virt_to_real_irq_table[ent].irq)
                        break;
        }
        if (ent >= NR_IRQS) {
                printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
                ent = 0;
        } else {
                virt_to_real_irq_table[ent].irq = real_irq;
        }

        spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);

        return ent;
}

#ifdef CONFIG_PCI_MSI
void virt_irq_free(unsigned int virt_irq)
{
        unsigned long flags;

        if (virt_irq >= NR_IRQS)
                return;

        spin_lock_irqsave(&virt_irq_alloc_lock, flags);

        virt_to_real_irq_table[virt_irq].irq = 0;

        spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
}
#endif

static unsigned long virt_to_real_irq(unsigned char virt_irq)
{
        return virt_to_real_irq_table[virt_irq].irq;
}

/*
 * /proc/interrupts printing:
 */
int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        struct irqaction *action;
        unsigned long flags;

        if (i == 0) {
                seq_printf(p, " ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%d ", j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto skip;
                seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
                seq_printf(p, " %9s", irq_desc[i].chip->typename);
                seq_printf(p, " %s", action->name);

                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);

                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        }
        return 0;
}
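
/* Compute the interrupt target ID (TID) field for a sun4u IMAP register,
 * encoding the destination cpu in the format the underlying bus expects
 * (Starfire, JBUS, Safari, or plain UPA).
 */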
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
        unsigned int tid;

        if (this_is_starfire) {
                tid = starfire_translate(imap, cpuid);
                tid <<= IMAP_TID_SHIFT;
                tid &= IMAP_TID_UPA;
        } else {
                if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                        unsigned long ver;

                        __asm__ ("rdpr %%ver, %0" : "=r" (ver));
                        if ((ver >> 32UL) == __JALAPENO_ID ||
                            (ver >> 32UL) == __SERRANO_ID) {
                                tid = cpuid << IMAP_TID_SHIFT;
                                tid &= IMAP_TID_JBUS;
                        } else {
                                unsigned int a = cpuid & 0x1f;
                                unsigned int n = (cpuid >> 5) & 0x1f;

                                tid = ((a << IMAP_AID_SHIFT) |
                                       (n << IMAP_NID_SHIFT));
                                tid &= (IMAP_AID_SAFARI |
                                        IMAP_NID_SAFARI);
                        }
                } else {
                        tid = cpuid << IMAP_TID_SHIFT;
                        tid &= IMAP_TID_UPA;
                }
        }

        return tid;
}

struct irq_handler_data {
        unsigned long iclr;
        unsigned long imap;

        void (*pre_handler)(unsigned int, void *, void *);
        void *pre_handler_arg1;
        void *pre_handler_arg2;
};

static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq)
{
        unsigned long real_irq = virt_to_real_irq(virt_irq);
        struct ino_bucket *bucket = NULL;

        if (likely(real_irq))
                bucket = __bucket(real_irq);

        return bucket;
}
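
/* Pick the target cpu for an interrupt: honour the affinity mask if it
 * names an online cpu, otherwise fall back to round-robin distribution
 * across all online cpus.
 */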
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq)
{
        cpumask_t mask = irq_desc[virt_irq].affinity;
        int cpuid;

        if (cpus_equal(mask, CPU_MASK_ALL)) {
                static int irq_rover;
                static DEFINE_SPINLOCK(irq_rover_lock);
                unsigned long flags;

                /* Round-robin distribution... */
        do_round_robin:
                spin_lock_irqsave(&irq_rover_lock, flags);

                while (!cpu_online(irq_rover)) {
                        if (++irq_rover >= NR_CPUS)
                                irq_rover = 0;
                }
                cpuid = irq_rover;
                do {
                        if (++irq_rover >= NR_CPUS)
                                irq_rover = 0;
                } while (!cpu_online(irq_rover));
                spin_unlock_irqrestore(&irq_rover_lock, flags);
        } else {
                cpumask_t tmp;

                cpus_and(tmp, cpu_online_map, mask);

                if (cpus_empty(tmp))
                        goto do_round_robin;

                cpuid = first_cpu(tmp);
        }

        return cpuid;
}
#else
static int irq_choose_cpu(unsigned int virt_irq)
{
        return real_hard_smp_processor_id();
}
#endif
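
/* sun4u interrupt sources are controlled by writing the IMAP and ICLR
 * registers directly: enable programs the target ID and sets IMAP_VALID,
 * disable clears IMAP_VALID, and end writes ICLR_IDLE so the source can
 * send again.
 */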
static void sun4u_irq_enable(unsigned int virt_irq)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);

        if (likely(data)) {
                unsigned long cpuid, imap, val;
                unsigned int tid;

                cpuid = irq_choose_cpu(virt_irq);
                imap = data->imap;

                tid = sun4u_compute_tid(imap, cpuid);

                val = upa_readq(imap);
                val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
                         IMAP_AID_SAFARI | IMAP_NID_SAFARI);
                val |= tid | IMAP_VALID;
                upa_writeq(val, imap);
        }
}

static void sun4u_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
        sun4u_irq_enable(virt_irq);
}

static void sun4u_irq_disable(unsigned int virt_irq)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);

        if (likely(data)) {
                unsigned long imap = data->imap;
                unsigned long tmp = upa_readq(imap);

                tmp &= ~IMAP_VALID;
                upa_writeq(tmp, imap);
        }
}

static void sun4u_irq_end(unsigned int virt_irq)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);
        struct irq_desc *desc = irq_desc + virt_irq;

        if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
                return;

        if (likely(data))
                upa_writeq(ICLR_IDLE, data->iclr);
}
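
/* On sun4v the same operations go through hypervisor calls keyed by the
 * sysino, which is the bucket's index in ivector_table.
 */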
static void sun4v_irq_enable(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];

        if (likely(bucket)) {
                unsigned long cpuid;
                int err;

                cpuid = irq_choose_cpu(virt_irq);

                err = sun4v_intr_settarget(ino, cpuid);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
                               "err(%d)\n", ino, cpuid, err);
                err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_intr_setstate(%x): "
                               "err(%d)\n", ino, err);
                err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
                               ino, err);
        }
}

static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];

        if (likely(bucket)) {
                unsigned long cpuid;
                int err;

                cpuid = irq_choose_cpu(virt_irq);

                err = sun4v_intr_settarget(ino, cpuid);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
                               "err(%d)\n", ino, cpuid, err);
        }
}

static void sun4v_irq_disable(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];

        if (likely(bucket)) {
                int err;

                err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_intr_setenabled(%x): "
                               "err(%d)\n", ino, err);
        }
}

static void sun4v_irq_end(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];
        struct irq_desc *desc = irq_desc + virt_irq;

        if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
                return;

        if (likely(bucket)) {
                int err;

                err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_intr_setstate(%x): "
                               "err(%d)\n", ino, err);
        }
}
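
/* sun4v "virtual" (cookie based) interrupts are addressed by the
 * (dev_handle, dev_ino) pair recorded in virt_to_real_irq_table rather
 * than by sysino.
 */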
static void sun4v_virq_enable(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);

        if (likely(bucket)) {
                unsigned long cpuid, dev_handle, dev_ino;
                int err;

                cpuid = irq_choose_cpu(virt_irq);

                dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
                dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;

                err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
                               "err(%d)\n",
                               dev_handle, dev_ino, cpuid, err);
                err = sun4v_vintr_set_state(dev_handle, dev_ino,
                                            HV_INTR_STATE_IDLE);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
                               "HV_INTR_STATE_IDLE): err(%d)\n",
                               dev_handle, dev_ino, err);
                err = sun4v_vintr_set_valid(dev_handle, dev_ino,
                                            HV_INTR_ENABLED);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
                               "HV_INTR_ENABLED): err(%d)\n",
                               dev_handle, dev_ino, err);
        }
}

static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);

        if (likely(bucket)) {
                unsigned long cpuid, dev_handle, dev_ino;
                int err;

                cpuid = irq_choose_cpu(virt_irq);

                dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
                dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;

                err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
                               "err(%d)\n",
                               dev_handle, dev_ino, cpuid, err);
        }
}

static void sun4v_virq_disable(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);

        if (likely(bucket)) {
                unsigned long dev_handle, dev_ino;
                int err;

                dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
                dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;

                err = sun4v_vintr_set_valid(dev_handle, dev_ino,
                                            HV_INTR_DISABLED);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_vintr_set_valid(%lx,%lx,"
                               "HV_INTR_DISABLED): err(%d)\n",
                               dev_handle, dev_ino, err);
        }
}

static void sun4v_virq_end(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        struct irq_desc *desc = irq_desc + virt_irq;

        if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
                return;

        if (likely(bucket)) {
                unsigned long dev_handle, dev_ino;
                int err;

                dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
                dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;

                err = sun4v_vintr_set_state(dev_handle, dev_ino,
                                            HV_INTR_STATE_IDLE);
                if (err != HV_EOK)
                        printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
                               "HV_INTR_STATE_IDLE): err(%d)\n",
                               dev_handle, dev_ino, err);
        }
}

static void run_pre_handler(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);

        if (likely(data->pre_handler)) {
                data->pre_handler(__irq_ino(__irq(bucket)),
                                  data->pre_handler_arg1,
                                  data->pre_handler_arg2);
        }
}

static struct irq_chip sun4u_irq = {
        .typename       = "sun4u",
        .enable         = sun4u_irq_enable,
        .disable        = sun4u_irq_disable,
        .end            = sun4u_irq_end,
        .set_affinity   = sun4u_set_affinity,
};

static struct irq_chip sun4u_irq_ack = {
        .typename       = "sun4u+ack",
        .enable         = sun4u_irq_enable,
        .disable        = sun4u_irq_disable,
        .ack            = run_pre_handler,
        .end            = sun4u_irq_end,
        .set_affinity   = sun4u_set_affinity,
};

static struct irq_chip sun4v_irq = {
        .typename       = "sun4v",
        .enable         = sun4v_irq_enable,
        .disable        = sun4v_irq_disable,
        .end            = sun4v_irq_end,
        .set_affinity   = sun4v_set_affinity,
};

static struct irq_chip sun4v_virq = {
        .typename       = "vsun4v",
        .enable         = sun4v_virq_enable,
        .disable        = sun4v_virq_disable,
        .end            = sun4v_virq_end,
        .set_affinity   = sun4v_virt_set_affinity,
};

void irq_install_pre_handler(int virt_irq,
                             void (*func)(unsigned int, void *, void *),
                             void *arg1, void *arg2)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);
        struct irq_chip *chip = get_irq_chip(virt_irq);

        if (WARN_ON(chip == &sun4v_irq || chip == &sun4v_virq)) {
                printk(KERN_ERR "IRQ: Trying to install pre-handler on "
                       "sun4v irq %u\n", virt_irq);
                return;
        }

        data->pre_handler = func;
        data->pre_handler_arg1 = arg1;
        data->pre_handler_arg2 = arg2;

        if (chip == &sun4u_irq_ack)
                return;

        set_irq_chip(virt_irq, &sun4u_irq_ack);
}
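
/* Map a sun4u interrupt source to a virtual IRQ.  The INO comes from the
 * IMAP register (plus inofixup), and the per-IRQ chip data holding the
 * IMAP/ICLR addresses is allocated on first use.
 */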
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
        struct ino_bucket *bucket;
        struct irq_handler_data *data;
        int ino;

        BUG_ON(tlb_type == hypervisor);

        ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
        bucket = &ivector_table[ino];
        if (!bucket->virt_irq) {
                bucket->virt_irq = virt_irq_alloc(__irq(bucket));
                set_irq_chip(bucket->virt_irq, &sun4u_irq);
        }

        data = get_irq_chip_data(bucket->virt_irq);
        if (unlikely(data))
                goto out;

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data)) {
                prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                prom_halt();
        }
        set_irq_chip_data(bucket->virt_irq, data);

        data->imap = imap;
        data->iclr = iclr;

out:
        return bucket->virt_irq;
}

static unsigned int sun4v_build_common(unsigned long sysino,
                                       struct irq_chip *chip)
{
        struct ino_bucket *bucket;
        struct irq_handler_data *data;

        BUG_ON(tlb_type != hypervisor);

        bucket = &ivector_table[sysino];
        if (!bucket->virt_irq) {
                bucket->virt_irq = virt_irq_alloc(__irq(bucket));
                set_irq_chip(bucket->virt_irq, chip);
        }

        data = get_irq_chip_data(bucket->virt_irq);
        if (unlikely(data))
                goto out;

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data)) {
                prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                prom_halt();
        }
        set_irq_chip_data(bucket->virt_irq, data);

        /* Catch accidental accesses to these things.  IMAP/ICLR handling
         * is done by hypervisor calls on sun4v platforms, not by direct
         * register accesses.
         */
        data->imap = ~0UL;
        data->iclr = ~0UL;

out:
        return bucket->virt_irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
        unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

        return sun4v_build_common(sysino, &sun4v_irq);
}

unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
        unsigned long sysino, hv_err;
        unsigned int virq;

        BUG_ON(devhandle & devino);

        sysino = devhandle | devino;
        BUG_ON(sysino & ~(IMAP_IGN | IMAP_INO));

        hv_err = sun4v_vintr_set_cookie(devhandle, devino, sysino);
        if (hv_err) {
                prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
                            "err=%lu\n", devhandle, devino, hv_err);
                prom_halt();
        }

        virq = sun4v_build_common(sysino, &sun4v_virq);

        virt_to_real_irq_table[virq].dev_handle = devhandle;
        virt_to_real_irq_table[virq].dev_ino = devino;

        return virq;
}

void ack_bad_irq(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = 0xdeadbeef;

        if (bucket)
                ino = bucket - &ivector_table[0];

        printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
               ino, virt_irq);
}
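
/* Softint entry point for device interrupts: atomically pull this cpu's
 * chain of pending ino_buckets (a physical-address list built by the
 * vector trap handler in entry.S) and run __do_IRQ() for each queued
 * virtual IRQ.
 */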
void handler_irq(int irq, struct pt_regs *regs)
{
        unsigned long pstate, bucket_pa;
        struct pt_regs *old_regs;

        clear_softint(1 << irq);

        old_regs = set_irq_regs(regs);
        irq_enter();

        /* Grab an atomic snapshot of the pending IVECs.  */
        __asm__ __volatile__("rdpr %%pstate, %0\n\t"
                             "wrpr %0, %3, %%pstate\n\t"
                             "ldx [%2], %1\n\t"
                             "stx %%g0, [%2]\n\t"
                             "wrpr %0, 0x0, %%pstate\n\t"
                             : "=&r" (pstate), "=&r" (bucket_pa)
                             : "r" (irq_work_pa(smp_processor_id())),
                               "i" (PSTATE_IE)
                             : "memory");

        while (bucket_pa) {
                unsigned long next_pa;
                unsigned int virt_irq;

                __asm__ __volatile__("ldxa [%2] %4, %0\n\t"
                                     "lduwa [%3] %4, %1\n\t"
                                     "stxa %%g0, [%2] %4"
                                     : "=&r" (next_pa), "=&r" (virt_irq)
                                     : "r" (bucket_pa +
                                            offsetof(struct ino_bucket,
                                                     irq_chain_pa)),
                                       "r" (bucket_pa +
                                            offsetof(struct ino_bucket,
                                                     virt_irq)),
                                       "i" (ASI_PHYS_USE_EC));

                __do_IRQ(virt_irq);

                bucket_pa = next_pa;
        }

        irq_exit();
        set_irq_regs(old_regs);
}

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
{
        unsigned int irq;

        for (irq = 0; irq < NR_IRQS; irq++) {
                unsigned long flags;

                spin_lock_irqsave(&irq_desc[irq].lock, flags);
                if (irq_desc[irq].action &&
                    !(irq_desc[irq].status & IRQ_PER_CPU)) {
                        if (irq_desc[irq].chip->set_affinity)
                                irq_desc[irq].chip->set_affinity(irq,
                                        irq_desc[irq].affinity);
                }
                spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
        }
}
#endif

struct sun5_timer {
        u64 count0;
        u64 limit0;
        u64 count1;
        u64 limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
        struct device_node *dp;
        const unsigned int *addr;

        /* PROM timer node hangs out in the top level of device siblings... */
        dp = of_find_node_by_path("/");
        dp = dp->child;
        while (dp) {
                if (!strcmp(dp->name, "counter-timer"))
                        break;
                dp = dp->sibling;
        }

        /* Assume if node is not present, PROM uses different tick mechanism
         * which we should not care about.
         */
        if (!dp) {
                prom_timers = (struct sun5_timer *) 0;
                return;
        }

        /* If PROM is really using this, it must be mapped by him. */
        addr = of_get_property(dp, "address", NULL);
        if (!addr) {
                prom_printf("PROM does not have timer mapped, trying to continue.\n");
                prom_timers = (struct sun5_timer *) 0;
                return;
        }
        prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
        if (!prom_timers)
                return;

        /* Save them away for later. */
        prom_limit0 = prom_timers->limit0;
        prom_limit1 = prom_timers->limit1;

        /* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
         * We turn both off here just to be paranoid.
         */
        prom_timers->limit0 = 0;
        prom_timers->limit1 = 0;

        /* Wheee, eat the interrupt packet too... */
        __asm__ __volatile__(
"       mov     0x40, %%g2\n"
"       ldxa    [%%g0] %0, %%g1\n"
"       ldxa    [%%g2] %1, %%g1\n"
"       stxa    %%g0, [%%g0] %0\n"
"       membar  #Sync\n"
        : /* no outputs */
        : "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
        : "g1", "g2");
}

void init_irqwork_curcpu(void)
{
        int cpu = hard_smp_processor_id();

        trap_block[cpu].irq_worklist_pa = 0UL;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
        unsigned long num_entries = (qmask + 1) / 64;
        unsigned long status;

        status = sun4v_cpu_qconf(type, paddr, num_entries);
        if (status != HV_EOK) {
                prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
                            "err %lu\n", type, paddr, num_entries, status);
                prom_halt();
        }
}

void __cpuinit sun4v_register_mondo_queues(int this_cpu)
{
        struct trap_per_cpu *tb = &trap_block[this_cpu];

        register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
                           tb->cpu_mondo_qmask);
        register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
                           tb->dev_mondo_qmask);
        register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
                           tb->resum_qmask);
        register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
                           tb->nonresum_qmask);
}

static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
{
        unsigned long size = PAGE_ALIGN(qmask + 1);
        void *p = __alloc_bootmem_low(size, size, 0);

        if (!p) {
                prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
                prom_halt();
        }

        *pa_ptr = __pa(p);
}

static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
{
        unsigned long size = PAGE_ALIGN(qmask + 1);
        void *p = __alloc_bootmem_low(size, size, 0);

        if (!p) {
                prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
                prom_halt();
        }

        *pa_ptr = __pa(p);
}

static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
{
#ifdef CONFIG_SMP
        void *page;

        BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

        page = alloc_bootmem_low_pages(PAGE_SIZE);
        if (!page) {
                prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
                prom_halt();
        }

        tb->cpu_mondo_block_pa = __pa(page);
        tb->cpu_list_pa = __pa(page + 64);
#endif
}

/* Allocate mondo and error queues for all possible cpus.  */
static void __init sun4v_init_mondo_queues(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct trap_per_cpu *tb = &trap_block[cpu];

                alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
                alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
                alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
                alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
                alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
                alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
                               tb->nonresum_qmask);

                init_cpu_send_mondo_info(tb);
        }

        /* Load up the boot cpu's entries.  */
        sun4v_register_mondo_queues(hard_smp_processor_id());
}

static struct irqaction timer_irq_action = {
        .name = "timer",
};

/* XXX Belongs in a common location. XXX */
static unsigned long kimage_addr_to_ra(void *p)
{
        unsigned long val = (unsigned long) p;

        return kern_base + (val - KERNBASE);
}

/* Only invoked on boot processor. */
void __init init_IRQ(void)
{
        map_prom_timers();
        kill_prom_timer();

        memset(&ivector_table[0], 0, sizeof(ivector_table));
        ivector_table_pa = kimage_addr_to_ra(&ivector_table[0]);

        if (tlb_type == hypervisor)
                sun4v_init_mondo_queues();

        /* We need to clear any IRQ's pending in the soft interrupt
         * registers, a spurious one could be left around from the
         * PROM timer which we just disabled.
         */
        clear_softint(get_softint());

        /* Now that ivector table is initialized, it is safe
         * to receive IRQ vector traps.  We will normally take
         * one or two right now, in case some device PROM used
         * to boot us wants to speak to us.  We just ignore them.
         */
        __asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
                             "or %%g1, %0, %%g1\n\t"
                             "wrpr %%g1, 0x0, %%pstate"
                             : /* No outputs */
                             : "i" (PSTATE_IE)
                             : "g1");

        irq_desc[0].action = &timer_irq_action;
}