sun4d_irq.c

/*
 * arch/sparc/kernel/sun4d_irq.c:
 * SS1000/SC2000 interrupt handling.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Heavily based on arch/sparc/kernel/irq.c.
 */

#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/psr.h>
#include <asm/smp.h>
#include <asm/vaddrs.h>
#include <asm/timer.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>
#include <asm/sbi.h>
#include <asm/cacheflush.h>
#include <asm/irq_regs.h>

#include "irq.h"

/* If you trust current SCSI layer to handle different SCSI IRQs, enable this. I don't trust it... -jj */
/* #define DISTRIBUTE_IRQS */

struct sun4d_timer_regs *sun4d_timers;
#define TIMER_IRQ	10

#define MAX_STATIC_ALLOC	4
extern struct irqaction static_irqaction[MAX_STATIC_ALLOC];
extern int static_irq_count;
unsigned char cpu_leds[32];
#ifdef CONFIG_SMP
static unsigned char sbus_tid[32];
#endif

static struct irqaction *irq_action[NR_IRQS];
extern spinlock_t irq_action_lock;

static struct sbus_action {
	struct irqaction *action;
	/* For SMP this needs to be extended */
} *sbus_actions;

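/* Mapping between sparc processor interrupt levels (PIL 0-15) and the SBUS
 * interrupt levels (1-7) used by the SBI, plus the reverse table. */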
static int pil_to_sbus[] = {
	0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
};

static int sbus_to_pil[] = {
	0, 2, 3, 5, 7, 9, 11, 13,
};

static int nsbi;
#ifdef CONFIG_SMP
DEFINE_SPINLOCK(sun4d_imsk_lock);
#endif

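/* /proc/interrupts support: print the count and handler name(s) for PIL i,
 * walking irq_action[] for on-board levels and sbus_actions[] for SBUS ones. */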
int show_sun4d_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j = 0, k = 0, sbusl;
	struct irqaction *action;
	unsigned long flags;
#ifdef CONFIG_SMP
	int x;
#endif

	spin_lock_irqsave(&irq_action_lock, flags);
	if (i < NR_IRQS) {
		sbusl = pil_to_sbus[i];
		if (!sbusl) {
			action = *(i + irq_action);
			if (!action)
				goto out_unlock;
		} else {
			for (j = 0; j < nsbi; j++) {
				for (k = 0; k < 4; k++)
					if ((action = sbus_actions[(j << 5) + (sbusl << 2) + k].action))
						goto found_it;
			}
			goto out_unlock;
		}
found_it:	seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(x)
			seq_printf(p, "%10u ",
				   kstat_cpu(cpu_logical_map(x)).irqs[i]);
#endif
		seq_printf(p, "%c %s",
			   (action->flags & IRQF_DISABLED) ? '+' : ' ',
			   action->name);
		action = action->next;
		for (;;) {
			for (; action; action = action->next) {
				seq_printf(p, ",%s %s",
					   (action->flags & IRQF_DISABLED) ? " +" : "",
					   action->name);
			}
			if (!sbusl) break;
			k++;
			if (k < 4)
				action = sbus_actions[(j << 5) + (sbusl << 2) + k].action;
			else {
				j++;
				if (j == nsbi) break;
				k = 0;
				action = sbus_actions[(j << 5) + (sbusl << 2)].action;
			}
		}
		seq_putc(p, '\n');
	}
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
	return 0;
}

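/* Remove a handler (matched by dev_id on shared lines) from an on-board or
 * SBUS interrupt; the line is masked again once no handlers are left. */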
void sun4d_free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action, **actionp;
	struct irqaction *tmp = NULL;
	unsigned long flags;

	spin_lock_irqsave(&irq_action_lock, flags);
	if (irq < 15)
		actionp = irq + irq_action;
	else
		actionp = &(sbus_actions[irq - (1 << 5)].action);
	action = *actionp;
	if (!action) {
		printk("Trying to free free IRQ%d\n", irq);
		goto out_unlock;
	}
	if (dev_id) {
		for (; action; action = action->next) {
			if (action->dev_id == dev_id)
				break;
			tmp = action;
		}
		if (!action) {
			printk("Trying to free free shared IRQ%d\n", irq);
			goto out_unlock;
		}
	} else if (action->flags & IRQF_SHARED) {
		printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
		goto out_unlock;
	}
	if (action->flags & SA_STATIC_ALLOC) {
		/* This interrupt is marked as specially allocated
		 * so it is a bad idea to free it.
		 */
		printk("Attempt to free statically allocated IRQ%d (%s)\n",
		       irq, action->name);
		goto out_unlock;
	}

	if (action && tmp)
		tmp->next = action->next;
	else
		*actionp = action->next;

	spin_unlock_irqrestore(&irq_action_lock, flags);

	synchronize_irq(irq);

	spin_lock_irqsave(&irq_action_lock, flags);

	kfree(action);

	if (!(*actionp))
		__disable_irq(irq);

out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
}

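/* Main interrupt entry: acknowledge the level via cc_set_iclr(), account it,
 * then run the chained handlers; for SBUS levels, scan each pending SBI,
 * dispatch the per-slot actions and release the serviced slots. */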
extern void unexpected_irq(int, void *, struct pt_regs *);

void sun4d_handler_irq(int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	struct irqaction *action;
	int cpu = smp_processor_id();
	/* SBUS IRQ level (1 - 7) */
	int sbusl = pil_to_sbus[irq];

	/* FIXME: Is this necessary?? */
	cc_get_ipen();

	cc_set_iclr(1 << irq);

	old_regs = set_irq_regs(regs);
	irq_enter();
	kstat_cpu(cpu).irqs[irq]++;
	if (!sbusl) {
		action = *(irq + irq_action);
		if (!action)
			unexpected_irq(irq, NULL, regs);
		do {
			action->handler(irq, action->dev_id);
			action = action->next;
		} while (action);
	} else {
		int bus_mask = bw_get_intr_mask(sbusl) & 0x3ffff;
		int sbino;
		struct sbus_action *actionp;
		unsigned mask, slot;
		int sbil = (sbusl << 2);

		bw_clear_intr_mask(sbusl, bus_mask);

		/* Loop for each pending SBI */
		for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1)
			if (bus_mask & 1) {
				mask = acquire_sbi(SBI2DEVID(sbino), 0xf << sbil);
				mask &= (0xf << sbil);
				actionp = sbus_actions + (sbino << 5) + (sbil);
				/* Loop for each pending SBI slot */
				for (slot = (1 << sbil); mask; slot <<= 1, actionp++)
					if (mask & slot) {
						mask &= ~slot;
						action = actionp->action;

						if (!action)
							unexpected_irq(irq, NULL, regs);
						do {
							action->handler(irq, action->dev_id);
							action = action->next;
						} while (action);
						release_sbi(SBI2DEVID(sbino), slot);
					}
			}
	}
	irq_exit();
	set_irq_regs(old_regs);
}

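/* Install a handler on an on-board PIL (< 15) or an SBUS interrupt (>= 32),
 * honouring IRQF_SHARED chaining and the SA_STATIC_ALLOC pool, then unmask
 * the line via __enable_irq(). */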
int sun4d_request_irq(unsigned int irq,
		irq_handler_t handler,
		unsigned long irqflags, const char *devname, void *dev_id)
{
	struct irqaction *action, *tmp = NULL, **actionp;
	unsigned long flags;
	int ret;

	if (irq > 14 && irq < (1 << 5)) {
		ret = -EINVAL;
		goto out;
	}

	if (!handler) {
		ret = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	if (irq >= (1 << 5))
		actionp = &(sbus_actions[irq - (1 << 5)].action);
	else
		actionp = irq + irq_action;
	action = *actionp;

	if (action) {
		if ((action->flags & IRQF_SHARED) && (irqflags & IRQF_SHARED)) {
			for (tmp = action; tmp->next; tmp = tmp->next);
		} else {
			ret = -EBUSY;
			goto out_unlock;
		}
		if ((action->flags & IRQF_DISABLED) ^ (irqflags & IRQF_DISABLED)) {
			printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
			ret = -EBUSY;
			goto out_unlock;
		}
		action = NULL;		/* Or else! */
	}

	/* If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
		if (static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
	}

	if (action == NULL)
		action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);

	if (!action) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	if (tmp)
		tmp->next = action;
	else
		*actionp = action;

	__enable_irq(irq);

	ret = 0;
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
out:
	return ret;
}

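/* Mask/unmask an SBUS interrupt by toggling its PIL bit in the interrupt mask
 * of the CPU serving that SBI (sbus_tid[]); on-board levels (irq < NR_IRQS)
 * are not touched here. */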
static void sun4d_disable_irq(unsigned int irq)
{
#ifdef CONFIG_SMP
	int tid = sbus_tid[(irq >> 5) - 1];
	unsigned long flags;
#endif

	if (irq < NR_IRQS)
		return;
#ifdef CONFIG_SMP
	spin_lock_irqsave(&sun4d_imsk_lock, flags);
	cc_set_imsk_other(tid, cc_get_imsk_other(tid) | (1 << sbus_to_pil[(irq >> 2) & 7]));
	spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
#else
	cc_set_imsk(cc_get_imsk() | (1 << sbus_to_pil[(irq >> 2) & 7]));
#endif
}

static void sun4d_enable_irq(unsigned int irq)
{
#ifdef CONFIG_SMP
	int tid = sbus_tid[(irq >> 5) - 1];
	unsigned long flags;
#endif

	if (irq < NR_IRQS)
		return;
#ifdef CONFIG_SMP
	spin_lock_irqsave(&sun4d_imsk_lock, flags);
	cc_set_imsk_other(tid, cc_get_imsk_other(tid) & ~(1 << sbus_to_pil[(irq >> 2) & 7]));
	spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
#else
	cc_set_imsk(cc_get_imsk() & ~(1 << sbus_to_pil[(irq >> 2) & 7]));
#endif
}

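/* SMP glue: IPI helpers hooked up in sun4d_init_IRQ() plus the routing of
 * each SBI's interrupts to a serving CPU (see DISTRIBUTE_IRQS above). */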
#ifdef CONFIG_SMP
static void sun4d_set_cpu_int(int cpu, int level)
{
	sun4d_send_ipi(cpu, level);
}

static void sun4d_clear_ipi(int cpu, int level)
{
}

static void sun4d_set_udt(int cpu)
{
}

/* Setup IRQ distribution scheme. */
void __init sun4d_distribute_irqs(void)
{
	struct device_node *dp;

#ifdef DISTRIBUTE_IRQS
	cpumask_t sbus_serving_map;

	sbus_serving_map = cpu_present_map;
	for_each_node_by_name(dp, "sbi") {
		int board = of_getintprop_default(dp, "board#", 0);

		if ((board * 2) == boot_cpu_id && cpu_isset(board * 2 + 1, cpu_present_map))
			sbus_tid[board] = (board * 2 + 1);
		else if (cpu_isset(board * 2, cpu_present_map))
			sbus_tid[board] = (board * 2);
		else if (cpu_isset(board * 2 + 1, cpu_present_map))
			sbus_tid[board] = (board * 2 + 1);
		else
			sbus_tid[board] = 0xff;
		if (sbus_tid[board] != 0xff)
			cpu_clear(sbus_tid[board], sbus_serving_map);
	}
	for_each_node_by_name(dp, "sbi") {
		int board = of_getintprop_default(dp, "board#", 0);
		if (sbus_tid[board] == 0xff) {
			int i = 31;

			if (cpus_empty(sbus_serving_map))
				sbus_serving_map = cpu_present_map;
			while (!cpu_isset(i, sbus_serving_map))
				i--;
			sbus_tid[board] = i;
			cpu_clear(i, sbus_serving_map);
		}
	}
	for_each_node_by_name(dp, "sbi") {
		int devid = of_getintprop_default(dp, "device-id", 0);
		int board = of_getintprop_default(dp, "board#", 0);
		printk("sbus%d IRQs directed to CPU%d\n", board, sbus_tid[board]);
		set_sbi_tid(devid, sbus_tid[board] << 3);
	}
#else
	int cpuid = cpu_logical_map(1);

	if (cpuid == -1)
		cpuid = cpu_logical_map(0);
	for_each_node_by_name(dp, "sbi") {
		int devid = of_getintprop_default(dp, "device-id", 0);
		int board = of_getintprop_default(dp, "board#", 0);
		sbus_tid[board] = cpuid;
		set_sbi_tid(devid, cpuid << 3);
	}
	printk("All sbus IRQs directed to CPU%d\n", cpuid);
#endif
}
#endif

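/* Level-10 system timer and per-CPU level-14 profile timer handling
 * via the BW registers. */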
static void sun4d_clear_clock_irq(void)
{
	volatile unsigned int clear_intr;
	clear_intr = sun4d_timers->l10_timer_limit;
}

static void sun4d_clear_profile_irq(int cpu)
{
	bw_get_prof_limit(cpu);
}

static void sun4d_load_profile_irq(int cpu, unsigned int limit)
{
	bw_set_prof_limit(cpu, limit);
}

static void __init sun4d_init_timers(irq_handler_t counter_fn)
{
	int irq;
	int cpu;
	struct resource r;
	int mid;

	/* Map the User Timer registers. */
	memset(&r, 0, sizeof(r));
#ifdef CONFIG_SMP
	r.start = CSR_BASE(boot_cpu_id) + BW_TIMER_LIMIT;
#else
	r.start = CSR_BASE(0) + BW_TIMER_LIMIT;
#endif
	r.flags = 0xf;
	sun4d_timers = (struct sun4d_timer_regs *) sbus_ioremap(&r, 0,
	    PAGE_SIZE, "user timer");

	sun4d_timers->l10_timer_limit = (((1000000/HZ) + 1) << 10);
	master_l10_counter = &sun4d_timers->l10_cur_count;
	master_l10_limit = &sun4d_timers->l10_timer_limit;

	irq = request_irq(TIMER_IRQ,
			  counter_fn,
			  (IRQF_DISABLED | SA_STATIC_ALLOC),
			  "timer", NULL);
	if (irq) {
		prom_printf("time_init: unable to attach IRQ%d\n", TIMER_IRQ);
		prom_halt();
	}

	/* Enable user timer free run for CPU 0 in BW */
	/* bw_set_ctrl(0, bw_get_ctrl(0) | BW_CTRL_USER_TIMER); */

	cpu = 0;
	while (!cpu_find_by_instance(cpu, NULL, &mid)) {
		sun4d_load_profile_irq(mid >> 3, 0);
		cpu++;
	}

#ifdef CONFIG_SMP
	{
		unsigned long flags;
		extern unsigned long lvl14_save[4];
		struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];
		extern unsigned int real_irq_entry[], smp4d_ticker[];
		extern unsigned int patchme_maybe_smp_msg[];

		/* Adjust so that we jump directly to smp4d_ticker */
		lvl14_save[2] += smp4d_ticker - real_irq_entry;

		/* For SMP we use the level 14 ticker, however the bootup code
		 * has copied the firmware's level 14 vector into the boot cpu's
		 * trap table, we must fix this now or we get squashed.
		 */
		local_irq_save(flags);
		patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
		trap_table->inst_one = lvl14_save[0];
		trap_table->inst_two = lvl14_save[1];
		trap_table->inst_three = lvl14_save[2];
		trap_table->inst_four = lvl14_save[3];
		local_flush_cache_all();
		local_irq_restore(flags);
	}
#endif
}

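/* Count the SBI nodes in the device tree, allocate sbus_actions (8 levels x
 * 4 slots per SBI) and clear any interrupts left pending by the PROM. */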
void __init sun4d_init_sbi_irq(void)
{
	struct device_node *dp;

	nsbi = 0;
	for_each_node_by_name(dp, "sbi")
		nsbi++;
	sbus_actions = kzalloc(nsbi * 8 * 4 * sizeof(struct sbus_action), GFP_ATOMIC);
	if (!sbus_actions) {
		prom_printf("SUN4D: Cannot allocate sbus_actions, halting.\n");
		prom_halt();
	}
	for_each_node_by_name(dp, "sbi") {
		int devid = of_getintprop_default(dp, "device-id", 0);
		int board = of_getintprop_default(dp, "board#", 0);
		unsigned int mask;

#ifdef CONFIG_SMP
		{
			extern unsigned char boot_cpu_id;

			set_sbi_tid(devid, boot_cpu_id << 3);
			sbus_tid[board] = boot_cpu_id;
		}
#endif
		/* Get rid of pending irqs from PROM */
		mask = acquire_sbi(devid, 0xffffffff);
		if (mask) {
			printk("Clearing pending IRQs %08x on SBI %d\n", mask, board);
			release_sbi(devid, mask);
		}
	}
}

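/* Hook the sun4d implementations into the sparc32 boot-time fixup (BTFIXUP)
 * entries used by the generic IRQ code. */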
void __init sun4d_init_IRQ(void)
{
	local_irq_disable();

	BTFIXUPSET_CALL(enable_irq, sun4d_enable_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(disable_irq, sun4d_disable_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_clock_irq, sun4d_clear_clock_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_profile_irq, sun4d_clear_profile_irq, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM);
	sparc_init_timers = sun4d_init_timers;
#ifdef CONFIG_SMP
	BTFIXUPSET_CALL(set_cpu_int, sun4d_set_cpu_int, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(clear_cpu_int, sun4d_clear_ipi, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(set_irq_udt, sun4d_set_udt, BTFIXUPCALL_NOP);
#endif
	/* Cannot enable interrupts until OBP ticker is disabled. */
}