irq.c

/*
 * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include "linux/config.h"
#include "linux/kernel.h"
#include "linux/module.h"
#include "linux/smp.h"
#include "linux/kernel_stat.h"
#include "linux/interrupt.h"
#include "linux/random.h"
#include "linux/slab.h"
#include "linux/file.h"
#include "linux/proc_fs.h"
#include "linux/init.h"
#include "linux/seq_file.h"
#include "linux/profile.h"
#include "linux/hardirq.h"
#include "asm/irq.h"
#include "asm/hw_irq.h"
#include "asm/atomic.h"
#include "asm/signal.h"
#include "asm/system.h"
#include "asm/errno.h"
#include "asm/uaccess.h"
#include "user_util.h"
#include "kern_util.h"
#include "irq_user.h"
#include "irq_kern.h"
#include "os.h"
#include "sigio.h"
#include "misc_constants.h"
/*
 * Generic, controller-independent functions:
 */
int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        struct irqaction *action;
        unsigned long flags;

        if (i == 0) {
                seq_printf(p, " ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%d ", j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto skip;
                seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
                seq_printf(p, " %14s", irq_desc[i].handler->typename);
                seq_printf(p, " %s", action->name);
                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);
                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        } else if (i == NR_IRQS) {
                seq_putc(p, '\n');
        }

        return 0;
}
struct irq_fd *active_fds = NULL;
static struct irq_fd **last_irq_ptr = &active_fds;

extern void free_irqs(void);

void sigio_handler(int sig, union uml_pt_regs *regs)
{
        struct irq_fd *irq_fd;
        int n;

        if(smp_sigio_handler())
                return;
        while(1){
                n = os_waiting_for_events(active_fds);
                if (n <= 0) {
                        if(n == -EINTR)
                                continue;
                        else break;
                }

                for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
                        if(irq_fd->current_events != 0){
                                irq_fd->current_events = 0;
                                do_IRQ(irq_fd->irq, regs);
                        }
                }
        }

        free_irqs();
}
static void maybe_sigio_broken(int fd, int type)
{
        if(os_isatty(fd)){
                if((type == IRQ_WRITE) && !pty_output_sigio){
                        write_sigio_workaround();
                        add_sigio_fd(fd, 0);
                }
                else if((type == IRQ_READ) && !pty_close_sigio){
                        write_sigio_workaround();
                        add_sigio_fd(fd, 1);
                }
        }
}
int activate_fd(int irq, int fd, int type, void *dev_id)
{
        struct pollfd *tmp_pfd;
        struct irq_fd *new_fd, *irq_fd;
        unsigned long flags;
        int pid, events, err, n;

        pid = os_getpid();
        err = os_set_fd_async(fd, pid);
        if(err < 0)
                goto out;

        new_fd = um_kmalloc(sizeof(*new_fd));
        err = -ENOMEM;
        if(new_fd == NULL)
                goto out;

        if(type == IRQ_READ) events = UM_POLLIN | UM_POLLPRI;
        else events = UM_POLLOUT;
        *new_fd = ((struct irq_fd) { .next           = NULL,
                                     .id             = dev_id,
                                     .fd             = fd,
                                     .type           = type,
                                     .irq            = irq,
                                     .pid            = pid,
                                     .events         = events,
                                     .current_events = 0 } );

        /* Critical section - locked by a spinlock because this stuff can
         * be changed from interrupt handlers. The stuff above is done
         * outside the lock because it allocates memory.
         */

        /* Actually, it only looks like it can be called from interrupt
         * context. The culprit is reactivate_fd, which calls
         * maybe_sigio_broken, which calls write_sigio_workaround,
         * which calls activate_fd. However, write_sigio_workaround should
         * only be called once, at boot time. That would make it clear that
         * this is called only from process context, and can be locked with
         * a semaphore.
         */
        flags = irq_lock();
        for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){
                if((irq_fd->fd == fd) && (irq_fd->type == type)){
                        printk("Registering fd %d twice\n", fd);
                        printk("Irqs : %d, %d\n", irq_fd->irq, irq);
                        printk("Ids : 0x%p, 0x%p\n", irq_fd->id, dev_id);
                        goto out_unlock;
                }
        }

        /*-------------*/
        if(type == IRQ_WRITE)
                fd = -1;

        tmp_pfd = NULL;
        n = 0;

        while(1){
                n = os_create_pollfd(fd, events, tmp_pfd, n);
                if (n == 0)
                        break;

                /* n > 0
                 * This means the new pollfd could not be added to the
                 * current pollfds array, and tmp_pfd is NULL or too small
                 * to hold the enlarged array. The needed size is at least
                 * n bytes.
                 *
                 * Here we have to drop the lock in order to call
                 * kmalloc, which might sleep.
                 * If something else came in and changed the pollfds array
                 * so that we still cannot add the new pollfd struct to it,
                 * we free the buffer tmp_pfd and try again.
                 */
                irq_unlock(flags);
                if (tmp_pfd != NULL) {
                        kfree(tmp_pfd);
                        tmp_pfd = NULL;
                }
                tmp_pfd = um_kmalloc(n);
                if (tmp_pfd == NULL)
                        goto out_kfree;

                flags = irq_lock();
        }
        /*-------------*/
        *last_irq_ptr = new_fd;
        last_irq_ptr = &new_fd->next;

        irq_unlock(flags);

        /* This calls activate_fd, so it has to be outside the critical
         * section.
         */
        maybe_sigio_broken(fd, type);

        return(0);

 out_unlock:
        irq_unlock(flags);
 out_kfree:
        kfree(new_fd);
 out:
        return(err);
}
static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
        unsigned long flags;

        flags = irq_lock();
        os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
        irq_unlock(flags);
}

struct irq_and_dev {
        int irq;
        void *dev;
};

static int same_irq_and_dev(struct irq_fd *irq, void *d)
{
        struct irq_and_dev *data = d;

        return((irq->irq == data->irq) && (irq->id == data->dev));
}

void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
        struct irq_and_dev data = ((struct irq_and_dev) { .irq = irq,
                                                          .dev = dev });

        free_irq_by_cb(same_irq_and_dev, &data);
}

static int same_fd(struct irq_fd *irq, void *fd)
{
        return(irq->fd == *((int *) fd));
}

void free_irq_by_fd(int fd)
{
        free_irq_by_cb(same_fd, &fd);
}

static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
        struct irq_fd *irq;
        int i = 0;
        int fdi;

        for(irq = active_fds; irq != NULL; irq = irq->next){
                if((irq->fd == fd) && (irq->irq == irqnum)) break;
                i++;
        }
        if(irq == NULL){
                printk("find_irq_by_fd doesn't have descriptor %d\n", fd);
                goto out;
        }
        fdi = os_get_pollfd(i);
        if((fdi != -1) && (fdi != fd)){
                printk("find_irq_by_fd - mismatch between active_fds and "
                       "pollfds, fd %d vs %d, need %d\n", irq->fd,
                       fdi, fd);
                irq = NULL;
                goto out;
        }
        *index_out = i;
 out:
        return(irq);
}
void reactivate_fd(int fd, int irqnum)
{
        struct irq_fd *irq;
        unsigned long flags;
        int i;

        flags = irq_lock();
        irq = find_irq_by_fd(fd, irqnum, &i);
        if(irq == NULL){
                irq_unlock(flags);
                return;
        }

        os_set_pollfd(i, irq->fd);
        irq_unlock(flags);

        /* This calls activate_fd, so it has to be outside the critical
         * section.
         */
        maybe_sigio_broken(fd, irq->type);
}

void deactivate_fd(int fd, int irqnum)
{
        struct irq_fd *irq;
        unsigned long flags;
        int i;

        flags = irq_lock();
        irq = find_irq_by_fd(fd, irqnum, &i);
        if(irq == NULL)
                goto out;
        os_set_pollfd(i, -1);
 out:
        irq_unlock(flags);
}

int deactivate_all_fds(void)
{
        struct irq_fd *irq;
        int err;

        for(irq = active_fds; irq != NULL; irq = irq->next){
                err = os_clear_fd_async(irq->fd);
                if(err)
                        return(err);
        }
        /* If there is a signal already queued, after unblocking ignore it */
        os_set_ioignore();

        return(0);
}
void forward_interrupts(int pid)
{
        struct irq_fd *irq;
        unsigned long flags;
        int err;

        flags = irq_lock();
        for(irq = active_fds; irq != NULL; irq = irq->next){
                err = os_set_owner(irq->fd, pid);
                if(err < 0){
                        /* XXX Just remove the irq rather than
                         * print out an infinite stream of these
                         */
                        printk("Failed to forward %d to pid %d, err = %d\n",
                               irq->fd, pid, -err);
                }

                irq->pid = pid;
        }
        irq_unlock(flags);
}

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, union uml_pt_regs *regs)
{
        irq_enter();
        __do_IRQ(irq, (struct pt_regs *) regs);
        irq_exit();
        return 1;
}
int um_request_irq(unsigned int irq, int fd, int type,
                   irqreturn_t (*handler)(int, void *, struct pt_regs *),
                   unsigned long irqflags, const char *devname,
                   void *dev_id)
{
        int err;

        err = request_irq(irq, handler, irqflags, devname, dev_id);
        if(err)
                return(err);

        if(fd != -1)
                err = activate_fd(irq, fd, type, dev_id);
        return(err);
}
EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);
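
/* A rough usage sketch, not taken from this file: a UML driver typically
 * registers a host file descriptor with um_request_irq() and re-arms it from
 * its handler with reactivate_fd() once the pending data has been consumed.
 * The IRQ number, fd, handler, and device names below are hypothetical.
 *
 *      static irqreturn_t my_intr(int irq, void *dev_id, struct pt_regs *regs)
 *      {
 *              ... read from my_fd until it would block ...
 *              reactivate_fd(my_fd, MY_IRQ);
 *              return IRQ_HANDLED;
 *      }
 *
 *      err = um_request_irq(MY_IRQ, my_fd, IRQ_READ, my_intr,
 *                           SA_INTERRUPT, "my_device", dev_id);
 */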
static DEFINE_SPINLOCK(irq_spinlock);

unsigned long irq_lock(void)
{
        unsigned long flags;

        spin_lock_irqsave(&irq_spinlock, flags);
        return(flags);
}

void irq_unlock(unsigned long flags)
{
        spin_unlock_irqrestore(&irq_spinlock, flags);
}
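
/* For reference, the locking pattern used throughout this file when walking
 * or modifying active_fds (and the matching pollfds array on the os_* side)
 * is roughly:
 *
 *      flags = irq_lock();
 *      ... walk or modify active_fds ...
 *      irq_unlock(flags);
 *
 * Memory allocation is done outside the lock, since um_kmalloc() may sleep
 * (see activate_fd()).
 */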
/* hw_interrupt_type must define (startup || enable) &&
 * (shutdown || disable) && end
 */
static void dummy(unsigned int irq)
{
}
/* This is used for everything other than the timer. */
static struct hw_interrupt_type normal_irq_type = {
        .typename = "SIGIO",
        .release = free_irq_by_irq_and_dev,
        .disable = dummy,
        .enable = dummy,
        .ack = dummy,
        .end = dummy
};

static struct hw_interrupt_type SIGVTALRM_irq_type = {
        .typename = "SIGVTALRM",
        .release = free_irq_by_irq_and_dev,
        .shutdown = dummy, /* never called */
        .disable = dummy,
        .enable = dummy,
        .ack = dummy,
        .end = dummy
};

void __init init_IRQ(void)
{
        int i;

        irq_desc[TIMER_IRQ].status = IRQ_DISABLED;
        irq_desc[TIMER_IRQ].action = NULL;
        irq_desc[TIMER_IRQ].depth = 1;
        irq_desc[TIMER_IRQ].handler = &SIGVTALRM_irq_type;
        enable_irq(TIMER_IRQ);
        for(i = 1; i < NR_IRQS; i++){
                irq_desc[i].status = IRQ_DISABLED;
                irq_desc[i].action = NULL;
                irq_desc[i].depth = 1;
                irq_desc[i].handler = &normal_irq_type;
                enable_irq(i);
        }
}
int init_aio_irq(int irq, char *name, irqreturn_t (*handler)(int, void *,
                                                             struct pt_regs *))
{
        int fds[2], err;

        err = os_pipe(fds, 1, 1);
        if(err){
                printk("init_aio_irq - os_pipe failed, err = %d\n", -err);
                goto out;
        }

        err = um_request_irq(irq, fds[0], IRQ_READ, handler,
                             SA_INTERRUPT | SA_SAMPLE_RANDOM, name,
                             (void *) (long) fds[0]);
        if(err){
                printk("init_aio_irq - um_request_irq failed, err = %d\n",
                       err);
                goto out_close;
        }

        err = fds[1];
        goto out;

 out_close:
        os_close_file(fds[0]);
        os_close_file(fds[1]);
 out:
        return(err);
}
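
/* A brief usage sketch, not taken from this file: on success init_aio_irq()
 * returns the write end of the pipe, while the handler is wired to the read
 * end. The caller keeps the returned descriptor and writes to it whenever it
 * wants the IRQ delivered. The names below (AIO_IRQ, aio_intr, aio_pipe_fd,
 * event) are hypothetical.
 *
 *      aio_pipe_fd = init_aio_irq(AIO_IRQ, "aio", aio_intr);
 *      if(aio_pipe_fd < 0)
 *              ... handle the error ...
 *      ...
 *      os_write_file(aio_pipe_fd, &event, sizeof(event));
 */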