/*
 * ItLpQueue.c
 * Copyright (C) 2001 Mike Corrigan IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/system.h>
#include <asm/paca.h>
#include <asm/iSeries/ItLpQueue.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/HvCallEvent.h>

/*
 * The LpQueue is used to pass event data from the hypervisor to
 * the partition. This is where I/O interrupt events are communicated.
 *
 * It is written to by the hypervisor so cannot end up in the BSS.
 */
struct ItLpQueue xItLpQueue __attribute__((__section__(".data")));

static char *event_types[9] = {
        "Hypervisor\t\t",
        "Machine Facilities\t",
        "Session Manager\t",
        "SPD I/O\t\t",
        "Virtual Bus\t\t",
        "PCI I/O\t\t",
        "RIO I/O\t\t",
        "Virtual Lan\t\t",
        "Virtual I/O\t\t"
};
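
/*
 * Atomically mark the queue as in use.  The lwarx/stwcx. loop sets
 * xInUseWord to 1 only if it was 0; returns 1 if this caller took
 * ownership, 0 if the queue was already in use.  ItLpQueue_process()
 * uses this as its recursion guard.
 */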
static __inline__ int set_inUse(void)
{
        int t;
        u32 *inUseP = &xItLpQueue.xInUseWord;

        __asm__ __volatile__("\n\
1:      lwarx   %0,0,%2         \n\
        cmpwi   0,%0,0          \n\
        li      %0,0            \n\
        bne-    2f              \n\
        addi    %0,%0,1         \n\
        stwcx.  %0,0,%2         \n\
        bne-    1b              \n\
2:      eieio"
                        : "=&r" (t), "=m" (xItLpQueue.xInUseWord)
                        : "r" (inUseP), "m" (xItLpQueue.xInUseWord)
                        : "cc");

        return t;
}
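
/* Release the queue; only the owner established by set_inUse() clears this */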
static __inline__ void clear_inUse(void)
{
        xItLpQueue.xInUseWord = 0;
}

/* Array of LpEvent handler functions */
extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
unsigned long ItLpQueueInProcess = 0;
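
/*
 * Return a pointer to the next valid event on the event stack and
 * advance xSlicCurEventPtr past it (wrapping to the start of the
 * stack when necessary), or NULL if the current slot holds no valid
 * event.
 */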
static struct HvLpEvent *ItLpQueue_getNextLpEvent(void)
{
        struct HvLpEvent *nextLpEvent =
                (struct HvLpEvent *)xItLpQueue.xSlicCurEventPtr;

        if (nextLpEvent->xFlags.xValid) {
                /* rmb() needed only for weakly consistent machines (regatta) */
                rmb();
                /* Set pointer to next potential event */
                xItLpQueue.xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
                                LpEventAlign) / LpEventAlign) * LpEventAlign;
                /* Wrap to beginning if no room at end */
                if (xItLpQueue.xSlicCurEventPtr > xItLpQueue.xSlicLastValidEventPtr)
                        xItLpQueue.xSlicCurEventPtr = xItLpQueue.xSlicEventStackPtr;
        } else
                nextLpEvent = NULL;

        return nextLpEvent;
}

static unsigned long spread_lpevents = NR_CPUS;
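
/*
 * Report whether LP event work is pending for this processor: either
 * a valid event at the current queue position or an overflow
 * indication from the hypervisor.  Processors at or beyond
 * spread_lpevents never report pending work.
 */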
int ItLpQueue_isLpIntPending(void)
{
        struct HvLpEvent *next_event;

        if (smp_processor_id() >= spread_lpevents)
                return 0;

        next_event = (struct HvLpEvent *)xItLpQueue.xSlicCurEventPtr;
        return next_event->xFlags.xValid | xItLpQueue.xPlicOverflowIntPending;
}

static void ItLpQueue_clearValid(struct HvLpEvent *event)
{
        /* Clear the valid bit of the event
         * Also clear bits within this event that might
         * look like valid bits (on 64-byte boundaries)
         */
        unsigned extra = ((event->xSizeMinus1 + LpEventAlign) /
                          LpEventAlign) - 1;

        switch (extra) {
        case 3:
                ((struct HvLpEvent *)((char *)event + 3 * LpEventAlign))->xFlags.xValid = 0;
                /* fall through */
        case 2:
                ((struct HvLpEvent *)((char *)event + 2 * LpEventAlign))->xFlags.xValid = 0;
                /* fall through */
        case 1:
                ((struct HvLpEvent *)((char *)event + 1 * LpEventAlign))->xFlags.xValid = 0;
                /* fall through */
        case 0:
                ;
        }

        mb();

        event->xFlags.xValid = 0;
}
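
/*
 * Drain the event queue, dispatching each event to its registered
 * handler and pulling in overflow events when the queue runs dry.
 * Returns the number of events processed, or 0 immediately if called
 * recursively.
 */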
unsigned ItLpQueue_process(struct pt_regs *regs)
{
        unsigned numIntsProcessed = 0;
        struct HvLpEvent *nextLpEvent;

        /* If we have recursed, just return */
        if (!set_inUse())
                return 0;

        if (ItLpQueueInProcess == 0)
                ItLpQueueInProcess = 1;
        else
                BUG();

        for (;;) {
                nextLpEvent = ItLpQueue_getNextLpEvent();
                if (nextLpEvent) {
                        /* Count events to return to caller
                         * and count processed events in xItLpQueue
                         */
                        ++numIntsProcessed;
                        xItLpQueue.xLpIntCount++;
                        /* Call appropriate handler here, passing
                         * a pointer to the LpEvent. The handler
                         * must make a copy of the LpEvent if it
                         * needs it in a bottom half (perhaps for
                         * an ACK).
                         *
                         * Handlers are responsible for ACK processing.
                         *
                         * The Hypervisor guarantees that LpEvents will
                         * only be delivered with types that we have
                         * registered for, so no type check is necessary
                         * here!
                         */
                        if (nextLpEvent->xType < HvLpEvent_Type_NumTypes)
                                xItLpQueue.xLpIntCountByType[nextLpEvent->xType]++;
                        if (nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
                            lpEventHandler[nextLpEvent->xType])
                                lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
                        else
                                printk(KERN_INFO "Unexpected Lp Event type=%d\n",
                                       nextLpEvent->xType);

                        ItLpQueue_clearValid(nextLpEvent);
                } else if (xItLpQueue.xPlicOverflowIntPending)
                        /*
                         * No more valid events. If overflow events are
                         * pending, process them.
                         */
                        HvCallEvent_getOverflowLpEvents(xItLpQueue.xIndex);
                else
                        break;
        }

        ItLpQueueInProcess = 0;
        mb();
        clear_inUse();

        get_paca()->lpevent_count += numIntsProcessed;

        return numIntsProcessed;
}
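
/*
 * "spread_lpevents=N" boot parameter: allow the first N processors
 * (1..NR_CPUS) to take part in processing LP events.
 */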
static int set_spread_lpevents(char *str)
{
        unsigned long val = simple_strtoul(str, NULL, 0);

        /*
         * The parameter is the number of processors to share in processing
         * lp events.
         */
        if ((val > 0) && (val <= NR_CPUS)) {
                spread_lpevents = val;
                printk("lpevent processing spread over %ld processors\n", val);
        } else {
                printk("invalid spread_lpevents %ld\n", val);
        }
        return 1;
}
__setup("spread_lpevents=", set_spread_lpevents);

void setup_hvlpevent_queue(void)
{
        void *eventStack;

        /*
         * Allocate a page for the Event Stack. The Hypervisor needs the
         * absolute real address, so we subtract out the KERNELBASE and add
         * in the absolute real address of the kernel load area.
         */
        eventStack = alloc_bootmem_pages(LpEventStackSize);
        memset(eventStack, 0, LpEventStackSize);

        /* Invoke the hypervisor to initialize the event stack */
        HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);

        xItLpQueue.xSlicEventStackPtr = (char *)eventStack;
        xItLpQueue.xSlicCurEventPtr = (char *)eventStack;
        xItLpQueue.xSlicLastValidEventPtr = (char *)eventStack +
                        (LpEventStackSize - LpEventMaxSize);
        xItLpQueue.xIndex = 0;
}
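
/* Dump event counts, by type and by processor, for /proc/iSeries/lpevents */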
static int proc_lpevents_show(struct seq_file *m, void *v)
{
        unsigned int i;

        seq_printf(m, "LpEventQueue 0\n");
        seq_printf(m, "  events processed:\t%lu\n",
                   (unsigned long)xItLpQueue.xLpIntCount);

        for (i = 0; i < 9; ++i)
                seq_printf(m, "    %s %10lu\n", event_types[i],
                           (unsigned long)xItLpQueue.xLpIntCountByType[i]);

        seq_printf(m, "\n  events processed by processor:\n");

        for_each_online_cpu(i)
                seq_printf(m, "    CPU%02d  %10u\n", i, paca[i].lpevent_count);

        return 0;
}

static int proc_lpevents_open(struct inode *inode, struct file *file)
{
        return single_open(file, proc_lpevents_show, NULL);
}

static struct file_operations proc_lpevents_operations = {
        .open           = proc_lpevents_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
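
/* Register /proc/iSeries/lpevents at boot */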
static int __init proc_lpevents_init(void)
{
        struct proc_dir_entry *e;

        e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL);
        if (e)
                e->proc_fops = &proc_lpevents_operations;

        return 0;
}
__initcall(proc_lpevents_init);