ItLpQueue.c

/*
 * ItLpQueue.c
 * Copyright (C) 2001 Mike Corrigan IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/system.h>
#include <asm/paca.h>
#include <asm/iSeries/ItLpQueue.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/HvCallEvent.h>
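
/*
 * Atomically claim the queue's in-use word with a lwarx/stwcx.
 * test-and-set sequence.  Returns 1 if this caller set the word
 * (the queue was free), 0 if the queue was already in use.
 */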
static __inline__ int set_inUse( struct ItLpQueue * lpQueue )
{
        int t;
        u32 * inUseP = &(lpQueue->xInUseWord);

        __asm__ __volatile__("\n\
1:      lwarx   %0,0,%2         \n\
        cmpwi   0,%0,0          \n\
        li      %0,0            \n\
        bne-    2f              \n\
        addi    %0,%0,1         \n\
        stwcx.  %0,0,%2         \n\
        bne-    1b              \n\
2:      eieio"
                : "=&r" (t), "=m" (lpQueue->xInUseWord)
                : "r" (inUseP), "m" (lpQueue->xInUseWord)
                : "cc");

        return t;
}

static __inline__ void clear_inUse( struct ItLpQueue * lpQueue )
{
        lpQueue->xInUseWord = 0;
}

/* Array of LpEvent handler functions */
extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
unsigned long ItLpQueueInProcess = 0;
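
/*
 * Return the event at the current queue slot and advance the cursor
 * (wrapping back to the start of the event stack when there is no room
 * at the end), or return NULL if the current slot holds no valid event.
 */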
struct HvLpEvent * ItLpQueue_getNextLpEvent( struct ItLpQueue * lpQueue )
{
        struct HvLpEvent * nextLpEvent =
                (struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
        if ( nextLpEvent->xFlags.xValid ) {
                /* rmb() needed only for weakly consistent machines (regatta) */
                rmb();
                /* Set pointer to next potential event */
                lpQueue->xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
                                LpEventAlign ) /
                                LpEventAlign ) *
                                LpEventAlign;
                /* Wrap to beginning if no room at end */
                if (lpQueue->xSlicCurEventPtr > lpQueue->xSlicLastValidEventPtr)
                        lpQueue->xSlicCurEventPtr = lpQueue->xSlicEventStackPtr;
        }
        else
                nextLpEvent = NULL;

        return nextLpEvent;
}

unsigned long spread_lpevents = 1;
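
/*
 * Report whether an LP event interrupt is pending for this CPU.  Only
 * the first spread_lpevents processors poll the queue; an interrupt is
 * pending if the current slot holds a valid event or the hypervisor has
 * flagged an overflow.
 */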
int ItLpQueue_isLpIntPending( struct ItLpQueue * lpQueue )
{
        struct HvLpEvent *next_event;

        if (smp_processor_id() >= spread_lpevents)
                return 0;

        next_event = (struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
        return next_event->xFlags.xValid | lpQueue->xPlicOverflowIntPending;
}
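
/*
 * Note: the switch below falls through intentionally, so an event that
 * spans several LpEventAlign (64-byte) blocks has the valid bit cleared
 * in every block it covers before the leading valid bit is cleared.
 */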
void ItLpQueue_clearValid( struct HvLpEvent * event )
{
        /* Clear the valid bit of the event
         * Also clear bits within this event that might
         * look like valid bits (on 64-byte boundaries)
         */
        unsigned extra = (( event->xSizeMinus1 + LpEventAlign ) /
                                LpEventAlign ) - 1;
        switch ( extra ) {
        case 3:
                ((struct HvLpEvent*)((char*)event+3*LpEventAlign))->xFlags.xValid = 0;
        case 2:
                ((struct HvLpEvent*)((char*)event+2*LpEventAlign))->xFlags.xValid = 0;
        case 1:
                ((struct HvLpEvent*)((char*)event+1*LpEventAlign))->xFlags.xValid = 0;
        case 0:
                ;
        }
        mb();
        event->xFlags.xValid = 0;
}
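
/*
 * Drain the queue: fetch and dispatch events one at a time until none
 * remain, asking the hypervisor for overflow events when the queue is
 * empty but an overflow is pending.  Guarded against recursion by the
 * queue's in-use word.  Returns the number of events processed.
 */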
unsigned ItLpQueue_process( struct ItLpQueue * lpQueue, struct pt_regs *regs )
{
        unsigned numIntsProcessed = 0;
        struct HvLpEvent * nextLpEvent;

        /* If we have recursed, just return */
        if ( !set_inUse( lpQueue ) )
                return 0;

        if (ItLpQueueInProcess == 0)
                ItLpQueueInProcess = 1;
        else
                BUG();

        for (;;) {
                nextLpEvent = ItLpQueue_getNextLpEvent( lpQueue );
                if ( nextLpEvent ) {
                        /* Count events to return to caller
                         * and count processed events in lpQueue
                         */
                        ++numIntsProcessed;
                        lpQueue->xLpIntCount++;
                        /* Call appropriate handler here, passing
                         * a pointer to the LpEvent.  The handler
                         * must make a copy of the LpEvent if it
                         * needs it in a bottom half. (perhaps for
                         * an ACK)
                         *
                         * Handlers are responsible for ACK processing
                         *
                         * The Hypervisor guarantees that LpEvents will
                         * only be delivered with types that we have
                         * registered for, so no type check is necessary
                         * here!
                         */
                        if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes )
                                lpQueue->xLpIntCountByType[nextLpEvent->xType]++;
                        if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
                             lpEventHandler[nextLpEvent->xType] )
                                lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
                        else
                                printk(KERN_INFO "Unexpected Lp Event type=%d\n", nextLpEvent->xType );

                        ItLpQueue_clearValid( nextLpEvent );
                } else if ( lpQueue->xPlicOverflowIntPending )
                        /*
                         * No more valid events. If overflow events are
                         * pending process them
                         */
                        HvCallEvent_getOverflowLpEvents( lpQueue->xIndex );
                else
                        break;
        }

        ItLpQueueInProcess = 0;
        mb();
        clear_inUse( lpQueue );

        get_paca()->lpevent_count += numIntsProcessed;

        return numIntsProcessed;
}