mips-atomic.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#include <asm/irqflags.h>
#include <asm/hazards.h>
#include <linux/compiler.h>
#include <linux/preempt.h>
#include <linux/export.h>

#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)

/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.
 * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
 * no nops at all.
 */
/*
 * For TX49, operating on the IE bit alone is not enough.
 *
 * If mfc0 $12 follows a store and the mfc0 is the last instruction of
 * a page, and fetching the next instruction causes a TLB miss, the
 * result of the mfc0 might wrongly contain the EXL bit.
 *
 * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
 *
 * Workaround: mask the EXL bit of the result or place a nop before mfc0.
 */
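/*
 * An illustrative sketch of the two workarounds named above (comment
 * only, not assembled as part of this file; Status.EXL is bit 1):
 *
 *	nop			# variant 1: a nop breaks the store->mfc0 pairing
 *	mfc0	$1, $12
 *
 *	mfc0	$1, $12		# variant 2: read Status, then...
 *	ori	$1, 0x2		# ...force EXL and
 *	xori	$1, 0x2		# ...clear it, masking the stale bit
 *
 * The ori/xori 0x1f sequence below clears Status[4:0] (IE, EXL, ERL,
 * KSU), so it masks a wrongly-set EXL as a side effect of clearing IE.
 */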
__asm__(
	"	.macro	arch_local_irq_disable			\n"
	"	.set	push					\n"
	"	.set	noat					\n"
#ifdef CONFIG_MIPS_MT_SMTC
	/* SMTC: set the interrupt-exempt bit (IXMT, 0x400) in c0_tcstatus */
	"	mfc0	$1, $2, 1				\n"
	"	ori	$1, 0x400				\n"
	"	.set	noreorder				\n"
	"	mtc0	$1, $2, 1				\n"
#elif defined(CONFIG_CPU_MIPSR2)
	/* see irqflags.h for inline function */
#else
	/* set then clear Status[4:0]: clears IE (and EXL, see above) */
	"	mfc0	$1, $12					\n"
	"	ori	$1, 0x1f				\n"
	"	xori	$1, 0x1f				\n"
	"	.set	noreorder				\n"
	"	mtc0	$1, $12					\n"
#endif
	"	irq_disable_hazard				\n"
	"	.set	pop					\n"
	"	.endm						\n");

notrace void arch_local_irq_disable(void)
{
	preempt_disable();

	__asm__ __volatile__(
		"arch_local_irq_disable"
		: /* no outputs */
		: /* no inputs */
		: "memory");

	preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_disable);
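
/*
 * Usage sketch (assuming the generic <linux/irqflags.h> wrappers):
 * callers do not invoke this helper directly; local_irq_disable()
 * ends up here on the kernel configurations covered by this file.
 *
 *	local_irq_disable();	// resolves to arch_local_irq_disable()
 *	...critical section, no interrupts on this CPU...
 *	local_irq_enable();
 */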

__asm__(
	"	.macro	arch_local_irq_save result		\n"
	"	.set	push					\n"
	"	.set	reorder					\n"
	"	.set	noat					\n"
#ifdef CONFIG_MIPS_MT_SMTC
	/* SMTC: set IXMT, return the TC's previous IXMT state in \result */
	"	mfc0	\\result, $2, 1				\n"
	"	ori	$1, \\result, 0x400			\n"
	"	.set	noreorder				\n"
	"	mtc0	$1, $2, 1				\n"
	"	andi	\\result, \\result, 0x400		\n"
#elif defined(CONFIG_CPU_MIPSR2)
	/* see irqflags.h for inline function */
#else
	/* return the old Status in \result, write it back with IE cleared */
	"	mfc0	\\result, $12				\n"
	"	ori	$1, \\result, 0x1f			\n"
	"	xori	$1, 0x1f				\n"
	"	.set	noreorder				\n"
	"	mtc0	$1, $12					\n"
#endif
	"	irq_disable_hazard				\n"
	"	.set	pop					\n"
	"	.endm						\n");

notrace unsigned long arch_local_irq_save(void)
{
	unsigned long flags;

	preempt_disable();

	asm volatile("arch_local_irq_save\t%0"
		     : "=r" (flags)
		     : /* no inputs */
		     : "memory");

	preempt_enable();

	return flags;
}
EXPORT_SYMBOL(arch_local_irq_save);

__asm__(
	"	.macro	arch_local_irq_restore flags		\n"
	"	.set	push					\n"
	"	.set	noreorder				\n"
	"	.set	noat					\n"
#ifdef CONFIG_MIPS_MT_SMTC
	/* SMTC: replace the TC's IXMT bit with the one saved in \flags */
	"	mfc0	$1, $2, 1				\n"
	"	andi	\\flags, 0x400				\n"
	"	ori	$1, 0x400				\n"
	"	xori	$1, 0x400				\n"
	"	or	\\flags, $1				\n"
	"	mtc0	\\flags, $2, 1				\n"
#elif defined(CONFIG_CPU_MIPSR2)
	/* see irqflags.h for inline function */
#else
	/* replace the IE bit of Status with the one saved in \flags */
	"	mfc0	$1, $12					\n"
	"	andi	\\flags, 1				\n"
	"	ori	$1, 0x1f				\n"
	"	xori	$1, 0x1f				\n"
	"	or	\\flags, $1				\n"
	"	mtc0	\\flags, $12				\n"
#endif
	"	irq_disable_hazard				\n"
	"	.set	pop					\n"
	"	.endm						\n");

notrace void arch_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * An SMTC kernel needs to do a software replay of queued
	 * IPIs, at the cost of branch and call overhead on each
	 * local_irq_restore().
	 */
	if (unlikely(!(flags & 0x0400)))
		smtc_ipi_replay();
#endif
	preempt_disable();

	__asm__ __volatile__(
		"arch_local_irq_restore\t%0"
		: "=r" (__tmp1)
		: "0" (flags)
		: "memory");

	preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
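
/*
 * Pairing sketch as seen from generic code (assuming the standard
 * local_irq_save()/local_irq_restore() wrappers in <linux/irqflags.h>):
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// ends up in arch_local_irq_save()
 *	...critical section...
 *	local_irq_restore(flags);	// ends up in arch_local_irq_restore()
 */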

notrace void __arch_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

	preempt_disable();

	__asm__ __volatile__(
		"arch_local_irq_restore\t%0"
		: "=r" (__tmp1)
		: "0" (flags)
		: "memory");

	preempt_enable();
}
EXPORT_SYMBOL(__arch_local_irq_restore);

#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */