/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */
#ifndef _ASM_TILE_SYSTEM_H
#define _ASM_TILE_SYSTEM_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/irqflags.h>

/* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */
#include <asm/ptrace.h>

#include <arch/chip.h>
#include <arch/sim_def.h>
#include <arch/spr_def.h>
/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 */
#define read_barrier_depends()	do { } while (0)
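
/*
 * Illustration only, not part of the original header: a minimal sketch
 * of the pointer-publication pattern described in the comment above.
 * example_publish()/example_consume() and their parameters are
 * hypothetical.  The consumer's read of *q depends on its read of the
 * pointer, so read_barrier_depends() suffices (and on tile is a no-op).
 */
static inline void example_publish(int **pp, int *payload)
{
	*payload = 42;		/* initialize the data... */
	__insn_mf();		/* ...fence it (wmb() is defined below)... */
	*pp = payload;		/* ...then publish the pointer */
}

static inline int example_consume(int **pp)
{
	int *q = *pp;		/* read the published pointer */
	read_barrier_depends();	/* order the dependent read below */
	return q ? *q : -1;	/* data-dependent read of *q */
}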

#define __sync()	__insn_mf()

#if CHIP_HAS_SPLIT_CYCLE()
#define get_cycles_low() __insn_mfspr(SPR_CYCLE_LOW)
#else
#define get_cycles_low() __insn_mfspr(SPR_CYCLE)   /* just get all 64 bits */
#endif
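
/*
 * Illustration only: get_cycles_low() can bound a short spin, the same
 * pattern mb_incoherent() uses below.  example_spin_until() and its
 * parameters are hypothetical names.
 */
static inline int example_spin_until(volatile int *flag,
				     unsigned long timeout_cycles)
{
	unsigned long start = get_cycles_low();
	do {
		if (*flag)
			return 1;	/* condition met in time */
	} while ((get_cycles_low() - start) < timeout_cycles);
	return 0;			/* timed out */
}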

/* Fence to guarantee visibility of stores to incoherent memory. */
static inline void
mb_incoherent(void)
{
	__insn_mf();

#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
	{
		int __mb_incoherent(void);
#if CHIP_HAS_TILE_WRITE_PENDING()
		/*
		 * Spin briefly waiting for this tile's pending writes
		 * to drain before taking the slow path below.
		 */
		const unsigned long WRITE_TIMEOUT_CYCLES = 400;
		unsigned long start = get_cycles_low();
		do {
			if (__insn_mfspr(SPR_TILE_WRITE_PENDING) == 0)
				return;
		} while ((get_cycles_low() - start) < WRITE_TIMEOUT_CYCLES);
#endif /* CHIP_HAS_TILE_WRITE_PENDING() */
		(void) __mb_incoherent();
	}
#endif /* CHIP_HAS_MF_WAITS_FOR_VICTIMS() */
}
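
/*
 * Illustration only: a driver-style handoff in which a store to
 * incoherent memory must be globally visible before another agent is
 * notified.  example_notify() and both parameters are hypothetical.
 */
static inline void example_notify(volatile int *incoherent_buf,
				  volatile int *flag)
{
	*incoherent_buf = 1;	/* store to incoherent memory */
	mb_incoherent();	/* wait for the store to become visible */
	*flag = 1;		/* only then signal the consumer */
}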

#define fast_wmb()	__sync()
#define fast_rmb()	__sync()
#define fast_mb()	__sync()
#define fast_iob()	mb_incoherent()

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		fast_mb()
#define iob()		fast_iob()

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#endif

#define set_mb(var, value) \
	do { var = value; mb(); } while (0)
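
/*
 * Illustration only: the classic flag/data handoff that the smp_*
 * barriers above support.  example_send()/example_recv() and their
 * parameters are hypothetical names.
 */
static inline void example_send(int *data, int *flag)
{
	*data = 99;		/* write the payload first */
	smp_wmb();		/* order payload before flag (no-op on UP) */
	*flag = 1;		/* then publish the flag */
}

static inline int example_recv(int *data, int *flag)
{
	if (*flag) {		/* observe the flag... */
		smp_rmb();	/* ...then order the payload read */
		return *data;
	}
	return -1;		/* not ready yet */
}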

/*
 * Pause the DMA engine and static network before task switching.
 */
#define prepare_arch_switch(next) _prepare_arch_switch(next)
void _prepare_arch_switch(struct task_struct *next);

/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 * The number of callee-saved registers saved on the kernel stack
 * is defined here for use in copy_thread() and must agree with __switch_to().
 */
#endif /* !__ASSEMBLY__ */
#define CALLEE_SAVED_FIRST_REG	30
#define CALLEE_SAVED_REGS_COUNT	24   /* r30 to r52, plus an empty to align */
#ifndef __ASSEMBLY__

struct task_struct;

#define switch_to(prev, next, last) ((last) = _switch_to((prev), (next)))
extern struct task_struct *_switch_to(struct task_struct *prev,
				      struct task_struct *next);

/* Helper function for _switch_to(). */
extern struct task_struct *__switch_to(struct task_struct *prev,
				       struct task_struct *next,
				       unsigned long new_system_save_1_0);

/* Address at which a task that has been switched away from is waiting. */
extern unsigned long get_switch_to_pc(void);

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible:
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#define arch_align_stack(x) (x)

/*
 * Is the kernel doing fixups of unaligned accesses?  If < 0, no kernel
 * intervention occurs and SIGBUS is delivered with no data address
 * info.  If 0, the kernel single-steps the instruction to discover
 * the data address to provide with the SIGBUS.  If 1, the kernel does
 * a fixup.
 */
extern int unaligned_fixup;

/* Is the kernel printing on each unaligned fixup? */
extern int unaligned_printk;

/* Number of unaligned fixups performed */
extern unsigned int unaligned_fixup_count;
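
/*
 * Illustration only: how the three unaligned_fixup settings described
 * above map to kernel behavior.  example_fixup_policy() is a
 * hypothetical helper, not part of the original header.
 */
static inline const char *example_fixup_policy(void)
{
	if (unaligned_fixup < 0)
		return "deliver SIGBUS with no data address info";
	else if (unaligned_fixup == 0)
		return "single-step to find the address, then SIGBUS";
	else
		return "fix up the access in the kernel";
}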

/* Init-time routine to do tile-specific per-cpu setup. */
void setup_cpu(int boot);

/* User-level DMA management functions */
void grant_dma_mpls(void);
void restrict_dma_mpls(void);

#ifdef CONFIG_HARDWALL
/* User-level network management functions */
void reset_network_state(void);
void grant_network_mpls(void);
void restrict_network_mpls(void);
int hardwall_deactivate(struct task_struct *task);

/* Hook hardwall code into changes in affinity. */
#define arch_set_cpus_allowed(p, new_mask) do { \
	if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \
		hardwall_deactivate(p); \
	} while (0)
#endif

/* Invoke the simulator "syscall" mechanism (see arch/tile/kernel/entry.S). */
extern int _sim_syscall(int syscall_num, ...);
#define sim_syscall(syscall_num, ...) \
	_sim_syscall(SIM_CONTROL_SYSCALL + \
		((syscall_num) << _SIM_CONTROL_OPERATOR_BITS), \
		## __VA_ARGS__)
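
/*
 * Illustration only: invoking the simulator "syscall" mechanism via
 * the macro above.  Both the wrapper name and the syscall number 1
 * are hypothetical, not real simulator syscall values.
 */
static inline int example_sim_call(long arg)
{
	return sim_syscall(1, arg);	/* 1 is a made-up syscall number */
}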

/*
 * Kernel threads can check to see if they need to migrate their
 * stack whenever they return from a context switch; for user
 * threads, we defer until they are returning to user-space.
 */
#define finish_arch_switch(prev) do {                                     \
	if (unlikely((prev)->state == TASK_DEAD))                         \
		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |       \
			((prev)->pid << _SIM_CONTROL_OPERATOR_BITS));     \
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH |             \
		(current->pid << _SIM_CONTROL_OPERATOR_BITS));            \
	if (current->mm == NULL && !kstack_hash &&                        \
	    current_thread_info()->homecache_cpu != smp_processor_id())   \
		homecache_migrate_kthread();                              \
} while (0)

/* Support function for forking a new task. */
void ret_from_fork(void);

/* Called from ret_from_fork() when a new process starts up. */
struct task_struct *sim_notify_fork(struct task_struct *prev);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_SYSTEM_H */