/* linux/arch/unicore32/include/asm/system.h (4.1 KB) */
/*
 * linux/arch/unicore32/include/asm/system.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
  12. #ifndef __UNICORE_SYSTEM_H__
  13. #define __UNICORE_SYSTEM_H__
  14. #ifdef __KERNEL__
  15. /*
  16. * CR1 bits (CP#0 CR1)
  17. */
  18. #define CR_M (1 << 0) /* MMU enable */
  19. #define CR_A (1 << 1) /* Alignment abort enable */
  20. #define CR_D (1 << 2) /* Dcache enable */
  21. #define CR_I (1 << 3) /* Icache enable */
  22. #define CR_B (1 << 4) /* Dcache write mechanism: write back */
  23. #define CR_T (1 << 5) /* Burst enable */
  24. #define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */
  25. #ifndef __ASSEMBLY__
  26. #include <linux/linkage.h>
  27. #include <linux/irqflags.h>
  28. struct thread_info;
  29. struct task_struct;
  30. struct pt_regs;
  31. void die(const char *msg, struct pt_regs *regs, int err);
  32. struct siginfo;
  33. void uc32_notify_die(const char *str, struct pt_regs *regs,
  34. struct siginfo *info, unsigned long err, unsigned long trap);
  35. void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
  36. struct pt_regs *),
  37. int sig, int code, const char *name);
  38. #define xchg(ptr, x) \
  39. ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
  40. extern asmlinkage void __backtrace(void);
  41. extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
  42. struct mm_struct;
  43. extern void show_pte(struct mm_struct *mm, unsigned long addr);
  44. extern void __show_regs(struct pt_regs *);
  45. extern int cpu_architecture(void);
  46. extern void cpu_init(void);
  47. #define vectors_high() (cr_alignment & CR_V)
  48. #define isb() __asm__ __volatile__ ("" : : : "memory")
  49. #define dsb() __asm__ __volatile__ ("" : : : "memory")
  50. #define dmb() __asm__ __volatile__ ("" : : : "memory")
  51. #define mb() barrier()
  52. #define rmb() barrier()
  53. #define wmb() barrier()
  54. #define smp_mb() barrier()
  55. #define smp_rmb() barrier()
  56. #define smp_wmb() barrier()
  57. #define read_barrier_depends() do { } while (0)
  58. #define smp_read_barrier_depends() do { } while (0)
  59. #define set_mb(var, value) do { var = value; smp_mb(); } while (0)
  60. #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
  61. extern unsigned long cr_no_alignment; /* defined in entry-unicore.S */
  62. extern unsigned long cr_alignment; /* defined in entry-unicore.S */
  63. static inline unsigned int get_cr(void)
  64. {
  65. unsigned int val;
  66. asm("movc %0, p0.c1, #0" : "=r" (val) : : "cc");
  67. return val;
  68. }
  69. static inline void set_cr(unsigned int val)
  70. {
  71. asm volatile("movc p0.c1, %0, #0 @set CR"
  72. : : "r" (val) : "cc");
  73. isb();
  74. }
  75. extern void adjust_cr(unsigned long mask, unsigned long set);
  76. /*
  77. * switch_to(prev, next) should switch from task `prev' to `next'
  78. * `prev' will never be the same as `next'. schedule() itself
  79. * contains the memory barrier to tell GCC not to cache `current'.
  80. */
  81. extern struct task_struct *__switch_to(struct task_struct *,
  82. struct thread_info *, struct thread_info *);
  83. extern void panic(const char *fmt, ...);
  84. #define switch_to(prev, next, last) \
  85. do { \
  86. last = __switch_to(prev, \
  87. task_thread_info(prev), task_thread_info(next)); \
  88. } while (0)
  89. static inline unsigned long
  90. __xchg(unsigned long x, volatile void *ptr, int size)
  91. {
  92. unsigned long ret;
  93. switch (size) {
  94. case 1:
  95. asm volatile("@ __xchg1\n"
  96. " swapb %0, %1, [%2]"
  97. : "=&r" (ret)
  98. : "r" (x), "r" (ptr)
  99. : "memory", "cc");
  100. break;
  101. case 4:
  102. asm volatile("@ __xchg4\n"
  103. " swapw %0, %1, [%2]"
  104. : "=&r" (ret)
  105. : "r" (x), "r" (ptr)
  106. : "memory", "cc");
  107. break;
  108. default:
  109. panic("xchg: bad data size: ptr 0x%p, size %d\n",
  110. ptr, size);
  111. }
  112. return ret;
  113. }
  114. #include <asm-generic/cmpxchg-local.h>
  115. /*
  116. * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
  117. * them available.
  118. */
  119. #define cmpxchg_local(ptr, o, n) \
  120. ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), \
  121. (unsigned long)(o), (unsigned long)(n), sizeof(*(ptr))))
  122. #define cmpxchg64_local(ptr, o, n) \
  123. __cmpxchg64_local_generic((ptr), (o), (n))
  124. #include <asm-generic/cmpxchg.h>
  125. #endif /* __ASSEMBLY__ */
  126. #define arch_align_stack(x) (x)
  127. #endif /* __KERNEL__ */
  128. #endif