acpi.h

/*
 *  Copyright (C) 1999 VA Linux Systems
 *  Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *  Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
 *  Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#ifndef _ASM_ACPI_H
#define _ASM_ACPI_H

#ifdef __KERNEL__

#include <acpi/pdc_intel.h>

#include <linux/init.h>
#include <linux/numa.h>
#include <asm/numa.h>

#define COMPILER_DEPENDENT_INT64	long
#define COMPILER_DEPENDENT_UINT64	unsigned long

/*
 * Calling conventions:
 *
 * ACPI_SYSTEM_XFACE        - Interfaces to host OS (handlers, threads)
 * ACPI_EXTERNAL_XFACE      - External ACPI interfaces
 * ACPI_INTERNAL_XFACE      - Internal ACPI interfaces
 * ACPI_INTERNAL_VAR_XFACE  - Internal variable-parameter list interfaces
 */
#define ACPI_SYSTEM_XFACE
#define ACPI_EXTERNAL_XFACE
#define ACPI_INTERNAL_XFACE
#define ACPI_INTERNAL_VAR_XFACE

/* Asm macros */

#define ACPI_FLUSH_CPU_CACHE()
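
/*
 * ACPI global-lock handshake (the Global Lock protocol from the ACPI
 * spec): in the FACS global_lock word, bit 0 is the "pending" flag and
 * bit 1 is the "owned" flag.  The acquire routine below sets the owned
 * bit; if the lock was already owned it sets the pending bit instead,
 * so the current owner knows to signal us when it releases.  It returns
 * -1 (non-zero, i.e. acquired) when the lock was free, or 0 when only
 * the pending bit could be set.
 */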
static inline int
ia64_acpi_acquire_global_lock (unsigned int *lock)
{
        unsigned int old, new, val;
        do {
                old = *lock;
                new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
                val = ia64_cmpxchg4_acq(lock, new, old);
        } while (unlikely (val != old));
        return (new < 3) ? -1 : 0;
}
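
/*
 * Release side of the handshake: clear both the owned and pending bits.
 * The return value is the old pending bit; a non-zero result tells the
 * caller that another agent is waiting and must be signalled (GBL_RLS
 * in the ACPI global-lock protocol).
 */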
static inline int
ia64_acpi_release_global_lock (unsigned int *lock)
{
        unsigned int old, new, val;
        do {
                old = *lock;
                new = old & ~0x3;
                val = ia64_cmpxchg4_acq(lock, new, old);
        } while (unlikely (val != old));
        return old & 0x1;
}
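
/*
 * ACPICA invokes these macros with the FACS pointer and an "Acq" lvalue;
 * Acq ends up non-zero when the lock was obtained (acquire) or when a
 * waiter is pending (release).
 */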
#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq)				\
        ((Acq) = ia64_acpi_acquire_global_lock(&facs->global_lock))

#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq)				\
        ((Acq) = ia64_acpi_release_global_lock(&facs->global_lock))

#ifdef CONFIG_ACPI
#define acpi_disabled 0		/* ACPI always enabled on IA64 */
#define acpi_noirq 0		/* ACPI always enabled on IA64 */
#define acpi_pci_disabled 0	/* ACPI PCI always enabled on IA64 */
#define acpi_strict 1		/* no ACPI spec workarounds on IA64 */
#endif
#define acpi_processor_cstate_check(x) (x)	/* no idle limits on IA64 :) */
static inline void disable_acpi(void) { }
static inline void pci_acpi_crs_quirks(void) { }
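
/*
 * acpi_get_sysname() returns the platform (machine-vector) name.  Generic
 * kernels resolve it at runtime; platform-specific kernels resolve it at
 * compile time below.
 */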
#ifdef CONFIG_IA64_GENERIC
const char *acpi_get_sysname (void);
#else
static inline const char *acpi_get_sysname (void)
{
# if defined (CONFIG_IA64_HP_SIM)
        return "hpsim";
# elif defined (CONFIG_IA64_HP_ZX1)
        return "hpzx1";
# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
        return "hpzx1_swiotlb";
# elif defined (CONFIG_IA64_SGI_SN2)
        return "sn2";
# elif defined (CONFIG_IA64_SGI_UV)
        return "uv";
# elif defined (CONFIG_IA64_DIG)
        return "dig";
# elif defined (CONFIG_IA64_XEN_GUEST)
        return "xen";
# elif defined (CONFIG_IA64_DIG_VTD)
        return "dig_vtd";
# else
#       error Unknown platform.  Fix acpi.c.
# endif
}
#endif

int acpi_request_vector (u32 int_type);
int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);

/* Low-level suspend routine. */
extern int acpi_suspend_lowlevel(void);

extern unsigned long acpi_wakeup_address;

/*
 * Record the cpei override flag and current logical cpu.  This is
 * useful for CPU removal.
 */
extern unsigned int can_cpei_retarget(void);
extern unsigned int is_cpu_cpei_target(unsigned int cpu);
extern void set_cpei_target_cpu(unsigned int cpu);
extern unsigned int get_cpei_target_cpu(void);
extern void prefill_possible_map(void);

#ifdef CONFIG_ACPI_HOTPLUG_CPU
extern int additional_cpus;
#else
#define additional_cpus 0
#endif

#ifdef CONFIG_ACPI_NUMA
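/* Number of proximity domains (PXM values) the translation tables below can hold. */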
#if MAX_NUMNODES > 256
#define MAX_PXM_DOMAINS MAX_NUMNODES
#else
#define MAX_PXM_DOMAINS (256)
#endif
extern int pxm_to_nid_map[MAX_PXM_DOMAINS];
extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
#endif
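
/*
 * _PDC (Processor Driver Capabilities) setup: IA64 always evaluates _PDC
 * and advertises Enhanced SpeedStep (EST) capability on SMP systems.
 */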
static inline bool arch_has_acpi_pdc(void) { return true; }
static inline void arch_acpi_set_pdc_bits(u32 *buf)
{
        buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
}

#define acpi_unlazy_tlb(x)

#ifdef CONFIG_ACPI_NUMA
extern cpumask_t early_cpu_possible_map;
#define for_each_possible_early_cpu(cpu)  \
        for_each_cpu_mask((cpu), early_cpu_possible_map)
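
/*
 * Ensure early_cpu_possible_map contains at least min_cpus entries plus
 * reserve_cpus extra hot-plug slots (never exceeding NR_CPUS), and give
 * any newly added CPU that has no node yet a node, round-robin over the
 * online nodes.
 */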
static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
{
        int low_cpu, high_cpu;
        int cpu;
        int next_nid = 0;

        low_cpu = cpus_weight(early_cpu_possible_map);

        high_cpu = max(low_cpu, min_cpus);
        high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);

        for (cpu = low_cpu; cpu < high_cpu; cpu++) {
                cpu_set(cpu, early_cpu_possible_map);
                if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
                        node_cpuid[cpu].nid = next_nid;
                        next_nid++;
                        if (next_nid >= num_online_nodes())
                                next_nid = 0;
                }
        }
}
#endif /* CONFIG_ACPI_NUMA */

#endif /*__KERNEL__*/

#endif /*_ASM_ACPI_H*/