vsmp_64.c

/*
 * vSMPowered(tm) systems specific initialization
 * Copyright (C) 2005 ScaleMP Inc.
 *
 * Use of this code is subject to the terms and conditions of the
 * GNU general public license version 2. See "COPYING" or
 * http://www.gnu.org/licenses/gpl.html
 *
 * Ravikiran Thirumalai <kiran@scalemp.com>,
 * Shai Fultheim <shai@scalemp.com>
 * Paravirt ops integration: Glauber de Oliveira Costa <gcosta@redhat.com>,
 *			     Ravikiran Thirumalai <kiran@scalemp.com>
 */
#include <linux/init.h>
#include <linux/pci_ids.h>
#include <linux/pci_regs.h>
#include <linux/smp.h>
#include <linux/irq.h>

#include <asm/apic.h>
#include <asm/pci-direct.h>
#include <asm/io.h>
#include <asm/paravirt.h>
#include <asm/setup.h>

#define TOPOLOGY_REGISTER_OFFSET 0x10
#if defined CONFIG_PCI && defined CONFIG_PARAVIRT
/*
 * Interrupt control on vSMPowered systems:
 * ~AC is a shadow of IF. If IF is 'on' AC should be 'off'
 * and vice versa.
 */
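/* Report IF as clear whenever AC is set, so callers see interrupts disabled. */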
static unsigned long vsmp_save_fl(void)
{
	unsigned long flags = native_save_fl();

	if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC))
		flags &= ~X86_EFLAGS_IF;
	return flags;
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
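/* Mirror IF into ~AC before restoring: enabling IF clears AC and vice versa. */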
static void vsmp_restore_fl(unsigned long flags)
{
	if (flags & X86_EFLAGS_IF)
		flags &= ~X86_EFLAGS_AC;
	else
		flags |= X86_EFLAGS_AC;
	native_restore_fl(flags);
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
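/* Disable interrupts: clear IF and set the AC shadow bit. */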
static void vsmp_irq_disable(void)
{
	unsigned long flags = native_save_fl();

	native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
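/* Enable interrupts: set IF and clear the AC shadow bit. */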
static void vsmp_irq_enable(void)
{
	unsigned long flags = native_save_fl();

	native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
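/*
 * Patch callsites for the four irq-flag ops with the generic default
 * (leaving them as calls to the vSMP handlers above); everything else
 * gets the usual native patching.
 */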
static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
					    unsigned long addr, unsigned len)
{
	switch (type) {
	case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
	case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
	case PARAVIRT_PATCH(pv_irq_ops.save_fl):
	case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
		return paravirt_patch_default(type, clobbers, ibuf, addr, len);
	default:
		return native_patch(type, clobbers, ibuf, addr, len);
	}
}
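/*
 * Map the vSMP foundation control area (BAR0 of PCI device 00:1f.0),
 * acknowledge the capabilities this kernel supports by clearing the
 * corresponding control bits, and install the vSMP irq pv_ops when the
 * IRQ fastpath capability is advertised.
 */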
static void __init set_vsmp_pv_ops(void)
{
	void __iomem *address;
	unsigned int cap, ctl, cfg;

	/* set vSMP magic bits to indicate vSMP capable kernel */
	cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0);
	address = early_ioremap(cfg, 8);
	cap = readl(address);
	ctl = readl(address + 4);
	printk(KERN_INFO "vSMP CTL: capabilities:0x%08x control:0x%08x\n",
	       cap, ctl);

	/* If possible, let the vSMP foundation route the interrupt optimally */
#ifdef CONFIG_SMP
	if (cap & ctl & BIT(8)) {
		ctl &= ~BIT(8);

#ifdef CONFIG_PROC_FS
		/* Don't let users change irq affinity via procfs */
		no_irq_affinity = 1;
#endif
	}
#endif

	if (cap & ctl & (1 << 4)) {
		/* Setup irq ops and turn on vSMP IRQ fastpath handling */
		pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
		pv_irq_ops.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
		pv_irq_ops.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
		pv_irq_ops.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
		pv_init_ops.patch = vsmp_patch;
		ctl &= ~(1 << 4);
	}

	writel(ctl, address + 4);
	ctl = readl(address + 4);
	pr_info("vSMP CTL: control set to:0x%08x\n", ctl);

	early_iounmap(address, 8);
}
#else
static void __init set_vsmp_pv_ops(void)
{
}
#endif
#ifdef CONFIG_PCI
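/* -1: not yet probed, 0: not a vSMPowered box, 1: vSMPowered box */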
static int is_vsmp = -1;

static void __init detect_vsmp_box(void)
{
	is_vsmp = 0;

	if (!early_pci_allowed())
		return;

	/* Check if we are running on a ScaleMP vSMPowered box */
	if (read_pci_config(0, 0x1f, 0, PCI_VENDOR_ID) ==
	    (PCI_VENDOR_ID_SCALEMP | (PCI_DEVICE_ID_SCALEMP_VSMP_CTL << 16)))
		is_vsmp = 1;
}

int is_vsmp_box(void)
{
	if (is_vsmp != -1)
		return is_vsmp;
	else {
		WARN_ON_ONCE(1);
		return 0;
	}
}

#else
static void __init detect_vsmp_box(void)
{
}

int is_vsmp_box(void)
{
	return 0;
}

#endif
static void __init vsmp_cap_cpus(void)
{
#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP)
	void __iomem *address;
	unsigned int cfg, topology, node_shift, maxcpus;

	/*
	 * CONFIG_X86_VSMP is not configured, so limit the number of CPUs to
	 * the ones present in the first board, unless explicitly overridden
	 * by setup_max_cpus
	 */
	if (setup_max_cpus != NR_CPUS)
		return;

	/* Read the vSMP Foundation topology register */
	cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0);
	address = early_ioremap(cfg + TOPOLOGY_REGISTER_OFFSET, 4);
	if (WARN_ON(!address))
		return;

	topology = readl(address);
	node_shift = (topology >> 16) & 0x7;
	if (!node_shift)
		/* The value 0 should be decoded as 8 */
		node_shift = 8;
	maxcpus = (topology & ((1 << node_shift) - 1)) + 1;

	pr_info("vSMP CTL: Capping CPUs to %d (CONFIG_X86_VSMP is unset)\n",
		maxcpus);
	setup_max_cpus = maxcpus;
	early_iounmap(address, 4);
#endif
}
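/* Derive the physical package id from the running CPU's hardware APIC id. */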
static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
{
	return hard_smp_processor_id() >> index_msb;
}

/*
 * In vSMP, all cpus should be capable of handling interrupts, regardless of
 * the APIC used.
 */
static void fill_vector_allocation_domain(int cpu, struct cpumask *retmask,
					  const struct cpumask *mask)
{
	cpumask_setall(retmask);
}

static void vsmp_apic_post_init(void)
{
	/* need to update phys_pkg_id */
	apic->phys_pkg_id = apicid_phys_pkg_id;
	apic->vector_allocation_domain = fill_vector_allocation_domain;
}
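/* Detect a vSMPowered box and, if found, apply the vSMP platform setup. */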
void __init vsmp_init(void)
{
	detect_vsmp_box();
	if (!is_vsmp_box())
		return;

	x86_platform.apic_post_init = vsmp_apic_post_init;

	vsmp_cap_cpus();

	set_vsmp_pv_ops();
	return;
}