vsmp_64.c

/*
 * vSMPowered(tm) systems specific initialization
 * Copyright (C) 2005 ScaleMP Inc.
 *
 * Use of this code is subject to the terms and conditions of the
 * GNU general public license version 2. See "COPYING" or
 * http://www.gnu.org/licenses/gpl.html
 *
 * Ravikiran Thirumalai <kiran@scalemp.com>,
 * Shai Fultheim <shai@scalemp.com>
 * Paravirt ops integration: Glauber de Oliveira Costa <gcosta@redhat.com>,
 *                           Ravikiran Thirumalai <kiran@scalemp.com>
 */

#include <linux/init.h>
#include <linux/pci_ids.h>
#include <linux/pci_regs.h>

#include <asm/pci-direct.h>
#include <asm/io.h>
#include <asm/paravirt.h>

#if defined CONFIG_PCI && defined CONFIG_PARAVIRT
/*
 * Interrupt control on vSMPowered systems:
 * ~AC is a shadow of IF. If IF is 'on' AC should be 'off'
 * and vice versa.
 */
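
/*
 * Read the real EFLAGS and fold the AC shadow into IF: report interrupts
 * as disabled if either IF is clear or AC is set.
 */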
static unsigned long vsmp_save_fl(void)
{
        unsigned long flags = native_save_fl();

        if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC))
                flags &= ~X86_EFLAGS_IF;
        return flags;
}
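
/* Restore EFLAGS, keeping AC as the inverse shadow of the requested IF */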
static void vsmp_restore_fl(unsigned long flags)
{
        if (flags & X86_EFLAGS_IF)
                flags &= ~X86_EFLAGS_AC;
        else
                flags |= X86_EFLAGS_AC;
        native_restore_fl(flags);
}
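
/* Disable interrupts: clear IF and set its AC shadow */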
static void vsmp_irq_disable(void)
{
        unsigned long flags = native_save_fl();

        native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
}
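
/* Enable interrupts: set IF and clear its AC shadow */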
static void vsmp_irq_enable(void)
{
        unsigned long flags = native_save_fl();

        native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
}
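
/*
 * Leave the four irq ops to the default patcher so the vsmp_* handlers
 * above stay in place; all other ops are patched natively.
 */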
static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf,
                                  unsigned long addr, unsigned len)
{
        switch (type) {
        case PARAVIRT_PATCH(pv_irq_ops.irq_enable):
        case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
        case PARAVIRT_PATCH(pv_irq_ops.save_fl):
        case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
                return paravirt_patch_default(type, clobbers, ibuf, addr, len);
        default:
                return native_patch(type, clobbers, ibuf, addr, len);
        }
}
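
/*
 * Read the vSMP CTL device's capability and control registers; if both
 * advertise bit 4, install the vsmp_* irq ops and clear that bit in the
 * control register to mark the kernel as vSMP IRQ-fastpath capable.
 */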
static void __init set_vsmp_pv_ops(void)
{
        void *address;
        unsigned int cap, ctl, cfg;

        /* set vSMP magic bits to indicate vSMP capable kernel */
        cfg = read_pci_config(0, 0x1f, 0, PCI_BASE_ADDRESS_0);
        address = early_ioremap(cfg, 8);
        cap = readl(address);
        ctl = readl(address + 4);
        printk(KERN_INFO "vSMP CTL: capabilities:0x%08x control:0x%08x\n",
               cap, ctl);
        if (cap & ctl & (1 << 4)) {
                /* Setup irq ops and turn on vSMP IRQ fastpath handling */
                pv_irq_ops.irq_disable = vsmp_irq_disable;
                pv_irq_ops.irq_enable = vsmp_irq_enable;
                pv_irq_ops.save_fl = vsmp_save_fl;
                pv_irq_ops.restore_fl = vsmp_restore_fl;
                pv_init_ops.patch = vsmp_patch;

                ctl &= ~(1 << 4);
                writel(ctl, address + 4);
                ctl = readl(address + 4);
                printk(KERN_INFO "vSMP CTL: control set to:0x%08x\n", ctl);
        }

        early_iounmap(address, 8);
}
#else
static void __init set_vsmp_pv_ops(void)
{
}
#endif

#ifdef CONFIG_PCI
static int is_vsmp = -1;
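
/* Look for the ScaleMP vSMP CTL device at 0:1f.0 via early PCI config space */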
static void __init detect_vsmp_box(void)
{
        is_vsmp = 0;

        if (!early_pci_allowed())
                return;

        /* Check if we are running on a ScaleMP vSMPowered box */
        if (read_pci_config(0, 0x1f, 0, PCI_VENDOR_ID) ==
            (PCI_VENDOR_ID_SCALEMP | (PCI_DEVICE_ID_SCALEMP_VSMP_CTL << 16)))
                is_vsmp = 1;
}
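
/* Valid only after detect_vsmp_box() has run; warn if asked too early */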
int is_vsmp_box(void)
{
        if (is_vsmp != -1)
                return is_vsmp;
        else {
                WARN_ON_ONCE(1);
                return 0;
        }
}
#else
static void __init detect_vsmp_box(void)
{
}

int is_vsmp_box(void)
{
        return 0;
}
#endif
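
/* Early entry point: detect a vSMP box and, if found, hook the pv irq ops */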
void __init vsmp_init(void)
{
        detect_vsmp_box();
        if (!is_vsmp_box())
                return;

        set_vsmp_pv_ops();
        return;
}