quirks.c

/*
 * This file contains work-arounds for x86 and x86_64 platform bugs.
 */
#include <linux/pci.h>
#include <linux/irq.h>

#include <asm/hpet.h>

#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)

static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
{
	u8 config, rev;
	u16 word;

	/* BIOS may enable hardware IRQ balancing for
	 * E7520/E7320/E7525 (revision ID 0x9 and below)
	 * based platforms.
	 * Disable SW irqbalance/affinity on those platforms.
	 */
	pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
	if (rev > 0x9)
		return;

	/* enable access to config space */
	pci_read_config_byte(dev, 0xf4, &config);
	pci_write_config_byte(dev, 0xf4, config|0x2);

	/*
	 * read xTPR register.  We may not have a pci_dev for device 8
	 * because it might be hidden until the above write.
	 */
	pci_bus_read_config_word(dev->bus, PCI_DEVFN(8, 0), 0x4c, &word);
	if (!(word & (1 << 13))) {
		dev_info(&dev->dev, "Intel E7520/7320/7525 detected; "
			"disabling irq balancing and affinity\n");
#ifdef CONFIG_IRQBALANCE
		irqbalance_disable("");
#endif
		noirqdebug_setup("");
#ifdef CONFIG_PROC_FS
		no_irq_affinity = 1;
#endif
	}

	/* put back the original value for config space */
	if (!(config & 0x2))
		pci_write_config_byte(dev, 0xf4, config);
}
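
/*
 * Register the quirk at the final PCI fixup stage for the matching
 * E7320/E7525/E7520 memory controller hubs.
 */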
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH,
			quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH,
			quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH,
			quirk_intel_irqbalance);
#endif

#if defined(CONFIG_HPET_TIMER)
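/*
 * force_hpet_address records where a quirk below force-enabled a
 * BIOS-hidden HPET, and force_hpet_resume_type selects which
 * chipset-specific hook force_hpet_resume() must call to redo that
 * enable after a resume from suspend.
 */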
unsigned long force_hpet_address;

static enum {
	NONE_FORCE_HPET_RESUME,
	OLD_ICH_FORCE_HPET_RESUME,
	ICH_FORCE_HPET_RESUME,
	VT8237_FORCE_HPET_RESUME,
	NVIDIA_FORCE_HPET_RESUME,
} force_hpet_resume_type;

/* ICH Root Complex Base Address (RCBA) space, mapped by ich_force_enable_hpet() */
static void __iomem *rcba_base;

static void ich_force_hpet_resume(void)
{
	u32 val;

	if (!force_hpet_address)
		return;

	if (rcba_base == NULL)
		BUG();

	/* read the HPTC register; dword access only */
	val = readl(rcba_base + 0x3404);
	if (!(val & 0x80)) {
		/* HPET disabled in HPTC. Trying to enable */
		writel(val | 0x80, rcba_base + 0x3404);
	}

	val = readl(rcba_base + 0x3404);
	if (!(val & 0x80))
		BUG();
	else
		printk(KERN_DEBUG "Force enabled HPET at resume\n");

	return;
}

static void ich_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;
	u32 uninitialized_var(rcba);
	int err = 0;

	if (hpet_address || force_hpet_address)
		return;

	pci_read_config_dword(dev, 0xF0, &rcba);
	rcba &= 0xFFFFC000;
	if (rcba == 0) {
		dev_printk(KERN_DEBUG, &dev->dev, "RCBA disabled; "
			"cannot force enable HPET\n");
		return;
	}

	/* use bits 31:14, 16 kB aligned */
	rcba_base = ioremap_nocache(rcba, 0x4000);
	if (rcba_base == NULL) {
		dev_printk(KERN_DEBUG, &dev->dev, "ioremap failed; "
			"cannot force enable HPET\n");
		return;
	}

	/* read the HPTC register; dword access only */
	val = readl(rcba_base + 0x3404);

	if (val & 0x80) {
		/* HPET is enabled in HPTC. Just not reported by BIOS */
		val = val & 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
		iounmap(rcba_base);
		return;
	}

	/* HPET disabled in HPTC. Trying to enable */
	writel(val | 0x80, rcba_base + 0x3404);
	val = readl(rcba_base + 0x3404);

	if (!(val & 0x80)) {
		err = 1;
	} else {
		val = val & 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
	}

	if (err) {
		force_hpet_address = 0;
		iounmap(rcba_base);
		dev_printk(KERN_DEBUG, &dev->dev,
			"Failed to force enable HPET\n");
	} else {
		force_hpet_resume_type = ICH_FORCE_HPET_RESUME;
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
	}
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
			 ich_force_enable_hpet);
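
/*
 * The device whose config space was used to force-enable the HPET is
 * cached here so the matching *_force_hpet_resume() hook can repeat
 * the register write after a suspend/resume cycle.
 */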
static struct pci_dev *cached_dev;

static void old_ich_force_hpet_resume(void)
{
	u32 val;
	u32 uninitialized_var(gen_cntl);

	if (!force_hpet_address || !cached_dev)
		return;

	pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
	gen_cntl &= (~(0x7 << 15));
	gen_cntl |= (0x4 << 15);

	pci_write_config_dword(cached_dev, 0xD0, gen_cntl);
	pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
	val = gen_cntl >> 15;
	val &= 0x7;
	if (val == 0x4)
		printk(KERN_DEBUG "Force enabled HPET at resume\n");
	else
		BUG();
}

static void old_ich_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;
	u32 uninitialized_var(gen_cntl);

	if (hpet_address || force_hpet_address)
		return;

	pci_read_config_dword(dev, 0xD0, &gen_cntl);
	/*
	 * Bit 17 is the HPET enable bit.
	 * Bits 16:15 control the HPET base address.
	 */
	val = gen_cntl >> 15;
	val &= 0x7;
	if (val & 0x4) {
		val &= 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
			force_hpet_address);
		return;
	}

	/*
	 * HPET is disabled. Try enabling it at FED00000 and check
	 * whether it sticks.
	 */
	gen_cntl &= (~(0x7 << 15));
	gen_cntl |= (0x4 << 15);
	pci_write_config_dword(dev, 0xD0, gen_cntl);
	pci_read_config_dword(dev, 0xD0, &gen_cntl);

	val = gen_cntl >> 15;
	val &= 0x7;
	if (val & 0x4) {
		/* HPET is enabled. Just not reported by BIOS */
		val &= 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
		cached_dev = dev;
		force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME;
		return;
	}

	dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}

/*
 * Undocumented chipset features.  Only apply these quirks when the
 * user explicitly asked for them (hpet_force_user, i.e. booting with
 * hpet=force).
 */
static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
{
	if (hpet_force_user)
		old_ich_force_enable_hpet(dev);
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
			 old_ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
			 old_ich_force_enable_hpet);

static void vt8237_force_hpet_resume(void)
{
	u32 val;

	if (!force_hpet_address || !cached_dev)
		return;

	val = 0xfed00000 | 0x80;
	pci_write_config_dword(cached_dev, 0x68, val);

	pci_read_config_dword(cached_dev, 0x68, &val);
	if (val & 0x80)
		printk(KERN_DEBUG "Force enabled HPET at resume\n");
	else
		BUG();
}

static void vt8237_force_enable_hpet(struct pci_dev *dev)
{
	u32 uninitialized_var(val);

	if (!hpet_force_user || hpet_address || force_hpet_address)
		return;

	pci_read_config_dword(dev, 0x68, &val);
	/*
	 * Bit 7 is the HPET enable bit.
	 * Bits 31:10 are the HPET base address (contrary to what the
	 * datasheet claims).
	 */
	if (val & 0x80) {
		force_hpet_address = (val & ~0x3ff);
		dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
			force_hpet_address);
		return;
	}

	/*
	 * HPET is disabled. Try enabling it at FED00000 and check
	 * whether it sticks.
	 */
	val = 0xfed00000 | 0x80;
	pci_write_config_dword(dev, 0x68, val);

	pci_read_config_dword(dev, 0x68, &val);
	if (val & 0x80) {
		force_hpet_address = (val & ~0x3ff);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
		cached_dev = dev;
		force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
		return;
	}

	dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
			 vt8237_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
			 vt8237_force_enable_hpet);

/*
 * Undocumented chipset feature taken from LinuxBIOS.
 */
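/*
 * Config register 0x44 on these bridges holds the HPET base address;
 * bit 0 appears to act as the enable bit (hence the write of
 * 0xfed00001 below and the ~1 mask when reading the address back).
 */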
static void nvidia_force_hpet_resume(void)
{
	pci_write_config_dword(cached_dev, 0x44, 0xfed00001);
	printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

static void nvidia_force_enable_hpet(struct pci_dev *dev)
{
	u32 uninitialized_var(val);

	if (!hpet_force_user || hpet_address || force_hpet_address)
		return;

	pci_write_config_dword(dev, 0x44, 0xfed00001);
	pci_read_config_dword(dev, 0x44, &val);
	force_hpet_address = val & 0xfffffffe;
	force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME;
	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
		force_hpet_address);
	cached_dev = dev;
	return;
}

/* ISA Bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051,
			 nvidia_force_enable_hpet);
/* LPC bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366,
			 nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367,
			 nvidia_force_enable_hpet);
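
/*
 * Dispatcher used by the HPET resume path: redo whichever
 * chipset-specific force-enable was applied at boot, since a
 * BIOS-hidden HPET may come back disabled after suspend.
 */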
void force_hpet_resume(void)
{
	switch (force_hpet_resume_type) {
	case ICH_FORCE_HPET_RESUME:
		ich_force_hpet_resume();
		return;
	case OLD_ICH_FORCE_HPET_RESUME:
		old_ich_force_hpet_resume();
		return;
	case VT8237_FORCE_HPET_RESUME:
		vt8237_force_hpet_resume();
		return;
	case NVIDIA_FORCE_HPET_RESUME:
		nvidia_force_hpet_resume();
		return;
	default:
		break;
	}
}
#endif
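
/*
 * A minimal usage sketch, for illustration only: a resume hook that
 * wants to re-apply the force-enable simply calls force_hpet_resume().
 * The function name below is hypothetical and the block is compiled
 * out; the actual caller sits elsewhere in the kernel's HPET resume
 * handling.
 */
#if 0
static void example_platform_hpet_resume(void)
{
	/* Redo whichever chipset-specific HPET force-enable was applied. */
	force_hpet_resume();
}
#endif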