/*
 * This file contains work-arounds for x86 and x86_64 platform bugs.
 */
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/hpet.h>
#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)

static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
{
        u8 config, rev;
        u16 word;

        /* BIOS may enable hardware IRQ balancing for
         * E7520/E7320/E7525 (revision ID 0x9 and below)
         * based platforms.
         * Disable SW irqbalance/affinity on those platforms.
         */
        pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
        if (rev > 0x9)
                return;

        /* enable access to config space */
        pci_read_config_byte(dev, 0xf4, &config);
        pci_write_config_byte(dev, 0xf4, config|0x2);

        /*
         * read xTPR register.  We may not have a pci_dev for device 8
         * because it might be hidden until the above write.
         */
        pci_bus_read_config_word(dev->bus, PCI_DEVFN(8, 0), 0x4c, &word);

        if (!(word & (1 << 13))) {
                dev_info(&dev->dev, "Intel E7520/7320/7525 detected; "
                        "disabling irq balancing and affinity\n");
                noirqdebug_setup("");
#ifdef CONFIG_PROC_FS
                no_irq_affinity = 1;
#endif
        }

        /* put back the original value for config space */
        if (!(config & 0x2))
                pci_write_config_byte(dev, 0xf4, config);
}
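
/*
 * DECLARE_PCI_FIXUP_FINAL registers the quirk to run during the "final"
 * PCI fixup pass for each matching E7320/E7525/E7520 memory controller hub.
 */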
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH,
                        quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH,
                        quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH,
                        quirk_intel_irqbalance);
#endif

#if defined(CONFIG_HPET_TIMER)
unsigned long force_hpet_address;
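
/*
 * Records which chipset-specific hook force_hpet_resume() must call once a
 * force-enable has succeeded.
 */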
static enum {
        NONE_FORCE_HPET_RESUME,
        OLD_ICH_FORCE_HPET_RESUME,
        ICH_FORCE_HPET_RESUME,
        VT8237_FORCE_HPET_RESUME,
        NVIDIA_FORCE_HPET_RESUME,
        ATI_FORCE_HPET_RESUME,
} force_hpet_resume_type;

static void __iomem *rcba_base;
static void ich_force_hpet_resume(void)
{
        u32 val;

        if (!force_hpet_address)
                return;

        if (rcba_base == NULL)
                BUG();

        /* read the Function Disable register, dword mode only */
        val = readl(rcba_base + 0x3404);
        if (!(val & 0x80)) {
                /* HPET disabled in HPTC. Trying to enable */
                writel(val | 0x80, rcba_base + 0x3404);
        }

        val = readl(rcba_base + 0x3404);
        if (!(val & 0x80))
                BUG();
        else
                printk(KERN_DEBUG "Force enabled HPET at resume\n");

        return;
}
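
/*
 * RCBA (config offset 0xF0) points at the chipset's 16 kB memory-mapped
 * register block.  In that block, bit 7 of the HPTC register at offset
 * 0x3404 enables the HPET, and bits 1:0 select the decode page, giving a
 * base address of 0xFED00000, 0xFED01000, 0xFED02000 or 0xFED03000.
 */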
static void ich_force_enable_hpet(struct pci_dev *dev)
{
        u32 val;
        u32 uninitialized_var(rcba);
        int err = 0;

        if (hpet_address || force_hpet_address)
                return;

        pci_read_config_dword(dev, 0xF0, &rcba);
        rcba &= 0xFFFFC000;
        if (rcba == 0) {
                dev_printk(KERN_DEBUG, &dev->dev, "RCBA disabled; "
                        "cannot force enable HPET\n");
                return;
        }

        /* use bits 31:14, 16 kB aligned */
        rcba_base = ioremap_nocache(rcba, 0x4000);
        if (rcba_base == NULL) {
                dev_printk(KERN_DEBUG, &dev->dev, "ioremap failed; "
                        "cannot force enable HPET\n");
                return;
        }

        /* read the Function Disable register, dword mode only */
        val = readl(rcba_base + 0x3404);

        if (val & 0x80) {
                /* HPET is enabled in HPTC. Just not reported by BIOS */
                val = val & 0x3;
                force_hpet_address = 0xFED00000 | (val << 12);
                dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
                        "0x%lx\n", force_hpet_address);
                iounmap(rcba_base);
                return;
        }

        /* HPET disabled in HPTC. Trying to enable */
        writel(val | 0x80, rcba_base + 0x3404);
        val = readl(rcba_base + 0x3404);

        if (!(val & 0x80)) {
                err = 1;
        } else {
                val = val & 0x3;
                force_hpet_address = 0xFED00000 | (val << 12);
        }

        if (err) {
                force_hpet_address = 0;
                iounmap(rcba_base);
                dev_printk(KERN_DEBUG, &dev->dev,
                        "Failed to force enable HPET\n");
        } else {
                force_hpet_resume_type = ICH_FORCE_HPET_RESUME;
                dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
                        "0x%lx\n", force_hpet_address);
        }
}
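
/*
 * DECLARE_PCI_FIXUP_HEADER runs the quirk as soon as the matching device's
 * configuration header has been read during PCI enumeration, i.e. early in
 * boot, before the HPET code goes looking for force_hpet_address.
 */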
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
                         ich_force_enable_hpet);
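
/* Device whose config space the chipset-specific resume hook re-programs. */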
static struct pci_dev *cached_dev;

static void hpet_print_force_info(void)
{
        printk(KERN_INFO "HPET not enabled in BIOS. "
               "You might try hpet=force boot option\n");
}
static void old_ich_force_hpet_resume(void)
{
        u32 val;
        u32 uninitialized_var(gen_cntl);

        if (!force_hpet_address || !cached_dev)
                return;

        pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
        gen_cntl &= (~(0x7 << 15));
        gen_cntl |= (0x4 << 15);

        pci_write_config_dword(cached_dev, 0xD0, gen_cntl);
        pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
        val = gen_cntl >> 15;
        val &= 0x7;
        if (val == 0x4)
                printk(KERN_DEBUG "Force enabled HPET at resume\n");
        else
                BUG();
}
static void old_ich_force_enable_hpet(struct pci_dev *dev)
{
        u32 val;
        u32 uninitialized_var(gen_cntl);

        if (hpet_address || force_hpet_address)
                return;

        pci_read_config_dword(dev, 0xD0, &gen_cntl);
        /*
         * Bit 17 is the HPET enable bit.
         * Bits 16:15 control the HPET base address.
         */
        val = gen_cntl >> 15;
        val &= 0x7;
        if (val & 0x4) {
                val &= 0x3;
                force_hpet_address = 0xFED00000 | (val << 12);
                dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
                           force_hpet_address);
                return;
        }

        /*
         * HPET is disabled.  Try enabling it at FED00000 and check
         * whether it sticks.
         */
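        /*
         * ~(0x7 << 15) clears bits 17:15; 0x4 << 15 then sets bit 17 (the
         * enable bit) and leaves the address-select bits 16:15 at 0, i.e.
         * base 0xFED00000.
         */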
        gen_cntl &= (~(0x7 << 15));
        gen_cntl |= (0x4 << 15);
        pci_write_config_dword(dev, 0xD0, gen_cntl);
        pci_read_config_dword(dev, 0xD0, &gen_cntl);

        val = gen_cntl >> 15;
        val &= 0x7;
        if (val & 0x4) {
                /* HPET is enabled in HPTC. Just not reported by BIOS */
                val &= 0x3;
                force_hpet_address = 0xFED00000 | (val << 12);
                dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
                        "0x%lx\n", force_hpet_address);
                cached_dev = dev;
                force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME;
                return;
        }

        dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}

/*
 * Undocumented chipset features. Only force-enable the HPET if the user
 * explicitly asked for it.
 */
static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
{
        if (hpet_force_user)
                old_ich_force_enable_hpet(dev);
        else
                hpet_print_force_info();
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
                         old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
                         old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
                         old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
                         old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
                         old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
                         old_ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
                         old_ich_force_enable_hpet);

static void vt8237_force_hpet_resume(void)
{
        u32 val;

        if (!force_hpet_address || !cached_dev)
                return;

        val = 0xfed00000 | 0x80;
        pci_write_config_dword(cached_dev, 0x68, val);

        pci_read_config_dword(cached_dev, 0x68, &val);
        if (val & 0x80)
                printk(KERN_DEBUG "Force enabled HPET at resume\n");
        else
                BUG();
}

static void vt8237_force_enable_hpet(struct pci_dev *dev)
{
        u32 uninitialized_var(val);

        if (hpet_address || force_hpet_address)
                return;

        if (!hpet_force_user) {
                hpet_print_force_info();
                return;
        }

        pci_read_config_dword(dev, 0x68, &val);
        /*
         * Bit 7 is the HPET enable bit.
         * Bits 31:10 are the HPET base address (contrary to what the
         * datasheet claims).
         */
        if (val & 0x80) {
                force_hpet_address = (val & ~0x3ff);
                dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
                           force_hpet_address);
                return;
        }

        /*
         * HPET is disabled.  Try enabling it at FED00000 and check
         * whether it sticks.
         */
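        /* 0xfed00000 | 0x80: program the base address and set the enable bit. */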
        val = 0xfed00000 | 0x80;
        pci_write_config_dword(dev, 0x68, val);

        pci_read_config_dword(dev, 0x68, &val);
        if (val & 0x80) {
                force_hpet_address = (val & ~0x3ff);
                dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
                        "0x%lx\n", force_hpet_address);
                cached_dev = dev;
                force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
                return;
        }

        dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
                         vt8237_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
                         vt8237_force_enable_hpet);

static void ati_force_hpet_resume(void)
{
        pci_write_config_dword(cached_dev, 0x14, 0xfed00000);
        printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

static u32 ati_ixp4x0_rev(struct pci_dev *dev)
{
        u32 d;
        u8  b;

        pci_read_config_byte(dev, 0xac, &b);
        b &= ~(1 << 5);
        pci_write_config_byte(dev, 0xac, b);
        pci_read_config_dword(dev, 0x70, &d);
        d |= 1 << 8;
        pci_write_config_dword(dev, 0x70, d);
        pci_read_config_dword(dev, 0x8, &d);
        d &= 0xff;
        dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d);

        return d;
}

static void ati_force_enable_hpet(struct pci_dev *dev)
{
        u32 d, val;
        u8  b;

        if (hpet_address || force_hpet_address)
                return;

        if (!hpet_force_user) {
                hpet_print_force_info();
                return;
        }

        d = ati_ixp4x0_rev(dev);
        if (d < 0x82)
                return;

        /* base address */
        pci_write_config_dword(dev, 0x14, 0xfed00000);
        pci_read_config_dword(dev, 0x14, &val);

        /* enable interrupt */
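        /*
         * 0xcd6/0xcd7 act as an index/data port pair into the southbridge's
         * register space; setting bit 0 of indexed register 0x72 enables the
         * HPET interrupt.
         */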
        outb(0x72, 0xcd6); b = inb(0xcd7);
        b |= 0x1;
        outb(0x72, 0xcd6); outb(b, 0xcd7);
        outb(0x72, 0xcd6); b = inb(0xcd7);
        if (!(b & 0x1))
                return;

        pci_read_config_dword(dev, 0x64, &d);
        d |= (1 << 10);
        pci_write_config_dword(dev, 0x64, d);
        pci_read_config_dword(dev, 0x64, &d);
        if (!(d & (1 << 10)))
                return;

        force_hpet_address = val;
        force_hpet_resume_type = ATI_FORCE_HPET_RESUME;
        dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
                   force_hpet_address);
        cached_dev = dev;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS,
                         ati_force_enable_hpet);

/*
 * Undocumented chipset feature taken from LinuxBIOS.
 */
static void nvidia_force_hpet_resume(void)
{
        pci_write_config_dword(cached_dev, 0x44, 0xfed00001);
        printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

static void nvidia_force_enable_hpet(struct pci_dev *dev)
{
        u32 uninitialized_var(val);

        if (hpet_address || force_hpet_address)
                return;

        if (!hpet_force_user) {
                hpet_print_force_info();
                return;
        }
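
        /*
         * Writing 0xfed00001 to config register 0x44 programs the HPET base;
         * bit 0 apparently acts as the enable bit, so the address is read
         * back with bit 0 masked off.
         */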
        pci_write_config_dword(dev, 0x44, 0xfed00001);
        pci_read_config_dword(dev, 0x44, &val);
        force_hpet_address = val & 0xfffffffe;
        force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME;
        dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
                   force_hpet_address);
        cached_dev = dev;
        return;
}

/* ISA bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050,
                         nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051,
                         nvidia_force_enable_hpet);

/* LPC bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260,
                         nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
                         nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
                         nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362,
                         nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363,
                         nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364,
                         nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365,
                         nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366,
                         nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367,
                         nvidia_force_enable_hpet);
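
/*
 * Called from the HPET resume path: the chipset may come back from suspend
 * with the forced HPET enable lost, so re-run the chipset-specific enable
 * sequence recorded in force_hpet_resume_type.
 */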
void force_hpet_resume(void)
{
        switch (force_hpet_resume_type) {
        case ICH_FORCE_HPET_RESUME:
                ich_force_hpet_resume();
                return;
        case OLD_ICH_FORCE_HPET_RESUME:
                old_ich_force_hpet_resume();
                return;
        case VT8237_FORCE_HPET_RESUME:
                vt8237_force_hpet_resume();
                return;
        case NVIDIA_FORCE_HPET_RESUME:
                nvidia_force_hpet_resume();
                return;
        case ATI_FORCE_HPET_RESUME:
                ati_force_hpet_resume();
                return;
        default:
                break;
        }
}
#endif