pci-quirks.c

/*
 * This file contains code to reset and initialize USB host controllers.
 * Some of it includes work-arounds for PCI hardware and BIOS quirks.
 * It may need to run early during booting -- before USB would normally
 * initialize -- to ensure that Linux doesn't use any legacy modes.
 *
 * Copyright (c) 1999 Martin Mares <mj@ucw.cz>
 * (and others)
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include "pci-quirks.h"
#include "xhci-ext-caps.h"

#define UHCI_USBLEGSUP 0xc0 /* legacy support */
#define UHCI_USBCMD 0 /* command register */
#define UHCI_USBINTR 4 /* interrupt register */
#define UHCI_USBLEGSUP_RWC 0x8f00 /* the R/WC bits */
#define UHCI_USBLEGSUP_RO 0x5040 /* R/O and reserved bits */
#define UHCI_USBCMD_RUN 0x0001 /* RUN/STOP bit */
#define UHCI_USBCMD_HCRESET 0x0002 /* Host Controller reset */
#define UHCI_USBCMD_EGSM 0x0008 /* Global Suspend Mode */
#define UHCI_USBCMD_CONFIGURE 0x0040 /* Config Flag */
#define UHCI_USBINTR_RESUME 0x0002 /* Resume interrupt enable */

#define OHCI_CONTROL 0x04
#define OHCI_CMDSTATUS 0x08
#define OHCI_INTRSTATUS 0x0c
#define OHCI_INTRENABLE 0x10
#define OHCI_INTRDISABLE 0x14
#define OHCI_FMINTERVAL 0x34
#define OHCI_HCR (1 << 0) /* host controller reset */
#define OHCI_OCR (1 << 3) /* ownership change request */
#define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */
#define OHCI_CTRL_IR (1 << 8) /* interrupt routing */
#define OHCI_INTR_OC (1 << 30) /* ownership change */

#define EHCI_HCC_PARAMS 0x08 /* extended capabilities */
#define EHCI_USBCMD 0 /* command register */
#define EHCI_USBCMD_RUN (1 << 0) /* RUN/STOP bit */
#define EHCI_USBSTS 4 /* status register */
#define EHCI_USBSTS_HALTED (1 << 12) /* HCHalted bit */
#define EHCI_USBINTR 8 /* interrupt register */
#define EHCI_CONFIGFLAG 0x40 /* configured flag register */
#define EHCI_USBLEGSUP 0 /* legacy support register */
#define EHCI_USBLEGSUP_BIOS (1 << 16) /* BIOS semaphore */
#define EHCI_USBLEGSUP_OS (1 << 24) /* OS semaphore */
#define EHCI_USBLEGCTLSTS 4 /* legacy control/status */
#define EHCI_USBLEGCTLSTS_SOOE (1 << 13) /* SMI on ownership change */

/* AMD quirk use */
#define AB_REG_BAR_LOW 0xe0
#define AB_REG_BAR_HIGH 0xe1
#define AB_REG_BAR_SB700 0xf0
#define AB_INDX(addr) ((addr) + 0x00)
#define AB_DATA(addr) ((addr) + 0x04)
#define AX_INDXC 0x30
#define AX_DATAC 0x34

#define NB_PCIE_INDX_ADDR 0xe0
#define NB_PCIE_INDX_DATA 0xe4
#define PCIE_P_CNTL 0x10040
#define BIF_NB 0x10002
#define NB_PIF0_PWRDOWN_0 0x01100012
#define NB_PIF0_PWRDOWN_1 0x01100013

#define USB_INTEL_XUSB2PR 0xD0
#define USB_INTEL_USB3_PSSEN 0xD8

static struct amd_chipset_info {
        struct pci_dev *nb_dev;
        struct pci_dev *smbus_dev;
        int nb_type;
        int sb_type;
        int isoc_reqs;
        int probe_count;
        int probe_result;
} amd_chipset;

static DEFINE_SPINLOCK(amd_lock);
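
/*
 * amd_chipset caches the result of the chipset probe below.  probe_count is
 * a reference count: the first caller of usb_amd_find_chipset_info() does
 * the actual PCI detection, later callers just reuse probe_result, and the
 * pci_dev references are dropped again when the last user calls
 * usb_amd_dev_put().  amd_lock protects this cached state as well as the
 * isoc_reqs counting done in usb_amd_quirk_pll().
 */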

int usb_amd_find_chipset_info(void)
{
        u8 rev = 0;
        unsigned long flags;
        struct amd_chipset_info info;
        int ret;

        spin_lock_irqsave(&amd_lock, flags);

        /* probe only once */
        if (amd_chipset.probe_count > 0) {
                amd_chipset.probe_count++;
                spin_unlock_irqrestore(&amd_lock, flags);
                return amd_chipset.probe_result;
        }
        memset(&info, 0, sizeof(info));
        spin_unlock_irqrestore(&amd_lock, flags);

        info.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
        if (info.smbus_dev) {
                rev = info.smbus_dev->revision;
                if (rev >= 0x40)
                        info.sb_type = 1;
                else if (rev >= 0x30 && rev <= 0x3b)
                        info.sb_type = 3;
        } else {
                info.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
                                0x780b, NULL);
                if (!info.smbus_dev) {
                        ret = 0;
                        goto commit;
                }
                rev = info.smbus_dev->revision;
                if (rev >= 0x11 && rev <= 0x18)
                        info.sb_type = 2;
        }

        if (info.sb_type == 0) {
                if (info.smbus_dev) {
                        pci_dev_put(info.smbus_dev);
                        info.smbus_dev = NULL;
                }
                ret = 0;
                goto commit;
        }

        info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
        if (info.nb_dev) {
                info.nb_type = 1;
        } else {
                info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
                if (info.nb_dev) {
                        info.nb_type = 2;
                } else {
                        info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
                                        0x9600, NULL);
                        if (info.nb_dev)
                                info.nb_type = 3;
                }
        }

        ret = info.probe_result = 1;
        printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");

commit:
        spin_lock_irqsave(&amd_lock, flags);
        if (amd_chipset.probe_count > 0) {
                /* race - someone else was faster - drop devices */

                /* Mark that we were here */
                amd_chipset.probe_count++;
                ret = amd_chipset.probe_result;

                spin_unlock_irqrestore(&amd_lock, flags);

                if (info.nb_dev)
                        pci_dev_put(info.nb_dev);
                if (info.smbus_dev)
                        pci_dev_put(info.smbus_dev);
        } else {
                /* no race - commit the result */
                info.probe_count++;
                amd_chipset = info;
                spin_unlock_irqrestore(&amd_lock, flags);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);

/*
 * The hardware normally enables the A-link power management feature, which
 * lets the system lower the power consumption in idle states.
 *
 * This USB quirk prevents the link going into that lower power state
 * during isochronous transfers.
 *
 * Without this quirk, isochronous streams on OHCI/EHCI/xHCI controllers of
 * some AMD platforms may stutter or have breaks occasionally.
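 *
 * Typical pairing in a host controller driver (an illustrative sketch only;
 * the real call sites live in the OHCI/EHCI/xHCI PCI glue):
 *
 *      usb_amd_find_chipset_info();  - at probe time, detects affected chipsets
 *      usb_amd_quirk_pll_disable();  - before isochronous streaming starts
 *      usb_amd_quirk_pll_enable();   - once the last isochronous stream stops
 *      usb_amd_dev_put();            - at remove time, drops the device references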
 */
static void usb_amd_quirk_pll(int disable)
{
        u32 addr, addr_low, addr_high, val;
        u32 bit = disable ? 0 : 1;
        unsigned long flags;

        spin_lock_irqsave(&amd_lock, flags);

        if (disable) {
                amd_chipset.isoc_reqs++;
                if (amd_chipset.isoc_reqs > 1) {
                        spin_unlock_irqrestore(&amd_lock, flags);
                        return;
                }
        } else {
                amd_chipset.isoc_reqs--;
                if (amd_chipset.isoc_reqs > 0) {
                        spin_unlock_irqrestore(&amd_lock, flags);
                        return;
                }
        }
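
        /*
         * Locate the A-link bridge register window.  For sb_type 1 and 2
         * (the newer SB800/Hudson-2 class parts, going by the revision checks
         * in usb_amd_find_chipset_info()) the window's address is read
         * through the south bridge's PM index/data I/O ports 0xcd6/0xcd7;
         * for sb_type 3 (older SB700 class) it is read from the SMBus
         * device's config space at AB_REG_BAR_SB700.
         */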
        if (amd_chipset.sb_type == 1 || amd_chipset.sb_type == 2) {
                outb_p(AB_REG_BAR_LOW, 0xcd6);
                addr_low = inb_p(0xcd7);
                outb_p(AB_REG_BAR_HIGH, 0xcd6);
                addr_high = inb_p(0xcd7);
                addr = addr_high << 8 | addr_low;

                outl_p(0x30, AB_INDX(addr));
                outl_p(0x40, AB_DATA(addr));
                outl_p(0x34, AB_INDX(addr));
                val = inl_p(AB_DATA(addr));
        } else if (amd_chipset.sb_type == 3) {
                pci_read_config_dword(amd_chipset.smbus_dev,
                                AB_REG_BAR_SB700, &addr);
                outl(AX_INDXC, AB_INDX(addr));
                outl(0x40, AB_DATA(addr));
                outl(AX_DATAC, AB_INDX(addr));
                val = inl(AB_DATA(addr));
        } else {
                spin_unlock_irqrestore(&amd_lock, flags);
                return;
        }

        if (disable) {
                val &= ~0x08;
                val |= (1 << 4) | (1 << 9);
        } else {
                val |= 0x08;
                val &= ~((1 << 4) | (1 << 9));
        }
        outl_p(val, AB_DATA(addr));

        if (!amd_chipset.nb_dev) {
                spin_unlock_irqrestore(&amd_lock, flags);
                return;
        }

        if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) {
                addr = PCIE_P_CNTL;
                pci_write_config_dword(amd_chipset.nb_dev,
                                NB_PCIE_INDX_ADDR, addr);
                pci_read_config_dword(amd_chipset.nb_dev,
                                NB_PCIE_INDX_DATA, &val);

                val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
                val |= bit | (bit << 3) | (bit << 12);
                val |= ((!bit) << 4) | ((!bit) << 9);
                pci_write_config_dword(amd_chipset.nb_dev,
                                NB_PCIE_INDX_DATA, val);

                addr = BIF_NB;
                pci_write_config_dword(amd_chipset.nb_dev,
                                NB_PCIE_INDX_ADDR, addr);
                pci_read_config_dword(amd_chipset.nb_dev,
                                NB_PCIE_INDX_DATA, &val);
                val &= ~(1 << 8);
                val |= bit << 8;

                pci_write_config_dword(amd_chipset.nb_dev,
                                NB_PCIE_INDX_DATA, val);
        } else if (amd_chipset.nb_type == 2) {
                addr = NB_PIF0_PWRDOWN_0;
                pci_write_config_dword(amd_chipset.nb_dev,
                                NB_PCIE_INDX_ADDR, addr);
                pci_read_config_dword(amd_chipset.nb_dev,
                                NB_PCIE_INDX_DATA, &val);
                if (disable)
                        val &= ~(0x3f << 7);
                else
                        val |= 0x3f << 7;

                pci_write_config_dword(amd_chipset.nb_dev,
                                NB_PCIE_INDX_DATA, val);

                addr = NB_PIF0_PWRDOWN_1;
                pci_write_config_dword(amd_chipset.nb_dev,
                                NB_PCIE_INDX_ADDR, addr);
                pci_read_config_dword(amd_chipset.nb_dev,
                                NB_PCIE_INDX_DATA, &val);
                if (disable)
                        val &= ~(0x3f << 7);
                else
                        val |= 0x3f << 7;

                pci_write_config_dword(amd_chipset.nb_dev,
                                NB_PCIE_INDX_DATA, val);
        }

        spin_unlock_irqrestore(&amd_lock, flags);
        return;
}

void usb_amd_quirk_pll_disable(void)
{
        usb_amd_quirk_pll(1);
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);

void usb_amd_quirk_pll_enable(void)
{
        usb_amd_quirk_pll(0);
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);

void usb_amd_dev_put(void)
{
        struct pci_dev *nb, *smbus;
        unsigned long flags;

        spin_lock_irqsave(&amd_lock, flags);

        amd_chipset.probe_count--;
        if (amd_chipset.probe_count > 0) {
                spin_unlock_irqrestore(&amd_lock, flags);
                return;
        }

        /* save them to pci_dev_put outside of spinlock */
        nb = amd_chipset.nb_dev;
        smbus = amd_chipset.smbus_dev;

        amd_chipset.nb_dev = NULL;
        amd_chipset.smbus_dev = NULL;
        amd_chipset.nb_type = 0;
        amd_chipset.sb_type = 0;
        amd_chipset.isoc_reqs = 0;
        amd_chipset.probe_result = 0;

        spin_unlock_irqrestore(&amd_lock, flags);

        if (nb)
                pci_dev_put(nb);
        if (smbus)
                pci_dev_put(smbus);
}
EXPORT_SYMBOL_GPL(usb_amd_dev_put);

/*
 * Make sure the controller is completely inactive, unable to
 * generate interrupts or do DMA.
 */
void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
{
        /* Turn off PIRQ enable and SMI enable. (This also turns off the
         * BIOS's USB Legacy Support.) Turn off all the R/WC bits too.
         */
        pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_RWC);

        /* Reset the HC - this will force us to get a
         * new notification of any already connected
         * ports due to the virtual disconnect that it
         * implies.
         */
        outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD);
        mb();
        udelay(5);
        if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET)
                dev_warn(&pdev->dev, "HCRESET not completed yet!\n");

        /* Just to be safe, disable interrupt requests and
         * make sure the controller is stopped.
         */
        outw(0, base + UHCI_USBINTR);
        outw(0, base + UHCI_USBCMD);
}
EXPORT_SYMBOL_GPL(uhci_reset_hc);

/*
 * Initialize a controller that was newly discovered or has just been
 * resumed. In either case we can't be sure of its previous state.
 *
 * Returns: 1 if the controller was reset, 0 otherwise.
 */
int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
{
        u16 legsup;
        unsigned int cmd, intr;

        /*
         * When restarting a suspended controller, we expect all the
         * settings to be the same as we left them:
         *
         *      PIRQ and SMI disabled, no R/W bits set in USBLEGSUP;
         *      Controller is stopped and configured with EGSM set;
         *      No interrupts enabled except possibly Resume Detect.
         *
         * If any of these conditions are violated we do a complete reset.
         */
        pci_read_config_word(pdev, UHCI_USBLEGSUP, &legsup);
        if (legsup & ~(UHCI_USBLEGSUP_RO | UHCI_USBLEGSUP_RWC)) {
                dev_dbg(&pdev->dev, "%s: legsup = 0x%04x\n",
                                __func__, legsup);
                goto reset_needed;
        }

        cmd = inw(base + UHCI_USBCMD);
        if ((cmd & UHCI_USBCMD_RUN) || !(cmd & UHCI_USBCMD_CONFIGURE) ||
                        !(cmd & UHCI_USBCMD_EGSM)) {
                dev_dbg(&pdev->dev, "%s: cmd = 0x%04x\n",
                                __func__, cmd);
                goto reset_needed;
        }

        intr = inw(base + UHCI_USBINTR);
        if (intr & (~UHCI_USBINTR_RESUME)) {
                dev_dbg(&pdev->dev, "%s: intr = 0x%04x\n",
                                __func__, intr);
                goto reset_needed;
        }
        return 0;

reset_needed:
        dev_dbg(&pdev->dev, "Performing full reset\n");
        uhci_reset_hc(pdev, base);
        return 1;
}
EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);

static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
{
        u16 cmd;

        return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask);
}

#define pio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_IO)
#define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY)

static void __devinit quirk_usb_handoff_uhci(struct pci_dev *pdev)
{
        unsigned long base = 0;
        int i;

        if (!pio_enabled(pdev))
                return;

        for (i = 0; i < PCI_ROM_RESOURCE; i++)
                if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
                        base = pci_resource_start(pdev, i);
                        break;
                }
        if (base)
                uhci_check_and_reset_hc(pdev, base);
}

static int __devinit mmio_resource_enabled(struct pci_dev *pdev, int idx)
{
        return pci_resource_start(pdev, idx) && mmio_enabled(pdev);
}

static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
{
        void __iomem *base;
        u32 control;

        if (!mmio_resource_enabled(pdev, 0))
                return;

        base = pci_ioremap_bar(pdev, 0);
        if (base == NULL)
                return;

        control = readl(base + OHCI_CONTROL);

/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
#ifdef __hppa__
#define OHCI_CTRL_MASK (OHCI_CTRL_RWC | OHCI_CTRL_IR)
#else
#define OHCI_CTRL_MASK OHCI_CTRL_RWC
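
        /*
         * If an SMM driver still owns the controller (InterruptRouting set),
         * enable the ownership-change interrupt, set the OwnershipChangeRequest
         * bit and wait for the BIOS/SMM driver to hand the controller over.
         */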
        if (control & OHCI_CTRL_IR) {
                int wait_time = 500; /* arbitrary; 5 seconds */
                writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
                writel(OHCI_OCR, base + OHCI_CMDSTATUS);
                while (wait_time > 0 &&
                                readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) {
                        wait_time -= 10;
                        msleep(10);
                }
                if (wait_time <= 0)
                        dev_warn(&pdev->dev, "OHCI: BIOS handoff failed"
                                        " (BIOS bug?) %08x\n",
                                        readl(base + OHCI_CONTROL));
        }
#endif

        /* reset controller, preserving RWC (and possibly IR) */
        writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
        readl(base + OHCI_CONTROL);

        /* Some NVIDIA controllers stop working if kept in RESET for too long */
        if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) {
                u32 fminterval;
                int cnt;

                /* drive reset for at least 50 ms (7.1.7.5) */
                msleep(50);

                /* software reset of the controller, preserving HcFmInterval */
                fminterval = readl(base + OHCI_FMINTERVAL);
                writel(OHCI_HCR, base + OHCI_CMDSTATUS);

                /* reset requires max 10 us delay */
                for (cnt = 30; cnt > 0; --cnt) { /* ... allow extra time */
                        if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
                                break;
                        udelay(1);
                }
                writel(fminterval, base + OHCI_FMINTERVAL);

                /* Now we're in the SUSPEND state with all devices reset
                 * and wakeups and interrupts disabled
                 */
        }

        /*
         * disable interrupts
         */
        writel(~(u32)0, base + OHCI_INTRDISABLE);
        writel(~(u32)0, base + OHCI_INTRSTATUS);

        iounmap(base);
}

static const struct dmi_system_id __devinitconst ehci_dmi_nohandoff_table[] = {
        {
                /* Pegatron Lucid (ExoPC) */
                .matches = {
                        DMI_MATCH(DMI_BOARD_NAME, "EXOPG06411"),
                        DMI_MATCH(DMI_BIOS_VERSION, "Lucid-CE-133"),
                },
        },
        {
                /* Pegatron Lucid (Ordissimo AIRIS) */
                .matches = {
                        DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
                        DMI_MATCH(DMI_BIOS_VERSION, "Lucid-GE-133"),
                },
        },
        { }
};

static void __devinit ehci_bios_handoff(struct pci_dev *pdev,
                                        void __iomem *op_reg_base,
                                        u32 cap, u8 offset)
{
        int try_handoff = 1, tried_handoff = 0;

        /* The Pegatron Lucid tablet sporadically waits for 98 seconds trying
         * the handoff on its unused controller. Skip it. */
        if (pdev->vendor == 0x8086 && pdev->device == 0x283a) {
                if (dmi_check_system(ehci_dmi_nohandoff_table))
                        try_handoff = 0;
        }

        if (try_handoff && (cap & EHCI_USBLEGSUP_BIOS)) {
                dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n");

#if 0
/* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on,
 * but that seems dubious in general (the BIOS left it off intentionally)
 * and is known to prevent some systems from booting. so we won't do this
 * unless maybe we can determine when we're on a system that needs SMI forced.
 */
                /* BIOS workaround (?): be sure the pre-Linux code
                 * receives the SMI
                 */
                pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &val);
                pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS,
                                val | EHCI_USBLEGCTLSTS_SOOE);
#endif

                /* some systems get upset if this semaphore is
                 * set for any other reason than forcing a BIOS
                 * handoff.
                 */
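                /* offset + 3 is the config byte that holds the OS-owned
                 * semaphore (EHCI_USBLEGSUP_OS is bit 24); setting it
                 * requests the handoff from the BIOS */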
                pci_write_config_byte(pdev, offset + 3, 1);
        }

        /* if boot firmware now owns EHCI, spin till it hands it over. */
        if (try_handoff) {
                int msec = 1000;
                while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
                        tried_handoff = 1;
                        msleep(10);
                        msec -= 10;
                        pci_read_config_dword(pdev, offset, &cap);
                }
        }

        if (cap & EHCI_USBLEGSUP_BIOS) {
                /* well, possibly buggy BIOS... try to shut it down,
                 * and hope nothing goes too wrong
                 */
                if (try_handoff)
                        dev_warn(&pdev->dev, "EHCI: BIOS handoff failed"
                                        " (BIOS bug?) %08x\n", cap);
                pci_write_config_byte(pdev, offset + 2, 0);
        }

        /* just in case, always disable EHCI SMIs */
        pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, 0);

        /* If the BIOS ever owned the controller then we can't expect
         * any power sessions to remain intact.
         */
        if (tried_handoff)
                writel(0, op_reg_base + EHCI_CONFIGFLAG);
}

static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
{
        void __iomem *base, *op_reg_base;
        u32 hcc_params, cap, val;
        u8 offset, cap_length;
        int wait_time, delta, count = 256/4;

        if (!mmio_resource_enabled(pdev, 0))
                return;

        base = pci_ioremap_bar(pdev, 0);
        if (base == NULL)
                return;

        cap_length = readb(base);
        op_reg_base = base + cap_length;

        /* EHCI 0.96 and later may have "extended capabilities"
         * spec section 5.1 explains the bios handoff, e.g. for
         * booting from USB disk or using a usb keyboard
         */
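
        /*
         * Bits 15:8 of HCCPARAMS (the EECP field) give the PCI config-space
         * offset of the first extended capability; each capability register
         * carries its ID in bits 7:0 and the offset of the next capability
         * in bits 15:8, so the loop below just walks that chain (bounded by
         * 'count' to guard against malformed lists).
         */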
        hcc_params = readl(base + EHCI_HCC_PARAMS);
        offset = (hcc_params >> 8) & 0xff;
        while (offset && --count) {
                pci_read_config_dword(pdev, offset, &cap);
                switch (cap & 0xff) {
                case 1:
                        ehci_bios_handoff(pdev, op_reg_base, cap, offset);
                        break;
                case 0: /* Illegal reserved cap, set cap=0 so we exit */
                        cap = 0; /* then fallthrough... */
                default:
                        dev_warn(&pdev->dev, "EHCI: unrecognized capability "
                                        "%02x\n", cap & 0xff);
                }
                offset = (cap >> 8) & 0xff;
        }
        if (!count)
                dev_printk(KERN_DEBUG, &pdev->dev, "EHCI: capability loop?\n");

        /*
         * halt EHCI & disable its interrupts in any case
         */
        val = readl(op_reg_base + EHCI_USBSTS);
        if ((val & EHCI_USBSTS_HALTED) == 0) {
                val = readl(op_reg_base + EHCI_USBCMD);
                val &= ~EHCI_USBCMD_RUN;
                writel(val, op_reg_base + EHCI_USBCMD);

                wait_time = 2000;
                delta = 100;
                do {
                        writel(0x3f, op_reg_base + EHCI_USBSTS);
                        udelay(delta);
                        wait_time -= delta;
                        val = readl(op_reg_base + EHCI_USBSTS);
                        if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
                                break;
                        }
                } while (wait_time > 0);
        }
        writel(0, op_reg_base + EHCI_USBINTR);
        writel(0x3f, op_reg_base + EHCI_USBSTS);

        iounmap(base);
}

/*
 * handshake - spin reading a register until handshake completes
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @wait_usec: timeout in microseconds
 * @delay_usec: delay in microseconds to wait between polling
 *
 * Polls a register every delay_usec microseconds.
 * Returns 0 when the mask bits have the value done.
 * Returns -ETIMEDOUT if this condition is not true after
 * wait_usec microseconds have passed.
 */
static int handshake(void __iomem *ptr, u32 mask, u32 done,
                int wait_usec, int delay_usec)
{
        u32 result;

        do {
                result = readl(ptr);
                result &= mask;
                if (result == done)
                        return 0;
                udelay(delay_usec);
                wait_usec -= delay_usec;
        } while (wait_usec > 0);
        return -ETIMEDOUT;
}

bool usb_is_intel_switchable_xhci(struct pci_dev *pdev)
{
        return pdev->class == PCI_CLASS_SERIAL_USB_XHCI &&
                pdev->vendor == PCI_VENDOR_ID_INTEL &&
                pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI;
}
EXPORT_SYMBOL_GPL(usb_is_intel_switchable_xhci);

/*
 * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
 * share some number of ports. These ports can be switched between either
 * controller. Not all of the ports under the EHCI host controller may be
 * switchable.
 *
 * The ports should be switched over to xHCI before PCI probes for any device
 * start. This avoids active devices under EHCI being disconnected during the
 * port switchover, which could cause loss of data on USB storage devices, or
 * failed boot when the root file system is on a USB mass storage device and is
 * enumerated under EHCI first.
 *
 * We write into the xHC's PCI configuration space in some Intel-specific
 * registers to switch the ports over. The USB 3.0 terminations and the USB
 * 2.0 data wires are switched separately. We want to enable the SuperSpeed
 * terminations before switching the USB 2.0 wires over, so that USB 3.0
 * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
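 *
 * Writing all ones to USB3_PSSEN and XUSB2PR below asks for every switchable
 * port to be routed to the xHCI host; reading each register back afterwards
 * reports which ports the chipset actually switched.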
 */
void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
{
        u32 ports_available;

        ports_available = 0xffffffff;
        /* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
         * Register, to turn on SuperSpeed terminations for all
         * available ports.
         */
        pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
                        cpu_to_le32(ports_available));

        pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
                        &ports_available);
        dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled "
                        "under xHCI: 0x%x\n", ports_available);

        ports_available = 0xffffffff;
        /* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
         * switch the USB 2.0 power and data lines over to the xHCI
         * host.
         */
        pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
                        cpu_to_le32(ports_available));

        pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
                        &ports_available);
        dev_dbg(&xhci_pdev->dev, "USB 2.0 ports that are now switched over "
                        "to xHCI: 0x%x\n", ports_available);
}
EXPORT_SYMBOL_GPL(usb_enable_xhci_ports);

/**
 * PCI Quirks for xHCI.
 *
 * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
 * It signals to the BIOS that the OS wants control of the host controller,
 * and then waits 5 seconds for the BIOS to hand over control.
 * If we time out, assume the BIOS is broken and take control anyway.
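 *
 * Once ownership is settled we also disable any BIOS SMIs and halt the
 * controller with its interrupts masked, so that xhci-hcd later starts from
 * a known state.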
 */
static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
{
        void __iomem *base;
        int ext_cap_offset;
        void __iomem *op_reg_base;
        u32 val;
        int timeout;

        if (!mmio_resource_enabled(pdev, 0))
                return;

        base = ioremap_nocache(pci_resource_start(pdev, 0),
                        pci_resource_len(pdev, 0));
        if (base == NULL)
                return;

        /*
         * Find the Legacy Support Capability register -
         * this is optional for xHCI host controllers.
         */
        ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
        do {
                if (!ext_cap_offset)
                        /* We've reached the end of the extended capabilities */
                        goto hc_init;
                val = readl(base + ext_cap_offset);
                if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
                        break;
                ext_cap_offset = xhci_find_next_cap_offset(base, ext_cap_offset);
        } while (1);

        /* If the BIOS owns the HC, signal that the OS wants it, and wait */
        if (val & XHCI_HC_BIOS_OWNED) {
                writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);

                /* Wait for 5 seconds with 10 microsecond polling interval */
                timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
                                0, 5000, 10);

                /* Assume a buggy BIOS and take HC ownership anyway */
                if (timeout) {
                        dev_warn(&pdev->dev, "xHCI BIOS handoff failed"
                                        " (BIOS bug ?) %08x\n", val);
                        writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
                }
        }

        /* Disable any BIOS SMIs */
        writel(XHCI_LEGACY_DISABLE_SMI,
                        base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);

        if (usb_is_intel_switchable_xhci(pdev))
                usb_enable_xhci_ports(pdev);
hc_init:
        op_reg_base = base + XHCI_HC_LENGTH(readl(base));

        /* Wait for the host controller to be ready before writing any
         * operational or runtime registers. Wait 5 seconds and no more.
         */
        timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
                        5000, 10);
        /* Assume a buggy HC and start HC initialization anyway */
        if (timeout) {
                val = readl(op_reg_base + XHCI_STS_OFFSET);
                dev_warn(&pdev->dev,
                                "xHCI HW not ready after 5 sec (HC bug?) "
                                "status = 0x%x\n", val);
        }

        /* Send the halt and disable interrupts command */
        val = readl(op_reg_base + XHCI_CMD_OFFSET);
        val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
        writel(val, op_reg_base + XHCI_CMD_OFFSET);

        /* Wait for the HC to halt - poll every 125 usec (one microframe). */
        timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
                        XHCI_MAX_HALT_USEC, 125);
        if (timeout) {
                val = readl(op_reg_base + XHCI_STS_OFFSET);
                dev_warn(&pdev->dev,
                                "xHCI HW did not halt within %d usec "
                                "status = 0x%x\n", XHCI_MAX_HALT_USEC, val);
        }

        iounmap(base);
}

static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
{
        if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
                quirk_usb_handoff_uhci(pdev);
        else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
                quirk_usb_handoff_ohci(pdev);
        else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
                quirk_usb_disable_ehci(pdev);
        else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
                quirk_usb_handoff_xhci(pdev);
}
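
/* Registered for every PCI device at the FINAL fixup stage; the class checks
 * above pick out the USB host controllers. */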
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);