driver_pci_host.c

/*
 * Broadcom specific AMBA
 * PCI Core in hostmode
 *
 * Copyright 2005 - 2011, Broadcom Corporation
 * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
 * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/pci.h>
#include <linux/export.h>
#include <linux/bcma/bcma.h>
#include <asm/paccess.h>

/* Probe a 32bit value on the bus and catch bus exceptions.
 * Returns nonzero on a bus exception.
 * This is MIPS specific */
#define mips_busprobe32(val, addr)      get_dbe((val), ((u32 *)(addr)))

/* Assume one-hot slot wiring */
#define BCMA_PCI_SLOT_MAX       16
#define PCI_CONFIG_SPACE_SIZE   256

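/* The PCIe core acts as a host controller only on SoC chip families
 * (chip IDs 0x47xx and 0x53xx); enable the core and probe its I/O space
 * to make sure it is actually reachable on the backplane. */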
bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
{
        struct bcma_bus *bus = pc->core->bus;
        u16 chipid_top;
        u32 tmp;

        chipid_top = (bus->chipinfo.id & 0xFF00);
        if (chipid_top != 0x4700 &&
            chipid_top != 0x5300)
                return false;

        bcma_core_enable(pc->core, 0);

        return !mips_busprobe32(tmp, pc->core->io_addr);
}

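/* Indirect access to the PCIe core's own configuration registers through
 * the CONFIG_ADDR/CONFIG_DATA register pair. The read-back of CONFIG_ADDR
 * makes sure the address write has reached the core before the data access
 * is issued. */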
static u32 bcma_pcie_read_config(struct bcma_drv_pci *pc, u32 address)
{
        pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
        pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
        return pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_DATA);
}

static void bcma_pcie_write_config(struct bcma_drv_pci *pc, u32 address,
                                   u32 data)
{
        pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
        pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
        pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_DATA, data);
}

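/* Build the backplane address for a type 0 configuration access to an
 * external device. Returns 0 if the slot number is out of range or the
 * PCIe data link is down (no external device present). */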
static u32 bcma_get_cfgspace_addr(struct bcma_drv_pci *pc, unsigned int dev,
                                  unsigned int func, unsigned int off)
{
        u32 addr = 0;

        /* Issue config commands only when the data link is up (at least
         * one external PCIe device is present).
         */
        if (dev >= 2 || !(bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_LSREG)
                          & BCMA_CORE_PCI_DLLP_LSREG_LINKUP))
                goto out;

        /* Type 0 transaction */
        /* Slide the PCI window to the appropriate slot */
        pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);
        /* Calculate the address */
        addr = pc->host_controller->host_cfg_addr;
        addr |= (dev << BCMA_CORE_PCI_CFG_SLOT_SHIFT);
        addr |= (func << BCMA_CORE_PCI_CFG_FUN_SHIFT);
        addr |= (off & ~3);

out:
        return addr;
}

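/* Read from the configuration space of an external PCI device. Device 0 is
 * the host bridge itself: offsets below 256 are read through the core's
 * PCICFG0 window, larger offsets through the indirect CONFIG_ADDR/
 * CONFIG_DATA registers. Any other device is read through a temporarily
 * ioremap()ed configuration window. */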
static int bcma_extpci_read_config(struct bcma_drv_pci *pc, unsigned int dev,
                                   unsigned int func, unsigned int off,
                                   void *buf, int len)
{
        int err = -EINVAL;
        u32 addr, val;
        void __iomem *mmio = NULL;

        WARN_ON(!pc->hostmode);
        if (unlikely(len != 1 && len != 2 && len != 4))
                goto out;
        if (dev == 0) {
                /* we support only two functions on device 0 */
                if (func > 1)
                        return -EINVAL;

                /* accesses to config registers with offsets >= 256
                 * require indirect access.
                 */
                if (off >= PCI_CONFIG_SPACE_SIZE) {
                        addr = (func << 12);
                        addr |= (off & 0x0FFF);
                        val = bcma_pcie_read_config(pc, addr);
                } else {
                        addr = BCMA_CORE_PCI_PCICFG0;
                        addr |= (func << 8);
                        addr |= (off & 0xfc);
                        val = pcicore_read32(pc, addr);
                }
        } else {
                addr = bcma_get_cfgspace_addr(pc, dev, func, off);
                if (unlikely(!addr))
                        goto out;

                err = -ENOMEM;
                mmio = ioremap_nocache(addr, sizeof(val));
                if (!mmio)
                        goto out;

                if (mips_busprobe32(val, mmio)) {
                        val = 0xffffffff;
                        goto unmap;
                }
        }
        val >>= (8 * (off & 3));

        switch (len) {
        case 1:
                *((u8 *)buf) = (u8)val;
                break;
        case 2:
                *((u16 *)buf) = (u16)val;
                break;
        case 4:
                *((u32 *)buf) = (u32)val;
                break;
        }
        err = 0;
unmap:
        if (mmio)
                iounmap(mmio);
out:
        return err;
}

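/* Write to the configuration space of an external PCI device. Sub-word
 * writes are performed as a read-modify-write of the containing 32-bit
 * word; on BCM4716/BCM4748 the MMIO write is read back to make sure it has
 * reached the device. */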
static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
                                    unsigned int func, unsigned int off,
                                    const void *buf, int len)
{
        int err = -EINVAL;
        u32 addr = 0, val = 0;
        void __iomem *mmio = NULL;
        u16 chipid = pc->core->bus->chipinfo.id;

        WARN_ON(!pc->hostmode);
        if (unlikely(len != 1 && len != 2 && len != 4))
                goto out;
        if (dev == 0) {
                /* accesses to config registers with offsets >= 256
                 * require indirect access.
                 */
                if (off < PCI_CONFIG_SPACE_SIZE) {
                        addr = pc->core->addr + BCMA_CORE_PCI_PCICFG0;
                        addr |= (func << 8);
                        addr |= (off & 0xfc);
                        mmio = ioremap_nocache(addr, sizeof(val));
                        if (!mmio)
                                goto out;
                }
        } else {
                addr = bcma_get_cfgspace_addr(pc, dev, func, off);
                if (unlikely(!addr))
                        goto out;

                err = -ENOMEM;
                mmio = ioremap_nocache(addr, sizeof(val));
                if (!mmio)
                        goto out;

                if (mips_busprobe32(val, mmio)) {
                        val = 0xffffffff;
                        goto unmap;
                }
        }

        switch (len) {
        case 1:
                val = readl(mmio);
                val &= ~(0xFF << (8 * (off & 3)));
                val |= *((const u8 *)buf) << (8 * (off & 3));
                break;
        case 2:
                val = readl(mmio);
                val &= ~(0xFFFF << (8 * (off & 3)));
                val |= *((const u16 *)buf) << (8 * (off & 3));
                break;
        case 4:
                val = *((const u32 *)buf);
                break;
        }
        if (dev == 0 && !addr) {
                /* accesses to config registers with offsets >= 256
                 * require indirect access.
                 */
                addr = (func << 12);
                addr |= (off & 0x0FFF);
                bcma_pcie_write_config(pc, addr, val);
        } else {
                writel(val, mmio);

                if (chipid == BCMA_CHIP_ID_BCM4716 ||
                    chipid == BCMA_CHIP_ID_BCM4748)
                        readl(mmio);
        }

        err = 0;
unmap:
        if (mmio)
                iounmap(mmio);
out:
        return err;
}

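/* pci_ops callbacks used by the generic PCI layer. The host controller
 * structure is recovered from the bus' pci_ops pointer via container_of()
 * and all config space accesses are serialized with cfgspace_lock. */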
static int bcma_core_pci_hostmode_read_config(struct pci_bus *bus,
                                              unsigned int devfn,
                                              int reg, int size, u32 *val)
{
        unsigned long flags;
        int err;
        struct bcma_drv_pci *pc;
        struct bcma_drv_pci_host *pc_host;

        pc_host = container_of(bus->ops, struct bcma_drv_pci_host, pci_ops);
        pc = pc_host->pdev;

        spin_lock_irqsave(&pc_host->cfgspace_lock, flags);
        err = bcma_extpci_read_config(pc, PCI_SLOT(devfn),
                                      PCI_FUNC(devfn), reg, val, size);
        spin_unlock_irqrestore(&pc_host->cfgspace_lock, flags);

        return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}

static int bcma_core_pci_hostmode_write_config(struct pci_bus *bus,
                                               unsigned int devfn,
                                               int reg, int size, u32 val)
{
        unsigned long flags;
        int err;
        struct bcma_drv_pci *pc;
        struct bcma_drv_pci_host *pc_host;

        pc_host = container_of(bus->ops, struct bcma_drv_pci_host, pci_ops);
        pc = pc_host->pdev;

        spin_lock_irqsave(&pc_host->cfgspace_lock, flags);
        err = bcma_extpci_write_config(pc, PCI_SLOT(devfn),
                                       PCI_FUNC(devfn), reg, &val, size);
        spin_unlock_irqrestore(&pc_host->cfgspace_lock, flags);

        return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}

/* return cap_offset if requested capability exists in the PCI config space */
static u8 __devinit bcma_find_pci_capability(struct bcma_drv_pci *pc,
                                             unsigned int dev,
                                             unsigned int func, u8 req_cap_id,
                                             unsigned char *buf, u32 *buflen)
{
        u8 cap_id;
        u8 cap_ptr = 0;
        u32 bufsize;
        u8 byte_val;

        /* check for Header type 0 */
        bcma_extpci_read_config(pc, dev, func, PCI_HEADER_TYPE, &byte_val,
                                sizeof(u8));
        if ((byte_val & 0x7f) != PCI_HEADER_TYPE_NORMAL)
                return cap_ptr;

        /* check if the capability pointer field exists */
        bcma_extpci_read_config(pc, dev, func, PCI_STATUS, &byte_val,
                                sizeof(u8));
        if (!(byte_val & PCI_STATUS_CAP_LIST))
                return cap_ptr;

        /* check if the capability pointer is 0x00 */
        bcma_extpci_read_config(pc, dev, func, PCI_CAPABILITY_LIST, &cap_ptr,
                                sizeof(u8));
        if (cap_ptr == 0x00)
                return cap_ptr;

        /* loop through the capability list and see if the requested
         * capability exists */
        bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id, sizeof(u8));
        while (cap_id != req_cap_id) {
                bcma_extpci_read_config(pc, dev, func, cap_ptr + 1, &cap_ptr,
                                        sizeof(u8));
                if (cap_ptr == 0x00)
                        return cap_ptr;
                bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id,
                                        sizeof(u8));
        }

        /* found the caller requested capability */
        if ((buf != NULL) && (buflen != NULL)) {
                u8 cap_data;

                bufsize = *buflen;
                if (!bufsize)
                        return cap_ptr;

                *buflen = 0;

                /* copy the capability data excluding cap ID and next ptr */
                cap_data = cap_ptr + 2;
                if ((bufsize + cap_data) > PCI_CONFIG_SPACE_SIZE)
                        bufsize = PCI_CONFIG_SPACE_SIZE - cap_data;
                *buflen = bufsize;
                while (bufsize--) {
                        bcma_extpci_read_config(pc, dev, func, cap_data, buf,
                                                sizeof(u8));
                        cap_data++;
                        buf++;
                }
        }

        return cap_ptr;
}

/* If the root port is capable of returning Config Request
 * Retry Status (CRS) Completion Status to software then
 * enable the feature.
 */
static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
{
        struct bcma_bus *bus = pc->core->bus;
        u8 cap_ptr, root_ctrl, root_cap, dev;
        u16 val16;
        int i;

        cap_ptr = bcma_find_pci_capability(pc, 0, 0, PCI_CAP_ID_EXP, NULL,
                                           NULL);
        root_cap = cap_ptr + PCI_EXP_RTCAP;
        bcma_extpci_read_config(pc, 0, 0, root_cap, &val16, sizeof(u16));
        if (val16 & BCMA_CORE_PCI_RC_CRS_VISIBILITY) {
                /* Enable CRS software visibility */
                root_ctrl = cap_ptr + PCI_EXP_RTCTL;
                val16 = PCI_EXP_RTCTL_CRSSVE;
                bcma_extpci_read_config(pc, 0, 0, root_ctrl, &val16,
                                        sizeof(u16));

                /* Initiate a configuration request to read the vendor id
                 * field of the device function's config space header after
                 * 100 ms wait time from the end of Reset. If the device is
                 * not done with its internal initialization, it must at
                 * least return a completion TLP, with a completion status
                 * of "Configuration Request Retry Status (CRS)". The root
                 * complex must complete the request to the host by returning
                 * a read-data value of 0001h for the Vendor ID field and
                 * all 1s for any additional bytes included in the request.
                 * Poll using the config reads for max wait time of 1 sec or
                 * until we receive the successful completion status. Repeat
                 * the procedure for all the devices.
                 */
                for (dev = 1; dev < BCMA_PCI_SLOT_MAX; dev++) {
                        for (i = 0; i < 100000; i++) {
                                bcma_extpci_read_config(pc, dev, 0,
                                                        PCI_VENDOR_ID, &val16,
                                                        sizeof(val16));
                                if (val16 != 0x1)
                                        break;
                                udelay(10);
                        }
                        if (val16 == 0x1)
                                bcma_err(bus, "PCI: Broken device in slot %d\n",
                                         dev);
                }
        }
}

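/* Set up the PCIe core as a PCI host controller: allocate and fill the
 * bcma_drv_pci_host structure, program the backplane-to-PCI address
 * windows, reset the root complex and finally register the controller
 * with the MIPS PCI code. */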
void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
{
        struct bcma_bus *bus = pc->core->bus;
        struct bcma_drv_pci_host *pc_host;
        u32 tmp;
        u32 pci_membase_1G;
        unsigned long io_map_base;

        bcma_info(bus, "PCIEcore in host mode found\n");

        if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
                bcma_info(bus, "This PCIE core is disabled and not working\n");
                return;
        }

        pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL);
        if (!pc_host) {
                bcma_err(bus, "can not allocate memory");
                return;
        }

        pc->host_controller = pc_host;
        pc_host->pci_controller.io_resource = &pc_host->io_resource;
        pc_host->pci_controller.mem_resource = &pc_host->mem_resource;
        pc_host->pci_controller.pci_ops = &pc_host->pci_ops;
        pc_host->pdev = pc;

        pci_membase_1G = BCMA_SOC_PCI_DMA;
        pc_host->host_cfg_addr = BCMA_SOC_PCI_CFG;

        pc_host->pci_ops.read = bcma_core_pci_hostmode_read_config;
        pc_host->pci_ops.write = bcma_core_pci_hostmode_write_config;

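        /* Default MMIO/I-O apertures; the chip specific code below
         * overrides the resources, the 1 GB DMA base and the config base
         * where needed. */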
        pc_host->mem_resource.name = "BCMA PCIcore external memory";
        pc_host->mem_resource.start = BCMA_SOC_PCI_DMA;
        pc_host->mem_resource.end = BCMA_SOC_PCI_DMA + BCMA_SOC_PCI_DMA_SZ - 1;
        pc_host->mem_resource.flags = IORESOURCE_MEM | IORESOURCE_PCI_FIXED;

        pc_host->io_resource.name = "BCMA PCIcore external I/O";
        pc_host->io_resource.start = 0x100;
        pc_host->io_resource.end = 0x7FF;
        pc_host->io_resource.flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED;

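        /* Reset handshake for the root complex: drive the reset signal with
         * only BCMA_CORE_PCI_CTL_RST_OE set, wait 50 ms, then set
         * BCMA_CORE_PCI_CTL_RST as well. */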
        /* Reset RC */
        usleep_range(3000, 5000);
        pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST_OE);
        msleep(50);
        pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST |
                        BCMA_CORE_PCI_CTL_RST_OE);

        /* 64 MB I/O access window. On 4716, use
         * sbtopcie0 to access the device registers. We
         * can't use address match 2 (1 GB window) region
         * as mips can't generate 64-bit address on the
         * backplane.
         */
        if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4716 ||
            bus->chipinfo.id == BCMA_CHIP_ID_BCM4748) {
                pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
                pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
                                            BCMA_SOC_PCI_MEM_SZ - 1;
                pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
                                BCMA_CORE_PCI_SBTOPCI_MEM | BCMA_SOC_PCI_MEM);
        } else if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
                tmp = BCMA_CORE_PCI_SBTOPCI_MEM;
                tmp |= BCMA_CORE_PCI_SBTOPCI_PREF;
                tmp |= BCMA_CORE_PCI_SBTOPCI_BURST;
                if (pc->core->core_unit == 0) {
                        pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
                        pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
                                                    BCMA_SOC_PCI_MEM_SZ - 1;
                        pc_host->io_resource.start = 0x100;
                        pc_host->io_resource.end = 0x47F;
                        pci_membase_1G = BCMA_SOC_PCIE_DMA_H32;
                        pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
                                        tmp | BCMA_SOC_PCI_MEM);
                } else if (pc->core->core_unit == 1) {
                        pc_host->mem_resource.start = BCMA_SOC_PCI1_MEM;
                        pc_host->mem_resource.end = BCMA_SOC_PCI1_MEM +
                                                    BCMA_SOC_PCI_MEM_SZ - 1;
                        pc_host->io_resource.start = 0x480;
                        pc_host->io_resource.end = 0x7FF;
                        pci_membase_1G = BCMA_SOC_PCIE1_DMA_H32;
                        pc_host->host_cfg_addr = BCMA_SOC_PCI1_CFG;
                        pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
                                        tmp | BCMA_SOC_PCI1_MEM);
                }
        } else
                pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
                                BCMA_CORE_PCI_SBTOPCI_IO);

        /* 64 MB configuration access window */
        pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);

        /* 1 GB memory access window */
        pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI2,
                        BCMA_CORE_PCI_SBTOPCI_MEM | pci_membase_1G);

        /* As per PCI Express Base Spec 1.1 we need to wait for
         * at least 100 ms from the end of a reset (cold/warm/hot)
         * before issuing configuration requests to PCI Express
         * devices.
         */
        msleep(100);

        bcma_core_pci_enable_crs(pc);

        if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706 ||
            bus->chipinfo.id == BCMA_CHIP_ID_BCM4716) {
                u16 val16;
                bcma_extpci_read_config(pc, 0, 0, BCMA_CORE_PCI_CFG_DEVCTRL,
                                        &val16, sizeof(val16));
                val16 |= (2 << 5);      /* Max payload size of 512 */
                val16 |= (2 << 12);     /* MRRS 512 */
                bcma_extpci_write_config(pc, 0, 0, BCMA_CORE_PCI_CFG_DEVCTRL,
                                         &val16, sizeof(val16));
        }

        /* Enable PCI bridge BAR0 memory & master access */
        tmp = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
        bcma_extpci_write_config(pc, 0, 0, PCI_COMMAND, &tmp, sizeof(tmp));

        /* Enable PCI interrupts */
        pcicore_write32(pc, BCMA_CORE_PCI_IMASK, BCMA_CORE_PCI_IMASK_INTA);

        /* Ok, ready to run, register it to the system.
         * The following needs to change if we want to port hostmode
         * to a non-MIPS platform. */
        io_map_base = (unsigned long)ioremap_nocache(pc_host->mem_resource.start,
                                                     resource_size(&pc_host->mem_resource));
        pc_host->pci_controller.io_map_base = io_map_base;
        set_io_port_base(pc_host->pci_controller.io_map_base);

        /* Give some time to the PCI controller to configure itself with the
         * new values. Not waiting at this point causes crashes of the
         * machine. */
        usleep_range(10000, 15000);
        register_pci_controller(&pc_host->pci_controller);
        return;
}

/* Early PCI fixup for a device on the PCI-core bridge. */
static void bcma_core_pci_fixup_pcibridge(struct pci_dev *dev)
{
        if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
                /* This is not a device on the PCI-core bridge. */
                return;
        }
        if (PCI_SLOT(dev->devfn) != 0)
                return;

        pr_info("PCI: Fixing up bridge %s\n", pci_name(dev));

        /* Enable PCI bridge bus mastering and memory space */
        pci_set_master(dev);
        if (pcibios_enable_device(dev, ~0) < 0) {
                pr_err("PCI: BCMA bridge enable failed\n");
                return;
        }

        /* Enable PCI bridge BAR1 prefetch and burst */
        pci_write_config_dword(dev, BCMA_PCI_BAR1_CONTROL, 3);
}
DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_pcibridge);

/* Early PCI fixup for all PCI-cores to set the correct memory address. */
static void bcma_core_pci_fixup_addresses(struct pci_dev *dev)
{
        struct resource *res;
        int pos, err;

        if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
                /* This is not a device on the PCI-core bridge. */
                return;
        }
        if (PCI_SLOT(dev->devfn) == 0)
                return;

        pr_info("PCI: Fixing up addresses %s\n", pci_name(dev));

        for (pos = 0; pos < 6; pos++) {
                res = &dev->resource[pos];
                if (res->flags & (IORESOURCE_IO | IORESOURCE_MEM)) {
                        err = pci_assign_resource(dev, pos);
                        if (err)
                                pr_err("PCI: Problem fixing up the addresses on %s\n",
                                       pci_name(dev));
                }
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_addresses);

/* This function is called when doing a pci_enable_device().
 * We must first check if the device is a device on the PCI-core bridge. */
int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
{
        struct bcma_drv_pci_host *pc_host;

        if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
                /* This is not a device on the PCI-core bridge. */
                return -ENODEV;
        }
        pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
                               pci_ops);

        pr_info("PCI: Fixing up device %s\n", pci_name(dev));

        /* Fix up interrupt lines */
        dev->irq = bcma_core_irq(pc_host->pdev->core);
        pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);

        return 0;
}
EXPORT_SYMBOL(bcma_core_pci_plat_dev_init);

/* PCI device IRQ mapping. */
int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev)
{
        struct bcma_drv_pci_host *pc_host;

        if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
                /* This is not a device on the PCI-core bridge. */
                return -ENODEV;
        }

        pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
                               pci_ops);
        return bcma_core_irq(pc_host->pdev->core);
}
EXPORT_SYMBOL(bcma_core_pci_pcibios_map_irq);
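
/*
 * Usage note (not part of the driver): the two exported helpers above are
 * intended to be called from a MIPS platform's pcibios hooks. A minimal
 * sketch, assuming the standard MIPS pcibios_plat_dev_init() and
 * pcibios_map_irq() callbacks in a board file that simply forwards them to
 * bcma, could look like this:
 *
 *	int pcibios_plat_dev_init(struct pci_dev *dev)
 *	{
 *		return bcma_core_pci_plat_dev_init(dev);
 *	}
 *
 *	int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 *	{
 *		return bcma_core_pci_pcibios_map_irq(dev);
 *	}
 *
 * The exact wiring is platform specific; the SoC's arch code is the
 * authoritative reference.
 */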