ioat_dca.c

/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2007 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/dca.h>

/* either a kernel change is needed, or we need something like this in kernel */
#ifndef CONFIG_SMP
#include <asm/smp.h>
#undef cpu_physical_id
#define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24)
#endif

#include "ioatdma.h"
#include "ioatdma_registers.h"
/*
 * Bit 7 of a tag map entry is the "valid" bit; if it is set, bits 0:6
 * contain the bit number of the APIC ID to map into the DCA tag.  If the
 * valid bit is not set, the entry must be 0 or 1 and is used literally
 * as that bit of the tag.
 */
#define DCA_TAG_MAP_VALID 0x80

#define DCA3_TAG_MAP_BIT_TO_INV 0x80
#define DCA3_TAG_MAP_BIT_TO_SEL 0x40
#define DCA3_TAG_MAP_LITERAL_VAL 0x1

#define DCA_TAG_MAP_MASK 0xDF
/*
 * "Legacy" DCA systems do not implement the DCA register set in the
 * I/OAT device.  Software needs direct support for their tag mappings.
 */

#define APICID_BIT(x)		(DCA_TAG_MAP_VALID | (x))
#define IOAT_TAG_MAP_LEN	8

static u8 ioat_tag_map_BNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
static u8 ioat_tag_map_SCNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
static u8 ioat_tag_map_CNB[IOAT_TAG_MAP_LEN] = {
	1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), };
static u8 ioat_tag_map_UNISYS[IOAT_TAG_MAP_LEN] = { 0 };
/* pack PCI B/D/F into a u16 */
static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
{
	return (pci->bus->number << 8) | pci->devfn;
}
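
/*
 * Illustrative example (values are hypothetical): a requester at bus
 * 0x05, device 0x1f, function 2 has devfn = (0x1f << 3) | 2 = 0xfa,
 * so the packed requester id is (0x05 << 8) | 0xfa = 0x05fa.
 */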
static int dca_enabled_in_bios(struct pci_dev *pdev)
{
	/* CPUID level 9 returns DCA configuration */
	/* Bit 0 indicates DCA enabled by the BIOS */
	unsigned long cpuid_level_9;
	int res;

	cpuid_level_9 = cpuid_eax(9);
	res = test_bit(0, &cpuid_level_9);
	if (!res)
		dev_err(&pdev->dev, "DCA is disabled in BIOS\n");

	return res;
}

static int system_has_dca_enabled(struct pci_dev *pdev)
{
	if (boot_cpu_has(X86_FEATURE_DCA))
		return dca_enabled_in_bios(pdev);

	dev_err(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
	return 0;
}
struct ioat_dca_slot {
	struct pci_dev *pdev;	/* requester device */
	u16 rid;		/* requester id, as used by IOAT */
};

#define IOAT_DCA_MAX_REQ 6
#define IOAT3_DCA_MAX_REQ 2

struct ioat_dca_priv {
	void __iomem		*iobase;
	void __iomem		*dca_base;
	int			 max_requesters;
	int			 requester_count;
	u8			 tag_map[IOAT_TAG_MAP_LEN];
	struct ioat_dca_slot	 req_slots[0];
};
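
/*
 * req_slots[0] is a trailing variable-length array: each init routine
 * below sizes the allocation as sizeof(struct ioat_dca_priv) plus one
 * struct ioat_dca_slot per supported requester, so the slot table lives
 * in the same block handed back by alloc_dca_provider().
 */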
/* 5000 series chipset DCA Port Requester ID Table Entry Format
 * [15:8]	PCI-Express Bus Number
 * [7:3]	PCI-Express Device Number
 * [2:0]	PCI-Express Function Number
 *
 * 5000 series chipset DCA control register format
 * [7:1]	Reserved (0)
 * [0]		Ignore Function Number
 */

static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 id;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	if (ioatdca->requester_count == ioatdca->max_requesters)
		return -ENODEV;

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == NULL) {
			/* found an empty slot */
			ioatdca->requester_count++;
			ioatdca->req_slots[i].pdev = pdev;
			ioatdca->req_slots[i].rid = id;
			writew(id, ioatdca->dca_base + (i * 4));
			/* make sure the ignore function bit is off */
			writeb(0, ioatdca->dca_base + (i * 4) + 2);
			return i;
		}
	}
	/* Error, ioatdca->requester_count is out of whack */
	return -EFAULT;
}
static int ioat_dca_remove_requester(struct dca_provider *dca,
				     struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev) {
			writew(0, ioatdca->dca_base + (i * 4));
			ioatdca->req_slots[i].pdev = NULL;
			ioatdca->req_slots[i].rid = 0;
			ioatdca->requester_count--;
			return i;
		}
	}
	return -ENODEV;
}
static u8 ioat_dca_get_tag(struct dca_provider *dca,
			   struct device *dev,
			   int cpu)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	int i, apic_id, bit, value;
	u8 entry, tag;

	tag = 0;
	apic_id = cpu_physical_id(cpu);

	for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
		entry = ioatdca->tag_map[i];
		if (entry & DCA_TAG_MAP_VALID) {
			bit = entry & ~DCA_TAG_MAP_VALID;
			value = (apic_id & (1 << bit)) ? 1 : 0;
		} else {
			value = entry ? 1 : 0;
		}
		tag |= (value << i);
	}
	return tag;
}
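
/*
 * Worked example (hypothetical APIC ID): with the BNB map
 * { 1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2) } and apic_id = 5
 * (binary 101), tag bit 0 is the literal 1, bit 1 follows APIC ID
 * bit 1 (= 0), and bits 2 and 3 both follow APIC ID bit 2 (= 1),
 * giving tag = 0b1101 = 0xd.
 */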
static int ioat_dca_dev_managed(struct dca_provider *dca,
				struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;

	pdev = to_pci_dev(dev);
	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev)
			return 1;
	}
	return 0;
}

static struct dca_ops ioat_dca_ops = {
	.add_requester		= ioat_dca_add_requester,
	.remove_requester	= ioat_dca_remove_requester,
	.get_tag		= ioat_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};
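
/*
 * These ops are not called directly; the DCA core (drivers/dca) invokes
 * them when a consumer driver, e.g. a NIC, calls dca_add_requester() /
 * dca_remove_requester() for its device and asks for per-CPU tags.
 */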
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	u8 *tag_map = NULL;
	int i;
	int err;
	u8 version;
	u8 max_requesters;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	/* I/OAT v1 systems must have a known tag_map to support DCA */
	switch (pdev->vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (pdev->device) {
		case PCI_DEVICE_ID_INTEL_IOAT:
			tag_map = ioat_tag_map_BNB;
			break;
		case PCI_DEVICE_ID_INTEL_IOAT_CNB:
			tag_map = ioat_tag_map_CNB;
			break;
		case PCI_DEVICE_ID_INTEL_IOAT_SCNB:
			tag_map = ioat_tag_map_SCNB;
			break;
		}
		break;
	case PCI_VENDOR_ID_UNISYS:
		switch (pdev->device) {
		case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR:
			tag_map = ioat_tag_map_UNISYS;
			break;
		}
		break;
	}
	if (tag_map == NULL)
		return NULL;

	version = readb(iobase + IOAT_VER_OFFSET);
	if (version == IOAT_VER_3_0)
		max_requesters = IOAT3_DCA_MAX_REQ;
	else
		max_requesters = IOAT_DCA_MAX_REQ;

	dca = alloc_dca_provider(&ioat_dca_ops,
			sizeof(*ioatdca) +
			(sizeof(struct ioat_dca_slot) * max_requesters));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->max_requesters = max_requesters;
	ioatdca->dca_base = iobase + 0x54;

	/* copy over the APIC ID to DCA tag mapping */
	for (i = 0; i < IOAT_TAG_MAP_LEN; i++)
		ioatdca->tag_map[i] = tag_map[i];

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}
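
/*
 * Minimal usage sketch (assumes the caller has already mapped the
 * device registers, as the ioatdma probe path does):
 *
 *	device->dca = ioat_dca_init(pdev, iobase);
 *
 * A NULL return means DCA is unavailable on this system; it is not
 * treated as a probe error.
 */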
static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 id;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	if (ioatdca->requester_count == ioatdca->max_requesters)
		return -ENODEV;

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == NULL) {
			/* found an empty slot */
			ioatdca->requester_count++;
			ioatdca->req_slots[i].pdev = pdev;
			ioatdca->req_slots[i].rid = id;
			global_req_table =
			      readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
			writel(id | IOAT_DCA_GREQID_VALID,
			       ioatdca->iobase + global_req_table + (i * 4));
			return i;
		}
	}
	/* Error, ioatdca->requester_count is out of whack */
	return -EFAULT;
}
static int ioat2_dca_remove_requester(struct dca_provider *dca,
				      struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev) {
			global_req_table =
			      readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
			writel(0, ioatdca->iobase + global_req_table + (i * 4));
			ioatdca->req_slots[i].pdev = NULL;
			ioatdca->req_slots[i].rid = 0;
			ioatdca->requester_count--;
			return i;
		}
	}
	return -ENODEV;
}
static u8 ioat2_dca_get_tag(struct dca_provider *dca,
			    struct device *dev,
			    int cpu)
{
	u8 tag;

	tag = ioat_dca_get_tag(dca, dev, cpu);
	tag = (~tag) & 0x1F;
	return tag;
}
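
/*
 * Example: a v1-style lookup of 0x07 becomes (~0x07) & 0x1f = 0x18;
 * the v2 op returns the ones' complement of the tag, truncated to the
 * low 5 bits.
 */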
static struct dca_ops ioat2_dca_ops = {
	.add_requester		= ioat2_dca_add_requester,
	.remove_requester	= ioat2_dca_remove_requester,
	.get_tag		= ioat2_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};
static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
{
	int slots = 0;
	u32 req;
	u16 global_req_table;

	global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET);
	if (global_req_table == 0)
		return 0;

	do {
		req = readl(iobase + global_req_table + (slots * sizeof(u32)));
		slots++;
	} while ((req & IOAT_DCA_GREQID_LASTID) == 0);

	return slots;
}
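
/*
 * The walk above relies on the hardware flagging the final entry of
 * the global requester table with IOAT_DCA_GREQID_LASTID; e.g. if the
 * third u32 entry carries that bit, the function returns 3.
 */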
struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	int slots;
	int i;
	int err;
	u32 tag_map;
	u16 dca_offset;
	u16 csi_fsb_control;
	u16 pcie_control;
	u8 bit;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
	if (dca_offset == 0)
		return NULL;

	slots = ioat2_dca_count_dca_slots(iobase, dca_offset);
	if (slots == 0)
		return NULL;

	dca = alloc_dca_provider(&ioat2_dca_ops,
				 sizeof(*ioatdca)
				      + (sizeof(struct ioat_dca_slot) * slots));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->iobase = iobase;
	ioatdca->dca_base = iobase + dca_offset;
	ioatdca->max_requesters = slots;

	/* some bios might not know to turn these on */
	csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
	if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) {
		csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH;
		writew(csi_fsb_control,
		       ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
	}

	pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
	if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) {
		pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR;
		writew(pcie_control,
		       ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
	}

	/* TODO version, compatibility and configuration checks */

	/* copy out the APIC to DCA tag map */
	tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET);
	for (i = 0; i < 5; i++) {
		bit = (tag_map >> (4 * i)) & 0x0f;
		if (bit < 8)
			ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID;
		else
			ioatdca->tag_map[i] = 0;
	}

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}
static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 id;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);
	id = dcaid_from_pcidev(pdev);

	if (ioatdca->requester_count == ioatdca->max_requesters)
		return -ENODEV;

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == NULL) {
			/* found an empty slot */
			ioatdca->requester_count++;
			ioatdca->req_slots[i].pdev = pdev;
			ioatdca->req_slots[i].rid = id;
			global_req_table =
			     readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
			writel(id | IOAT_DCA_GREQID_VALID,
			       ioatdca->iobase + global_req_table + (i * 4));
			return i;
		}
	}
	/* Error, ioatdca->requester_count is out of whack */
	return -EFAULT;
}
static int ioat3_dca_remove_requester(struct dca_provider *dca,
				      struct device *dev)
{
	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	struct pci_dev *pdev;
	int i;
	u16 global_req_table;

	/* This implementation only supports PCI-Express */
	if (dev->bus != &pci_bus_type)
		return -ENODEV;
	pdev = to_pci_dev(dev);

	for (i = 0; i < ioatdca->max_requesters; i++) {
		if (ioatdca->req_slots[i].pdev == pdev) {
			global_req_table =
			     readw(ioatdca->dca_base + IOAT3_DCA_GREQID_OFFSET);
			writel(0, ioatdca->iobase + global_req_table + (i * 4));
			ioatdca->req_slots[i].pdev = NULL;
			ioatdca->req_slots[i].rid = 0;
			ioatdca->requester_count--;
			return i;
		}
	}
	return -ENODEV;
}
static u8 ioat3_dca_get_tag(struct dca_provider *dca,
			    struct device *dev,
			    int cpu)
{
	u8 tag;

	struct ioat_dca_priv *ioatdca = dca_priv(dca);
	int i, apic_id, bit, value;
	u8 entry;

	tag = 0;
	apic_id = cpu_physical_id(cpu);

	for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
		entry = ioatdca->tag_map[i];
		if (entry & DCA3_TAG_MAP_BIT_TO_SEL) {
			bit = entry &
				~(DCA3_TAG_MAP_BIT_TO_SEL | DCA3_TAG_MAP_BIT_TO_INV);
			value = (apic_id & (1 << bit)) ? 1 : 0;
		} else if (entry & DCA3_TAG_MAP_BIT_TO_INV) {
			bit = entry & ~DCA3_TAG_MAP_BIT_TO_INV;
			value = (apic_id & (1 << bit)) ? 0 : 1;
		} else {
			value = (entry & DCA3_TAG_MAP_LITERAL_VAL) ? 1 : 0;
		}
		tag |= (value << i);
	}

	return tag;
}
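
/*
 * Worked example (hypothetical entries): with apic_id = 5 (binary 101),
 * an entry of (DCA3_TAG_MAP_BIT_TO_SEL | 2) selects APIC ID bit 2 -> 1;
 * (DCA3_TAG_MAP_BIT_TO_INV | 0) selects bit 0 inverted -> 0; and a
 * literal entry of DCA3_TAG_MAP_LITERAL_VAL contributes a constant 1.
 */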
static struct dca_ops ioat3_dca_ops = {
	.add_requester		= ioat3_dca_add_requester,
	.remove_requester	= ioat3_dca_remove_requester,
	.get_tag		= ioat3_dca_get_tag,
	.dev_managed		= ioat_dca_dev_managed,
};
static int ioat3_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
{
	int slots = 0;
	u32 req;
	u16 global_req_table;

	global_req_table = readw(iobase + dca_offset + IOAT3_DCA_GREQID_OFFSET);
	if (global_req_table == 0)
		return 0;

	do {
		req = readl(iobase + global_req_table + (slots * sizeof(u32)));
		slots++;
	} while ((req & IOAT_DCA_GREQID_LASTID) == 0);

	return slots;
}
struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
{
	struct dca_provider *dca;
	struct ioat_dca_priv *ioatdca;
	int slots;
	int i;
	int err;
	u16 dca_offset;
	u16 csi_fsb_control;
	u16 pcie_control;
	u8 bit;

	union {
		u64 full;
		struct {
			u32 low;
			u32 high;
		};
	} tag_map;

	if (!system_has_dca_enabled(pdev))
		return NULL;

	dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
	if (dca_offset == 0)
		return NULL;

	slots = ioat3_dca_count_dca_slots(iobase, dca_offset);
	if (slots == 0)
		return NULL;

	dca = alloc_dca_provider(&ioat3_dca_ops,
				 sizeof(*ioatdca)
				      + (sizeof(struct ioat_dca_slot) * slots));
	if (!dca)
		return NULL;

	ioatdca = dca_priv(dca);
	ioatdca->iobase = iobase;
	ioatdca->dca_base = iobase + dca_offset;
	ioatdca->max_requesters = slots;

	/* some bios might not know to turn these on */
	csi_fsb_control = readw(ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
	if ((csi_fsb_control & IOAT3_CSI_CONTROL_PREFETCH) == 0) {
		csi_fsb_control |= IOAT3_CSI_CONTROL_PREFETCH;
		writew(csi_fsb_control,
		       ioatdca->dca_base + IOAT3_CSI_CONTROL_OFFSET);
	}

	pcie_control = readw(ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
	if ((pcie_control & IOAT3_PCI_CONTROL_MEMWR) == 0) {
		pcie_control |= IOAT3_PCI_CONTROL_MEMWR;
		writew(pcie_control,
		       ioatdca->dca_base + IOAT3_PCI_CONTROL_OFFSET);
	}

	/* TODO version, compatibility and configuration checks */

	/* copy out the APIC to DCA tag map */
	tag_map.low =
		readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_LOW);
	tag_map.high =
		readl(ioatdca->dca_base + IOAT3_APICID_TAG_MAP_OFFSET_HIGH);
	for (i = 0; i < 8; i++) {
		bit = tag_map.full >> (8 * i);
		ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
	}

	err = register_dca_provider(dca, &pdev->dev);
	if (err) {
		free_dca_provider(dca);
		return NULL;
	}

	return dca;
}