pci-gart_64.c

/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows the use of PCI devices that only support 32bit addresses on
 * systems with more than 4GB.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * has also been seen with Qlogic at least).
 */
int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

#define to_pages(addr, size) \
	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
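
/*
 * Worked example (illustrative, not in the original source): a GART PTE
 * packs a 40bit physical address into 32 bits. Bits 31..12 of the
 * address stay in place; bits 39..32 move down into PTE bits 11..4.
 * For phys = 0x1_2345_6000:
 *   GPTE_ENCODE: 0x23456000 | (0x1 << 4) | VALID | COHERENT = 0x23456013
 *   GPTE_DECODE(0x23456013) = 0x23456000 | (0x010 << 28)    = 0x1_2345_6000
 * Similarly, to_pages(0x1234, 0x2000) covers bytes 0x1234..0x3233,
 * which touch 3 pages.
 */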

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */

static unsigned long alloc_iommu(struct device *dev, int size)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, 0);
	if (offset == -1) {
		need_flush = 1;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size, 0);
	}
	if (offset != -1) {
		set_bit_string(iommu_gart_bitmap, offset, size);
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}
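
/*
 * Note: alloc_iommu() is a next-fit allocator. Searching from next_bit
 * means freshly freed entries below it are not reused until the search
 * wraps around to 0; only at that point (or on every allocation when
 * iommu_fullflush is set) does need_flush get raised, so the GART TLB
 * is guaranteed to be flushed before any freed entry is handed out
 * again.
 */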

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	iommu_area_free(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
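
/*
 * Checking need_flush under iommu_bitmap_lock is what makes the lazy
 * strategy safe with concurrent mappers: whichever CPU reaches
 * flush_gart() first performs k8_flush_garts() on behalf of everyone
 * and clears the flag, so later callers see need_flush == 0 and skip
 * the (expensive) flush.
 */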

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = __builtin_return_address(0);\
	} while (0)

#define CLEAR_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = NULL;			\
	} while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);

	/* Very crude. dump some from the end of the table too */
	printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
	       iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk(KERN_DEBUG "%lu: ", iommu_pages-i);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i], 0);
		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
	}
	printk(KERN_DEBUG "\n");
}
#else
# define SET_LEAK(x)
# define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */
	printk(KERN_ERR
	       "PCI-DMA: Out of IOMMU space for %zu bytes at device %s\n",
	       size, dev->bus_id);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory would be DMAed\n");
	}

#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	if (force_iommu)
		mmu = 1;

	return mmu;
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	return mmu;
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir)
{
	unsigned long npages = to_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(dev, npages);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %zu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
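
/*
 * The final (phys_mem & ~PAGE_MASK) is safe even though phys_mem was
 * advanced in the loop above: it only ever grows by whole pages, so its
 * low 12 bits still hold the caller's original intra-page offset, which
 * is what gets added back onto the returned bus address.
 */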

static dma_addr_t
gart_map_simple(struct device *dev, char *buf, size_t size, int dir)
{
	dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);

	flush_gart();

	return map;
}

/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, void *addr, size_t size, int dir)
{
	unsigned long phys_mem, bus;

	if (!dev)
		dev = &fallback_dev;

	phys_mem = virt_to_phys(addr);
	if (!need_iommu(dev, phys_mem, size))
		return phys_mem;

	bus = gart_map_simple(dev, addr, size, dir);

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = to_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long npages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		npages = to_pages(s->offset, s->length);
		while (npages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a contiguous mapping.
 */
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &fallback_dev;

	out = 0;
	start = 0;
	start_sg = sgmap = sg;
	seg_size = 0;
	max_seg_size = dma_get_max_seg_size(dev);
	ps = NULL; /* shut up gcc */
	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;
				seg_size = 0;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += to_pages(s->offset, s->length);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_address;
	return 0;
}
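
/*
 * Merge example (illustrative, assuming iommu_merge is on and the
 * device's max segment size is large enough): three 4 KB entries, each
 * starting at offset 0 of its page, all needing the IOMMU. Every
 * predecessor ends exactly on a page boundary and every successor has
 * no offset, so none of the conditions in the test above fires;
 * dma_map_cont() runs once with nelems == 3 and pages == 3, and the
 * device sees a single 12 KB DMA segment even though the backing pages
 * are scattered in physical memory.
 */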

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		printk(KERN_WARNING
		       "PCI-DMA: Warning: Small IOMMU %luMB."
		       " Consider increasing the AGP aperture in BIOS\n",
		       iommu_size >> 20);
	}

	return iommu_size;
}

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, 0x94, &aper_base_32);
	pci_read_config_dword(dev, 0x90, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}
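
/*
 * Decode example (illustrative): the order field selects a power-of-two
 * aperture starting at 32 MB, and the base field holds bits 39:25 of
 * the physical base. With aper_order == 2 and aper_base_32 == 0x40:
 *   aper_size = 32 MB << 2            = 128 MB
 *   aper_base = (0x40 & 0x7fff) << 25 = 0x8000_0000 (2 GB)
 * Apertures reaching at or above 4 GB are rejected, since the whole
 * point of the window is to be addressable by 32bit devices.
 */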

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 gatt_reg;
		u32 ctl;

		dev = k8_northbridges[i];
		gatt_reg = __pa(gatt) >> 12;
		gatt_reg <<= 4;
		pci_write_config_dword(dev, 0x98, gatt_reg);
		pci_read_config_dword(dev, 0x90, &ctl);

		ctl |= 1;
		ctl &= ~((1<<4) | (1<<5));

		pci_write_config_dword(dev, 0x90, ctl);
	}
	flush_gart();

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size>>10);

	return 0;

nommu:
	/* Should not happen anymore */
	printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
	return -1;
}
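
/*
 * Register write example (illustrative): the GATT base register at 0x98
 * takes bits 39:12 of the table's physical address in register bits
 * 31:4, so a GATT page at phys 0x1234000 is programmed as
 *   gatt_reg = (0x1234000 >> 12) << 4 = 0x12340
 * In the 0x90 control register, bit 0 is the GART enable; bits 4 and 5
 * (believed to be the CPU/IO translation-disable bits per AMD's K8
 * documentation) are cleared so both CPU and device accesses are
 * translated.
 */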

extern int agp_amd64_init(void);

static const struct dma_mapping_ops gart_dma_ops = {
	.mapping_error			= NULL,
	.map_single			= gart_map_single,
	.map_simple			= gart_map_simple,
	.unmap_single			= gart_unmap_single,
	.sync_single_for_cpu		= NULL,
	.sync_single_for_device		= NULL,
	.sync_single_range_for_cpu	= NULL,
	.sync_single_range_for_device	= NULL,
	.sync_sg_for_cpu		= NULL,
	.sync_sg_for_device		= NULL,
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
};
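
/*
 * The sync hooks are left NULL on purpose: GART PTEs are created with
 * GPTE_COHERENT, so DMA through the aperture is cache-coherent and the
 * generic dma_sync_* wrappers treat a NULL hook as a no-op.
 */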

void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	if (no_agp && (dma_ops != &gart_dma_ops))
		return;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, 0x90, &ctl);

		ctl &= ~1;

		pci_write_config_dword(dev, 0x90, ctl);
	}
}

void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_size;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
		return;
	}

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb)
		return;

	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !gart_iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (end_pfn > MAX_DMA32_PFN) {
			printk(KERN_ERR "WARNING more than 4GB of memory "
					"but GART IOMMU not available.\n"
			       KERN_ERR "WARNING 32bit PCI may malfunction.\n");
		}
		return;
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *)__get_free_pages(GFP_KERNEL,
						     get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0,
			       iommu_pages * sizeof(void *));
		else
			printk(KERN_DEBUG
			       "PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);

	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it safe and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Try to work around a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus
	 * abort then.
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
}

void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=') ++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}
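
/*
 * Usage sketch (illustrative): the iommu= boot-parameter parser in
 * pci-dma.c calls this once per comma-separated token, so a command
 * line such as
 *
 *	iommu=memaper=3,fullflush
 *
 * selects a fallback aperture of order 3 (32 MB << 3 = 256 MB) and
 * forces a GART flush after every mapping.
 */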