/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32-bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
#include <linux/io.h>
#include <asm/atomic.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

static unsigned long iommu_bus_base;    /* GART remapping area (physical) */
static unsigned long iommu_size;        /* size of remapping area bytes */
static unsigned long iommu_pages;       /* .. and in pages */

static u32 *iommu_gatt_base;            /* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * has also been seen with Qlogic at least).
 */
static int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
        (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
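
/*
 * Layout of a GART PTE (one 32-bit word per aperture page): bits 12-31
 * carry physical address bits 12-31, bits 4-11 carry physical address
 * bits 32-39, and the two low bits mark the entry valid and coherent.
 * Worked example for an illustrative 40-bit physical address:
 *
 *   phys        = 0x1234567000
 *   GPTE_ENCODE = 0x34567000 | (0x12 << 4) | 3              = 0x34567123
 *   GPTE_DECODE = 0x34567000 | ((0x34567123 & 0xff0) << 28) = 0x1234567000
 */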

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
static bool need_flush;         /* global flush state. set for each gart wrap */

static unsigned long alloc_iommu(struct device *dev, int size,
                                 unsigned long align_mask)
{
        unsigned long offset, flags;
        unsigned long boundary_size;
        unsigned long base_index;

        base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
                           PAGE_SIZE) >> PAGE_SHIFT;
        boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
                              PAGE_SIZE) >> PAGE_SHIFT;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
                                  size, base_index, boundary_size, align_mask);
        if (offset == -1) {
                need_flush = true;
                offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
                                          size, base_index, boundary_size,
                                          align_mask);
        }
        if (offset != -1) {
                next_bit = offset + size;
                if (next_bit >= iommu_pages) {
                        next_bit = 0;
                        need_flush = true;
                }
        }
        if (iommu_fullflush)
                need_flush = true;
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

        return offset;
}

static void free_iommu(unsigned long offset, int size)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        iommu_area_free(iommu_gart_bitmap, offset, size);
        if (offset >= next_bit)
                next_bit = offset + size;
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        if (need_flush) {
                k8_flush_garts();
                need_flush = false;
        }
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
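
/*
 * How the lazy flush plays out in practice (illustrative trace, following
 * the code in alloc_iommu() and flush_gart() above):
 *
 *   1. alloc_iommu() hands out fresh aperture pages and only advances
 *      next_bit; no GART flush is needed for slots that were never used.
 *   2. When the allocator wraps past iommu_pages, or has to restart the
 *      search from 0, previously used PTE slots are about to be reused,
 *      so need_flush is set.
 *   3. The next flush_gart() call, issued by the mapping functions after
 *      the new PTEs are written, then invalidates the GART TLBs on all
 *      northbridges via k8_flush_garts().
 *
 * With iommu_fullflush (the default) step 2 is moot: every allocation
 * forces a flush.
 */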

#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
        static int dump;

        if (dump)
                return;
        dump = 1;

        show_stack(NULL, NULL);
        debug_dma_dump_mappings(NULL);
}
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
        /*
         * Ran out of IOMMU space for this operation. This is very bad.
         * Unfortunately the drivers cannot handle this operation properly.
         * Return some non-mapped, prereserved space in the aperture and
         * let the Northbridge deal with it. This will result in garbage
         * in the IO operation. When the size exceeds the prereserved space
         * memory corruption will occur or random memory will be DMAed
         * out. Hopefully no network devices use single mappings that big.
         */
        dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

        if (size > PAGE_SIZE*EMERGENCY_PAGES) {
                if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic("PCI-DMA: Memory would be corrupted\n");
                if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic(KERN_ERR
                                "PCI-DMA: Random memory would be DMAed\n");
        }
#ifdef CONFIG_IOMMU_LEAK
        dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
        return force_iommu || !dma_capable(dev, addr, size);
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
        return !dma_capable(dev, addr, size);
}

/*
 * Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
                               size_t size, int dir, unsigned long align_mask)
{
        unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
        unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
        int i;

        if (iommu_page == -1) {
                if (!nonforced_iommu(dev, phys_mem, size))
                        return phys_mem;
                if (panic_on_overflow)
                        panic("dma_map_area overflow %lu bytes\n", size);
                iommu_full(dev, size, dir);
                return bad_dma_address;
        }

        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
                phys_mem += PAGE_SIZE;
        }
        return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
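
/*
 * Example with hypothetical numbers: mapping 8 KB that starts 0x200 bytes
 * into a page spans 3 aperture pages, so alloc_iommu() reserves 3
 * consecutive PTE slots, each pointing at one backing page. The returned
 * bus address is the aperture address of the first slot plus the original
 * 0x200 intra-page offset; the loop above advances phys_mem in whole
 * pages, so (phys_mem & ~PAGE_MASK) is still the caller's offset when it
 * is added back in the return statement.
 */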

/* Map a single area into the IOMMU */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
                                unsigned long offset, size_t size,
                                enum dma_data_direction dir,
                                struct dma_attrs *attrs)
{
        unsigned long bus;
        phys_addr_t paddr = page_to_phys(page) + offset;

        if (!dev)
                dev = &x86_dma_fallback_dev;

        if (!need_iommu(dev, paddr, size))
                return paddr;

        bus = dma_map_area(dev, paddr, size, dir, 0);
        flush_gart();

        return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
                            size_t size, enum dma_data_direction dir,
                            struct dma_attrs *attrs)
{
        unsigned long iommu_page;
        int npages;
        int i;

        if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
            dma_addr >= iommu_bus_base + iommu_size)
                return;

        iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
        npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
        }
        free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                          enum dma_data_direction dir, struct dma_attrs *attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (!s->dma_length || !s->length)
                        break;
                gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL);
        }
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
                               int nents, int dir)
{
        struct scatterlist *s;
        int i;

#ifdef CONFIG_IOMMU_DEBUG
        printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

        for_each_sg(sg, s, nents, i) {
                unsigned long addr = sg_phys(s);

                if (nonforced_iommu(dev, addr, s->length)) {
                        addr = dma_map_area(dev, addr, s->length, dir, 0);
                        if (addr == bad_dma_address) {
                                if (i > 0)
                                        gart_unmap_sg(dev, sg, i, dir, NULL);
                                nents = 0;
                                sg[0].dma_length = 0;
                                break;
                        }
                }
                s->dma_address = addr;
                s->dma_length = s->length;
        }
        flush_gart();

        return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
                          int nelems, struct scatterlist *sout,
                          unsigned long pages)
{
        unsigned long iommu_start = alloc_iommu(dev, pages, 0);
        unsigned long iommu_page = iommu_start;
        struct scatterlist *s;
        int i;

        if (iommu_start == -1)
                return -1;

        for_each_sg(start, s, nelems, i) {
                /* note: this "pages" shadows the parameter inside the loop */
                unsigned long pages, addr;
                unsigned long phys_addr = s->dma_address;

                BUG_ON(s != start && s->offset);
                if (s == start) {
                        sout->dma_address = iommu_bus_base;
                        sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
                        sout->dma_length = s->length;
                } else {
                        sout->dma_length += s->length;
                }

                addr = phys_addr;
                pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
                while (pages--) {
                        iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
                        addr += PAGE_SIZE;
                        iommu_page++;
                }
        }
        BUG_ON(iommu_page - iommu_start != pages);

        return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
             struct scatterlist *sout, unsigned long pages, int need)
{
        if (!need) {
                BUG_ON(nelems != 1);
                sout->dma_address = start->dma_address;
                sout->dma_length = start->length;
                return 0;
        }
        return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                       enum dma_data_direction dir, struct dma_attrs *attrs)
{
        struct scatterlist *s, *ps, *start_sg, *sgmap;
        int need = 0, nextneed, i, out, start;
        unsigned long pages = 0;
        unsigned int seg_size;
        unsigned int max_seg_size;

        if (nents == 0)
                return 0;

        if (!dev)
                dev = &x86_dma_fallback_dev;

        out = 0;
        start = 0;
        start_sg = sgmap = sg;
        seg_size = 0;
        max_seg_size = dma_get_max_seg_size(dev);
        ps = NULL; /* shut up gcc */

        for_each_sg(sg, s, nents, i) {
                dma_addr_t addr = sg_phys(s);

                s->dma_address = addr;
                BUG_ON(s->length == 0);

                nextneed = need_iommu(dev, addr, s->length);

                /* Handle the previous not yet processed entries */
                if (i > start) {
                        /*
                         * Can only merge when the last chunk ends on a
                         * page boundary and the new one doesn't have an
                         * offset.
                         */
                        if (!iommu_merge || !nextneed || !need || s->offset ||
                            (s->length + seg_size > max_seg_size) ||
                            (ps->offset + ps->length) % PAGE_SIZE) {
                                if (dma_map_cont(dev, start_sg, i - start,
                                                 sgmap, pages, need) < 0)
                                        goto error;
                                out++;
                                seg_size = 0;
                                sgmap = sg_next(sgmap);
                                pages = 0;
                                start = i;
                                start_sg = s;
                        }
                }

                seg_size += s->length;
                need = nextneed;
                pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
                ps = s;
        }
        if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
                goto error;
        out++;
        flush_gart();
        if (out < nents) {
                sgmap = sg_next(sgmap);
                sgmap->dma_length = 0;
        }
        return out;

error:
        flush_gart();
        gart_unmap_sg(dev, sg, out, dir, NULL);

        /* When it was forced or merged try again in a dumb way */
        if (force_iommu || iommu_merge) {
                out = dma_map_sg_nonforce(dev, sg, nents, dir);
                if (out > 0)
                        return out;
        }
        if (panic_on_overflow)
                panic("dma_map_sg: overflow on %lu pages\n", pages);

        iommu_full(dev, pages << PAGE_SHIFT, dir);
        for_each_sg(sg, s, nents, i)
                s->dma_address = bad_dma_address;
        return 0;
}
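
/*
 * Merging example (illustrative, assuming iommu_merge is set and the
 * entries need the IOMMU): a scatterlist of three 4 KB entries that are
 * physically discontiguous but each start at offset 0 and end on a page
 * boundary is rewritten by gart_map_sg() as a single 12 KB DMA segment;
 * the three GART PTEs are contiguous in the aperture even though the
 * backing pages are not. A 2 KB entry in the middle ends mid-page, so the
 * entry following it cannot be merged and a new segment is started there.
 */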

/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
                    gfp_t flag)
{
        dma_addr_t paddr;
        unsigned long align_mask;
        struct page *page;

        if (force_iommu && !(flag & GFP_DMA)) {
                flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
                page = alloc_pages(flag | __GFP_ZERO, get_order(size));
                if (!page)
                        return NULL;

                align_mask = (1UL << get_order(size)) - 1;
                paddr = dma_map_area(dev, page_to_phys(page), size,
                                     DMA_BIDIRECTIONAL, align_mask);

                flush_gart();
                if (paddr != bad_dma_address) {
                        *dma_addr = paddr;
                        return page_address(page);
                }
                __free_pages(page, get_order(size));
        } else
                return dma_generic_alloc_coherent(dev, size, dma_addr, flag);

        return NULL;
}
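
/*
 * The align_mask above keeps the aperture placement naturally aligned to
 * the allocation size. Illustrative case: a 16 KB request has
 * get_order(16K) == 2, so align_mask == 0x3 and alloc_iommu() only
 * returns a slot whose page index is a multiple of 4; the resulting bus
 * address is therefore 16 KB-aligned, matching the alignment of the
 * underlying alloc_pages() allocation.
 */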

/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
                   dma_addr_t dma_addr)
{
        gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
        free_pages((unsigned long)vaddr, get_order(size));
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
        unsigned long a;

        if (!iommu_size) {
                iommu_size = aper_size;
                if (!no_agp)
                        iommu_size /= 2;
        }

        a = aper + iommu_size;
        iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

        if (iommu_size < 64*1024*1024) {
                printk(KERN_WARNING
                        "PCI-DMA: Warning: Small IOMMU %luMB."
                        " Consider increasing the AGP aperture in BIOS\n",
                        iommu_size >> 20);
        }

        return iommu_size;
}

static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
        unsigned aper_size = 0, aper_base_32, aper_order;
        u64 aper_base;

        pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
        pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
        aper_order = (aper_order >> 1) & 7;

        aper_base = aper_base_32 & 0x7fff;
        aper_base <<= 25;

        aper_size = (32 * 1024 * 1024) << aper_order;
        if (aper_base + aper_size > 0x100000000UL || !aper_size)
                aper_base = 0;

        *size = aper_size;
        return aper_base;
}
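
/*
 * Register decoding example (values illustrative): if the northbridge
 * reports GARTAPERTURECTL = 0x07, the order field in bits 3:1 is 3, giving
 * an aperture of 32 MB << 3 = 256 MB. A GARTAPERTUREBASE value of 0x40
 * yields a physical base of 0x40 << 25 = 2 GB. Bases that would place the
 * aperture above 4 GB, or a zero size, are rejected above.
 */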

static void enable_gart_translations(void)
{
        int i;

        for (i = 0; i < num_k8_northbridges; i++) {
                struct pci_dev *dev = k8_northbridges[i];

                enable_gart_translation(dev, __pa(agp_gatt_table));
        }
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
        fix_up_north_bridges = true;
        aperture_order = aper_order;
        aperture_alloc = aper_alloc;
}

static int gart_resume(struct sys_device *dev)
{
        printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n");

        if (fix_up_north_bridges) {
                int i;

                printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n");

                for (i = 0; i < num_k8_northbridges; i++) {
                        struct pci_dev *dev = k8_northbridges[i];

                        /*
                         * Don't enable translations just yet. That is the next
                         * step. Restore the pre-suspend aperture settings.
                         */
                        pci_write_config_dword(dev, AMD64_GARTAPERTURECTL,
                                               aperture_order << 1);
                        pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
                                               aperture_alloc >> 25);
                }
        }

        enable_gart_translations();

        return 0;
}

static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
        return 0;
}

static struct sysdev_class gart_sysdev_class = {
        .name = "gart",
        .suspend = gart_suspend,
        .resume = gart_resume,
};

static struct sys_device device_gart = {
        .id = 0,
        .cls = &gart_sysdev_class,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
        unsigned aper_size, gatt_size, new_aper_size;
        unsigned aper_base, new_aper_base;
        struct pci_dev *dev;
        void *gatt;
        int i, error;

        printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");

        aper_size = aper_base = info->aper_size = 0;
        dev = NULL;
        for (i = 0; i < num_k8_northbridges; i++) {
                dev = k8_northbridges[i];
                new_aper_base = read_aperture(dev, &new_aper_size);
                if (!new_aper_base)
                        goto nommu;

                if (!aper_base) {
                        aper_size = new_aper_size;
                        aper_base = new_aper_base;
                }
                if (aper_size != new_aper_size || aper_base != new_aper_base)
                        goto nommu;
        }
        if (!aper_base)
                goto nommu;

        info->aper_base = aper_base;
        info->aper_size = aper_size >> 20;

        gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
        gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                        get_order(gatt_size));
        if (!gatt)
                panic("Cannot allocate GATT table");
        if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
                panic("Could not set GART PTEs to uncacheable pages");

        agp_gatt_table = gatt;

        error = sysdev_class_register(&gart_sysdev_class);
        if (!error)
                error = sysdev_register(&device_gart);
        if (error)
                panic("Could not register gart_sysdev -- "
                      "would corrupt data on next suspend");

        flush_gart();

        printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
               aper_base, aper_size >> 10);

        return 0;

 nommu:
        /* Should not happen anymore */
        printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
               "falling back to iommu=soft.\n");
        return -1;
}
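
/*
 * Size of the private GATT, by way of example: every 4 KB aperture page
 * needs one 32-bit GATT entry, so a 256 MB aperture needs 64K entries,
 * i.e. a 256 KB table (gatt_size above). The table is allocated as whole
 * pages and switched to uncacheable because the northbridge fetches the
 * PTEs from it directly.
 */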

static struct dma_map_ops gart_dma_ops = {
        .map_sg         = gart_map_sg,
        .unmap_sg       = gart_unmap_sg,
        .map_page       = gart_map_page,
        .unmap_page     = gart_unmap_page,
        .alloc_coherent = gart_alloc_coherent,
        .free_coherent  = gart_free_coherent,
};

void gart_iommu_shutdown(void)
{
        struct pci_dev *dev;
        int i;

        if (no_agp && (dma_ops != &gart_dma_ops))
                return;

        for (i = 0; i < num_k8_northbridges; i++) {
                u32 ctl;

                dev = k8_northbridges[i];
                pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

                ctl &= ~GARTEN;

                pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
        }
}

void __init gart_iommu_init(void)
{
        struct agp_kern_info info;
        unsigned long iommu_start;
        unsigned long aper_base, aper_size;
        unsigned long start_pfn, end_pfn;
        unsigned long scratch;
        long i;

        if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0)
                return;

#ifndef CONFIG_AGP_AMD64
        no_agp = 1;
#else
        /* Makefile puts PCI initialization via subsys_initcall first. */
        /* Add other K8 AGP bridge drivers here */
        no_agp = no_agp ||
                (agp_amd64_init() < 0) ||
                (agp_copy_info(agp_bridge, &info) < 0);
#endif

        if (swiotlb)
                return;

        /* Did we detect a different HW IOMMU? */
        if (iommu_detected && !gart_iommu_aperture)
                return;

        if (no_iommu ||
            (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
            !gart_iommu_aperture ||
            (no_agp && init_k8_gatt(&info) < 0)) {
                if (max_pfn > MAX_DMA32_PFN) {
                        printk(KERN_WARNING "More than 4GB of memory "
                               "but GART IOMMU not available.\n");
                        printk(KERN_WARNING "falling back to iommu=soft.\n");
                }
                return;
        }

        /* need to map that range */
        aper_size = info.aper_size << 20;
        aper_base = info.aper_base;
        end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
        if (end_pfn > max_low_pfn_mapped) {
                start_pfn = (aper_base>>PAGE_SHIFT);
                init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
        }

        printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
        iommu_size = check_iommu_size(info.aper_base, aper_size);
        iommu_pages = iommu_size >> PAGE_SHIFT;

        iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                      get_order(iommu_pages/8));
        if (!iommu_gart_bitmap)
                panic("Cannot allocate iommu bitmap\n");

#ifdef CONFIG_IOMMU_LEAK
        if (leak_trace) {
                int ret;

                ret = dma_debug_resize_entries(iommu_pages);
                if (ret)
                        printk(KERN_DEBUG
                               "PCI-DMA: Cannot trace all the entries\n");
        }
#endif

        /*
         * Out of IOMMU space handling.
         * Reserve some invalid pages at the beginning of the GART.
         */
        iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

        agp_memory_reserved = iommu_size;
        printk(KERN_INFO
               "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
               iommu_size >> 20);

        iommu_start = aper_size - iommu_size;
        iommu_bus_base = info.aper_base + iommu_start;
        bad_dma_address = iommu_bus_base;
        iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

        /*
         * Unmap the IOMMU part of the GART. The alias of the page is
         * always mapped with cache enabled and there is no full cache
         * coherency across the GART remapping. The unmapping avoids
         * automatic prefetches from the CPU allocating cache lines in
         * there. All CPU accesses are done via the direct mapping to
         * the backing memory. The GART address is only used by PCI
         * devices.
         */
        set_memory_np((unsigned long)__va(iommu_bus_base),
                      iommu_size >> PAGE_SHIFT);
        /*
         * Tricky. The GART table remaps the physical memory range,
         * so the CPU won't notice potential aliases and if the memory
         * is remapped to UC later on, we might surprise the PCI devices
         * with a stray writeout of a cacheline. So play it sure and
         * do an explicit, full-scale wbinvd() _after_ having marked all
         * the pages as Not-Present:
         */
        wbinvd();

        /*
         * Now all caches are flushed and we can safely enable
         * GART hardware. Doing it early leaves the possibility
         * of stale cache entries that can lead to GART PTE
         * errors.
         */
        enable_gart_translations();

        /*
         * Try to work around a bug (thanks to BenH):
         * Set unmapped entries to a scratch page instead of 0.
         * Any prefetches that hit unmapped entries won't get a bus abort
         * then. (P2P bridge may be prefetching on DMA reads).
         */
        scratch = get_zeroed_page(GFP_KERNEL);
        if (!scratch)
                panic("Cannot allocate iommu scratch page");
        gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
        for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
                iommu_gatt_base[i] = gart_unmapped_entry;

        flush_gart();
        dma_ops = &gart_dma_ops;
}

void __init gart_parse_options(char *p)
{
        int arg;

#ifdef CONFIG_IOMMU_LEAK
        if (!strncmp(p, "leak", 4)) {
                leak_trace = 1;
                p += 4;
                if (*p == '=')
                        ++p;
                if (isdigit(*p) && get_option(&p, &arg))
                        iommu_leak_pages = arg;
        }
#endif
        if (isdigit(*p) && get_option(&p, &arg))
                iommu_size = arg;
        if (!strncmp(p, "fullflush", 9))
                iommu_fullflush = 1;
        if (!strncmp(p, "nofullflush", 11))
                iommu_fullflush = 0;
        if (!strncmp(p, "noagp", 5))
                no_agp = 1;
        if (!strncmp(p, "noaperture", 10))
                fix_aperture = 0;
        /* duplicated from pci-dma.c */
        if (!strncmp(p, "force", 5))
                gart_iommu_aperture_allowed = 1;
        if (!strncmp(p, "allowed", 7))
                gart_iommu_aperture_allowed = 1;
        if (!strncmp(p, "memaper", 7)) {
                fallback_aper_force = 1;
                p += 7;
                if (*p == '=') {
                        ++p;
                        if (get_option(&p, &arg))
                                fallback_aper_order = arg;
                }
        }
}
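
/*
 * Example usage (these options are typically reached through the iommu=
 * boot parameter, which hands the option string to this parser):
 *
 *   iommu=fullflush      flush the GART TLBs after every mapping
 *   iommu=nofullflush    use the lazy flush-on-wrap strategy instead
 *   iommu=noaperture     don't touch the aperture set up by the BIOS
 *   iommu=memaper=2      request a fallback aperture of order 2, i.e.
 *                        32 MB << 2 = 128 MB, following the same order
 *                        encoding decoded in read_aperture() above
 *
 * The sizes shown are illustrative.
 */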