sba_iommu.c
/*
** IA64 System Bus Adapter (SBA) I/O MMU manager
**
** (c) Copyright 2002-2005 Alex Williamson
** (c) Copyright 2002-2003 Grant Grundler
** (c) Copyright 2002-2005 Hewlett-Packard Company
**
** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
**
** This module initializes the IOC (I/O Controller) found on HP
** McKinley machines and their successors.
**
*/
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/bitops.h>	/* hweight64() */

#include <asm/delay.h>		/* ia64_get_itc() */
#include <asm/io.h>
#include <asm/page.h>		/* PAGE_OFFSET */
#include <asm/dma.h>
#include <asm/system.h>		/* wmb() */

#include <asm/acpi-ext.h>

#define PFX "IOC: "
/*
** Enabling timing search of the pdir resource map.  Output in /proc.
** Disabled by default to optimize performance.
*/
#undef PDIR_SEARCH_TIMING

/*
** This option allows cards capable of 64bit DMA to bypass the IOMMU.  If
** not defined, all DMA will be 32bit and go through the TLB.
** There's potentially a conflict in the bio merge code with us
** advertising an iommu, but then bypassing it.  Since I/O MMU bypassing
** appears to give more performance than bio-level virtual merging, we'll
** do the former for now.  NOTE: BYPASS_SG also needs to be undef'd to
** completely restrict DMA to the IOMMU.
*/
#define ALLOW_IOV_BYPASS

/*
** This option specifically allows/disallows bypassing scatterlists with
** multiple entries.  Coalescing these entries can allow better DMA streaming
** and in some cases shows better performance than entirely bypassing the
** IOMMU.  Performance increase on the order of 1-2% sequential output/input
** using bonnie++ on a RAID0 MD device (sym2 & mpt).
*/
#undef ALLOW_IOV_BYPASS_SG

/*
** If a device prefetches beyond the end of a valid pdir entry, it will cause
** a hard failure, ie. MCA.  Version 3.0 and later of the zx1 LBA should
** disconnect on 4k boundaries and prevent such issues.  If the device is
** particularly aggressive, this option will keep the entire pdir valid such
** that prefetching will hit a valid address.  This could severely impact
** error containment, and is therefore off by default.  The page that is
** used for spill-over is poisoned, so that should help debugging somewhat.
*/
#undef FULL_VALID_PDIR
#define ENABLE_MARK_CLEAN

/*
** The number of debug flags is a clue - this code is fragile.  NOTE: since
** tightening the use of res_lock the resource bitmap and actual pdir are no
** longer guaranteed to stay in sync.  The sanity checking code isn't going to
** like that.
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_BYPASS

#if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
#error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
#endif

#define SBA_INLINE	__inline__
/* #define SBA_INLINE */

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)	printk(x)
#else
#define DBG_RUN_SG(x...)
#endif

#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

#ifdef DEBUG_BYPASS
#define DBG_BYPASS(x...)	printk(x)
#else
#define DBG_BYPASS(x...)
#endif

#ifdef ASSERT_PDIR_SANITY
#define ASSERT(expr) \
	if(!(expr)) { \
		printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \
		panic(#expr); \
	}
#else
#define ASSERT(expr)
#endif
/*
** The number of pdir entries to "free" before issuing
** a read to PCOM register to flush out PCOM writes.
** Interacts with allocation granularity (ie 4 or 8 entries
** allocated and free'd/purged at a time might make this
** less interesting).
*/
#define DELAYED_RESOURCE_CNT	64

#define PCI_DEVICE_ID_HP_SX2000_IOC	0x12ec

#define ZX1_IOC_ID	((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
#define ZX2_IOC_ID	((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP)
#define REO_IOC_ID	((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX1000_IOC_ID	((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX2000_IOC_ID	((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP)
#define ZX1_IOC_OFFSET	0x1000	/* ACPI reports SBA, we want IOC */

#define IOC_FUNC_ID	0x000
#define IOC_FCLASS	0x008	/* function class, bist, header, rev... */
#define IOC_IBASE	0x300	/* IO TLB */
#define IOC_IMASK	0x308
#define IOC_PCOM	0x310
#define IOC_TCNFG	0x318
#define IOC_PDIR_BASE	0x320

#define IOC_ROPE0_CFG	0x500
#define IOC_ROPE_AO	0x10	/* Allow "Relaxed Ordering" */

/* AGP GART driver looks for this */
#define ZX1_SBA_IOMMU_COOKIE	0x0000badbadc0ffeeUL
/*
** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register)
**
** Some IOCs (sx1000) can run at the above page sizes, but are
** really only supported using the IOC at a 4k page size.
**
** iovp_size could only be greater than PAGE_SIZE if we are
** confident the drivers really only touch the next physical
** page iff that driver instance owns it.
*/
static unsigned long iovp_size;
static unsigned long iovp_shift;
static unsigned long iovp_mask;
struct ioc {
	void __iomem	*ioc_hpa;	/* I/O MMU base address */
	char		*res_map;	/* resource map, bit == pdir entry */
	u64		*pdir_base;	/* physical base address */
	unsigned long	ibase;		/* pdir IOV Space base */
	unsigned long	imask;		/* pdir IOV Space mask */

	unsigned long	*res_hint;	/* next avail IOVP - circular search */
	unsigned long	dma_mask;
	spinlock_t	res_lock;	/* protects the resource bitmap, but must be held when */
					/* clearing pdir to prevent races with allocations. */
	unsigned int	res_bitshift;	/* from the RIGHT! */
	unsigned int	res_size;	/* size of resource map in bytes */
#ifdef CONFIG_NUMA
	unsigned int	node;		/* node where this IOC lives */
#endif
#if DELAYED_RESOURCE_CNT > 0
	spinlock_t	saved_lock;	/* may want to try to get this on a separate cacheline */
					/* than res_lock for bigger systems. */
	int		saved_cnt;
	struct sba_dma_pair {
		dma_addr_t	iova;
		size_t		size;
	} saved[DELAYED_RESOURCE_CNT];
#endif

#ifdef PDIR_SEARCH_TIMING
#define SBA_SEARCH_SAMPLE	0x100
	unsigned long avg_search[SBA_SEARCH_SAMPLE];
	unsigned long avg_idx;		/* current index into avg_search */
#endif

	/* Stuff we don't need in performance path */
	struct ioc	*next;		/* list of IOC's in system */
	acpi_handle	handle;		/* for multiple IOC's */
	const char	*name;
	unsigned int	func_id;
	unsigned int	rev;		/* HW revision of chip */
	u32		iov_size;
	unsigned int	pdir_size;	/* in bytes, determined by IOV Space size */
	struct pci_dev	*sac_only_dev;
};
static struct ioc *ioc_list;
static int reserve_sba_gart = 1;

static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);

#define sba_sg_address(sg)	(page_address((sg)->page) + (sg)->offset)

#ifdef FULL_VALID_PDIR
static u64 prefetch_spill_page;
#endif

#ifdef CONFIG_PCI
# define GET_IOC(dev)	(((dev)->bus == &pci_bus_type) \
			 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
#else
# define GET_IOC(dev)	NULL
#endif
/*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
** (or rather not merge) DMAs into manageable chunks.
** On parisc, this is more of a software/tuning constraint
** than a HW one.  I/O MMU allocation algorithms can be
** faster with smaller sizes (to some degree).
*/
#define DMA_CHUNK_SIZE	(BITS_PER_LONG*iovp_size)

#define ROUNDUP(x,y)	(((x) + ((y)-1)) & ~((y)-1))
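
/*
** Worked example (illustration only, not part of the driver): with a 4K
** IOTLB page (iovp_size == 0x1000) and BITS_PER_LONG == 64, DMA_CHUNK_SIZE
** is 256KB, and ROUNDUP pads a length up to the next multiple of iovp_size:
**
**	ROUNDUP(0x1234, 0x1000) == (0x1234 + 0x0FFF) & ~0x0FFF == 0x2000
*/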
/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
**  (ie follow writes which must reach HW with a read)
**
*/
#define READ_REG(addr)       __raw_readq(addr)
#define WRITE_REG(val, addr) __raw_writeq(val, addr)
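
/*
** Illustration (not part of the driver): because writes are posted, a
** purge must be pushed to HW with a follow-up read before the purged
** entries may be reused, exactly as the unmap paths below do:
**
**	WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa + IOC_PCOM);
**	READ_REG(ioc->ioc_hpa + IOC_PCOM);	// flush the posted purge
*/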
#ifdef DEBUG_SBA_INIT

/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU PDIR.
 */
static void
sba_dump_tlb(char *hpa)
{
	DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
	DBG_INIT("IOC_IBASE    : %016lx\n", READ_REG(hpa+IOC_IBASE));
	DBG_INIT("IOC_IMASK    : %016lx\n", READ_REG(hpa+IOC_IMASK));
	DBG_INIT("IOC_TCNFG    : %016lx\n", READ_REG(hpa+IOC_TCNFG));
	DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
	DBG_INIT("\n");
}
#endif
#ifdef ASSERT_PDIR_SANITY

/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU PDIR in human readable form.
 */
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
	/* start printing from lowest pde in rval */
	u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)];
	unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >> 3) & -sizeof(unsigned long)];
	uint rcnt;

	printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
	       msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);

	rcnt = 0;
	while (rcnt < BITS_PER_LONG) {
		printk(KERN_DEBUG "%s %2d %p %016Lx\n",
		       (rcnt == (pide & (BITS_PER_LONG - 1)))
				? "    -->" : "       ",
		       rcnt, ptr, (unsigned long long) *ptr );
		rcnt++;
		ptr++;
	}
	printk(KERN_DEBUG "%s", msg);
}
/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify the resource map and pdir state is consistent
 */
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
	u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
	u64 *rptr = (u64 *) ioc->res_map;	/* resource map ptr */
	u64 *pptr = ioc->pdir_base;		/* pdir ptr */
	uint pide = 0;

	while (rptr < rptr_end) {
		u64 rval;
		int rcnt; /* number of bits we might check */

		rval = *rptr;
		rcnt = 64;

		while (rcnt) {
			/* Get last byte and highest bit from that */
			u32 pde = ((u32)((*pptr >> (63)) & 0x1));
			if ((rval & 0x1) ^ pde)
			{
				/*
				** BUMMER!  -- res_map != pdir --
				** Dump rval and matching pdir entries
				*/
				sba_dump_pdir_entry(ioc, msg, pide);
				return(1);
			}
			rcnt--;
			rval >>= 1;	/* try the next bit */
			pptr++;
			pide++;
		}
		rptr++;	/* look at next word of res_map */
	}
	/* It'd be nice if we always got here :^) */
	return 0;
}
/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * print the SG list so we can verify it's correct by hand.
 */
static void
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	while (nents-- > 0) {
		printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
		       startsg->dma_address, startsg->dma_length,
		       sba_sg_address(startsg));
		startsg++;
	}
}

static void
sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	struct scatterlist *the_sg = startsg;
	int the_nents = nents;

	while (the_nents-- > 0) {
		if (sba_sg_address(the_sg) == 0x0UL)
			sba_dump_sg(NULL, startsg, nents);
		the_sg++;
	}
}

#endif /* ASSERT_PDIR_SANITY */
/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1	/* could increase this to 4 or 8 if needed */

/* Convert from IOVP to IOVA and vice versa. */
#define SBA_IOVA(ioc,iovp,offset)	((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova)		((iova) & ~(ioc->ibase))

#define PDIR_ENTRY_SIZE	sizeof(u64)

#define PDIR_INDEX(iovp)	((iovp)>>iovp_shift)

#define RESMAP_MASK(n)		~(~0UL << (n))
#define RESMAP_IDX_MASK		(sizeof(unsigned long) - 1)
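
/*
** Worked example (illustration only; the ibase value is hypothetical):
** assume iovp_shift == 12 (4K pages) and ioc->ibase == 0x40000000.
** For pdir index pide == 5 mapping a buffer at page offset 0x234:
**
**	iovp = pide << iovp_shift           == 0x5000
**	iova = SBA_IOVA(ioc, iovp, 0x234)   == 0x40005234
**	iovp = SBA_IOVP(ioc, iova)          == 0x5234 (offset still in low bits)
**	pide = PDIR_INDEX(iovp)             == 5 again
**
** RESMAP_MASK(3) == 0x7: a mask of three resource-map bits, i.e. three
** consecutive pdir entries.
*/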
/**
 * For most cases the normal get_order is sufficient; however, it limits us
 * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity.
 * It only incurs about 1 clock cycle to use this one with the static variable
 * and makes the code more intuitive.
 */
static SBA_INLINE int
get_iovp_order (unsigned long size)
{
	long double d = size - 1;
	long order;

	order = ia64_getf_exp(d);
	order = order - iovp_shift - 0xffff + 1;
	if (order < 0)
		order = 0;
	return order;
}
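
/*
** Worked example (illustration only, assuming iovp_shift == 12): for
** size == 16KB, d == 16383.0, whose biased exponent (getf.exp on the
** register format, bias 0xffff) is 0xffff + 13.  Then
** 13 - 12 + 1 == 2, i.e. order 2, or four 4K IOTLB pages -- the same
** answer as a shift-loop log2 roundup, in a couple of cycles.
*/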
/**
 * sba_search_bitmap - find free space in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @bits_wanted: number of entries we need.
 * @use_hint: use res_hint to indicate where to start looking
 *
 * Find consecutive free bits in resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Cool perf optimization: search for log2(size) bits at a time.
 */
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
{
	unsigned long *res_ptr;
	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
	unsigned long flags, pide = ~0UL;

	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);

	spin_lock_irqsave(&ioc->res_lock, flags);

	/* Allow caller to force a search through the entire resource space */
	if (likely(use_hint)) {
		res_ptr = ioc->res_hint;
	} else {
		res_ptr = (ulong *)ioc->res_map;
		ioc->res_bitshift = 0;
	}
	ASSERT(res_ptr < res_end);

	/*
	 * N.B.  REO/Grande defect AR2305 can cause TLB fetch timeouts
	 * if a TLB entry is purged while in use.  sba_mark_invalid()
	 * purges IOTLB entries in power-of-two sizes, so we also
	 * allocate IOVA space in power-of-two sizes.
	 */
	bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift);

	if (likely(bits_wanted == 1)) {
		unsigned int bitshiftcnt;
		for(; res_ptr < res_end ; res_ptr++) {
			if (likely(*res_ptr != ~0UL)) {
				bitshiftcnt = ffz(*res_ptr);
				*res_ptr |= (1UL << bitshiftcnt);
				pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
				pide <<= 3;	/* convert to bit address */
				pide += bitshiftcnt;
				ioc->res_bitshift = bitshiftcnt + bits_wanted;
				goto found_it;
			}
		}
		goto not_found;
	}

	if (likely(bits_wanted <= BITS_PER_LONG/2)) {
		/*
		** Search the resource bit map on well-aligned values.
		** "o" is the alignment.
		** We need the alignment to invalidate I/O TLB using
		** SBA HW features in the unmap path.
		*/
		unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift);
		uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
		unsigned long mask, base_mask;

		base_mask = RESMAP_MASK(bits_wanted);
		mask = base_mask << bitshiftcnt;

		DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr);
		for(; res_ptr < res_end ; res_ptr++)
		{
			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
			ASSERT(0 != mask);
			for (; mask ; mask <<= o, bitshiftcnt += o) {
				if(0 == ((*res_ptr) & mask)) {
					*res_ptr |= mask;	/* mark resources busy! */
					pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
					pide <<= 3;	/* convert to bit address */
					pide += bitshiftcnt;
					ioc->res_bitshift = bitshiftcnt + bits_wanted;
					goto found_it;
				}
			}

			bitshiftcnt = 0;
			mask = base_mask;
		}

	} else {
		int qwords, bits, i;
		unsigned long *end;

		qwords = bits_wanted >> 6; /* /64 */
		bits = bits_wanted - (qwords * BITS_PER_LONG);

		end = res_end - qwords;

		for (; res_ptr < end; res_ptr++) {
			for (i = 0 ; i < qwords ; i++) {
				if (res_ptr[i] != 0)
					goto next_ptr;
			}
			if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits))
				continue;

			/* Found it, mark it */
			for (i = 0 ; i < qwords ; i++)
				res_ptr[i] = ~0UL;
			res_ptr[i] |= RESMAP_MASK(bits);

			pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
			pide <<= 3;	/* convert to bit address */
			res_ptr += qwords;
			ioc->res_bitshift = bits;
			goto found_it;
next_ptr:
			;
		}
	}

not_found:
	prefetch(ioc->res_map);
	ioc->res_hint = (unsigned long *) ioc->res_map;
	ioc->res_bitshift = 0;
	spin_unlock_irqrestore(&ioc->res_lock, flags);
	return (pide);

found_it:
	ioc->res_hint = res_ptr;
	spin_unlock_irqrestore(&ioc->res_lock, flags);
	return (pide);
}
/**
 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked bits, then mark them in the
 * resource bit map.
 */
static int
sba_alloc_range(struct ioc *ioc, size_t size)
{
	unsigned int pages_needed = size >> iovp_shift;
#ifdef PDIR_SEARCH_TIMING
	unsigned long itc_start;
#endif
	unsigned long pide;

	ASSERT(pages_needed);
	ASSERT(0 == (size & ~iovp_mask));

#ifdef PDIR_SEARCH_TIMING
	itc_start = ia64_get_itc();
#endif
	/*
	** "seek and ye shall find"...praying never hurts either...
	*/
	pide = sba_search_bitmap(ioc, pages_needed, 1);
	if (unlikely(pide >= (ioc->res_size << 3))) {
		pide = sba_search_bitmap(ioc, pages_needed, 0);
		if (unlikely(pide >= (ioc->res_size << 3))) {
#if DELAYED_RESOURCE_CNT > 0
			unsigned long flags;

			/*
			** With delayed resource freeing, we can give this one more shot.  We're
			** getting close to being in trouble here, so do what we can to make this
			** one count.
			*/
			spin_lock_irqsave(&ioc->saved_lock, flags);
			if (ioc->saved_cnt > 0) {
				struct sba_dma_pair *d;
				int cnt = ioc->saved_cnt;

				d = &(ioc->saved[ioc->saved_cnt - 1]);

				spin_lock(&ioc->res_lock);
				while (cnt--) {
					sba_mark_invalid(ioc, d->iova, d->size);
					sba_free_range(ioc, d->iova, d->size);
					d--;
				}
				ioc->saved_cnt = 0;
				READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
				spin_unlock(&ioc->res_lock);
			}
			spin_unlock_irqrestore(&ioc->saved_lock, flags);

			pide = sba_search_bitmap(ioc, pages_needed, 0);
			if (unlikely(pide >= (ioc->res_size << 3)))
				panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
				      ioc->ioc_hpa);
#else
			panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
			      ioc->ioc_hpa);
#endif
		}
	}

#ifdef PDIR_SEARCH_TIMING
	ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
#endif

	prefetchw(&(ioc->pdir_base[pide]));

#ifdef ASSERT_PDIR_SANITY
	/* verify the first enable bit is clear */
	if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) {
		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
	}
#endif

	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
		__FUNCTION__, size, pages_needed, pide,
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift );

	return (pide);
}
/**
 * sba_free_range - unmark bits in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes to create a mapping for
 *
 * clear bits in the ioc's resource map
 */
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);
	unsigned int pide = PDIR_INDEX(iovp);
	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
	int bits_not_wanted = size >> iovp_shift;
	unsigned long m;

	/* Round up to power-of-two size: see AR2305 note above */
	bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift);
	for (; bits_not_wanted > 0 ; res_ptr++) {

		if (unlikely(bits_not_wanted > BITS_PER_LONG)) {

			/* these mappings start 64bit aligned */
			*res_ptr = 0UL;
			bits_not_wanted -= BITS_PER_LONG;
			pide += BITS_PER_LONG;

		} else {

			/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
			m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));

			DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __FUNCTION__, (uint) iova, size,
			        bits_not_wanted, m, pide, res_ptr, *res_ptr);

			ASSERT(m != 0);
			ASSERT(bits_not_wanted);
			ASSERT((*res_ptr & m) == m); /* verify same bits are set */
			*res_ptr &= ~m;
			/* zero the count only after the debug checks have seen it */
			bits_not_wanted = 0;
		}
	}
}
/**************************************************************
*
* "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

/**
 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @pdir_ptr:  pointer to IO PDIR entry
 * @vba: Virtual CPU address of buffer to map
 *
 * SBA Mapping Routine
 *
 * Given a virtual address (vba, arg1) sba_io_pdir_entry()
 * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
 * Each IO Pdir entry consists of 8 bytes as shown below
 * (LSB == bit 0):
 *
 *  63                    40                                 11    7        0
 * +-+---------------------+----------------------------------+----+--------+
 * |V|         U           |            PPN[39:12]            | U  |   FF   |
 * +-+---------------------+----------------------------------+----+--------+
 *
 *  V  == Valid Bit
 *  U  == Unused
 * PPN == Physical Page Number
 *
 * The physical address fields are filled with the results of virt_to_phys()
 * on the vba.
 */
#if 1
#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL)	\
						      | 0x8000000000000000ULL)
#else
void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
{
	*pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
}
#endif
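
/*
** Worked example (illustration only; the address is hypothetical): for a
** region-7 kernel address vba == 0xE000000001234567, the macro clears the
** region bits (63:61) and the page offset (11:0), then sets the valid bit:
**
**	0xE000000001234567 & ~0xE000000000000FFF == 0x0000000001234000
**	0x0000000001234000 |  0x8000000000000000 == 0x8000000001234000
**
** leaving PPN[39:12] == 0x1234 with V == 1, the layout drawn above.
** (This relies on region 7 being identity-mapped, i.e.
** vba == PAGE_OFFSET + phys, so masking stands in for virt_to_phys().)
*/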
#ifdef ENABLE_MARK_CLEAN
/**
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
static void
mark_clean (void *addr, size_t size)
{
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page((void *)pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
}
#endif
/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova:  IO Virtual Address mapped earlier
 * @byte_cnt:  number of bytes this mapping covers.
 *
 * Mark the IO PDIR entry(ies) as invalid and invalidate the
 * corresponding IO TLB entry. The PCOM (Purge Command Register)
 * is used to purge stale entries in the IO TLB when unmapping entries.
 *
 * The PCOM register supports purging of multiple pages, with a minimum
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc, iova);
	int off = PDIR_INDEX(iovp);

	/* Must be non-zero and rounded up */
	ASSERT(byte_cnt > 0);
	ASSERT(0 == (byte_cnt & ~iovp_mask));

#ifdef ASSERT_PDIR_SANITY
	/* Assert first pdir entry is set */
	if (!(ioc->pdir_base[off] >> 60)) {
		sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt <= iovp_size)
	{
		ASSERT(off < ioc->pdir_size);

		iovp |= iovp_shift;	/* set "size" field for PCOM */

#ifndef FULL_VALID_PDIR
		/*
		** clear I/O PDIR entry "valid" bit
		** Do NOT clear the rest - save it for debugging.
		** We should only clear bits that have previously
		** been enabled.
		*/
		ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
		/*
		** If we want to maintain the PDIR as valid, put in
		** the spill page so devices prefetching won't
		** cause a hard fail.
		*/
		ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
	} else {
		u32 t = get_iovp_order(byte_cnt) + iovp_shift;

		iovp |= t;
		ASSERT(t <= 31);	/* 2GB! Max value of "size" field */

		do {
			/* verify this pdir entry is enabled */
			ASSERT(ioc->pdir_base[off] >> 63);
#ifndef FULL_VALID_PDIR
			/* clear I/O Pdir entry "valid" bit first */
			ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
			ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
			off++;
			byte_cnt -= iovp_size;
		} while (byte_cnt > 0);
	}

	WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
}
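
/*
** Worked example (illustration only, assuming iovp_shift == 12): the low
** bits of the PCOM address encode log2 of the purge size.  Purging a
** 32KB mapping at iovp 0x8000:
**
**	t    = get_iovp_order(32*1024) + iovp_shift == 3 + 12 == 15
**	PCOM = ioc->ibase | 0x8000 | 15
**
** i.e. "purge 2^15 bytes at this 32KB-aligned address" -- which is why
** allocations are rounded to power-of-two, naturally aligned ranges.
*/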
/**
 * sba_map_single - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @addr:  driver buffer to map.
 * @size:  number of bytes to map in driver buffer.
 * @dir:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
dma_addr_t
sba_map_single(struct device *dev, void *addr, size_t size, int dir)
{
	struct ioc *ioc;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	int pide;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS
	unsigned long pci_addr = virt_to_phys(addr);
#endif

#ifdef ALLOW_IOV_BYPASS
	ASSERT(to_pci_dev(dev)->dma_mask);
	/*
	** Check if the PCI device can DMA to ptr... if so, just return ptr
	*/
	if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
		/*
		** Device is capable of DMA'ing to the buffer...
		** just return the PCI address of ptr
		*/
		DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n",
		           to_pci_dev(dev)->dma_mask, pci_addr);
		return pci_addr;
	}
#endif

	ioc = GET_IOC(dev);
	ASSERT(ioc);

	prefetch(ioc->res_hint);

	ASSERT(size > 0);
	ASSERT(size <= DMA_CHUNK_SIZE);

	/* save offset bits */
	offset = ((dma_addr_t) (long) addr) & ~iovp_mask;

	/* round up to nearest iovp_size */
	size = (size + offset + ~iovp_mask) & iovp_mask;

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc,"Check before sba_map_single()"))
		panic("Sanity check failed");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	pide = sba_alloc_range(ioc, size);

	iovp = (dma_addr_t) pide << iovp_shift;

	DBG_RUN("%s() 0x%p -> 0x%lx\n",
		__FUNCTION__, addr, (long) iovp | offset);

	pdir_start = &(ioc->pdir_base[pide]);

	while (size > 0) {
		ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
		sba_io_pdir_entry(pdir_start, (unsigned long) addr);

		DBG_RUN("     pdir 0x%p %lx\n", pdir_start, *pdir_start);

		addr += iovp_size;
		size -= iovp_size;
		pdir_start++;
	}
	/* force pdir update */
	wmb();

	/* form complete address */
#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check after sba_map_single()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
	return SBA_IOVA(ioc, iovp, offset);
}
#ifdef ENABLE_MARK_CLEAN
static SBA_INLINE void
sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	u32	iovp = (u32) SBA_IOVP(ioc, iova);
	int	off = PDIR_INDEX(iovp);
	void	*addr;

	if (size <= iovp_size) {
		addr = phys_to_virt(ioc->pdir_base[off] &
		                    ~0xE000000000000FFFULL);
		mark_clean(addr, size);
	} else {
		do {
			addr = phys_to_virt(ioc->pdir_base[off] &
			                    ~0xE000000000000FFFULL);
			mark_clean(addr, min(size, iovp_size));
			off++;
			size -= iovp_size;
		} while (size > 0);
	}
}
#endif
/**
 * sba_unmap_single - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova:  IOVA of driver buffer previously mapped.
 * @size:  number of bytes mapped in driver buffer.
 * @dir:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
{
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
	struct sba_dma_pair *d;
#endif
	unsigned long flags;
	dma_addr_t offset;

	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS
	if (likely((iova & ioc->imask) != ioc->ibase)) {
		/*
		** Address does not fall w/in IOVA, must be bypassing
		*/
		DBG_BYPASS("sba_unmap_single() bypass addr: 0x%lx\n", iova);

#ifdef ENABLE_MARK_CLEAN
		if (dir == DMA_FROM_DEVICE) {
			mark_clean(phys_to_virt(iova), size);
		}
#endif
		return;
	}
#endif
	offset = iova & ~iovp_mask;

	DBG_RUN("%s() iovp 0x%lx/%x\n",
		__FUNCTION__, (long) iova, size);

	iova ^= offset;	/* clear offset bits */
	size += offset;
	size = ROUNDUP(size, iovp_size);

#ifdef ENABLE_MARK_CLEAN
	if (dir == DMA_FROM_DEVICE)
		sba_mark_clean(ioc, iova, size);
#endif

#if DELAYED_RESOURCE_CNT > 0
	spin_lock_irqsave(&ioc->saved_lock, flags);
	d = &(ioc->saved[ioc->saved_cnt]);
	d->iova = iova;
	d->size = size;
	if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) {
		int cnt = ioc->saved_cnt;
		spin_lock(&ioc->res_lock);
		while (cnt--) {
			sba_mark_invalid(ioc, d->iova, d->size);
			sba_free_range(ioc, d->iova, d->size);
			d--;
		}
		ioc->saved_cnt = 0;
		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
		spin_unlock(&ioc->res_lock);
	}
	spin_unlock_irqrestore(&ioc->saved_lock, flags);
#else /* DELAYED_RESOURCE_CNT == 0 */
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_mark_invalid(ioc, iova, size);
	sba_free_range(ioc, iova, size);
	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif /* DELAYED_RESOURCE_CNT == 0 */
}
/**
 * sba_alloc_coherent - allocate/map shared mem for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @dma_handle:  IOVA of new buffer.
 *
 * See Documentation/DMA-mapping.txt
 */
void *
sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
{
	struct ioc *ioc;
	void *addr;

	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef CONFIG_NUMA
	{
		struct page *page;
		page = alloc_pages_node(ioc->node == MAX_NUMNODES ?
		                        numa_node_id() : ioc->node, flags,
		                        get_order(size));

		if (unlikely(!page))
			return NULL;

		addr = page_address(page);
	}
#else
	addr = (void *) __get_free_pages(flags, get_order(size));
#endif
	if (unlikely(!addr))
		return NULL;

	memset(addr, 0, size);
	*dma_handle = virt_to_phys(addr);

#ifdef ALLOW_IOV_BYPASS
	ASSERT(dev->coherent_dma_mask);
	/*
	** Check if the PCI device can DMA to ptr... if so, just return ptr
	*/
	if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) {
		DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
		           dev->coherent_dma_mask, *dma_handle);

		return addr;
	}
#endif

	/*
	 * If device can't bypass or bypass is disabled, pass the 32bit fake
	 * device to map single to get an iova mapping.
	 */
	*dma_handle = sba_map_single(&ioc->sac_only_dev->dev, addr, size, 0);

	return addr;
}
/**
 * sba_free_coherent - free/unmap shared mem for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @vaddr:  virtual address of "consistent" buffer.
 * @dma_handle:  IO virtual address of "consistent" buffer.
 *
 * See Documentation/DMA-mapping.txt
 */
void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
	sba_unmap_single(dev, dma_handle, size, 0);
	free_pages((unsigned long) vaddr, get_order(size));
}

/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not.  Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x1UL

#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif
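
/*
** Illustration (not part of the driver): sba_coalesce_chunks() stores the
** allocated IOVP (pdir index << iovp_shift, plus the byte offset) in
** sg->dma_address with this flag in bit 0, and sba_fill_pdir() decodes it:
**
**	encode:	sg->dma_address = PIDE_FLAG
**			| (sba_alloc_range(ioc, dma_len) << iovp_shift)
**			| dma_offset;
**	decode:	if (sg->dma_address & PIDE_FLAG) {
**			u32 pide = sg->dma_address & ~PIDE_FLAG;
**			pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
**		}
**
** so a dma_address of 0 can still mean "no new DMA stream starts here".
*/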
/**
 * sba_fill_pdir - write allocated SG entries into IO PDIR
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg:  list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * Take preprocessed SG list and write corresponding entries
 * in the IO PDIR.
 */
static SBA_INLINE int
sba_fill_pdir(
	struct ioc *ioc,
	struct scatterlist *startsg,
	int nents)
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	int n_mappings = 0;
	u64 *pdirp = NULL;
	unsigned long dma_offset = 0;

	dma_sg--;
	while (nents-- > 0) {
		int cnt = startsg->dma_length;
		startsg->dma_length = 0;

#ifdef DEBUG_LARGE_SG_ENTRIES
		if (dump_run_sg)
			printk(" %2d : %08lx/%05x %p\n",
				nents, startsg->dma_address, cnt,
				sba_sg_address(startsg));
#else
		DBG_RUN_SG(" %d : %08lx/%05x %p\n",
			nents, startsg->dma_address, cnt,
			sba_sg_address(startsg));
#endif
		/*
		** Look for the start of a new DMA stream
		*/
		if (startsg->dma_address & PIDE_FLAG) {
			u32 pide = startsg->dma_address & ~PIDE_FLAG;
			dma_offset = (unsigned long) pide & ~iovp_mask;
			startsg->dma_address = 0;
			dma_sg++;
			dma_sg->dma_address = pide | ioc->ibase;
			pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
			n_mappings++;
		}

		/*
		** Look for a VCONTIG chunk
		*/
		if (cnt) {
			unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
			ASSERT(pdirp);

			/* Since multiple Vcontig blocks could make up
			** one DMA stream, *add* cnt to dma_len.
			*/
			dma_sg->dma_length += cnt;
			cnt += dma_offset;
			dma_offset = 0;	/* only want offset on first chunk */
			cnt = ROUNDUP(cnt, iovp_size);
			do {
				sba_io_pdir_entry(pdirp, vaddr);
				vaddr += iovp_size;
				cnt -= iovp_size;
				pdirp++;
			} while (cnt > 0);
		}
		startsg++;
	}
	/* force pdir update */
	wmb();

#ifdef DEBUG_LARGE_SG_ENTRIES
	dump_run_sg = 0;
#endif
	return(n_mappings);
}
/*
** Two address ranges are DMA contiguous *iff* "end of prev" and
** "start of next" are both on an IOV page boundary.
**
** (shift left is a quick trick to mask off upper bits)
*/
#define DMA_CONTIG(__X, __Y) \
	(((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)
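
/*
** Worked example (illustration only, assuming iovp_shift == 12 and
** BITS_PER_LONG == 64): OR-ing the two addresses and shifting left by 52
** keeps only the low 12 bits of each; the result is 0 iff both are 4K
** aligned.
**
**	DMA_CONTIG(0x5000, 0x6000): (0x7000 << 52) == 0  -> contiguous
**	DMA_CONTIG(0x5800, 0x6000): (0x7800 << 52) != 0  -> not contiguous
*/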
/**
 * sba_coalesce_chunks - preprocess the SG list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg:  list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * First pass is to walk the SG list and determine where the breaks are
 * in the DMA stream. Allocates PDIR entries but does not fill them.
 * Returns the number of DMA chunks.
 *
 * Doing the fill separate from the coalescing/allocation keeps the
 * code simpler. Future enhancement could make one pass through
 * the sglist do both.
 */
static SBA_INLINE int
sba_coalesce_chunks( struct ioc *ioc,
	struct scatterlist *startsg,
	int nents)
{
	struct scatterlist *vcontig_sg;    /* VCONTIG chunk head */
	unsigned long vcontig_len;         /* len of VCONTIG chunk */
	unsigned long vcontig_end;
	struct scatterlist *dma_sg;        /* next DMA stream head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	int n_mappings = 0;

	while (nents > 0) {
		unsigned long vaddr = (unsigned long) sba_sg_address(startsg);

		/*
		** Prepare for first/next DMA stream
		*/
		dma_sg = vcontig_sg = startsg;
		dma_len = vcontig_len = vcontig_end = startsg->length;
		vcontig_end += vaddr;
		dma_offset = vaddr & ~iovp_mask;

		/* PARANOID: clear entries */
		startsg->dma_address = startsg->dma_length = 0;

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while (--nents > 0) {
			unsigned long vaddr;	/* tmp */

			startsg++;

			/* PARANOID */
			startsg->dma_address = startsg->dma_length = 0;

			/* catch brokenness in SCSI layer */
			ASSERT(startsg->length <= DMA_CHUNK_SIZE);

			/*
			** First make sure current dma stream won't
			** exceed DMA_CHUNK_SIZE if we coalesce the
			** next entry.
			*/
			if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask)
			    > DMA_CHUNK_SIZE)
				break;

			/*
			** Then look for virtually contiguous blocks.
			**
			** append the next transaction?
			*/
			vaddr = (unsigned long) sba_sg_address(startsg);
			if (vcontig_end == vaddr)
			{
				vcontig_len += startsg->length;
				vcontig_end += startsg->length;
				dma_len     += startsg->length;
				continue;
			}

#ifdef DEBUG_LARGE_SG_ENTRIES
			dump_run_sg = (vcontig_len > iovp_size);
#endif

			/*
			** Not virtually contiguous.
			** Terminate prev chunk.
			** Start a new chunk.
			**
			** Once we start a new VCONTIG chunk, dma_offset
			** can't change. And we need the offset from the first
			** chunk - not the last one. Ergo, successive chunks
			** must start on page boundaries and dovetail
			** with their predecessors.
			*/
			vcontig_sg->dma_length = vcontig_len;

			vcontig_sg = startsg;
			vcontig_len = startsg->length;

			/*
			** 3) do the entries end/start on page boundaries?
			**    Don't update vcontig_end until we've checked.
			*/
			if (DMA_CONTIG(vcontig_end, vaddr))
			{
				vcontig_end = vcontig_len + vaddr;
				dma_len += vcontig_len;
				continue;
			} else {
				break;
			}
		}

		/*
		** End of DMA Stream
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		vcontig_sg->dma_length = vcontig_len;
		dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
		ASSERT(dma_len <= DMA_CHUNK_SIZE);
		dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
			| (sba_alloc_range(ioc, dma_len) << iovp_shift)
			| dma_offset);
		n_mappings++;
	}

	return n_mappings;
}
/**
 * sba_map_sg - map Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @dir:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int dir)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS_SG
	struct scatterlist *sg;
#endif

	DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS_SG
	ASSERT(to_pci_dev(dev)->dma_mask);
	if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
		for (sg = sglist ; filled < nents ; filled++, sg++){
			sg->dma_length = sg->length;
			sg->dma_address = virt_to_phys(sba_sg_address(sg));
		}
		return filled;
	}
#endif
	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sglist->dma_length = sglist->length;
		sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist), sglist->length, dir);
		return 1;
	}

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check before sba_map_sg()");
	}
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	prefetch(ioc->res_hint);

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	coalesced = sba_coalesce_chunks(ioc, sglist, nents);

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	**   o dma_address will contain the pdir index
	**   o dma_len will contain the number of bytes to map
	**   o address contains the virtual address.
	*/
	filled = sba_fill_pdir(ioc, sglist, nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check after sba_map_sg()\n");
	}
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	ASSERT(coalesced == filled);
	DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);

	return filled;
}
/**
 * sba_unmap_sg - unmap Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @dir:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
{
#ifdef ASSERT_PDIR_SANITY
	struct ioc *ioc;
	unsigned long flags;
#endif

	DBG_RUN_SG("%s() START %d entries,  %p,%x\n",
		__FUNCTION__, nents, sba_sg_address(sglist), sglist->length);

#ifdef ASSERT_PDIR_SANITY
	ioc = GET_IOC(dev);
	ASSERT(ioc);

	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check before sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	while (nents && sglist->dma_length) {

		sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
		sglist++;
		nents--;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check after sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
}
  1300. /**************************************************************
  1301. *
  1302. * Initialization and claim
  1303. *
  1304. ***************************************************************/
  1305. static void __init
  1306. ioc_iova_init(struct ioc *ioc)
  1307. {
  1308. int tcnfg;
  1309. int agp_found = 0;
  1310. struct pci_dev *device = NULL;
  1311. #ifdef FULL_VALID_PDIR
  1312. unsigned long index;
  1313. #endif
  1314. /*
  1315. ** Firmware programs the base and size of a "safe IOVA space"
  1316. ** (one that doesn't overlap memory or LMMIO space) in the
  1317. ** IBASE and IMASK registers.
  1318. */
  1319. ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
  1320. ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL;
  1321. ioc->iov_size = ~ioc->imask + 1;
	DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
		 __FUNCTION__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
		 ioc->iov_size >> 20);

	switch (iovp_size) {
		case  4*1024: tcnfg = 0; break;
		case  8*1024: tcnfg = 1; break;
		case 16*1024: tcnfg = 2; break;
		case 64*1024: tcnfg = 3; break;
		default:
			panic(PFX "Unsupported IOTLB page size %ldK",
				iovp_size >> 10);
			break;
	}
	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

	ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE;
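	/*
	** Sizing example (assumed values, for illustration only): with a
	** 1GB IOVA space, 4KB IOMMU pages, and 8-byte pdir entries,
	** pdir_size = (0x40000000 / 0x1000) * 8 = 2MB of page table.
	*/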
	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
						   get_order(ioc->pdir_size));
	if (!ioc->pdir_base)
		panic(PFX "Couldn't allocate I/O Page Table\n");

	memset(ioc->pdir_base, 0, ioc->pdir_size);

	DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __FUNCTION__,
		 iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);

	ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/*
	** If an AGP device is present, only use half of the IOV space
	** for PCI DMA.  Unfortunately we can't know ahead of time whether
	** GART support will actually be used, so for now we simply key on
	** any AGP device found in the system.  We program the next pdir
	** index after we stop w/ a key for the GART code to handshake on.
	*/
	for_each_pci_dev(device)
		agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);

	if (agp_found && reserve_sba_gart) {
		printk(KERN_INFO PFX "reserving %dMB of IOVA space at 0x%lx for agpgart\n",
		       ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2);
		ioc->pdir_size /= 2;
		((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
	}
#ifdef FULL_VALID_PDIR
	/*
	** Check to see if the spill page has been allocated; we don't need
	** more than one across multiple SBAs.
	*/
	if (!prefetch_spill_page) {
		char *spill_poison = "SBAIOMMU POISON";
		int poison_size = 16;
		void *poison_addr, *addr;

		addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size));
		if (!addr)
			panic(PFX "Couldn't allocate PDIR spill page\n");

		poison_addr = addr;
		for ( ; (u64) poison_addr < (u64) addr + iovp_size; poison_addr += poison_size)
			memcpy(poison_addr, spill_poison, poison_size);

		prefetch_spill_page = virt_to_phys(addr);

		DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__, prefetch_spill_page);
	}
	/*
	** Set all the PDIR entries valid w/ the spill page as the target
	*/
	for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++)
		((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
#endif

	/* Clear I/O TLB of any possible entries */
	WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM);
	READ_REG(ioc->ioc_hpa + IOC_PCOM);
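	/*
	** Note: the READ_REG back from IOC_PCOM is presumably there to
	** flush the posted write, so the TLB purge has reached the
	** hardware before translation is enabled below.
	*/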
	/* Enable IOVA translation */
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
	READ_REG(ioc->ioc_hpa + IOC_IBASE);
}

static void __init
ioc_resource_init(struct ioc *ioc)
{
	spin_lock_init(&ioc->res_lock);
#if DELAYED_RESOURCE_CNT > 0
	spin_lock_init(&ioc->saved_lock);
#endif

	/* resource map size dictated by pdir_size */
	ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
	ioc->res_size >>= 3;	/* convert bit count to byte count */
	DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size);
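	/*
	** Worked example (assumed sizes, for illustration only): a 2MB pdir
	** holds 262144 entries; at one bitmap bit per entry, the resource
	** map needs 262144 / 8 = 32KB.
	*/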
	ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
						 get_order(ioc->res_size));
	if (!ioc->res_map)
		panic(PFX "Couldn't allocate resource map\n");

	memset(ioc->res_map, 0, ioc->res_size);
	/* next available IOVP - circular search */
	ioc->res_hint = (unsigned long *) ioc->res_map;

#ifdef ASSERT_PDIR_SANITY
	/* Mark first bit busy - ie no IOVA 0 */
	ioc->res_map[0] = 0x1;
	ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
#endif
#ifdef FULL_VALID_PDIR
	/* Mark the last resource used so we don't prefetch beyond IOVA space */
	ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */
	ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF
							| prefetch_spill_page);
#endif

	DBG_INIT("%s() res_map %x %p\n", __FUNCTION__,
		 ioc->res_size, (void *) ioc->res_map);
}

static void __init
ioc_sac_init(struct ioc *ioc)
{
	struct pci_dev *sac = NULL;
	struct pci_controller *controller = NULL;

	/*
	 * pci_alloc_coherent() must return a DMA address which is
	 * SAC (single address cycle) addressable, so allocate a
	 * pseudo-device to enforce that.
	 */
	sac = kmalloc(sizeof(*sac), GFP_KERNEL);
	if (!sac)
		panic(PFX "Couldn't allocate struct pci_dev");
	memset(sac, 0, sizeof(*sac));

	controller = kmalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		panic(PFX "Couldn't allocate struct pci_controller");
	memset(controller, 0, sizeof(*controller));

	controller->iommu = ioc;
	sac->sysdata = controller;
	sac->dma_mask = 0xFFFFFFFFUL;
#ifdef CONFIG_PCI
	sac->dev.bus = &pci_bus_type;
#endif
	ioc->sac_only_dev = sac;
}

static void __init
ioc_zx1_init(struct ioc *ioc)
{
	unsigned long rope_config;
	unsigned int i;

	if (ioc->rev < 0x20)
		panic(PFX "IOC 2.0 or later required for IOMMU support\n");

	/* 38 bit memory controller + extra bit for range displaced by MMIO */
	ioc->dma_mask = (0x1UL << 39) - 1;

	/*
	** Clear ROPE(N)_CONFIG AO bit.
	** Disables "NT Ordering" (~= !"Relaxed Ordering")
	** Overrides bit 1 in DMA Hint Sets.
	** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701.
	*/
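	/*
	** The loop below touches eight rope config registers laid out at an
	** 8-byte stride from IOC_ROPE0_CFG, i.e. offsets 0x00 through 0x38.
	*/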
	for (i=0; i<(8*8); i+=8) {
		rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i);
		rope_config &= ~IOC_ROPE_AO;
		WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i);
	}
}

typedef void (initfunc)(struct ioc *);

struct ioc_iommu {
	u32 func_id;
	char *name;
	initfunc *init;
};

static struct ioc_iommu ioc_iommu_info[] __initdata = {
	{ ZX1_IOC_ID, "zx1", ioc_zx1_init },
	{ ZX2_IOC_ID, "zx2", NULL },
	{ SX1000_IOC_ID, "sx1000", NULL },
	{ SX2000_IOC_ID, "sx2000", NULL },
};

static struct ioc * __init
ioc_init(u64 hpa, void *handle)
{
	struct ioc *ioc;
	struct ioc_iommu *info;

	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc)
		return NULL;

	memset(ioc, 0, sizeof(*ioc));
	ioc->next = ioc_list;
	ioc_list = ioc;

	ioc->handle = handle;
	ioc->ioc_hpa = ioremap(hpa, 0x1000);

	ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
	ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
	ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL;	/* conservative */

	for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
		if (ioc->func_id == info->func_id) {
			ioc->name = info->name;
			if (info->init)
				(info->init)(ioc);
		}
	}

	iovp_size = (1 << iovp_shift);
	iovp_mask = ~(iovp_size - 1);

	DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __FUNCTION__,
		 PAGE_SIZE >> 10, iovp_size >> 10);

	if (!ioc->name) {
		ioc->name = kmalloc(24, GFP_KERNEL);
		if (ioc->name)
			sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
				ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
		else
			ioc->name = "Unknown";
	}

	ioc_iova_init(ioc);
	ioc_resource_init(ioc);
	ioc_sac_init(ioc);

	if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask)
		ia64_max_iommu_merge_mask = ~iovp_mask;
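	/*
	** Note: ~iovp_mask is the largest offset within one IOMMU page;
	** this presumably advertises to the generic ia64 DMA layer how far
	** scatterlist entries may be virtually merged, since the IOMMU can
	** linearize anything up to its own page granularity.
	*/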
	printk(KERN_INFO PFX
	       "%s %d.%d HPA 0x%lx IOVA space %dMB at 0x%lx\n",
	       ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
	       hpa, ioc->iov_size >> 20, ioc->ibase);

	return ioc;
}

/**************************************************************************
**
**   SBA initialization code (HW and SW)
**
**   o identify SBA chip itself
**   o FIXME: initialize DMA hints for reasonable defaults
**
**************************************************************************/

#ifdef CONFIG_PROC_FS
static void *
ioc_start(struct seq_file *s, loff_t *pos)
{
	struct ioc *ioc;
	loff_t n = *pos;

	for (ioc = ioc_list; ioc; ioc = ioc->next)
		if (!n--)
			return ioc;

	return NULL;
}

static void *
ioc_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct ioc *ioc = v;

	++*pos;
	return ioc->next;
}

static void
ioc_stop(struct seq_file *s, void *v)
{
}

static int
ioc_show(struct seq_file *s, void *v)
{
	struct ioc *ioc = v;
	unsigned long *res_ptr = (unsigned long *)ioc->res_map;
	int i, used = 0;

	seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
		   ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
#ifdef CONFIG_NUMA
	if (ioc->node != MAX_NUMNODES)
		seq_printf(s, "NUMA node : %d\n", ioc->node);
#endif
	seq_printf(s, "IOVA size : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024));
	seq_printf(s, "IOVA page size : %ld KB\n", iovp_size/1024);

	for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		used += hweight64(*res_ptr);
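	/*
	** Each set bit in the resource map is one allocated IOVA page, so
	** 'used' is a direct count of live pdir entries.
	*/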
	seq_printf(s, "PDIR size : %d entries\n", ioc->pdir_size >> 3);
	seq_printf(s, "PDIR used : %d entries\n", used);

#ifdef PDIR_SEARCH_TIMING
	{
		unsigned long i = 0, avg = 0, min, max;
		min = max = ioc->avg_search[0];
		for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
			avg += ioc->avg_search[i];
			if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
			if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
		}
		avg /= SBA_SEARCH_SAMPLE;
		seq_printf(s, "Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
			   min, avg, max);
	}
#endif
#ifndef ALLOW_IOV_BYPASS
	seq_printf(s, "IOVA bypass disabled\n");
#endif
	return 0;
}

static struct seq_operations ioc_seq_ops = {
	.start = ioc_start,
	.next  = ioc_next,
	.stop  = ioc_stop,
	.show  = ioc_show
};

static int
ioc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &ioc_seq_ops);
}

static struct file_operations ioc_fops = {
	.open    = ioc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release
};

static void __init
ioc_proc_init(void)
{
	struct proc_dir_entry *dir, *entry;

	dir = proc_mkdir("bus/mckinley", NULL);
	if (!dir)
		return;
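	/*
	** A single proc entry, named after the first IOC on ioc_list, is
	** registered; the seq_file iterator above walks the whole list, so
	** every IOC still shows up in that one file.
	*/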
	entry = create_proc_entry(ioc_list->name, 0, dir);
	if (entry)
		entry->proc_fops = &ioc_fops;
}
#endif

static void
sba_connect_bus(struct pci_bus *bus)
{
	acpi_handle handle, parent;
	acpi_status status;
	struct ioc *ioc;

	if (!PCI_CONTROLLER(bus))
		panic(PFX "no sysdata on bus %d!\n", bus->number);

	if (PCI_CONTROLLER(bus)->iommu)
		return;

	handle = PCI_CONTROLLER(bus)->acpi_handle;
	if (!handle)
		return;

	/*
	 * The IOC scope encloses PCI root bridges in the ACPI
	 * namespace, so work our way out until we find an IOC we
	 * claimed previously.
	 */
	do {
		for (ioc = ioc_list; ioc; ioc = ioc->next)
			if (ioc->handle == handle) {
				PCI_CONTROLLER(bus)->iommu = ioc;
				return;
			}

		status = acpi_get_parent(handle, &parent);
		handle = parent;
	} while (ACPI_SUCCESS(status));

	printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus), bus->number);
}
#ifdef CONFIG_NUMA
static void __init
sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
{
	unsigned int node;
	int pxm;

	ioc->node = MAX_NUMNODES;

	pxm = acpi_get_pxm(handle);
	if (pxm < 0)
		return;

	node = pxm_to_nid_map[pxm];
	if (node >= MAX_NUMNODES || !node_online(node))
		return;

	ioc->node = node;
	return;
}
#else
#define sba_map_ioc_to_node(ioc, handle)
#endif

static int __init
acpi_sba_ioc_add(struct acpi_device *device)
{
	struct ioc *ioc;
	acpi_status status;
	u64 hpa, length;
	struct acpi_buffer buffer;
	struct acpi_device_info *dev_info;

	status = hp_acpi_csr_space(device->handle, &hpa, &length);
	if (ACPI_FAILURE(status))
		return 1;

	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
	status = acpi_get_object_info(device->handle, &buffer);
	if (ACPI_FAILURE(status))
		return 1;
	dev_info = buffer.pointer;

	/*
	 * For HWP0001, only SBA appears in ACPI namespace.  It encloses the PCI
	 * root bridges, and its CSR space includes the IOC function.
	 */
	if (strncmp("HWP0001", dev_info->hardware_id.value, 7) == 0) {
		hpa += ZX1_IOC_OFFSET;
		/* zx1 based systems default to kernel page size iommu pages */
		if (!iovp_shift)
			iovp_shift = min(PAGE_SHIFT, 16);
	}
	ACPI_MEM_FREE(dev_info);

	/*
	 * default anything not caught above or specified on cmdline to 4k
	 * iommu page size
	 */
	if (!iovp_shift)
		iovp_shift = 12;

	ioc = ioc_init(hpa, device->handle);
	if (!ioc)
		return 1;

	/* setup NUMA node association */
	sba_map_ioc_to_node(ioc, device->handle);
	return 0;
}
static struct acpi_driver acpi_sba_ioc_driver = {
	.name	= "IOC IOMMU Driver",
	.ids	= "HWP0001,HWP0004",
	.ops	= {
		.add = acpi_sba_ioc_add,
	},
};

static int __init
sba_init(void)
{
	if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
		return 0;

	acpi_bus_register_driver(&acpi_sba_ioc_driver);
	if (!ioc_list) {
#ifdef CONFIG_IA64_GENERIC
		extern int swiotlb_late_init_with_default_size (size_t size);

		/*
		 * If we didn't find something sba_iommu can claim, we
		 * need to setup the swiotlb and switch to the dig machvec.
		 */
		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
			panic("Unable to find SBA IOMMU or initialize "
			      "software I/O TLB: Try machvec=dig boot option");
		machvec_init("dig");
#else
		panic("Unable to find SBA IOMMU: Try a generic or DIG kernel");
#endif
		return 0;
	}

#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
	/*
	 * hpzx1_swiotlb needs to have a fairly small swiotlb bounce
	 * buffer setup to support devices with smaller DMA masks than
	 * sba_iommu can handle.
	 */
	if (ia64_platform_is("hpzx1_swiotlb")) {
		extern void hwsw_init(void);

		hwsw_init();
	}
#endif

#ifdef CONFIG_PCI
	{
		struct pci_bus *b = NULL;
		while ((b = pci_find_next_bus(b)) != NULL)
			sba_connect_bus(b);
	}
#endif

#ifdef CONFIG_PROC_FS
	ioc_proc_init();
#endif
	return 0;
}

subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */
static int __init
nosbagart(char *str)
{
	reserve_sba_gart = 0;
	return 1;
}

int
sba_dma_supported (struct device *dev, u64 mask)
{
	/* make sure it's at least 32bit capable */
	return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
}
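/*
** Example (for illustration only): a mask of 0xFFFFFFFF (full 32-bit)
** passed to sba_dma_supported() is accepted, while a 24-bit ISA-style
** mask of 0x00FFFFFF is rejected, since the low 32 bits must all be set.
*/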
int
sba_dma_mapping_error (dma_addr_t dma_addr)
{
	return 0;
}

__setup("nosbagart", nosbagart);

static int __init
sba_page_override(char *str)
{
	unsigned long page_size;

	page_size = memparse(str, &str);
	switch (page_size) {
		case 4096:
		case 8192:
		case 16384:
		case 65536:
			iovp_shift = ffs(page_size) - 1;
			break;
		default:
			printk("%s: unknown/unsupported iommu page size %ld\n",
			       __FUNCTION__, page_size);
	}
	return 1;
}

__setup("sbapagesize=", sba_page_override);
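/*
** Example (for illustration only): booting with "sbapagesize=64k" parses
** to 65536, so iovp_shift becomes ffs(65536) - 1 = 16, i.e. 64KB IOMMU
** pages.
*/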
EXPORT_SYMBOL(sba_dma_mapping_error);
EXPORT_SYMBOL(sba_map_single);
EXPORT_SYMBOL(sba_unmap_single);
EXPORT_SYMBOL(sba_map_sg);
EXPORT_SYMBOL(sba_unmap_sg);
EXPORT_SYMBOL(sba_dma_supported);
EXPORT_SYMBOL(sba_alloc_coherent);
EXPORT_SYMBOL(sba_free_coherent);