/*
** IA64 System Bus Adapter (SBA) I/O MMU manager
**
** (c) Copyright 2002-2005 Alex Williamson
** (c) Copyright 2002-2003 Grant Grundler
** (c) Copyright 2002-2005 Hewlett-Packard Company
**
** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
**
** This module initializes the IOC (I/O Controller) found on HP
** McKinley machines and their successors.
**
*/

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/bitops.h>	/* hweight64() */
#include <linux/crash_dump.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>

#include <asm/delay.h>		/* ia64_get_itc() */
#include <asm/io.h>
#include <asm/page.h>		/* PAGE_OFFSET */
#include <asm/dma.h>
#include <asm/system.h>		/* wmb() */

#include <asm/acpi-ext.h>

extern int swiotlb_late_init_with_default_size (size_t size);

#define PFX "IOC: "

/*
** Enabling timing search of the pdir resource map.  Output in /proc.
** Disabled by default to optimize performance.
*/
#undef PDIR_SEARCH_TIMING

/*
** This option allows cards capable of 64bit DMA to bypass the IOMMU.  If
** not defined, all DMA will be 32bit and go through the TLB.
** There's potentially a conflict in the bio merge code with us
** advertising an iommu, but then bypassing it.  Since I/O MMU bypassing
** appears to give more performance than bio-level virtual merging, we'll
** do the former for now.  NOTE: BYPASS_SG also needs to be undef'd to
** completely restrict DMA to the IOMMU.
*/
#define ALLOW_IOV_BYPASS

/*
** This option specifically allows/disallows bypassing scatterlists with
** multiple entries.  Coalescing these entries can allow better DMA streaming
** and in some cases shows better performance than entirely bypassing the
** IOMMU.  Performance increase on the order of 1-2% sequential output/input
** using bonnie++ on a RAID0 MD device (sym2 & mpt).
*/
#undef ALLOW_IOV_BYPASS_SG

/*
** If a device prefetches beyond the end of a valid pdir entry, it will cause
** a hard failure, ie. MCA.  Version 3.0 and later of the zx1 LBA should
** disconnect on 4k boundaries and prevent such issues.  If the device is
** particularly aggressive, this option will keep the entire pdir valid such
** that prefetching will hit a valid address.  This could severely impact
** error containment, and is therefore off by default.  The page that is
** used for spill-over is poisoned, so that should help debugging somewhat.
*/
#undef FULL_VALID_PDIR

#define ENABLE_MARK_CLEAN

/*
** The number of debug flags is a clue - this code is fragile.  NOTE: since
** tightening the use of res_lock the resource bitmap and actual pdir are no
** longer guaranteed to stay in sync.  The sanity checking code isn't going to
** like that.
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_BYPASS

#if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
#error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
#endif

#define SBA_INLINE	__inline__
/* #define SBA_INLINE */

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)	printk(x)
#else
#define DBG_RUN_SG(x...)
#endif

#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

#ifdef DEBUG_BYPASS
#define DBG_BYPASS(x...)	printk(x)
#else
#define DBG_BYPASS(x...)
#endif

#ifdef ASSERT_PDIR_SANITY
#define ASSERT(expr) \
	if(!(expr)) { \
		printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \
		panic(#expr); \
	}
#else
#define ASSERT(expr)
#endif

/*
** The number of pdir entries to "free" before issuing
** a read to PCOM register to flush out PCOM writes.
** Interacts with allocation granularity (ie 4 or 8 entries
** allocated and free'd/purged at a time might make this
** less interesting).
*/
#define DELAYED_RESOURCE_CNT	64

#define PCI_DEVICE_ID_HP_SX2000_IOC	0x12ec

#define ZX1_IOC_ID	((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
#define ZX2_IOC_ID	((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP)
#define REO_IOC_ID	((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX1000_IOC_ID	((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX2000_IOC_ID	((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP)

#define ZX1_IOC_OFFSET	0x1000	/* ACPI reports SBA, we want IOC */

#define IOC_FUNC_ID	0x000
#define IOC_FCLASS	0x008	/* function class, bist, header, rev... */
#define IOC_IBASE	0x300	/* IO TLB */
#define IOC_IMASK	0x308
#define IOC_PCOM	0x310
#define IOC_TCNFG	0x318
#define IOC_PDIR_BASE	0x320

#define IOC_ROPE0_CFG	0x500
#define IOC_ROPE_AO	0x10	/* Allow "Relaxed Ordering" */

/* AGP GART driver looks for this */
#define ZX1_SBA_IOMMU_COOKIE	0x0000badbadc0ffeeUL

/*
** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register)
**
** Some IOCs (sx1000) can run at the above page sizes, but are
** really only supported using the IOC at a 4k page size.
**
** iovp_size could only be greater than PAGE_SIZE if we are
** confident the drivers really only touch the next physical
** page iff that driver instance owns it.
*/
static unsigned long iovp_size;
static unsigned long iovp_shift;
static unsigned long iovp_mask;

struct ioc {
	void __iomem	*ioc_hpa;	/* I/O MMU base address */
	char		*res_map;	/* resource map, bit == pdir entry */
	u64		*pdir_base;	/* physical base address */
	unsigned long	ibase;		/* pdir IOV Space base */
	unsigned long	imask;		/* pdir IOV Space mask */

	unsigned long	*res_hint;	/* next avail IOVP - circular search */
	unsigned long	dma_mask;
	spinlock_t	res_lock;	/* protects the resource bitmap, but must be held when */
					/* clearing pdir to prevent races with allocations. */
	unsigned int	res_bitshift;	/* from the RIGHT! */
	unsigned int	res_size;	/* size of resource map in bytes */
#ifdef CONFIG_NUMA
	unsigned int	node;		/* node where this IOC lives */
#endif
#if DELAYED_RESOURCE_CNT > 0
	spinlock_t	saved_lock;	/* may want to try to get this on a separate cacheline */
					/* than res_lock for bigger systems. */
	int		saved_cnt;
	struct sba_dma_pair {
		dma_addr_t	iova;
		size_t		size;
	} saved[DELAYED_RESOURCE_CNT];
#endif

#ifdef PDIR_SEARCH_TIMING
#define SBA_SEARCH_SAMPLE	0x100
	unsigned long avg_search[SBA_SEARCH_SAMPLE];
	unsigned long avg_idx;		/* current index into avg_search */
#endif

	/* Stuff we don't need in performance path */
	struct ioc	*next;		/* list of IOC's in system */
	acpi_handle	handle;		/* for multiple IOC's */
	const char	*name;
	unsigned int	func_id;
	unsigned int	rev;		/* HW revision of chip */
	u32		iov_size;
	unsigned int	pdir_size;	/* in bytes, determined by IOV Space size */
	struct pci_dev	*sac_only_dev;
};

static struct ioc *ioc_list;
static int reserve_sba_gart = 1;

static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);

#define sba_sg_address(sg)	sg_virt((sg))

#ifdef FULL_VALID_PDIR
static u64 prefetch_spill_page;
#endif

#ifdef CONFIG_PCI
# define GET_IOC(dev)	(((dev)->bus == &pci_bus_type)						\
			 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
#else
# define GET_IOC(dev)	NULL
#endif

/*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
** (or rather not merge) DMAs into manageable chunks.
** On parisc, this is more of the software/tuning constraint
** rather than the HW.  I/O MMU allocation algorithms can be
** faster with smaller sizes (to some degree).
*/
#define DMA_CHUNK_SIZE	(BITS_PER_LONG*iovp_size)

#define ROUNDUP(x,y)	((x + ((y)-1)) & ~((y)-1))
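
/*
 * Worked example (editorial note, values illustrative): with y a power
 * of two, ROUNDUP(x,y) rounds x up to the next multiple of y, e.g.
 * ROUNDUP(0x1234, 0x1000) == 0x2000 while ROUNDUP(0x2000, 0x1000) stays
 * 0x2000.  Callers below only pass power-of-two sizes (iovp_size).
 */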

/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
**  (ie follow writes which must reach HW with a read)
**
*/
#define READ_REG(addr)       __raw_readq(addr)
#define WRITE_REG(val, addr) __raw_writeq(val, addr)

#ifdef DEBUG_SBA_INIT

/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU PDIR.
 */
static void
sba_dump_tlb(char *hpa)
{
	DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
	DBG_INIT("IOC_IBASE    : %016lx\n", READ_REG(hpa+IOC_IBASE));
	DBG_INIT("IOC_IMASK    : %016lx\n", READ_REG(hpa+IOC_IMASK));
	DBG_INIT("IOC_TCNFG    : %016lx\n", READ_REG(hpa+IOC_TCNFG));
	DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
	DBG_INIT("\n");
}
#endif


#ifdef ASSERT_PDIR_SANITY

/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU PDIR in human readable form.
 */
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
	/* start printing from lowest pde in rval */
	u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)];
	unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)];
	uint rcnt;

	printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
		msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);

	rcnt = 0;
	while (rcnt < BITS_PER_LONG) {
		printk(KERN_DEBUG "%s %2d %p %016Lx\n",
		       (rcnt == (pide & (BITS_PER_LONG - 1)))
				? " -->" : "    ",
		       rcnt, ptr, (unsigned long long) *ptr );
		rcnt++;
		ptr++;
	}
	printk(KERN_DEBUG "%s", msg);
}


/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify the resource map and pdir state is consistent
 */
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
	u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
	u64 *rptr = (u64 *) ioc->res_map;	/* resource map ptr */
	u64 *pptr = ioc->pdir_base;		/* pdir ptr */
	uint pide = 0;

	while (rptr < rptr_end) {
		u64 rval;
		int rcnt; /* number of bits we might check */

		rval = *rptr;
		rcnt = 64;

		while (rcnt) {
			/* Get last byte and highest bit from that */
			u32 pde = ((u32)((*pptr >> (63)) & 0x1));
			if ((rval & 0x1) ^ pde)
			{
				/*
				** BUMMER!  -- res_map != pdir --
				** Dump rval and matching pdir entries
				*/
				sba_dump_pdir_entry(ioc, msg, pide);
				return(1);
			}
			rcnt--;
			rval >>= 1;	/* try the next bit */
			pptr++;
			pide++;
		}
		rptr++;	/* look at next word of res_map */
	}
	/* It'd be nice if we always got here :^) */
	return 0;
}


/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * print the SG list so we can verify it's correct by hand.
 */
static void
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	while (nents-- > 0) {
		printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
		       startsg->dma_address, startsg->dma_length,
		       sba_sg_address(startsg));
		startsg = sg_next(startsg);
	}
}

static void
sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	struct scatterlist *the_sg = startsg;
	int the_nents = nents;

	while (the_nents-- > 0) {
		if (sba_sg_address(the_sg) == 0x0UL)
			sba_dump_sg(NULL, startsg, nents);
		the_sg = sg_next(the_sg);
	}
}

#endif /* ASSERT_PDIR_SANITY */




/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1	/* could increase this to 4 or 8 if needed */

/* Convert from IOVP to IOVA and vice versa. */
#define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase))

#define PDIR_ENTRY_SIZE	sizeof(u64)

#define PDIR_INDEX(iovp)   ((iovp)>>iovp_shift)

#define RESMAP_MASK(n)    ~(~0UL << (n))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)
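
/*
 * Worked example (editorial note, values illustrative): assume
 * ioc->ibase = 0xc0000000 and iovp_shift = 12.  A mapping at pdir
 * index 5 with a 0x234-byte offset into the page gives
 *   iovp = 5 << 12                  = 0x5000
 *   SBA_IOVA(ioc, 0x5000, 0x234)    = 0xc0005234
 * and on unmap SBA_IOVP(ioc, 0xc0005234) recovers 0x5234, whose
 * PDIR_INDEX() is again 5.
 */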

/**
 * For most cases the normal get_order is sufficient, however it limits us
 * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity.
 * It only incurs about 1 clock cycle to use this one with the static variable
 * and makes the code more intuitive.
 */
static SBA_INLINE int
get_iovp_order (unsigned long size)
{
	long double d = size - 1;
	long order;

	order = ia64_getf_exp(d);
	order = order - iovp_shift - 0xffff + 1;
	if (order < 0)
		order = 0;
	return order;
}
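
/*
 * Editorial sketch (not part of this driver): ia64_getf_exp() on
 * (size - 1) extracts the biased floating-point exponent, so the
 * arithmetic above is just fls64(size - 1) - iovp_shift, clamped at 0.
 * With iovp_shift == 12: get_iovp_order(4096) == 0 (one 4k page) and
 * get_iovp_order(20480) == 3 (5 pages round up to 8).
 */
#if 0	/* illustrative, architecture-independent equivalent */
static int get_iovp_order_generic(unsigned long size)
{
	/* cast before subtracting so a small size can't wrap unsigned */
	long order = (long)fls64(size - 1) - (long)iovp_shift;

	return order > 0 ? order : 0;
}
#endif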

static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
				 unsigned int bitshiftcnt)
{
	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
		+ bitshiftcnt;
}

/**
 * sba_search_bitmap - find free space in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @bits_wanted: number of entries we need.
 * @use_hint: use res_hint to indicate where to start looking
 *
 * Find consecutive free bits in resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Cool perf optimization: search for log2(size) bits at a time.
 */
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, struct device *dev,
		  unsigned long bits_wanted, int use_hint)
{
	unsigned long *res_ptr;
	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
	unsigned long flags, pide = ~0UL, tpide;
	unsigned long boundary_size;
	unsigned long shift;
	int ret;

	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);

	boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
	boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;

	BUG_ON(ioc->ibase & ~iovp_mask);
	shift = ioc->ibase >> iovp_shift;

	spin_lock_irqsave(&ioc->res_lock, flags);

	/* Allow caller to force a search through the entire resource space */
	if (likely(use_hint)) {
		res_ptr = ioc->res_hint;
	} else {
		res_ptr = (ulong *)ioc->res_map;
		ioc->res_bitshift = 0;
	}
	ASSERT(res_ptr < res_end);

	/*
	 * N.B.  REO/Grande defect AR2305 can cause TLB fetch timeouts
	 * if a TLB entry is purged while in use.  sba_mark_invalid()
	 * purges IOTLB entries in power-of-two sizes, so we also
	 * allocate IOVA space in power-of-two sizes.
	 */
	bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift);

	if (likely(bits_wanted == 1)) {
		unsigned int bitshiftcnt;
		for(; res_ptr < res_end ; res_ptr++) {
			if (likely(*res_ptr != ~0UL)) {
				bitshiftcnt = ffz(*res_ptr);
				*res_ptr |= (1UL << bitshiftcnt);
				pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
				ioc->res_bitshift = bitshiftcnt + bits_wanted;
				goto found_it;
			}
		}
		goto not_found;
	}

	if (likely(bits_wanted <= BITS_PER_LONG/2)) {
		/*
		** Search the resource bit map on well-aligned values.
		** "o" is the alignment.
		** We need the alignment to invalidate I/O TLB using
		** SBA HW features in the unmap path.
		*/
		unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift);
		uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
		unsigned long mask, base_mask;

		base_mask = RESMAP_MASK(bits_wanted);
		mask = base_mask << bitshiftcnt;

		DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
		for(; res_ptr < res_end ; res_ptr++)
		{
			DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
			ASSERT(0 != mask);
			for (; mask ; mask <<= o, bitshiftcnt += o) {
				tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
				ret = iommu_is_span_boundary(tpide, bits_wanted,
							     shift,
							     boundary_size);
				if ((0 == ((*res_ptr) & mask)) && !ret) {
					*res_ptr |= mask;	/* mark resources busy! */
					pide = tpide;
					ioc->res_bitshift = bitshiftcnt + bits_wanted;
					goto found_it;
				}
			}

			bitshiftcnt = 0;
			mask = base_mask;
		}

	} else {
		int qwords, bits, i;
		unsigned long *end;

		qwords = bits_wanted >> 6; /* /64 */
		bits = bits_wanted - (qwords * BITS_PER_LONG);

		end = res_end - qwords;

		for (; res_ptr < end; res_ptr++) {
			tpide = ptr_to_pide(ioc, res_ptr, 0);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift, boundary_size);
			if (ret)
				goto next_ptr;
			for (i = 0 ; i < qwords ; i++) {
				if (res_ptr[i] != 0)
					goto next_ptr;
			}
			if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits))
				continue;

			/* Found it, mark it */
			for (i = 0 ; i < qwords ; i++)
				res_ptr[i] = ~0UL;
			res_ptr[i] |= RESMAP_MASK(bits);

			pide = tpide;
			res_ptr += qwords;
			ioc->res_bitshift = bits;
			goto found_it;
next_ptr:
			;
		}
	}

not_found:
	prefetch(ioc->res_map);
	ioc->res_hint = (unsigned long *) ioc->res_map;
	ioc->res_bitshift = 0;
	spin_unlock_irqrestore(&ioc->res_lock, flags);
	return (pide);

found_it:
	ioc->res_hint = res_ptr;
	spin_unlock_irqrestore(&ioc->res_lock, flags);
	return (pide);
}

/**
 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked bits in the resource bit map
 * and then mark them.
 */
static int
sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> iovp_shift;
#ifdef PDIR_SEARCH_TIMING
	unsigned long itc_start;
#endif
	unsigned long pide;

	ASSERT(pages_needed);
	ASSERT(0 == (size & ~iovp_mask));

#ifdef PDIR_SEARCH_TIMING
	itc_start = ia64_get_itc();
#endif
	/*
	** "seek and ye shall find"...praying never hurts either...
	*/
	pide = sba_search_bitmap(ioc, dev, pages_needed, 1);
	if (unlikely(pide >= (ioc->res_size << 3))) {
		pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
		if (unlikely(pide >= (ioc->res_size << 3))) {
#if DELAYED_RESOURCE_CNT > 0
			unsigned long flags;

			/*
			** With delayed resource freeing, we can give this one more shot.  We're
			** getting close to being in trouble here, so do what we can to make this
			** one count.
			*/
			spin_lock_irqsave(&ioc->saved_lock, flags);
			if (ioc->saved_cnt > 0) {
				struct sba_dma_pair *d;
				int cnt = ioc->saved_cnt;

				d = &(ioc->saved[ioc->saved_cnt - 1]);

				spin_lock(&ioc->res_lock);
				while (cnt--) {
					sba_mark_invalid(ioc, d->iova, d->size);
					sba_free_range(ioc, d->iova, d->size);
					d--;
				}
				ioc->saved_cnt = 0;
				READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
				spin_unlock(&ioc->res_lock);
			}
			spin_unlock_irqrestore(&ioc->saved_lock, flags);

			pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
			if (unlikely(pide >= (ioc->res_size << 3))) {
				printk(KERN_WARNING "%s: I/O MMU @ %p is "
				       "out of mapping resources, %u %u %lx\n",
				       __func__, ioc->ioc_hpa, ioc->res_size,
				       pages_needed, dma_get_seg_boundary(dev));
				return -1;
			}
#else
			printk(KERN_WARNING "%s: I/O MMU @ %p is "
			       "out of mapping resources, %u %u %lx\n",
			       __func__, ioc->ioc_hpa, ioc->res_size,
			       pages_needed, dma_get_seg_boundary(dev));
			return -1;
#endif
		}
	}

#ifdef PDIR_SEARCH_TIMING
	ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
#endif

	prefetchw(&(ioc->pdir_base[pide]));

#ifdef ASSERT_PDIR_SANITY
	/* verify the first enable bit is clear */
	if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) {
		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
	}
#endif

	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
		__func__, size, pages_needed, pide,
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift );

	return (pide);
}

/**
 * sba_free_range - unmark bits in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes to create a mapping for
 *
 * clear bits in the ioc's resource map
 */
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);
	unsigned int pide = PDIR_INDEX(iovp);
	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
	int bits_not_wanted = size >> iovp_shift;
	unsigned long m;

	/* Round up to power-of-two size: see AR2305 note above */
	bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift);
	for (; bits_not_wanted > 0 ; res_ptr++) {

		if (unlikely(bits_not_wanted > BITS_PER_LONG)) {

			/* these mappings start 64bit aligned */
			*res_ptr = 0UL;
			bits_not_wanted -= BITS_PER_LONG;
			pide += BITS_PER_LONG;

		} else {

			/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
			m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
			bits_not_wanted = 0;

			DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size,
			        bits_not_wanted, m, pide, res_ptr, *res_ptr);

			ASSERT(m != 0);
			ASSERT(bits_not_wanted);
			ASSERT((*res_ptr & m) == m); /* verify same bits are set */
			*res_ptr &= ~m;
		}
	}
}

/**************************************************************
*
* "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

/**
 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @pdir_ptr:  pointer to IO PDIR entry
 * @vba: Virtual CPU address of buffer to map
 *
 * SBA Mapping Routine
 *
 * Given a virtual address (vba, arg1) sba_io_pdir_entry()
 * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
 * Each IO Pdir entry consists of 8 bytes as shown below
 * (LSB == bit 0):
 *
 *  63                    40                                 11    7        0
 * +-+---------------------+----------------------------------+----+--------+
 * |V|        U            |            PPN[39:12]            | U  |   FF   |
 * +-+---------------------+----------------------------------+----+--------+
 *
 *  V  == Valid Bit
 *  U  == Unused
 * PPN == Physical Page Number
 *
 * The physical address fields are filled with the results of virt_to_phys()
 * on the vba.
 */

#if 1
#define sba_io_pdir_entry(pdir_ptr, vba)	*pdir_ptr = ((vba & ~0xE000000000000FFFULL) \
						      | 0x8000000000000000ULL)
#else
void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
{
	*pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
}
#endif
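
/*
 * Worked example (editorial note, address illustrative): for a
 * region-7 identity-mapped kernel address vba = 0xE000000012345678,
 * masking with ~0xE000000000000FFFULL drops the region bits and the
 * page offset, leaving PPN bits 0x0000000012345000; OR-ing in bit 63
 * gives the pdir entry 0x8000000012345000 (Valid + PPN[39:12]).
 */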

#ifdef ENABLE_MARK_CLEAN
/**
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
static void
mark_clean (void *addr, size_t size)
{
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page((void *)pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
}
#endif

/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova:  IO Virtual Address mapped earlier
 * @byte_cnt:  number of bytes this mapping covers.
 *
 * Mark the IO PDIR entry(ies) as Invalid and invalidate the
 * corresponding IO TLB entry. The PCOM (Purge Command Register)
 * is used to purge stale entries in the IO TLB when unmapping entries.
 *
 * The PCOM register supports purging of multiple pages, with a minimum
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc,iova);

	int off = PDIR_INDEX(iovp);

	/* Must be non-zero and rounded up */
	ASSERT(byte_cnt > 0);
	ASSERT(0 == (byte_cnt & ~iovp_mask));

#ifdef ASSERT_PDIR_SANITY
	/* Assert first pdir entry is set */
	if (!(ioc->pdir_base[off] >> 60)) {
		sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt <= iovp_size)
	{
		ASSERT(off < ioc->pdir_size);

		iovp |= iovp_shift;	/* set "size" field for PCOM */

#ifndef FULL_VALID_PDIR
		/*
		** clear I/O PDIR entry "valid" bit
		** Do NOT clear the rest - save it for debugging.
		** We should only clear bits that have previously
		** been enabled.
		*/
		ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
		/*
		** If we want to maintain the PDIR as valid, put in
		** the spill page so devices prefetching won't
		** cause a hard fail.
		*/
		ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
	} else {
		u32 t = get_iovp_order(byte_cnt) + iovp_shift;

		iovp |= t;
		ASSERT(t <= 31);	/* 2GB! Max value of "size" field */

		do {
			/* verify this pdir entry is enabled */
			ASSERT(ioc->pdir_base[off] >> 63);
#ifndef FULL_VALID_PDIR
			/* clear I/O Pdir entry "valid" bit first */
			ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
			ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
			off++;
			byte_cnt -= iovp_size;
		} while (byte_cnt > 0);
	}

	WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
}
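
/*
 * Worked example (editorial note, values illustrative): with
 * iovp_shift == 12, purging one 4k page at iovp 0x5000 writes
 * (0x5000 | 12 | ibase) to IOC_PCOM, while a 32KB range at iovp 0x8000
 * encodes size as get_iovp_order(32768) + 12 == 15, i.e.
 * (0x8000 | 15 | ibase).  The low bits can carry log2(bytes) because
 * the address is aligned to the (power-of-two) purge size.
 */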

/**
 * sba_map_page - map one page/offset and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @page: page holding the driver buffer to map.
 * @poff: offset of the buffer within @page.
 * @size: number of bytes to map in driver buffer.
 * @dir: R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt
 */
static dma_addr_t sba_map_page(struct device *dev, struct page *page,
			       unsigned long poff, size_t size,
			       enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
	struct ioc *ioc;
	void *addr = page_address(page) + poff;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	int pide;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS
	unsigned long pci_addr = virt_to_phys(addr);
#endif

#ifdef ALLOW_IOV_BYPASS
	ASSERT(to_pci_dev(dev)->dma_mask);
	/*
	** Check if the PCI device can DMA to ptr... if so, just return ptr
	*/
	if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
		/*
		** Device's dma_mask covers the buffer...
		** just return the PCI address of ptr
		*/
		DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "
			   "0x%lx/0x%lx\n",
			   to_pci_dev(dev)->dma_mask, pci_addr);
		return pci_addr;
	}
#endif

	ioc = GET_IOC(dev);
	ASSERT(ioc);

	prefetch(ioc->res_hint);

	ASSERT(size > 0);
	ASSERT(size <= DMA_CHUNK_SIZE);

	/* save offset bits */
	offset = ((dma_addr_t) (long) addr) & ~iovp_mask;

	/* round up to nearest iovp_size */
	size = (size + offset + ~iovp_mask) & iovp_mask;

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
		panic("Sanity check failed");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	pide = sba_alloc_range(ioc, dev, size);
	if (pide < 0)
		return 0;

	iovp = (dma_addr_t) pide << iovp_shift;

	DBG_RUN("%s() 0x%p -> 0x%lx\n", __func__, addr, (long) iovp | offset);

	pdir_start = &(ioc->pdir_base[pide]);

	while (size > 0) {
		ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
		sba_io_pdir_entry(pdir_start, (unsigned long) addr);

		DBG_RUN("     pdir 0x%p %lx\n", pdir_start, *pdir_start);

		addr += iovp_size;
		size -= iovp_size;
		pdir_start++;
	}
	/* force pdir update */
	wmb();

	/* form complete address */
#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
	return SBA_IOVA(ioc, iovp, offset);
}

static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
				       size_t size, enum dma_data_direction dir,
				       struct dma_attrs *attrs)
{
	return sba_map_page(dev, virt_to_page(addr),
			    (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
}
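
/*
 * Editorial sketch (not part of this file): drivers reach these entry
 * points through the generic DMA API rather than calling them
 * directly.  A typical caller, assuming a PCI driver with "pdev",
 * "buf" and "len" in scope, would do:
 */
#if 0	/* illustrative only */
	dma_addr_t handle;

	handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, handle))
		return -ENOMEM;
	/* ... program the device with "handle", run the DMA ... */
	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
#endif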

#ifdef ENABLE_MARK_CLEAN
static SBA_INLINE void
sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	u32	iovp = (u32) SBA_IOVP(ioc,iova);
	int	off = PDIR_INDEX(iovp);
	void	*addr;

	if (size <= iovp_size) {
		addr = phys_to_virt(ioc->pdir_base[off] &
				    ~0xE000000000000FFFULL);
		mark_clean(addr, size);
	} else {
		do {
			addr = phys_to_virt(ioc->pdir_base[off] &
					    ~0xE000000000000FFFULL);
			mark_clean(addr, min(size, iovp_size));
			off++;
			size -= iovp_size;
		} while (size > 0);
	}
}
#endif

/**
 * sba_unmap_page - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova:  IOVA of driver buffer previously mapped.
 * @size:  number of bytes mapped in driver buffer.
 * @dir:  R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt
 */
static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
			   enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
	struct sba_dma_pair *d;
#endif
	unsigned long flags;
	dma_addr_t offset;

	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS
	if (likely((iova & ioc->imask) != ioc->ibase)) {
		/*
		** Address does not fall w/in IOVA, must be bypassing
		*/
		DBG_BYPASS("sba_unmap_single_attrs() bypass addr: 0x%lx\n",
			   iova);

#ifdef ENABLE_MARK_CLEAN
		if (dir == DMA_FROM_DEVICE) {
			mark_clean(phys_to_virt(iova), size);
		}
#endif
		return;
	}
#endif
	offset = iova & ~iovp_mask;

	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);

	iova ^= offset;	/* clear offset bits */
	size += offset;
	size = ROUNDUP(size, iovp_size);

#ifdef ENABLE_MARK_CLEAN
	if (dir == DMA_FROM_DEVICE)
		sba_mark_clean(ioc, iova, size);
#endif

#if DELAYED_RESOURCE_CNT > 0
	spin_lock_irqsave(&ioc->saved_lock, flags);
	d = &(ioc->saved[ioc->saved_cnt]);
	d->iova = iova;
	d->size = size;
	if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) {
		int cnt = ioc->saved_cnt;
		spin_lock(&ioc->res_lock);
		while (cnt--) {
			sba_mark_invalid(ioc, d->iova, d->size);
			sba_free_range(ioc, d->iova, d->size);
			d--;
		}
		ioc->saved_cnt = 0;
		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
		spin_unlock(&ioc->res_lock);
	}
	spin_unlock_irqrestore(&ioc->saved_lock, flags);
#else /* DELAYED_RESOURCE_CNT == 0 */
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_mark_invalid(ioc, iova, size);
	sba_free_range(ioc, iova, size);
	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif /* DELAYED_RESOURCE_CNT == 0 */
}

void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
			    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	sba_unmap_page(dev, iova, size, dir, attrs);
}

/**
 * sba_alloc_coherent - allocate/map shared mem for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @dma_handle:  IOVA of new buffer.
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt
 */
static void *
sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
{
	struct ioc *ioc;
	void *addr;

	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef CONFIG_NUMA
	{
		struct page *page;
		page = alloc_pages_exact_node(ioc->node == MAX_NUMNODES ?
					      numa_node_id() : ioc->node, flags,
					      get_order(size));

		if (unlikely(!page))
			return NULL;

		addr = page_address(page);
	}
#else
	addr = (void *) __get_free_pages(flags, get_order(size));
#endif
	if (unlikely(!addr))
		return NULL;

	memset(addr, 0, size);
	*dma_handle = virt_to_phys(addr);

#ifdef ALLOW_IOV_BYPASS
	ASSERT(dev->coherent_dma_mask);
	/*
	** Check if the PCI device can DMA to ptr... if so, just return ptr
	*/
	if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) {
		DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
			   dev->coherent_dma_mask, *dma_handle);

		return addr;
	}
#endif

	/*
	 * If device can't bypass or bypass is disabled, pass the 32bit fake
	 * device to map single to get an iova mapping.
	 */
	*dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
					   size, 0, NULL);

	return addr;
}

/**
 * sba_free_coherent - free/unmap shared mem for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @vaddr:  virtual address IOVA of "consistent" buffer.
 * @dma_handle:  IO virtual address of "consistent" buffer.
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt
 */
static void sba_free_coherent (struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle)
{
	sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
	free_pages((unsigned long) vaddr, get_order(size));
}


/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x1UL
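
/*
 * Editorial note: pdir index 0 is a legal allocation, so the low bit
 * of dma_address is borrowed as a "this entry starts a DMA stream"
 * marker while the list is preprocessed.  E.g. a stream starting at
 * pdir index 5 with a 0x234 in-page offset (iovp_shift == 12) is
 * temporarily stored as (5 << 12) | 0x234 | PIDE_FLAG == 0x5235 and
 * decoded again in sba_fill_pdir().
 */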

#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif


/**
 * sba_fill_pdir - write allocated SG entries into IO PDIR
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg:  list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * Take preprocessed SG list and write corresponding entries
 * in the IO PDIR.
 */
static SBA_INLINE int
sba_fill_pdir(
	struct ioc *ioc,
	struct scatterlist *startsg,
	int nents)
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	int n_mappings = 0;
	u64 *pdirp = NULL;
	unsigned long dma_offset = 0;

	while (nents-- > 0) {
		int cnt = startsg->dma_length;
		startsg->dma_length = 0;

#ifdef DEBUG_LARGE_SG_ENTRIES
		if (dump_run_sg)
			printk(" %2d : %08lx/%05x %p\n",
				nents, startsg->dma_address, cnt,
				sba_sg_address(startsg));
#else
		DBG_RUN_SG(" %d : %08lx/%05x %p\n",
				nents, startsg->dma_address, cnt,
				sba_sg_address(startsg));
#endif
		/*
		** Look for the start of a new DMA stream
		*/
		if (startsg->dma_address & PIDE_FLAG) {
			u32 pide = startsg->dma_address & ~PIDE_FLAG;
			dma_offset = (unsigned long) pide & ~iovp_mask;
			startsg->dma_address = 0;
			if (n_mappings)
				dma_sg = sg_next(dma_sg);
			dma_sg->dma_address = pide | ioc->ibase;
			pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
			n_mappings++;
		}

		/*
		** Look for a VCONTIG chunk
		*/
		if (cnt) {
			unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
			ASSERT(pdirp);

			/* Since multiple Vcontig blocks could make up
			** one DMA stream, *add* cnt to dma_len.
			*/
			dma_sg->dma_length += cnt;
			cnt += dma_offset;
			dma_offset=0;	/* only want offset on first chunk */
			cnt = ROUNDUP(cnt, iovp_size);
			do {
				sba_io_pdir_entry(pdirp, vaddr);
				vaddr += iovp_size;
				cnt -= iovp_size;
				pdirp++;
			} while (cnt > 0);
		}
		startsg = sg_next(startsg);
	}
	/* force pdir update */
	wmb();

#ifdef DEBUG_LARGE_SG_ENTRIES
	dump_run_sg = 0;
#endif
	return(n_mappings);
}


/*
** Two address ranges are DMA contiguous *iff* "end of prev" and
** "start of next" are both on an IOV page boundary.
**
** (shift left is a quick trick to mask off upper bits)
*/
#define DMA_CONTIG(__X, __Y) \
	(((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)
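
/*
 * Worked example (editorial note, values illustrative): with
 * BITS_PER_LONG == 64 and iovp_shift == 12, the macro shifts the OR of
 * both addresses left by 52 bits, so only the low 12 bits survive.
 * DMA_CONTIG(0x10002000, 0x10002000) is true (both page-aligned);
 * DMA_CONTIG(0x10001800, 0x10002000) is false because the first range
 * ends mid-page.
 */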

/**
 * sba_coalesce_chunks - preprocess the SG list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg:  list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * First pass is to walk the SG list and determine where the breaks are
 * in the DMA stream. Allocates PDIR entries but does not fill them.
 * Returns the number of DMA chunks.
 *
 * Doing the fill separate from the coalescing/allocation keeps the
 * code simpler. Future enhancement could make one pass through
 * the sglist do both.
 */
static SBA_INLINE int
sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
	struct scatterlist *startsg,
	int nents)
{
	struct scatterlist *vcontig_sg;    /* VCONTIG chunk head */
	unsigned long vcontig_len;         /* len of VCONTIG chunk */
	unsigned long vcontig_end;
	struct scatterlist *dma_sg;        /* next DMA stream head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	int n_mappings = 0;
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
	int idx;

	while (nents > 0) {
		unsigned long vaddr = (unsigned long) sba_sg_address(startsg);

		/*
		** Prepare for first/next DMA stream
		*/
		dma_sg = vcontig_sg = startsg;
		dma_len = vcontig_len = vcontig_end = startsg->length;
		vcontig_end += vaddr;
		dma_offset = vaddr & ~iovp_mask;

		/* PARANOID: clear entries */
		startsg->dma_address = startsg->dma_length = 0;

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while (--nents > 0) {
			unsigned long vaddr;	/* tmp */

			startsg = sg_next(startsg);

			/* PARANOID */
			startsg->dma_address = startsg->dma_length = 0;

			/* catch brokenness in SCSI layer */
			ASSERT(startsg->length <= DMA_CHUNK_SIZE);

			/*
			** First make sure current dma stream won't
			** exceed DMA_CHUNK_SIZE if we coalesce the
			** next entry.
			*/
			if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask)
			    > DMA_CHUNK_SIZE)
				break;

			if (dma_len + startsg->length > max_seg_size)
				break;

			/*
			** Then look for virtually contiguous blocks.
			**
			** append the next transaction?
			*/
			vaddr = (unsigned long) sba_sg_address(startsg);
			if (vcontig_end == vaddr)
			{
				vcontig_len += startsg->length;
				vcontig_end += startsg->length;
				dma_len     += startsg->length;
				continue;
			}

#ifdef DEBUG_LARGE_SG_ENTRIES
			dump_run_sg = (vcontig_len > iovp_size);
#endif

			/*
			** Not virtually contiguous.
			** Terminate prev chunk.
			** Start a new chunk.
			**
			** Once we start a new VCONTIG chunk, dma_offset
			** can't change. And we need the offset from the first
			** chunk - not the last one. Ergo, successive chunks
			** must start on page boundaries and dovetail
			** with their predecessors.
			*/
			vcontig_sg->dma_length = vcontig_len;

			vcontig_sg = startsg;
			vcontig_len = startsg->length;

			/*
			** 3) do the entries end/start on page boundaries?
			**    Don't update vcontig_end until we've checked.
			*/
			if (DMA_CONTIG(vcontig_end, vaddr))
			{
				vcontig_end = vcontig_len + vaddr;
				dma_len += vcontig_len;
				continue;
			} else {
				break;
			}
		}

		/*
		** End of DMA Stream
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		vcontig_sg->dma_length = vcontig_len;
		dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
		ASSERT(dma_len <= DMA_CHUNK_SIZE);
		idx = sba_alloc_range(ioc, dev, dma_len);
		if (idx < 0) {
			dma_sg->dma_length = 0;
			return -1;
		}
		dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift)
						   | dma_offset);
		n_mappings++;
	}

	return n_mappings;
}
  1243. static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
  1244. int nents, enum dma_data_direction dir,
  1245. struct dma_attrs *attrs);
  1246. /**
  1247. * sba_map_sg - map Scatter/Gather list
  1248. * @dev: instance of PCI owned by the driver that's asking.
  1249. * @sglist: array of buffer/length pairs
  1250. * @nents: number of entries in list
  1251. * @dir: R/W or both.
  1252. * @attrs: optional dma attributes
  1253. *
  1254. * See Documentation/PCI/PCI-DMA-mapping.txt
  1255. */
static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
			    int nents, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS_SG
	struct scatterlist *sg;
#endif

	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS_SG
	ASSERT(to_pci_dev(dev)->dma_mask);
	if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
		for_each_sg(sglist, sg, nents, filled) {
			sg->dma_length = sg->length;
			sg->dma_address = virt_to_phys(sba_sg_address(sg));
		}
		return filled;
	}
#endif
	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sglist->dma_length = sglist->length;
		sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs);
		return 1;
	}

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc, "Check before sba_map_sg_attrs()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check before sba_map_sg_attrs()");
	}
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	prefetch(ioc->res_hint);

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
	if (coalesced < 0) {
		sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
		return 0;
	}

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	** o dma_address will contain the pdir index
	** o dma_len will contain the number of bytes to map
	** o address contains the virtual address.
	*/
	filled = sba_fill_pdir(ioc, sglist, nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc, "Check after sba_map_sg_attrs()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check after sba_map_sg_attrs()\n");
	}
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	ASSERT(coalesced == filled);
	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

	return filled;
}
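
/*
 * Illustrative sketch, not part of the driver: how a client driver would
 * reach sba_map_sg_attrs()/sba_unmap_sg_attrs() through the generic DMA
 * API once sba_dma_ops is installed.  The function and buffer names are
 * made up for the example; dma_map_sg()/dma_unmap_sg() and the
 * scatterlist helpers are the real interfaces.
 */
#if 0	/* example only -- never compiled */
static int example_map_two_buffers(struct device *dev,
				   void *buf0, void *buf1, size_t len)
{
	struct scatterlist sg[2], *s;
	int i, nents;

	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], buf0, len);
	sg_set_buf(&sg[1], buf1, len);

	/* dispatches to sba_map_sg_attrs() via dma_ops */
	nents = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
	if (nents == 0)
		return -ENOMEM;

	for_each_sg(sg, s, nents, i) {
		/* program sg_dma_address(s)/sg_dma_len(s) into the device */
	}

	/* dispatches to sba_unmap_sg_attrs() via dma_ops */
	dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
	return 0;
}
#endif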
/**
 * sba_unmap_sg_attrs - unmap Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist: array of buffer/length pairs
 * @nents: number of entries in list
 * @dir: R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/PCI/PCI-DMA-mapping.txt
 */
static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
			       int nents, enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
#ifdef ASSERT_PDIR_SANITY
	struct ioc *ioc;
	unsigned long flags;
#endif

	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
		   __func__, nents, sba_sg_address(sglist), sglist->length);

#ifdef ASSERT_PDIR_SANITY
	ioc = GET_IOC(dev);
	ASSERT(ioc);

	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc, "Check before sba_unmap_sg_attrs()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	while (nents && sglist->dma_length) {
		sba_unmap_single_attrs(dev, sglist->dma_address,
				       sglist->dma_length, dir, attrs);
		sglist = sg_next(sglist);
		nents--;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc, "Check after sba_unmap_sg_attrs()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
}
/**************************************************************
*
*   Initialization and claim
*
***************************************************************/
static void __init
ioc_iova_init(struct ioc *ioc)
{
	int tcnfg;
	int agp_found = 0;
	struct pci_dev *device = NULL;
#ifdef FULL_VALID_PDIR
	unsigned long index;
#endif

	/*
	** Firmware programs the base and size of a "safe IOVA space"
	** (one that doesn't overlap memory or LMMIO space) in the
	** IBASE and IMASK registers.
	*/
	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
	ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL;

	ioc->iov_size = ~ioc->imask + 1;

	DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
		__func__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
		ioc->iov_size >> 20);

	switch (iovp_size) {
		case  4*1024: tcnfg = 0; break;
		case  8*1024: tcnfg = 1; break;
		case 16*1024: tcnfg = 2; break;
		case 64*1024: tcnfg = 3; break;
		default:
			panic(PFX "Unsupported IOTLB page size %ldK",
				iovp_size >> 10);
			break;
	}
	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

	ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE;
	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
						   get_order(ioc->pdir_size));
	if (!ioc->pdir_base)
		panic(PFX "Couldn't allocate I/O Page Table\n");

	memset(ioc->pdir_base, 0, ioc->pdir_size);

	DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__,
		iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);

	ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/*
	** If an AGP device is present, only use half of the IOV space
	** for PCI DMA.  Unfortunately we can't know ahead of time
	** whether GART support will actually be used, for now we
	** can just key on an AGP device found in the system.
	** We program the next pdir index after we stop w/ a key for
	** the GART code to handshake on.
	*/
	for_each_pci_dev(device)
		agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);

	if (agp_found && reserve_sba_gart) {
		printk(KERN_INFO PFX "reserving %dMb of IOVA space at 0x%lx for agpgart\n",
		       ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2);
		ioc->pdir_size /= 2;
		((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
	}
#ifdef FULL_VALID_PDIR
	/*
	** Check to see if the spill page has been allocated, we don't need more than
	** one across multiple SBAs.
	*/
	if (!prefetch_spill_page) {
		char *spill_poison = "SBAIOMMU POISON";
		int poison_size = 16;
		void *poison_addr, *addr;

		addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size));
		if (!addr)
			panic(PFX "Couldn't allocate PDIR spill page\n");

		poison_addr = addr;
		for ( ; (u64) poison_addr < (u64) addr + iovp_size; poison_addr += poison_size)
			memcpy(poison_addr, spill_poison, poison_size);

		prefetch_spill_page = virt_to_phys(addr);

		DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __func__, prefetch_spill_page);
	}
	/*
	** Set all the PDIR entries valid w/ the spill page as the target
	*/
	for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++)
		((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
#endif

	/* Clear I/O TLB of any possible entries */
	WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM);
	READ_REG(ioc->ioc_hpa + IOC_PCOM);

	/* Enable IOVA translation */
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
	READ_REG(ioc->ioc_hpa + IOC_IBASE);
}
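
/*
 * Illustrative sketch, not part of the driver: deriving the IOVA window
 * size from IMASK as ioc_iova_init() does.  The register value below is
 * invented for the example, not a real firmware setting.
 */
#if 0	/* example only -- never compiled */
static void example_iov_size(void)
{
	unsigned long imask = 0xFFFFFFFFF0000000UL;	/* low 28 bits clear */
	unsigned long iov_size = ~imask + 1;		/* 0x10000000 == 256MB */
}
#endif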
static void __init
ioc_resource_init(struct ioc *ioc)
{
	spin_lock_init(&ioc->res_lock);
#if DELAYED_RESOURCE_CNT > 0
	spin_lock_init(&ioc->saved_lock);
#endif

	/* resource map size dictated by pdir_size */
	ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
	ioc->res_size >>= 3;  /* convert bit count to byte count */
	DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);

	ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
						 get_order(ioc->res_size));
	if (!ioc->res_map)
		panic(PFX "Couldn't allocate resource map\n");

	memset(ioc->res_map, 0, ioc->res_size);
	/* next available IOVP - circular search */
	ioc->res_hint = (unsigned long *) ioc->res_map;

#ifdef ASSERT_PDIR_SANITY
	/* Mark first bit busy - ie no IOVA 0 */
	ioc->res_map[0] = 0x1;
	ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
#endif
#ifdef FULL_VALID_PDIR
	/* Mark the last resource used so we don't prefetch beyond IOVA space */
	ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */
	ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF
								  | prefetch_spill_page);
#endif

	DBG_INIT("%s() res_map %x %p\n", __func__,
		 ioc->res_size, (void *) ioc->res_map);
}
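
/*
 * Illustrative sketch, not part of the driver: the resource-map sizing
 * above, worked through for a hypothetical 256MB IOVA window with 4K
 * IOVA pages and 8-byte pdir entries (PDIR_ENTRY_SIZE).
 */
#if 0	/* example only -- never compiled */
static void example_res_size(void)
{
	unsigned long iov_size  = 256UL << 20;			/* 256MB window  */
	unsigned long pdir_size = (iov_size / 4096) * 8;	/* 64K entries -> 512KB pdir */
	unsigned long res_size  = (pdir_size / 8) >> 3;		/* one bit/entry -> 8KB map  */
}
#endif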
static void __init
ioc_sac_init(struct ioc *ioc)
{
	struct pci_dev *sac = NULL;
	struct pci_controller *controller = NULL;

	/*
	 * pci_alloc_coherent() must return a DMA address which is
	 * SAC (single address cycle) addressable, so allocate a
	 * pseudo-device to enforce that.
	 */
	sac = kzalloc(sizeof(*sac), GFP_KERNEL);
	if (!sac)
		panic(PFX "Couldn't allocate struct pci_dev");

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		panic(PFX "Couldn't allocate struct pci_controller");

	controller->iommu = ioc;
	sac->sysdata = controller;
	sac->dma_mask = 0xFFFFFFFFUL;
#ifdef CONFIG_PCI
	sac->dev.bus = &pci_bus_type;
#endif
	ioc->sac_only_dev = sac;
}

static void __init
ioc_zx1_init(struct ioc *ioc)
{
	unsigned long rope_config;
	unsigned int i;

	if (ioc->rev < 0x20)
		panic(PFX "IOC 2.0 or later required for IOMMU support\n");

	/* 38 bit memory controller + extra bit for range displaced by MMIO */
	ioc->dma_mask = (0x1UL << 39) - 1;

	/*
	** Clear ROPE(N)_CONFIG AO bit.
	** Disables "NT Ordering" (~= !"Relaxed Ordering")
	** Overrides bit 1 in DMA Hint Sets.
	** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701.
	*/
	for (i = 0; i < (8*8); i += 8) {
		rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i);
		rope_config &= ~IOC_ROPE_AO;
		WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i);
	}
}
typedef void (initfunc)(struct ioc *);

struct ioc_iommu {
	u32 func_id;
	char *name;
	initfunc *init;
};

static struct ioc_iommu ioc_iommu_info[] __initdata = {
	{ ZX1_IOC_ID, "zx1", ioc_zx1_init },
	{ ZX2_IOC_ID, "zx2", NULL },
	{ SX1000_IOC_ID, "sx1000", NULL },
	{ SX2000_IOC_ID, "sx2000", NULL },
};

static struct ioc * __init
ioc_init(unsigned long hpa, void *handle)
{
	struct ioc *ioc;
	struct ioc_iommu *info;

	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc)
		return NULL;

	ioc->next = ioc_list;
	ioc_list = ioc;

	ioc->handle = handle;
	ioc->ioc_hpa = ioremap(hpa, 0x1000);

	ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
	ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
	ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL;	/* conservative */

	for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
		if (ioc->func_id == info->func_id) {
			ioc->name = info->name;
			if (info->init)
				(info->init)(ioc);
		}
	}

	iovp_size = (1 << iovp_shift);
	iovp_mask = ~(iovp_size - 1);

	DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __func__,
		PAGE_SIZE >> 10, iovp_size >> 10);

	if (!ioc->name) {
		ioc->name = kmalloc(24, GFP_KERNEL);
		if (ioc->name)
			sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
				ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
		else
			ioc->name = "Unknown";
	}

	ioc_iova_init(ioc);
	ioc_resource_init(ioc);
	ioc_sac_init(ioc);

	if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask)
		ia64_max_iommu_merge_mask = ~iovp_mask;

	printk(KERN_INFO PFX
		"%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
		ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
		hpa, ioc->iov_size >> 20, ioc->ibase);

	return ioc;
}
/**************************************************************************
**
**   SBA initialization code (HW and SW)
**
**   o identify SBA chip itself
**   o FIXME: initialize DMA hints for reasonable defaults
**
**************************************************************************/
#ifdef CONFIG_PROC_FS
static void *
ioc_start(struct seq_file *s, loff_t *pos)
{
	struct ioc *ioc;
	loff_t n = *pos;

	for (ioc = ioc_list; ioc; ioc = ioc->next)
		if (!n--)
			return ioc;

	return NULL;
}

static void *
ioc_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct ioc *ioc = v;

	++*pos;
	return ioc->next;
}

static void
ioc_stop(struct seq_file *s, void *v)
{
}

static int
ioc_show(struct seq_file *s, void *v)
{
	struct ioc *ioc = v;
	unsigned long *res_ptr = (unsigned long *)ioc->res_map;
	int i, used = 0;

	seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
		ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
#ifdef CONFIG_NUMA
	if (ioc->node != MAX_NUMNODES)
		seq_printf(s, "NUMA node       : %d\n", ioc->node);
#endif
	seq_printf(s, "IOVA size       : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024));
	seq_printf(s, "IOVA page size  : %ld kb\n", iovp_size/1024);

	for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		used += hweight64(*res_ptr);

	seq_printf(s, "PDIR size       : %d entries\n", ioc->pdir_size >> 3);
	seq_printf(s, "PDIR used       : %d entries\n", used);

#ifdef PDIR_SEARCH_TIMING
	{
		unsigned long i = 0, avg = 0, min, max;
		min = max = ioc->avg_search[0];
		for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
			avg += ioc->avg_search[i];
			if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
			if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
		}
		avg /= SBA_SEARCH_SAMPLE;
		seq_printf(s, "Bitmap search   : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
			   min, avg, max);
	}
#endif
#ifndef ALLOW_IOV_BYPASS
	seq_printf(s, "IOVA bypass disabled\n");
#endif
	return 0;
}

static const struct seq_operations ioc_seq_ops = {
	.start = ioc_start,
	.next  = ioc_next,
	.stop  = ioc_stop,
	.show  = ioc_show
};

static int
ioc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &ioc_seq_ops);
}

static const struct file_operations ioc_fops = {
	.open    = ioc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release
};

static void __init
ioc_proc_init(void)
{
	struct proc_dir_entry *dir;

	dir = proc_mkdir("bus/mckinley", NULL);
	if (!dir)
		return;

	proc_create(ioc_list->name, 0, dir, &ioc_fops);
}
#endif
static void
sba_connect_bus(struct pci_bus *bus)
{
	acpi_handle handle, parent;
	acpi_status status;
	struct ioc *ioc;

	if (!PCI_CONTROLLER(bus))
		panic(PFX "no sysdata on bus %d!\n", bus->number);

	if (PCI_CONTROLLER(bus)->iommu)
		return;

	handle = PCI_CONTROLLER(bus)->acpi_handle;
	if (!handle)
		return;

	/*
	 * The IOC scope encloses PCI root bridges in the ACPI
	 * namespace, so work our way out until we find an IOC we
	 * claimed previously.
	 */
	do {
		for (ioc = ioc_list; ioc; ioc = ioc->next)
			if (ioc->handle == handle) {
				PCI_CONTROLLER(bus)->iommu = ioc;
				return;
			}

		status = acpi_get_parent(handle, &parent);
		handle = parent;
	} while (ACPI_SUCCESS(status));

	printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus), bus->number);
}
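
/*
 * Illustrative sketch, not part of the driver: a made-up ACPI layout
 * showing why the walk above terminates.  The device names are invented.
 *
 *	\_SB.SBA0		<- handle claimed by acpi_sba_ioc_add()
 *	\_SB.SBA0.PCI0		<- root bridge; bus->acpi_handle starts here
 *
 * The first pass finds no ioc_list entry for PCI0; acpi_get_parent()
 * then lifts the handle to SBA0, which matches, so the bus gets its
 * iommu pointer.
 */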
#ifdef CONFIG_NUMA
static void __init
sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
{
	unsigned int node;
	int pxm;

	ioc->node = MAX_NUMNODES;

	pxm = acpi_get_pxm(handle);

	if (pxm < 0)
		return;

	node = pxm_to_node(pxm);

	if (node >= MAX_NUMNODES || !node_online(node))
		return;

	ioc->node = node;
	return;
}
#else
#define sba_map_ioc_to_node(ioc, handle)
#endif

static int __init
acpi_sba_ioc_add(struct acpi_device *device)
{
	struct ioc *ioc;
	acpi_status status;
	u64 hpa, length;
	struct acpi_device_info *adi;

	status = hp_acpi_csr_space(device->handle, &hpa, &length);
	if (ACPI_FAILURE(status))
		return 1;

	status = acpi_get_object_info(device->handle, &adi);
	if (ACPI_FAILURE(status))
		return 1;

	/*
	 * For HWP0001, only SBA appears in ACPI namespace.  It encloses the PCI
	 * root bridges, and its CSR space includes the IOC function.
	 */
	if (strncmp("HWP0001", adi->hardware_id.string, 7) == 0) {
		hpa += ZX1_IOC_OFFSET;
		/* zx1 based systems default to kernel page size iommu pages */
		if (!iovp_shift)
			iovp_shift = min(PAGE_SHIFT, 16);
	}
	kfree(adi);

	/*
	 * default anything not caught above or specified on cmdline to 4k
	 * iommu page size
	 */
	if (!iovp_shift)
		iovp_shift = 12;

	ioc = ioc_init(hpa, device->handle);
	if (!ioc)
		return 1;

	/* setup NUMA node association */
	sba_map_ioc_to_node(ioc, device->handle);
	return 0;
}
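
/*
 * Illustrative sketch, not part of the driver: the iovp_shift defaults
 * chosen above, assuming kernel page sizes ia64 actually offers.
 *
 *	HWP0001 (zx1), 16K kernel pages		-> min(14, 16) == 14 (16K IOMMU pages)
 *	HWP0001 (zx1), 64K kernel pages		-> min(16, 16) == 16 (64K IOMMU pages)
 *	otherwise, no sbapagesize= override	-> iovp_shift == 12 (4K IOMMU pages)
 */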
static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
	{"HWP0001", 0},
	{"HWP0004", 0},
	{"", 0},
};

static struct acpi_driver acpi_sba_ioc_driver = {
	.name		= "IOC IOMMU Driver",
	.ids		= hp_ioc_iommu_device_ids,
	.ops		= {
		.add	= acpi_sba_ioc_add,
	},
};
extern struct dma_map_ops swiotlb_dma_ops;

static int __init
sba_init(void)
{
	if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
		return 0;

#if defined(CONFIG_IA64_GENERIC)
	/* If we are booting a kdump kernel, the sba_iommu will
	 * cause devices that were not shutdown properly to MCA
	 * as soon as they are turned back on.  Our only option for
	 * a successful kdump kernel boot is to use the swiotlb.
	 */
	if (is_kdump_kernel()) {
		dma_ops = &swiotlb_dma_ops;
		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
			panic("Unable to initialize software I/O TLB:"
			      " Try machvec=dig boot option");
		machvec_init("dig");
		return 0;
	}
#endif

	acpi_bus_register_driver(&acpi_sba_ioc_driver);
	if (!ioc_list) {
#ifdef CONFIG_IA64_GENERIC
		/*
		 * If we didn't find something sba_iommu can claim, we
		 * need to setup the swiotlb and switch to the dig machvec.
		 */
		dma_ops = &swiotlb_dma_ops;
		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
			panic("Unable to find SBA IOMMU or initialize "
			      "software I/O TLB: Try machvec=dig boot option");
		machvec_init("dig");
#else
		panic("Unable to find SBA IOMMU: Try a generic or DIG kernel");
#endif
		return 0;
	}

#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
	/*
	 * hpzx1_swiotlb needs to have a fairly small swiotlb bounce
	 * buffer setup to support devices with smaller DMA masks than
	 * sba_iommu can handle.
	 */
	if (ia64_platform_is("hpzx1_swiotlb")) {
		extern void hwsw_init(void);

		hwsw_init();
	}
#endif

#ifdef CONFIG_PCI
	{
		struct pci_bus *b = NULL;
		while ((b = pci_find_next_bus(b)) != NULL)
			sba_connect_bus(b);
	}
#endif

#ifdef CONFIG_PROC_FS
	ioc_proc_init();
#endif
	return 0;
}

subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */
static int __init
nosbagart(char *str)
{
	reserve_sba_gart = 0;
	return 1;
}

static int sba_dma_supported (struct device *dev, u64 mask)
{
	/* make sure it's at least 32bit capable */
	return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
}
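
/*
 * Illustrative sketch, not part of the driver: sample masks run through
 * the 32-bit capability test above.  The device widths are examples we
 * chose for demonstration.
 */
#if 0	/* example only -- never compiled */
static void example_masks(void)
{
	u64 ok  = 0xFFFFFFFFUL;		/* 32-bit device: supported */
	u64 ok2 = 0xFFFFFFFFFFUL;	/* 40-bit device: supported */
	u64 bad = 0x00FFFFFFUL;		/* 24-bit device: rejected  */
}
#endif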
static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}
  1867. __setup("nosbagart", nosbagart);
  1868. static int __init
  1869. sba_page_override(char *str)
  1870. {
  1871. unsigned long page_size;
  1872. page_size = memparse(str, &str);
  1873. switch (page_size) {
  1874. case 4096:
  1875. case 8192:
  1876. case 16384:
  1877. case 65536:
  1878. iovp_shift = ffs(page_size) - 1;
  1879. break;
  1880. default:
  1881. printk("%s: unknown/unsupported iommu page size %ld\n",
  1882. __func__, page_size);
  1883. }
  1884. return 1;
  1885. }
  1886. __setup("sbapagesize=",sba_page_override);
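
/*
 * Illustrative sketch, not part of the driver: how the boot parameter
 * becomes iovp_shift.  memparse() and ffs() are real kernel helpers;
 * the sample values are ours.
 *
 *	sbapagesize=4096	-> ffs(4096)  - 1 == 12
 *	sbapagesize=16k		-> ffs(16384) - 1 == 14
 *	sbapagesize=64k		-> ffs(65536) - 1 == 16
 */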
struct dma_map_ops sba_dma_ops = {
	.alloc_coherent		= sba_alloc_coherent,
	.free_coherent		= sba_free_coherent,
	.map_page		= sba_map_page,
	.unmap_page		= sba_unmap_page,
	.map_sg			= sba_map_sg_attrs,
	.unmap_sg		= sba_unmap_sg_attrs,
	.sync_single_for_cpu	= machvec_dma_sync_single,
	.sync_sg_for_cpu	= machvec_dma_sync_sg,
	.sync_single_for_device	= machvec_dma_sync_single,
	.sync_sg_for_device	= machvec_dma_sync_sg,
	.dma_supported		= sba_dma_supported,
	.mapping_error		= sba_dma_mapping_error,
};

void sba_dma_init(void)
{
	dma_ops = &sba_dma_ops;
}