sba_iommu.c

/*
** IA64 System Bus Adapter (SBA) I/O MMU manager
**
** (c) Copyright 2002-2005 Alex Williamson
** (c) Copyright 2002-2003 Grant Grundler
** (c) Copyright 2002-2005 Hewlett-Packard Company
**
** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
**
** This module initializes the IOC (I/O Controller) found on HP
** McKinley machines and their successors.
**
*/

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/bitops.h>         /* hweight64() */
#include <linux/crash_dump.h>
#include <linux/iommu-helper.h>

#include <asm/delay.h>            /* ia64_get_itc() */
#include <asm/io.h>
#include <asm/page.h>             /* PAGE_OFFSET */
#include <asm/dma.h>
#include <asm/system.h>           /* wmb() */

#include <asm/acpi-ext.h>

extern int swiotlb_late_init_with_default_size (size_t size);

#define PFX "IOC: "

/*
** Enabling timing search of the pdir resource map.  Output in /proc.
** Disabled by default to optimize performance.
*/
#undef PDIR_SEARCH_TIMING

/*
** This option allows cards capable of 64bit DMA to bypass the IOMMU.  If
** not defined, all DMA will be 32bit and go through the TLB.
** There's potentially a conflict in the bio merge code with us
** advertising an iommu, but then bypassing it.  Since I/O MMU bypassing
** appears to give more performance than bio-level virtual merging, we'll
** do the former for now.  NOTE: BYPASS_SG also needs to be undef'd to
** completely restrict DMA to the IOMMU.
*/
#define ALLOW_IOV_BYPASS

/*
** This option specifically allows/disallows bypassing scatterlists with
** multiple entries.  Coalescing these entries can allow better DMA streaming
** and in some cases shows better performance than entirely bypassing the
** IOMMU.  Performance increase on the order of 1-2% sequential output/input
** using bonnie++ on a RAID0 MD device (sym2 & mpt).
*/
#undef ALLOW_IOV_BYPASS_SG

/*
** If a device prefetches beyond the end of a valid pdir entry, it will cause
** a hard failure, ie. MCA.  Version 3.0 and later of the zx1 LBA should
** disconnect on 4k boundaries and prevent such issues.  If the device is
** particularly aggressive, this option will keep the entire pdir valid such
** that prefetching will hit a valid address.  This could severely impact
** error containment, and is therefore off by default.  The page that is
** used for spill-over is poisoned, so that should help debugging somewhat.
*/
#undef FULL_VALID_PDIR

#define ENABLE_MARK_CLEAN

/*
** The number of debug flags is a clue - this code is fragile.  NOTE: since
** tightening the use of res_lock the resource bitmap and actual pdir are no
** longer guaranteed to stay in sync.  The sanity checking code isn't going to
** like that.
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_BYPASS

#if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
#error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
#endif

#define SBA_INLINE      __inline__
/* #define SBA_INLINE */

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)  printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)   printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)        printk(x)
#else
#define DBG_RUN_SG(x...)
#endif

#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)   printk(x)
#else
#define DBG_RES(x...)
#endif

#ifdef DEBUG_BYPASS
#define DBG_BYPASS(x...)        printk(x)
#else
#define DBG_BYPASS(x...)
#endif

#ifdef ASSERT_PDIR_SANITY
#define ASSERT(expr) \
        if(!(expr)) { \
                printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \
                panic(#expr); \
        }
#else
#define ASSERT(expr)
#endif

/*
** The number of pdir entries to "free" before issuing
** a read to PCOM register to flush out PCOM writes.
** Interacts with allocation granularity (ie 4 or 8 entries
** allocated and free'd/purged at a time might make this
** less interesting).
*/
#define DELAYED_RESOURCE_CNT    64

#define PCI_DEVICE_ID_HP_SX2000_IOC     0x12ec

#define ZX1_IOC_ID      ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
#define ZX2_IOC_ID      ((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP)
#define REO_IOC_ID      ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX1000_IOC_ID   ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX2000_IOC_ID   ((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP)

#define ZX1_IOC_OFFSET  0x1000  /* ACPI reports SBA, we want IOC */

#define IOC_FUNC_ID     0x000
#define IOC_FCLASS      0x008   /* function class, bist, header, rev... */
#define IOC_IBASE       0x300   /* IO TLB */
#define IOC_IMASK       0x308
#define IOC_PCOM        0x310
#define IOC_TCNFG       0x318
#define IOC_PDIR_BASE   0x320

#define IOC_ROPE0_CFG   0x500
#define IOC_ROPE_AO     0x10    /* Allow "Relaxed Ordering" */

/* AGP GART driver looks for this */
#define ZX1_SBA_IOMMU_COOKIE    0x0000badbadc0ffeeUL

/*
** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register)
**
** Some IOCs (sx1000) can run at the above page sizes, but are
** really only supported using the IOC at a 4k page size.
**
** iovp_size could only be greater than PAGE_SIZE if we are
** confident the drivers really only touch the next physical
** page iff that driver instance owns it.
*/
static unsigned long iovp_size;
static unsigned long iovp_shift;
static unsigned long iovp_mask;

struct ioc {
        void __iomem    *ioc_hpa;       /* I/O MMU base address */
        char            *res_map;       /* resource map, bit == pdir entry */
        u64             *pdir_base;     /* physical base address */
        unsigned long   ibase;          /* pdir IOV Space base */
        unsigned long   imask;          /* pdir IOV Space mask */

        unsigned long   *res_hint;      /* next avail IOVP - circular search */
        unsigned long   dma_mask;
        spinlock_t      res_lock;       /* protects the resource bitmap, but must be held when */
                                        /* clearing pdir to prevent races with allocations. */
        unsigned int    res_bitshift;   /* from the RIGHT! */
        unsigned int    res_size;       /* size of resource map in bytes */
#ifdef CONFIG_NUMA
        unsigned int    node;           /* node where this IOC lives */
#endif
#if DELAYED_RESOURCE_CNT > 0
        spinlock_t      saved_lock;     /* may want to try to get this on a separate cacheline */
                                        /* than res_lock for bigger systems. */
        int             saved_cnt;
        struct sba_dma_pair {
                dma_addr_t      iova;
                size_t          size;
        } saved[DELAYED_RESOURCE_CNT];
#endif

#ifdef PDIR_SEARCH_TIMING
#define SBA_SEARCH_SAMPLE       0x100
        unsigned long avg_search[SBA_SEARCH_SAMPLE];
        unsigned long avg_idx;          /* current index into avg_search */
#endif

        /* Stuff we don't need in performance path */
        struct ioc      *next;          /* list of IOC's in system */
        acpi_handle     handle;         /* for multiple IOC's */
        const char      *name;
        unsigned int    func_id;
        unsigned int    rev;            /* HW revision of chip */
        u32             iov_size;
        unsigned int    pdir_size;      /* in bytes, determined by IOV Space size */
        struct pci_dev  *sac_only_dev;
};

static struct ioc *ioc_list;
static int reserve_sba_gart = 1;

static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);

#define sba_sg_address(sg)      sg_virt((sg))

#ifdef FULL_VALID_PDIR
static u64 prefetch_spill_page;
#endif

#ifdef CONFIG_PCI
# define GET_IOC(dev)   (((dev)->bus == &pci_bus_type)                                          \
                         ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
#else
# define GET_IOC(dev)   NULL
#endif

/*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
** (or rather not merge) DMAs into manageable chunks.
** On parisc, this is more of the software/tuning constraint
** rather than the HW.  I/O MMU allocation algorithms can be
** faster with smaller sizes (to some degree).
*/
#define DMA_CHUNK_SIZE  (BITS_PER_LONG*iovp_size)

#define ROUNDUP(x,y)    ((x + ((y)-1)) & ~((y)-1))
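
/*
** A quick sanity check on the two macros above (assuming the common
** 4KB IOV page): with BITS_PER_LONG == 64 and iovp_size == 4096,
** DMA_CHUNK_SIZE is 256KB -- exactly one 64-bit word of the resource
** bitmap worth of pages.  ROUNDUP(0x1234, 0x1000) == 0x2000: add (y-1),
** then mask off the low bits; valid only when y is a power of two.
*/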

/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
**  (ie follow writes which must reach HW with a read)
**
*/
#define READ_REG(addr)       __raw_readq(addr)
#define WRITE_REG(val, addr) __raw_writeq(val, addr)
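
/*
** Example of the posted-write rule above (a sketch mirroring what the
** unmap path below actually does): a PCOM purge is only guaranteed to
** have reached the hardware once a follow-up read of the IOC completes:
**
**      WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa + IOC_PCOM);
**      READ_REG(ioc->ioc_hpa + IOC_PCOM);      -- flush posted writes
*/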

#ifdef DEBUG_SBA_INIT

/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU PDIR.
 */
static void
sba_dump_tlb(char *hpa)
{
        DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
        DBG_INIT("IOC_IBASE    : %016lx\n", READ_REG(hpa+IOC_IBASE));
        DBG_INIT("IOC_IMASK    : %016lx\n", READ_REG(hpa+IOC_IMASK));
        DBG_INIT("IOC_TCNFG    : %016lx\n", READ_REG(hpa+IOC_TCNFG));
        DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
        DBG_INIT("\n");
}
#endif


#ifdef ASSERT_PDIR_SANITY

/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU PDIR in human readable form.
 */
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
        /* start printing from lowest pde in rval */
        u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)];
        unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)];
        uint rcnt;

        printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
               msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);

        rcnt = 0;
        while (rcnt < BITS_PER_LONG) {
                printk(KERN_DEBUG "%s %2d %p %016Lx\n",
                       (rcnt == (pide & (BITS_PER_LONG - 1)))
                                ? "    -->" : "       ",
                       rcnt, ptr, (unsigned long long) *ptr );
                rcnt++;
                ptr++;
        }
        printk(KERN_DEBUG "%s", msg);
}


/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify the resource map and pdir state is consistent
 */
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
        u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
        u64 *rptr = (u64 *) ioc->res_map;       /* resource map ptr */
        u64 *pptr = ioc->pdir_base;             /* pdir ptr */
        uint pide = 0;

        while (rptr < rptr_end) {
                u64 rval;
                int rcnt;       /* number of bits we might check */

                rval = *rptr;
                rcnt = 64;

                while (rcnt) {
                        /* Get last byte and highest bit from that */
                        u32 pde = ((u32)((*pptr >> (63)) & 0x1));
                        if ((rval & 0x1) ^ pde)
                        {
                                /*
                                ** BUMMER!  -- res_map != pdir --
                                ** Dump rval and matching pdir entries
                                */
                                sba_dump_pdir_entry(ioc, msg, pide);
                                return(1);
                        }
                        rcnt--;
                        rval >>= 1;     /* try the next bit */
                        pptr++;
                        pide++;
                }
                rptr++; /* look at next word of res_map */
        }
        /* It'd be nice if we always got here :^) */
        return 0;
}


/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * print the SG list so we can verify it's correct by hand.
 */
static void
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
        while (nents-- > 0) {
                printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
                       startsg->dma_address, startsg->dma_length,
                       sba_sg_address(startsg));
                startsg = sg_next(startsg);
        }
}

static void
sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
        struct scatterlist *the_sg = startsg;
        int the_nents = nents;

        while (the_nents-- > 0) {
                if (sba_sg_address(the_sg) == 0x0UL)
                        sba_dump_sg(NULL, startsg, nents);
                the_sg = sg_next(the_sg);
        }
}

#endif /* ASSERT_PDIR_SANITY */


/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1       /* could increase this to 4 or 8 if needed */

/* Convert from IOVP to IOVA and vice versa. */
#define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase))

#define PDIR_ENTRY_SIZE sizeof(u64)

#define PDIR_INDEX(iovp)   ((iovp)>>iovp_shift)

#define RESMAP_MASK(n)    ~(~0UL << (n))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)
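
/*
** A worked example of the macros above (assuming iovp_shift == 12 and
** an ibase of 0x40000000): pdir index 5 covers IOVP 0x5000, so
** SBA_IOVA() with a buffer offset of 0x234 yields
** 0x40000000 | 0x5000 | 0x234 == 0x40005234, and SBA_IOVP() masks the
** ibase bits back off.  RESMAP_MASK(3) == 0x7, i.e. n consecutive bits
** starting at bit 0, ready to be shifted into place.
*/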

/**
 * For most cases the normal get_order is sufficient, however it limits us
 * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity.
 * It only incurs about 1 clock cycle to use this one with the static variable
 * and makes the code more intuitive.
 */
static SBA_INLINE int
get_iovp_order (unsigned long size)
{
        long double d = size - 1;
        long order;

        order = ia64_getf_exp(d);
        order = order - iovp_shift - 0xffff + 1;
        if (order < 0)
                order = 0;
        return order;
}
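
/*
** How the FP trick above works (a sketch, assuming iovp_shift == 12):
** ia64_getf_exp() extracts the biased binary exponent of (size - 1),
** so for size == 16KB it returns 0xffff + 13; subtracting the bias
** (0xffff) and iovp_shift, then adding 1, gives order 2, i.e.
** 2^2 == 4 IOV pages.  Net effect: ceil(log2(size)) - iovp_shift,
** clamped at 0.
*/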

static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
                                 unsigned int bitshiftcnt)
{
        return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
                + bitshiftcnt;
}
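
/*
** e.g. if res_ptr points 16 bytes into res_map, those bytes cover
** 16 << 3 == 128 map bits, so bit 5 of that word is pdir index 133.
*/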

/**
 * sba_search_bitmap - find free space in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @bits_wanted: number of entries we need.
 * @use_hint: use res_hint to indicate where to start looking
 *
 * Find consecutive free bits in resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Cool perf optimization: search for log2(size) bits at a time.
 */
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, struct device *dev,
                  unsigned long bits_wanted, int use_hint)
{
        unsigned long *res_ptr;
        unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
        unsigned long flags, pide = ~0UL, tpide;
        unsigned long boundary_size;
        unsigned long shift;
        int ret;

        ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
        ASSERT(res_ptr < res_end);

        boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
        boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;

        BUG_ON(ioc->ibase & ~iovp_mask);
        shift = ioc->ibase >> iovp_shift;

        spin_lock_irqsave(&ioc->res_lock, flags);

        /* Allow caller to force a search through the entire resource space */
        if (likely(use_hint)) {
                res_ptr = ioc->res_hint;
        } else {
                res_ptr = (ulong *)ioc->res_map;
                ioc->res_bitshift = 0;
        }

        /*
         * N.B.  REO/Grande defect AR2305 can cause TLB fetch timeouts
         * if a TLB entry is purged while in use.  sba_mark_invalid()
         * purges IOTLB entries in power-of-two sizes, so we also
         * allocate IOVA space in power-of-two sizes.
         */
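        /*
         * e.g. a request for 5 pages is rounded up to 8 here, so the
         * unmap path can later purge the same power-of-two-sized,
         * suitably aligned range in one PCOM write.
         */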
        bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift);

        if (likely(bits_wanted == 1)) {
                unsigned int bitshiftcnt;
                for(; res_ptr < res_end ; res_ptr++) {
                        if (likely(*res_ptr != ~0UL)) {
                                bitshiftcnt = ffz(*res_ptr);
                                *res_ptr |= (1UL << bitshiftcnt);
                                pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
                                ioc->res_bitshift = bitshiftcnt + bits_wanted;
                                goto found_it;
                        }
                }
                goto not_found;
        }

        if (likely(bits_wanted <= BITS_PER_LONG/2)) {
                /*
                ** Search the resource bit map on well-aligned values.
                ** "o" is the alignment.
                ** We need the alignment to invalidate I/O TLB using
                ** SBA HW features in the unmap path.
                */
                unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift);
                uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
                unsigned long mask, base_mask;

                base_mask = RESMAP_MASK(bits_wanted);
                mask = base_mask << bitshiftcnt;

                DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
                for(; res_ptr < res_end ; res_ptr++)
                {
                        DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
                        ASSERT(0 != mask);
                        for (; mask ; mask <<= o, bitshiftcnt += o) {
                                tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
                                ret = iommu_is_span_boundary(tpide, bits_wanted,
                                                             shift,
                                                             boundary_size);
                                if ((0 == ((*res_ptr) & mask)) && !ret) {
                                        *res_ptr |= mask;       /* mark resources busy! */
                                        pide = tpide;
                                        ioc->res_bitshift = bitshiftcnt + bits_wanted;
                                        goto found_it;
                                }
                        }

                        bitshiftcnt = 0;
                        mask = base_mask;
                }

        } else {
                int qwords, bits, i;
                unsigned long *end;

                qwords = bits_wanted >> 6; /* /64 */
                bits = bits_wanted - (qwords * BITS_PER_LONG);

                end = res_end - qwords;

                for (; res_ptr < end; res_ptr++) {
                        tpide = ptr_to_pide(ioc, res_ptr, 0);
                        ret = iommu_is_span_boundary(tpide, bits_wanted,
                                                     shift, boundary_size);
                        if (ret)
                                goto next_ptr;
                        for (i = 0 ; i < qwords ; i++) {
                                if (res_ptr[i] != 0)
                                        goto next_ptr;
                        }
                        if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits))
                                continue;

                        /* Found it, mark it */
                        for (i = 0 ; i < qwords ; i++)
                                res_ptr[i] = ~0UL;
                        res_ptr[i] |= RESMAP_MASK(bits);

                        pide = tpide;
                        res_ptr += qwords;
                        ioc->res_bitshift = bits;
                        goto found_it;
next_ptr:
                        ;
                }
        }

not_found:
        prefetch(ioc->res_map);
        ioc->res_hint = (unsigned long *) ioc->res_map;
        ioc->res_bitshift = 0;
        spin_unlock_irqrestore(&ioc->res_lock, flags);
        return (pide);

found_it:
        ioc->res_hint = res_ptr;
        spin_unlock_irqrestore(&ioc->res_lock, flags);
        return (pide);
}

/**
 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked bits and then mark those bits
 * in the resource bit map.
 */
static int
sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
        unsigned int pages_needed = size >> iovp_shift;
#ifdef PDIR_SEARCH_TIMING
        unsigned long itc_start;
#endif
        unsigned long pide;

        ASSERT(pages_needed);
        ASSERT(0 == (size & ~iovp_mask));

#ifdef PDIR_SEARCH_TIMING
        itc_start = ia64_get_itc();
#endif
        /*
        ** "seek and ye shall find"...praying never hurts either...
        */
        pide = sba_search_bitmap(ioc, dev, pages_needed, 1);
        if (unlikely(pide >= (ioc->res_size << 3))) {
                pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
                if (unlikely(pide >= (ioc->res_size << 3))) {
#if DELAYED_RESOURCE_CNT > 0
                        unsigned long flags;

                        /*
                        ** With delayed resource freeing, we can give this one more shot.  We're
                        ** getting close to being in trouble here, so do what we can to make this
                        ** one count.
                        */
                        spin_lock_irqsave(&ioc->saved_lock, flags);
                        if (ioc->saved_cnt > 0) {
                                struct sba_dma_pair *d;
                                int cnt = ioc->saved_cnt;

                                d = &(ioc->saved[ioc->saved_cnt - 1]);

                                spin_lock(&ioc->res_lock);
                                while (cnt--) {
                                        sba_mark_invalid(ioc, d->iova, d->size);
                                        sba_free_range(ioc, d->iova, d->size);
                                        d--;
                                }
                                ioc->saved_cnt = 0;
                                READ_REG(ioc->ioc_hpa+IOC_PCOM);        /* flush purges */
                                spin_unlock(&ioc->res_lock);
                        }
                        spin_unlock_irqrestore(&ioc->saved_lock, flags);

                        pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
                        if (unlikely(pide >= (ioc->res_size << 3)))
                                panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
                                      ioc->ioc_hpa);
#else
                        panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
                              ioc->ioc_hpa);
#endif
                }
        }

#ifdef PDIR_SEARCH_TIMING
        ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed;
        ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
#endif

        prefetchw(&(ioc->pdir_base[pide]));

#ifdef ASSERT_PDIR_SANITY
        /* verify the first enable bit is clear */
        if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) {
                sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
        }
#endif

        DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
                __func__, size, pages_needed, pide,
                (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
                ioc->res_bitshift );

        return (pide);
}


/**
 * sba_free_range - unmark bits in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes to create a mapping for
 *
 * clear bits in the ioc's resource map
 */
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
        unsigned long iovp = SBA_IOVP(ioc, iova);
        unsigned int pide = PDIR_INDEX(iovp);
        unsigned int ridx = pide >> 3;  /* convert bit to byte address */
        unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
        int bits_not_wanted = size >> iovp_shift;
        unsigned long m;

        /* Round up to power-of-two size: see AR2305 note above */
        bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift);
        for (; bits_not_wanted > 0 ; res_ptr++) {

                if (unlikely(bits_not_wanted > BITS_PER_LONG)) {

                        /* these mappings start 64bit aligned */
                        *res_ptr = 0UL;
                        bits_not_wanted -= BITS_PER_LONG;
                        pide += BITS_PER_LONG;

                } else {

                        /* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
                        m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
                        bits_not_wanted = 0;

                        DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size,
                                bits_not_wanted, m, pide, res_ptr, *res_ptr);

                        ASSERT(m != 0);
                        ASSERT(bits_not_wanted);
                        ASSERT((*res_ptr & m) == m); /* verify same bits are set */
                        *res_ptr &= ~m;
                }
        }
}

/**************************************************************
*
* "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

/**
 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @pdir_ptr:  pointer to IO PDIR entry
 * @vba: Virtual CPU address of buffer to map
 *
 * SBA Mapping Routine
 *
 * Given a virtual address (vba, arg1) sba_io_pdir_entry()
 * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
 * Each IO Pdir entry consists of 8 bytes as shown below
 * (LSB == bit 0):
 *
 *  63                    40                                 11    7        0
 * +-+---------------------+----------------------------------+----+--------+
 * |V|        U            |            PPN[39:12]            | U  |   FF   |
 * +-+---------------------+----------------------------------+----+--------+
 *
 *  V  == Valid Bit
 *  U  == Unused
 * PPN == Physical Page Number
 *
 * The physical address fields are filled with the results of virt_to_phys()
 * on the vba.
 */

#if 1
#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL)    \
                                                      | 0x8000000000000000ULL)
#else
void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
{
        *pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
}
#endif
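
/*
** Worked example of the macro above (a sketch; on ia64 the mask both
** strips the region bits from an identity-mapped kernel address and
** clears the low 12 offset bits): vba 0xe000000004321867 becomes pdir
** entry 0x8000000004321000, i.e. the Valid bit (63) set over PPN 0x4321.
*/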

#ifdef ENABLE_MARK_CLEAN
/**
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
static void
mark_clean (void *addr, size_t size)
{
        unsigned long pg_addr, end;

        pg_addr = PAGE_ALIGN((unsigned long) addr);
        end = (unsigned long) addr + size;
        while (pg_addr + PAGE_SIZE <= end) {
                struct page *page = virt_to_page((void *)pg_addr);
                set_bit(PG_arch_1, &page->flags);
                pg_addr += PAGE_SIZE;
        }
}
#endif

/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova:  IO Virtual Address mapped earlier
 * @byte_cnt:  number of bytes this mapping covers.
 *
 * Marking the IO PDIR entry(ies) as Invalid and invalidate
 * corresponding IO TLB entry. The PCOM (Purge Command Register)
 * is to purge stale entries in the IO TLB when unmapping entries.
 *
 * The PCOM register supports purging of multiple pages, with a minimum
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
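
/*
** PCOM encoding sketch (assuming iovp_shift == 12): the purge address
** and the range size share one register, with log2(range size) encoded
** in the low bits.  Purging 8 pages at IOVP 0x8000 writes
** ibase | 0x8000 | (3 + 12), i.e. a size field of 15 for a 2^15-byte,
** 32KB-aligned range -- which is why allocations are kept power-of-two
** sized and aligned.
*/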
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
        u32 iovp = (u32) SBA_IOVP(ioc,iova);

        int off = PDIR_INDEX(iovp);

        /* Must be non-zero and rounded up */
        ASSERT(byte_cnt > 0);
        ASSERT(0 == (byte_cnt & ~iovp_mask));

#ifdef ASSERT_PDIR_SANITY
        /* Assert first pdir entry is set */
        if (!(ioc->pdir_base[off] >> 60)) {
                sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
        }
#endif

        if (byte_cnt <= iovp_size)
        {
                ASSERT(off < ioc->pdir_size);

                iovp |= iovp_shift;     /* set "size" field for PCOM */

#ifndef FULL_VALID_PDIR
                /*
                ** clear I/O PDIR entry "valid" bit
                ** Do NOT clear the rest - save it for debugging.
                ** We should only clear bits that have previously
                ** been enabled.
                */
                ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
                /*
                ** If we want to maintain the PDIR as valid, put in
                ** the spill page so devices prefetching won't
                ** cause a hard fail.
                */
                ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
        } else {
                u32 t = get_iovp_order(byte_cnt) + iovp_shift;

                iovp |= t;
                ASSERT(t <= 31);        /* 2GB! Max value of "size" field */

                do {
                        /* verify this pdir entry is enabled */
                        ASSERT(ioc->pdir_base[off] >> 63);
#ifndef FULL_VALID_PDIR
                        /* clear I/O Pdir entry "valid" bit first */
                        ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
                        ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
                        off++;
                        byte_cnt -= iovp_size;
                } while (byte_cnt > 0);
        }

        WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
}

/**
 * sba_map_single_attrs - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @addr:  driver buffer to map.
 * @size:  number of bytes to map in driver buffer.
 * @dir:  R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/DMA-mapping.txt
 */
dma_addr_t
sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
                     struct dma_attrs *attrs)
{
        struct ioc *ioc;
        dma_addr_t iovp;
        dma_addr_t offset;
        u64 *pdir_start;
        int pide;
#ifdef ASSERT_PDIR_SANITY
        unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS
        unsigned long pci_addr = virt_to_phys(addr);
#endif

#ifdef ALLOW_IOV_BYPASS
        ASSERT(to_pci_dev(dev)->dma_mask);
        /*
        ** Check if the PCI device can DMA to ptr... if so, just return ptr
        */
        if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
                /*
                ** Device is capable of DMA'ing to the buffer...
                ** just return the PCI address of ptr
                */
                DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "
                           "0x%lx/0x%lx\n",
                           to_pci_dev(dev)->dma_mask, pci_addr);
                return pci_addr;
        }
#endif

        ioc = GET_IOC(dev);
        ASSERT(ioc);

        prefetch(ioc->res_hint);

        ASSERT(size > 0);
        ASSERT(size <= DMA_CHUNK_SIZE);

        /* save offset bits */
        offset = ((dma_addr_t) (long) addr) & ~iovp_mask;

        /* round up to nearest iovp_size */
        size = (size + offset + ~iovp_mask) & iovp_mask;

#ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
        if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
                panic("Sanity check failed");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

        pide = sba_alloc_range(ioc, dev, size);

        iovp = (dma_addr_t) pide << iovp_shift;

        DBG_RUN("%s() 0x%p -> 0x%lx\n", __func__, addr, (long) iovp | offset);

        pdir_start = &(ioc->pdir_base[pide]);

        while (size > 0) {
                ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
                sba_io_pdir_entry(pdir_start, (unsigned long) addr);

                DBG_RUN("     pdir 0x%p %lx\n", pdir_start, *pdir_start);

                addr += iovp_size;
                size -= iovp_size;
                pdir_start++;
        }
        /* force pdir update */
        wmb();

        /* form complete address */
#ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
        sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
        return SBA_IOVA(ioc, iovp, offset);
}
EXPORT_SYMBOL(sba_map_single_attrs);

#ifdef ENABLE_MARK_CLEAN
static SBA_INLINE void
sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
{
        u32 iovp = (u32) SBA_IOVP(ioc,iova);
        int off = PDIR_INDEX(iovp);
        void *addr;

        if (size <= iovp_size) {
                addr = phys_to_virt(ioc->pdir_base[off] &
                                    ~0xE000000000000FFFULL);
                mark_clean(addr, size);
        } else {
                do {
                        addr = phys_to_virt(ioc->pdir_base[off] &
                                            ~0xE000000000000FFFULL);
                        mark_clean(addr, min(size, iovp_size));
                        off++;
                        size -= iovp_size;
                } while (size > 0);
        }
}
#endif

/**
 * sba_unmap_single_attrs - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova:  IOVA of driver buffer previously mapped.
 * @size:  number of bytes mapped in driver buffer.
 * @dir:  R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/DMA-mapping.txt
 */
void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
                            int dir, struct dma_attrs *attrs)
{
        struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
        struct sba_dma_pair *d;
#endif
        unsigned long flags;
        dma_addr_t offset;

        ioc = GET_IOC(dev);
        ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS
        if (likely((iova & ioc->imask) != ioc->ibase)) {
                /*
                ** Address does not fall w/in IOVA, must be bypassing
                */
                DBG_BYPASS("sba_unmap_single_attrs() bypass addr: 0x%lx\n",
                           iova);

#ifdef ENABLE_MARK_CLEAN
                if (dir == DMA_FROM_DEVICE) {
                        mark_clean(phys_to_virt(iova), size);
                }
#endif
                return;
        }
#endif
        offset = iova & ~iovp_mask;

        DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);

        iova ^= offset;         /* clear offset bits */
        size += offset;
        size = ROUNDUP(size, iovp_size);

#ifdef ENABLE_MARK_CLEAN
        if (dir == DMA_FROM_DEVICE)
                sba_mark_clean(ioc, iova, size);
#endif

#if DELAYED_RESOURCE_CNT > 0
        spin_lock_irqsave(&ioc->saved_lock, flags);
        d = &(ioc->saved[ioc->saved_cnt]);
        d->iova = iova;
        d->size = size;
        if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) {
                int cnt = ioc->saved_cnt;
                spin_lock(&ioc->res_lock);
                while (cnt--) {
                        sba_mark_invalid(ioc, d->iova, d->size);
                        sba_free_range(ioc, d->iova, d->size);
                        d--;
                }
                ioc->saved_cnt = 0;
                READ_REG(ioc->ioc_hpa+IOC_PCOM);        /* flush purges */
                spin_unlock(&ioc->res_lock);
        }
        spin_unlock_irqrestore(&ioc->saved_lock, flags);
#else /* DELAYED_RESOURCE_CNT == 0 */
        spin_lock_irqsave(&ioc->res_lock, flags);
        sba_mark_invalid(ioc, iova, size);
        sba_free_range(ioc, iova, size);
        READ_REG(ioc->ioc_hpa+IOC_PCOM);        /* flush purges */
        spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif /* DELAYED_RESOURCE_CNT == 0 */
}
EXPORT_SYMBOL(sba_unmap_single_attrs);

/**
 * sba_alloc_coherent - allocate/map shared mem for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @dma_handle:  IOVA of new buffer.
 *
 * See Documentation/DMA-mapping.txt
 */
void *
sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
{
        struct ioc *ioc;
        void *addr;

        ioc = GET_IOC(dev);
        ASSERT(ioc);

#ifdef CONFIG_NUMA
        {
                struct page *page;
                page = alloc_pages_node(ioc->node == MAX_NUMNODES ?
                                        numa_node_id() : ioc->node, flags,
                                        get_order(size));

                if (unlikely(!page))
                        return NULL;

                addr = page_address(page);
        }
#else
        addr = (void *) __get_free_pages(flags, get_order(size));
#endif
        if (unlikely(!addr))
                return NULL;

        memset(addr, 0, size);
        *dma_handle = virt_to_phys(addr);

#ifdef ALLOW_IOV_BYPASS
        ASSERT(dev->coherent_dma_mask);
        /*
        ** Check if the PCI device can DMA to ptr... if so, just return ptr
        */
        if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) {
                DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
                           dev->coherent_dma_mask, *dma_handle);

                return addr;
        }
#endif

        /*
         * If device can't bypass or bypass is disabled, pass the 32bit fake
         * device to map single to get an iova mapping.
         */
        *dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
                                           size, 0, NULL);

        return addr;
}


/**
 * sba_free_coherent - free/unmap shared mem for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @vaddr:  virtual address IOVA of "consistent" buffer.
 * @dma_handle:  IO virtual address of "consistent" buffer.
 *
 * See Documentation/DMA-mapping.txt
 */
void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
        sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
        free_pages((unsigned long) vaddr, get_order(size));
}

/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x1UL
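
/*
** e.g. (a sketch, assuming iovp_shift == 12): a freshly allocated chunk
** at pdir index 0x40 with an in-page offset of 0x80 is stashed in
** dma_address as PIDE_FLAG | (0x40 << iovp_shift) | 0x80 == 0x40081;
** sba_fill_pdir() below strips the flag back off.
*/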

#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif


/**
 * sba_fill_pdir - write allocated SG entries into IO PDIR
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg:  list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * Take preprocessed SG list and write corresponding entries
 * in the IO PDIR.
 */
static SBA_INLINE int
sba_fill_pdir(
        struct ioc *ioc,
        struct scatterlist *startsg,
        int nents)
{
        struct scatterlist *dma_sg = startsg;   /* pointer to current DMA */
        int n_mappings = 0;
        u64 *pdirp = NULL;
        unsigned long dma_offset = 0;

        while (nents-- > 0) {
                int cnt = startsg->dma_length;
                startsg->dma_length = 0;

#ifdef DEBUG_LARGE_SG_ENTRIES
                if (dump_run_sg)
                        printk(" %2d : %08lx/%05x %p\n",
                                nents, startsg->dma_address, cnt,
                                sba_sg_address(startsg));
#else
                DBG_RUN_SG(" %d : %08lx/%05x %p\n",
                                nents, startsg->dma_address, cnt,
                                sba_sg_address(startsg));
#endif
                /*
                ** Look for the start of a new DMA stream
                */
                if (startsg->dma_address & PIDE_FLAG) {
                        u32 pide = startsg->dma_address & ~PIDE_FLAG;
                        dma_offset = (unsigned long) pide & ~iovp_mask;
                        startsg->dma_address = 0;
                        if (n_mappings)
                                dma_sg = sg_next(dma_sg);
                        dma_sg->dma_address = pide | ioc->ibase;
                        pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
                        n_mappings++;
                }

                /*
                ** Look for a VCONTIG chunk
                */
                if (cnt) {
                        unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
                        ASSERT(pdirp);

                        /* Since multiple Vcontig blocks could make up
                        ** one DMA stream, *add* cnt to dma_len.
                        */
                        dma_sg->dma_length += cnt;
                        cnt += dma_offset;
                        dma_offset=0;   /* only want offset on first chunk */
                        cnt = ROUNDUP(cnt, iovp_size);
                        do {
                                sba_io_pdir_entry(pdirp, vaddr);
                                vaddr += iovp_size;
                                cnt -= iovp_size;
                                pdirp++;
                        } while (cnt > 0);
                }
                startsg = sg_next(startsg);
        }
        /* force pdir update */
        wmb();

#ifdef DEBUG_LARGE_SG_ENTRIES
        dump_run_sg = 0;
#endif
        return(n_mappings);
}


/*
** Two address ranges are DMA contiguous *iff* "end of prev" and
** "start of next" are both on an IOV page boundary.
**
** (shift left is a quick trick to mask off upper bits)
*/
#define DMA_CONTIG(__X, __Y) \
        (((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)
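
/*
** e.g. with iovp_shift == 12 the OR of both addresses is shifted left
** by 52 bits, so only their low 12 bits survive; the test passes iff
** both addresses are 4KB aligned.
*/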

/**
 * sba_coalesce_chunks - preprocess the SG list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg:  list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * First pass is to walk the SG list and determine where the breaks are
 * in the DMA stream. Allocates PDIR entries but does not fill them.
 * Returns the number of DMA chunks.
 *
 * Doing the fill separate from the coalescing/allocation keeps the
 * code simpler. Future enhancement could make one pass through
 * the sglist do both.
 */
static SBA_INLINE int
sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
        struct scatterlist *startsg,
        int nents)
{
        struct scatterlist *vcontig_sg;    /* VCONTIG chunk head */
        unsigned long vcontig_len;         /* len of VCONTIG chunk */
        unsigned long vcontig_end;
        struct scatterlist *dma_sg;        /* next DMA stream head */
        unsigned long dma_offset, dma_len; /* start/len of DMA stream */
        int n_mappings = 0;
        unsigned int max_seg_size = dma_get_max_seg_size(dev);

        while (nents > 0) {
                unsigned long vaddr = (unsigned long) sba_sg_address(startsg);

                /*
                ** Prepare for first/next DMA stream
                */
                dma_sg = vcontig_sg = startsg;
                dma_len = vcontig_len = vcontig_end = startsg->length;
                vcontig_end += vaddr;
                dma_offset = vaddr & ~iovp_mask;

                /* PARANOID: clear entries */
                startsg->dma_address = startsg->dma_length = 0;

                /*
                ** This loop terminates one iteration "early" since
                ** it's always looking one "ahead".
                */
                while (--nents > 0) {
                        unsigned long vaddr;    /* tmp */

                        startsg = sg_next(startsg);

                        /* PARANOID */
                        startsg->dma_address = startsg->dma_length = 0;

                        /* catch brokenness in SCSI layer */
                        ASSERT(startsg->length <= DMA_CHUNK_SIZE);

                        /*
                        ** First make sure current dma stream won't
                        ** exceed DMA_CHUNK_SIZE if we coalesce the
                        ** next entry.
                        */
                        if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask)
                            > DMA_CHUNK_SIZE)
                                break;

                        if (dma_len + startsg->length > max_seg_size)
                                break;

                        /*
                        ** Then look for virtually contiguous blocks.
                        **
                        ** append the next transaction?
                        */
                        vaddr = (unsigned long) sba_sg_address(startsg);
                        if (vcontig_end == vaddr)
                        {
                                vcontig_len += startsg->length;
                                vcontig_end += startsg->length;
                                dma_len     += startsg->length;
                                continue;
                        }

#ifdef DEBUG_LARGE_SG_ENTRIES
                        dump_run_sg = (vcontig_len > iovp_size);
#endif

                        /*
                        ** Not virtually contiguous.
                        ** Terminate prev chunk.
                        ** Start a new chunk.
                        **
                        ** Once we start a new VCONTIG chunk, dma_offset
                        ** can't change. And we need the offset from the first
                        ** chunk - not the last one. Ergo successive chunks
                        ** must start on page boundaries and dovetail
                        ** with their predecessor.
                        */
                        vcontig_sg->dma_length = vcontig_len;

                        vcontig_sg = startsg;
                        vcontig_len = startsg->length;

                        /*
                        ** 3) do the entries end/start on page boundaries?
                        **    Don't update vcontig_end until we've checked.
                        */
                        if (DMA_CONTIG(vcontig_end, vaddr))
                        {
                                vcontig_end = vcontig_len + vaddr;
                                dma_len += vcontig_len;
                                continue;
                        } else {
                                break;
                        }
                }

                /*
                ** End of DMA Stream
                ** Terminate last VCONTIG block.
                ** Allocate space for DMA stream.
                */
                vcontig_sg->dma_length = vcontig_len;
                dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
                ASSERT(dma_len <= DMA_CHUNK_SIZE);
                dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
                        | (sba_alloc_range(ioc, dev, dma_len) << iovp_shift)
                        | dma_offset);
                n_mappings++;
        }

        return n_mappings;
}

/**
 * sba_map_sg_attrs - map Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @dir:  R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/DMA-mapping.txt
 */
int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
                     int dir, struct dma_attrs *attrs)
{
        struct ioc *ioc;
        int coalesced, filled = 0;
#ifdef ASSERT_PDIR_SANITY
        unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS_SG
        struct scatterlist *sg;
#endif

        DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
        ioc = GET_IOC(dev);
        ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS_SG
        ASSERT(to_pci_dev(dev)->dma_mask);
        if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
                for_each_sg(sglist, sg, nents, filled) {
                        sg->dma_length = sg->length;
                        sg->dma_address = virt_to_phys(sba_sg_address(sg));
                }
                return filled;
        }
#endif
        /* Fast path single entry scatterlists. */
        if (nents == 1) {
                sglist->dma_length = sglist->length;
                sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs);
                return 1;
        }

#ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
        if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()"))
        {
                sba_dump_sg(ioc, sglist, nents);
                panic("Check before sba_map_sg_attrs()");
        }
        spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

        prefetch(ioc->res_hint);

        /*
        ** First coalesce the chunks and allocate I/O pdir space
        **
        ** If this is one DMA stream, we can properly map using the
        ** correct virtual address associated with each DMA page.
        ** w/o this association, we wouldn't have coherent DMA!
        ** Access to the virtual address is what forces a two pass algorithm.
        */
        coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);

        /*
        ** Program the I/O Pdir
        **
        ** map the virtual addresses to the I/O Pdir
        ** o dma_address will contain the pdir index
        ** o dma_len will contain the number of bytes to map
        ** o address contains the virtual address.
        */
        filled = sba_fill_pdir(ioc, sglist, nents);

#ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
        if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()"))
        {
                sba_dump_sg(ioc, sglist, nents);
                panic("Check after sba_map_sg_attrs()\n");
        }
        spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

        ASSERT(coalesced == filled);
        DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

        return filled;
}
EXPORT_SYMBOL(sba_map_sg_attrs);

/**
 * sba_unmap_sg_attrs - unmap Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @dir:  R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/DMA-mapping.txt
 */
void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
                        int nents, int dir, struct dma_attrs *attrs)
{
#ifdef ASSERT_PDIR_SANITY
        struct ioc *ioc;
        unsigned long flags;
#endif

        DBG_RUN_SG("%s() START %d entries, %p,%x\n",
                   __func__, nents, sba_sg_address(sglist), sglist->length);

#ifdef ASSERT_PDIR_SANITY
        ioc = GET_IOC(dev);
        ASSERT(ioc);

        spin_lock_irqsave(&ioc->res_lock, flags);
        sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

        while (nents && sglist->dma_length) {

                sba_unmap_single_attrs(dev, sglist->dma_address,
                                       sglist->dma_length, dir, attrs);
                sglist = sg_next(sglist);
                nents--;
        }

        DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);

#ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
        sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

}
EXPORT_SYMBOL(sba_unmap_sg_attrs);
/**************************************************************
*
*   Initialization and claim
*
***************************************************************/

static void __init
ioc_iova_init(struct ioc *ioc)
{
        int tcnfg;
        int agp_found = 0;
        struct pci_dev *device = NULL;
#ifdef FULL_VALID_PDIR
        unsigned long index;
#endif

        /*
        ** Firmware programs the base and size of a "safe IOVA space"
        ** (one that doesn't overlap memory or LMMIO space) in the
        ** IBASE and IMASK registers.
        */
        ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
        ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL;

        ioc->iov_size = ~ioc->imask + 1;

        DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
                 __func__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
                 ioc->iov_size >> 20);

        switch (iovp_size) {
        case  4*1024: tcnfg = 0; break;
        case  8*1024: tcnfg = 1; break;
        case 16*1024: tcnfg = 2; break;
        case 64*1024: tcnfg = 3; break;
        default:
                panic(PFX "Unsupported IOTLB page size %ldK",
                      iovp_size >> 10);
                break;
        }
        WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

        ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE;
        ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
                                                   get_order(ioc->pdir_size));
        if (!ioc->pdir_base)
                panic(PFX "Couldn't allocate I/O Page Table\n");

        memset(ioc->pdir_base, 0, ioc->pdir_size);

        DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__,
                 iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);

        ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
        WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

        /*
        ** If an AGP device is present, only use half of the IOV space
        ** for PCI DMA.  Unfortunately we can't know ahead of time
        ** whether GART support will actually be used, for now we
        ** can just key on an AGP device found in the system.
        ** We program the next pdir index after we stop w/ a key for
        ** the GART code to handshake on.
        */
        for_each_pci_dev(device)
                agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);

        if (agp_found && reserve_sba_gart) {
                printk(KERN_INFO PFX "reserving %dMB of IOVA space at 0x%lx for agpgart\n",
                       ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2);
                ioc->pdir_size /= 2;
                ((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
        }
#ifdef FULL_VALID_PDIR
        /*
        ** Check to see if the spill page has been allocated; we don't need
        ** more than one across multiple SBAs.
        */
        if (!prefetch_spill_page) {
                char *spill_poison = "SBAIOMMU POISON";
                int poison_size = 16;
                void *poison_addr, *addr;

                addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size));
                if (!addr)
                        panic(PFX "Couldn't allocate PDIR spill page\n");

                poison_addr = addr;
                for ( ; (u64) poison_addr < (u64) addr + iovp_size; poison_addr += poison_size)
                        memcpy(poison_addr, spill_poison, poison_size);

                prefetch_spill_page = virt_to_phys(addr);

                DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __func__, prefetch_spill_page);
        }
        /*
        ** Set all the PDIR entries valid w/ the spill page as the target
        */
        for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++)
                ((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
#endif
        /* Clear I/O TLB of any possible entries */
        WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM);
        READ_REG(ioc->ioc_hpa + IOC_PCOM);

        /* Enable IOVA translation */
        WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
        READ_REG(ioc->ioc_hpa + IOC_IBASE);
}
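/*
 * Worked example of the sizing arithmetic above with a hypothetical IMASK
 * value (illustrative fragment, not part of the driver; assumes the
 * 8-byte pdir entries used throughout this file).
 */
#if 0
        u64 imask      = 0xFFFFFFFFC0000000UL;  /* as if read from IOC_IMASK */
        u64 iov_size   = ~imask + 1;            /* 0x40000000 = 1GB IOVA space */
        u64 pdir_bytes = (iov_size / (4*1024)) * PDIR_ENTRY_SIZE;
                                                /* 262144 entries * 8 = 2MB pdir */
#endif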
static void __init
ioc_resource_init(struct ioc *ioc)
{
        spin_lock_init(&ioc->res_lock);
#if DELAYED_RESOURCE_CNT > 0
        spin_lock_init(&ioc->saved_lock);
#endif

        /* resource map size dictated by pdir_size */
        ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
        ioc->res_size >>= 3;    /* convert bit count to byte count */
        DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);

        ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
                                                 get_order(ioc->res_size));
        if (!ioc->res_map)
                panic(PFX "Couldn't allocate resource map\n");

        memset(ioc->res_map, 0, ioc->res_size);
        /* next available IOVP - circular search */
        ioc->res_hint = (unsigned long *) ioc->res_map;

#ifdef ASSERT_PDIR_SANITY
        /* Mark first bit busy - ie no IOVA 0 */
        ioc->res_map[0] = 0x1;
        ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
#endif
#ifdef FULL_VALID_PDIR
        /* Mark the last resource used so we don't prefetch beyond IOVA space */
        ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */
        ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF
                                                                  | prefetch_spill_page);
#endif

        DBG_INIT("%s() res_map %x %p\n", __func__,
                 ioc->res_size, (void *) ioc->res_map);
}
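/*
 * Continuing the 1GB / 4K-page example: the resource map keeps one bit
 * per pdir entry, so the bit-to-byte shift above turns 262144 entries
 * into a 32KB bitmap (illustrative fragment only).
 */
#if 0
        unsigned int entries  = (2*1024*1024) / PDIR_ENTRY_SIZE; /* 262144 */
        unsigned int map_size = entries >> 3;                    /* 32KB */
#endif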
static void __init
ioc_sac_init(struct ioc *ioc)
{
        struct pci_dev *sac = NULL;
        struct pci_controller *controller = NULL;

        /*
         * pci_alloc_coherent() must return a DMA address which is
         * SAC (single address cycle) addressable, so allocate a
         * pseudo-device to enforce that.
         */
        sac = kzalloc(sizeof(*sac), GFP_KERNEL);
        if (!sac)
                panic(PFX "Couldn't allocate struct pci_dev");

        controller = kzalloc(sizeof(*controller), GFP_KERNEL);
        if (!controller)
                panic(PFX "Couldn't allocate struct pci_controller");

        controller->iommu = ioc;
        sac->sysdata = controller;
        sac->dma_mask = 0xFFFFFFFFUL;
#ifdef CONFIG_PCI
        sac->dev.bus = &pci_bus_type;
#endif
        ioc->sac_only_dev = sac;
}
static void __init
ioc_zx1_init(struct ioc *ioc)
{
        unsigned long rope_config;
        unsigned int i;

        if (ioc->rev < 0x20)
                panic(PFX "IOC 2.0 or later required for IOMMU support\n");

        /* 38 bit memory controller + extra bit for range displaced by MMIO */
        ioc->dma_mask = (0x1UL << 39) - 1;

        /*
        ** Clear ROPE(N)_CONFIG AO bit.
        ** Disables "NT Ordering" (~= !"Relaxed Ordering")
        ** Overrides bit 1 in DMA Hint Sets.
        ** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701.
        */
        for (i = 0; i < (8*8); i += 8) {
                rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i);
                rope_config &= ~IOC_ROPE_AO;
                WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i);
        }
}
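/*
 * Illustrative note: the loop above steps through the eight rope config
 * registers at an 8-byte stride, i.e. IOC_ROPE0_CFG + 0x00, +0x08, ...
 * +0x38 for ropes 0..7, clearing the AO bit in each.
 */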
typedef void (initfunc)(struct ioc *);

struct ioc_iommu {
        u32 func_id;
        char *name;
        initfunc *init;
};

static struct ioc_iommu ioc_iommu_info[] __initdata = {
        { ZX1_IOC_ID, "zx1", ioc_zx1_init },
        { ZX2_IOC_ID, "zx2", NULL },
        { SX1000_IOC_ID, "sx1000", NULL },
        { SX2000_IOC_ID, "sx2000", NULL },
};

static struct ioc * __init
ioc_init(u64 hpa, void *handle)
{
        struct ioc *ioc;
        struct ioc_iommu *info;

        ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
        if (!ioc)
                return NULL;

        ioc->next = ioc_list;
        ioc_list = ioc;

        ioc->handle = handle;
        ioc->ioc_hpa = ioremap(hpa, 0x1000);

        ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
        ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
        ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL;   /* conservative */

        for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
                if (ioc->func_id == info->func_id) {
                        ioc->name = info->name;
                        if (info->init)
                                (info->init)(ioc);
                }
        }

        iovp_size = (1 << iovp_shift);
        iovp_mask = ~(iovp_size - 1);

        DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __func__,
                 PAGE_SIZE >> 10, iovp_size >> 10);

        if (!ioc->name) {
                ioc->name = kmalloc(24, GFP_KERNEL);
                if (ioc->name)
                        sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
                                ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
                else
                        ioc->name = "Unknown";
        }

        ioc_iova_init(ioc);
        ioc_resource_init(ioc);
        ioc_sac_init(ioc);

        if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask)
                ia64_max_iommu_merge_mask = ~iovp_mask;

        printk(KERN_INFO PFX
               "%s %d.%d HPA 0x%lx IOVA space %dMB at 0x%lx\n",
               ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
               hpa, ioc->iov_size >> 20, ioc->ibase);

        return ioc;
}
/**************************************************************************
**
**   SBA initialization code (HW and SW)
**
**   o identify SBA chip itself
**   o FIXME: initialize DMA hints for reasonable defaults
**
**************************************************************************/

#ifdef CONFIG_PROC_FS
static void *
ioc_start(struct seq_file *s, loff_t *pos)
{
        struct ioc *ioc;
        loff_t n = *pos;

        for (ioc = ioc_list; ioc; ioc = ioc->next)
                if (!n--)
                        return ioc;

        return NULL;
}

static void *
ioc_next(struct seq_file *s, void *v, loff_t *pos)
{
        struct ioc *ioc = v;

        ++*pos;
        return ioc->next;
}

static void
ioc_stop(struct seq_file *s, void *v)
{
}

static int
ioc_show(struct seq_file *s, void *v)
{
        struct ioc *ioc = v;
        unsigned long *res_ptr = (unsigned long *)ioc->res_map;
        int i, used = 0;

        seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
                   ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
#ifdef CONFIG_NUMA
        if (ioc->node != MAX_NUMNODES)
                seq_printf(s, "NUMA node       : %d\n", ioc->node);
#endif
        seq_printf(s, "IOVA size       : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024));
        seq_printf(s, "IOVA page size  : %ld KB\n", iovp_size/1024);

        for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
                used += hweight64(*res_ptr);

        seq_printf(s, "PDIR size       : %d entries\n", ioc->pdir_size >> 3);
        seq_printf(s, "PDIR used       : %d entries\n", used);

#ifdef PDIR_SEARCH_TIMING
        {
                unsigned long i = 0, avg = 0, min, max;
                min = max = ioc->avg_search[0];
                for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
                        avg += ioc->avg_search[i];
                        if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
                        if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
                }
                avg /= SBA_SEARCH_SAMPLE;
                seq_printf(s, "Bitmap search   : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
                           min, avg, max);
        }
#endif
#ifndef ALLOW_IOV_BYPASS
        seq_printf(s, "IOVA bypass disabled\n");
#endif
        return 0;
}

static const struct seq_operations ioc_seq_ops = {
        .start = ioc_start,
        .next  = ioc_next,
        .stop  = ioc_stop,
        .show  = ioc_show
};

static int
ioc_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &ioc_seq_ops);
}

static const struct file_operations ioc_fops = {
        .open    = ioc_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release
};

static void __init
ioc_proc_init(void)
{
        struct proc_dir_entry *dir;

        dir = proc_mkdir("bus/mckinley", NULL);
        if (!dir)
                return;

        proc_create(ioc_list->name, 0, dir, &ioc_fops);
}
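/*
 * Usage note (not from the driver itself): with CONFIG_PROC_FS enabled,
 * this creates a read-only file under /proc/bus/mckinley/ named after the
 * head of ioc_list (e.g. "zx1"), backed by the seq_file operations above;
 * reading it dumps the IOVA size, page size, and pdir usage from
 * ioc_show() for every claimed IOC.
 */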
#endif

static void
sba_connect_bus(struct pci_bus *bus)
{
        acpi_handle handle, parent;
        acpi_status status;
        struct ioc *ioc;

        if (!PCI_CONTROLLER(bus))
                panic(PFX "no sysdata on bus %d!\n", bus->number);

        if (PCI_CONTROLLER(bus)->iommu)
                return;

        handle = PCI_CONTROLLER(bus)->acpi_handle;
        if (!handle)
                return;

        /*
         * The IOC scope encloses PCI root bridges in the ACPI
         * namespace, so work our way out until we find an IOC we
         * claimed previously.
         */
        do {
                for (ioc = ioc_list; ioc; ioc = ioc->next)
                        if (ioc->handle == handle) {
                                PCI_CONTROLLER(bus)->iommu = ioc;
                                return;
                        }

                status = acpi_get_parent(handle, &parent);
                handle = parent;
        } while (ACPI_SUCCESS(status));

        printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n",
               pci_domain_nr(bus), bus->number);
}

#ifdef CONFIG_NUMA
static void __init
sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
{
        unsigned int node;
        int pxm;

        ioc->node = MAX_NUMNODES;

        pxm = acpi_get_pxm(handle);
        if (pxm < 0)
                return;

        node = pxm_to_node(pxm);
        if (node >= MAX_NUMNODES || !node_online(node))
                return;

        ioc->node = node;
}
#else
#define sba_map_ioc_to_node(ioc, handle)
#endif

static int __init
acpi_sba_ioc_add(struct acpi_device *device)
{
        struct ioc *ioc;
        acpi_status status;
        u64 hpa, length;
        struct acpi_buffer buffer;
        struct acpi_device_info *dev_info;

        status = hp_acpi_csr_space(device->handle, &hpa, &length);
        if (ACPI_FAILURE(status))
                return 1;

        buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
        status = acpi_get_object_info(device->handle, &buffer);
        if (ACPI_FAILURE(status))
                return 1;
        dev_info = buffer.pointer;

        /*
         * For HWP0001, only SBA appears in the ACPI namespace.  It encloses
         * the PCI root bridges, and its CSR space includes the IOC function.
         */
        if (strncmp("HWP0001", dev_info->hardware_id.value, 7) == 0) {
                hpa += ZX1_IOC_OFFSET;
                /* zx1 based systems default to kernel page size iommu pages */
                if (!iovp_shift)
                        iovp_shift = min(PAGE_SHIFT, 16);
        }
        kfree(dev_info);

        /*
         * default anything not caught above or specified on cmdline to 4k
         * iommu page size
         */
        if (!iovp_shift)
                iovp_shift = 12;

        ioc = ioc_init(hpa, device->handle);
        if (!ioc)
                return 1;

        /* setup NUMA node association */
        sba_map_ioc_to_node(ioc, device->handle);
        return 0;
}
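/*
 * Worked example of the iovp_shift selection above (illustrative only):
 * on an HWP0001 system built with 64KB kernel pages, PAGE_SHIFT is 16,
 * so iovp_shift = min(16, 16) = 16 (64KB IOMMU pages); with 16KB kernel
 * pages it would be 14.  Anything not caught here falls back to 12 (4KB)
 * unless sbapagesize= overrode it on the command line.
 */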
static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
        {"HWP0001", 0},
        {"HWP0004", 0},
        {"", 0},
};

static struct acpi_driver acpi_sba_ioc_driver = {
        .name = "IOC IOMMU Driver",
        .ids  = hp_ioc_iommu_device_ids,
        .ops  = {
                .add = acpi_sba_ioc_add,
        },
};

static int __init
sba_init(void)
{
        if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
                return 0;

#if defined(CONFIG_IA64_GENERIC) && defined(CONFIG_CRASH_DUMP) && \
        defined(CONFIG_PROC_FS)
        /* If we are booting a kdump kernel, the sba_iommu will cause
         * devices that were not shut down properly to MCA as soon as
         * they are turned back on.  Our only option for a successful
         * kdump kernel boot is to use the swiotlb.
         */
        if (elfcorehdr_addr < ELFCORE_ADDR_MAX) {
                if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
                        panic("Unable to initialize software I/O TLB:"
                              " Try machvec=dig boot option");
                machvec_init("dig");
                return 0;
        }
#endif

        acpi_bus_register_driver(&acpi_sba_ioc_driver);
        if (!ioc_list) {
#ifdef CONFIG_IA64_GENERIC
                /*
                 * If we didn't find something sba_iommu can claim, we
                 * need to set up the swiotlb and switch to the dig machvec.
                 */
                if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
                        panic("Unable to find SBA IOMMU or initialize "
                              "software I/O TLB: Try machvec=dig boot option");
                machvec_init("dig");
#else
                panic("Unable to find SBA IOMMU: Try a generic or DIG kernel");
#endif
                return 0;
        }

#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
        /*
         * hpzx1_swiotlb needs to have a fairly small swiotlb bounce
         * buffer setup to support devices with smaller DMA masks than
         * sba_iommu can handle.
         */
        if (ia64_platform_is("hpzx1_swiotlb")) {
                extern void hwsw_init(void);

                hwsw_init();
        }
#endif

#ifdef CONFIG_PCI
        {
                struct pci_bus *b = NULL;
                while ((b = pci_find_next_bus(b)) != NULL)
                        sba_connect_bus(b);
        }
#endif

#ifdef CONFIG_PROC_FS
        ioc_proc_init();
#endif
        return 0;
}

subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */

static int __init
nosbagart(char *str)
{
        reserve_sba_gart = 0;
        return 1;
}

int
sba_dma_supported(struct device *dev, u64 mask)
{
        /* make sure it's at least 32bit capable */
        return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
}
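/*
 * Illustrative check of the mask test above (not part of the driver):
 * a mask passes only if its low 32 bits are all set, so a 24-bit
 * ISA-style mask fails while 32-bit and 64-bit masks succeed.
 */
#if 0
static void example_mask_checks(struct device *dev)
{
        sba_dma_supported(dev, DMA_BIT_MASK(32));  /* 0xFFFFFFFF -> 1 */
        sba_dma_supported(dev, DMA_BIT_MASK(24));  /* 0x00FFFFFF -> 0 */
        sba_dma_supported(dev, DMA_BIT_MASK(64));  /* low 32 bits set -> 1 */
}
#endif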
int
sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

__setup("nosbagart", nosbagart);

static int __init
sba_page_override(char *str)
{
        unsigned long page_size;

        page_size = memparse(str, &str);
        switch (page_size) {
        case 4096:
        case 8192:
        case 16384:
        case 65536:
                iovp_shift = ffs(page_size) - 1;
                break;
        default:
                printk("%s: unknown/unsupported iommu page size %ld\n",
                       __func__, page_size);
        }

        return 1;
}

__setup("sbapagesize=", sba_page_override);
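/*
 * Worked example of the override above (illustrative only): booting with
 * "sbapagesize=16k" makes memparse() return 16384, so iovp_shift becomes
 * ffs(16384) - 1 = 14 and the IOMMU uses 16KB pages.  Sizes other than
 * 4k/8k/16k/64k are rejected with the printk above and leave iovp_shift
 * untouched.
 */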
EXPORT_SYMBOL(sba_dma_mapping_error);
EXPORT_SYMBOL(sba_dma_supported);
EXPORT_SYMBOL(sba_alloc_coherent);
EXPORT_SYMBOL(sba_free_coherent);