sba_iommu.c

/*
** IA64 System Bus Adapter (SBA) I/O MMU manager
**
** (c) Copyright 2002-2005 Alex Williamson
** (c) Copyright 2002-2003 Grant Grundler
** (c) Copyright 2002-2005 Hewlett-Packard Company
**
** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
**
** This module initializes the IOC (I/O Controller) found on HP
** McKinley machines and their successors.
**
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/bitops.h>       /* hweight64() */

#include <asm/delay.h>          /* ia64_get_itc() */
#include <asm/io.h>
#include <asm/page.h>           /* PAGE_OFFSET */
#include <asm/dma.h>
#include <asm/system.h>         /* wmb() */

#include <asm/acpi-ext.h>
#define PFX "IOC: "

/*
** Enables timing of each search of the pdir resource map; results are
** reported in /proc. Disabled by default to avoid the overhead.
*/
#undef PDIR_SEARCH_TIMING

/*
** This option allows cards capable of 64bit DMA to bypass the IOMMU. If
** not defined, all DMA will be 32bit and go through the TLB.
** There's potentially a conflict in the bio merge code with us
** advertising an iommu, but then bypassing it. Since I/O MMU bypassing
** appears to give more performance than bio-level virtual merging, we'll
** do the former for now. NOTE: ALLOW_IOV_BYPASS_SG also needs to be
** undef'd to completely restrict DMA to the IOMMU.
*/
#define ALLOW_IOV_BYPASS

/*
** This option specifically allows/disallows bypassing scatterlists with
** multiple entries. Coalescing these entries can allow better DMA streaming
** and in some cases shows better performance than entirely bypassing the
** IOMMU. Performance increase on the order of 1-2% sequential output/input
** using bonnie++ on a RAID0 MD device (sym2 & mpt).
*/
#undef ALLOW_IOV_BYPASS_SG

/*
** If a device prefetches beyond the end of a valid pdir entry, it will cause
** a hard failure, i.e. an MCA. Version 3.0 and later of the zx1 LBA should
** disconnect on 4k boundaries and prevent such issues. If the device is
** particularly aggressive, this option will keep the entire pdir valid such
** that prefetching will hit a valid address. This could severely impact
** error containment, and is therefore off by default. The page that is
** used for spill-over is poisoned, so that should help debugging somewhat.
*/
#undef FULL_VALID_PDIR

#define ENABLE_MARK_CLEAN

/*
** The number of debug flags is a clue - this code is fragile. NOTE: since
** tightening the use of res_lock the resource bitmap and actual pdir are no
** longer guaranteed to stay in sync. The sanity checking code isn't going to
** like that.
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_BYPASS

#if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
#error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
#endif
#define SBA_INLINE      __inline__
/* #define SBA_INLINE */

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)  printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)   printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)        printk(x)
#else
#define DBG_RUN_SG(x...)
#endif

#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)   printk(x)
#else
#define DBG_RES(x...)
#endif

#ifdef DEBUG_BYPASS
#define DBG_BYPASS(x...)        printk(x)
#else
#define DBG_BYPASS(x...)
#endif

#ifdef ASSERT_PDIR_SANITY
#define ASSERT(expr) \
        if(!(expr)) { \
                printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n", __LINE__); \
                panic(#expr); \
        }
#else
#define ASSERT(expr)
#endif
/*
** The number of pdir entries to "free" before issuing
** a read to the PCOM register to flush out PCOM writes.
** Interacts with allocation granularity (ie 4 or 8 entries
** allocated and free'd/purged at a time might make this
** less interesting).
*/
#define DELAYED_RESOURCE_CNT    64

#define PCI_DEVICE_ID_HP_SX2000_IOC     0x12ec

#define ZX1_IOC_ID      ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
#define ZX2_IOC_ID      ((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP)
#define REO_IOC_ID      ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX1000_IOC_ID   ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX2000_IOC_ID   ((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP)

#define ZX1_IOC_OFFSET  0x1000  /* ACPI reports SBA, we want IOC */

#define IOC_FUNC_ID     0x000
#define IOC_FCLASS      0x008   /* function class, bist, header, rev... */
#define IOC_IBASE       0x300   /* IO TLB */
#define IOC_IMASK       0x308
#define IOC_PCOM        0x310
#define IOC_TCNFG       0x318
#define IOC_PDIR_BASE   0x320

#define IOC_ROPE0_CFG   0x500
#define IOC_ROPE_AO     0x10    /* Allow "Relaxed Ordering" */

/* AGP GART driver looks for this */
#define ZX1_SBA_IOMMU_COOKIE    0x0000badbadc0ffeeUL
/*
** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register)
**
** Some IOCs (sx1000) can run at the above page sizes, but are
** really only supported using the IOC at a 4k page size.
**
** iovp_size could only be greater than PAGE_SIZE if we are
** confident the drivers really only touch the next physical
** page iff that driver instance owns it.
*/
static unsigned long iovp_size;
static unsigned long iovp_shift;
static unsigned long iovp_mask;

struct ioc {
        void __iomem    *ioc_hpa;       /* I/O MMU base address */
        char            *res_map;       /* resource map, bit == pdir entry */
        u64             *pdir_base;     /* physical base address */
        unsigned long   ibase;          /* pdir IOV Space base */
        unsigned long   imask;          /* pdir IOV Space mask */

        unsigned long   *res_hint;      /* next avail IOVP - circular search */
        unsigned long   dma_mask;
        spinlock_t      res_lock;       /* protects the resource bitmap, but must also be */
                                        /* held when clearing pdir to prevent races with allocations. */
        unsigned int    res_bitshift;   /* from the RIGHT! */
        unsigned int    res_size;       /* size of resource map in bytes */
#ifdef CONFIG_NUMA
        unsigned int    node;           /* node where this IOC lives */
#endif
#if DELAYED_RESOURCE_CNT > 0
        spinlock_t      saved_lock;     /* may want to try to get this on a separate cacheline */
                                        /* from res_lock for bigger systems. */
        int             saved_cnt;
        struct sba_dma_pair {
                dma_addr_t      iova;
                size_t          size;
        } saved[DELAYED_RESOURCE_CNT];
#endif

#ifdef PDIR_SEARCH_TIMING
#define SBA_SEARCH_SAMPLE       0x100
        unsigned long avg_search[SBA_SEARCH_SAMPLE];
        unsigned long avg_idx;          /* current index into avg_search */
#endif

        /* Stuff we don't need in performance path */
        struct ioc      *next;          /* list of IOC's in system */
        acpi_handle     handle;         /* for multiple IOC's */
        const char      *name;
        unsigned int    func_id;
        unsigned int    rev;            /* HW revision of chip */
        u32             iov_size;
        unsigned int    pdir_size;      /* in bytes, determined by IOV Space size */
        struct pci_dev  *sac_only_dev;
};

static struct ioc *ioc_list;
static int reserve_sba_gart = 1;

static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);

#define sba_sg_address(sg)      (page_address((sg)->page) + (sg)->offset)

#ifdef FULL_VALID_PDIR
static u64 prefetch_spill_page;
#endif

#ifdef CONFIG_PCI
# define GET_IOC(dev)   (((dev)->bus == &pci_bus_type)                                          \
                         ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
#else
# define GET_IOC(dev)   NULL
#endif

/*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
** (or rather not merge) DMAs into manageable chunks.
** On parisc, this is more of a software/tuning constraint
** than a HW one. I/O MMU allocation algorithms can be
** faster with smaller sizes (to some degree).
*/
#define DMA_CHUNK_SIZE  (BITS_PER_LONG*iovp_size)

#define ROUNDUP(x,y)    ((x + ((y)-1)) & ~((y)-1))
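
/*
** Worked example (illustrative, assuming iovp_size == 4096 so y == 0x1000):
** ROUNDUP(0x1234, 0x1000) == 0x2000 and ROUNDUP(0x1000, 0x1000) == 0x1000,
** i.e. x is rounded up to the next multiple of the power-of-two y.
*/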
/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
**  (ie follow writes which must reach HW with a read)
**
*/
#define READ_REG(addr)       __raw_readq(addr)
#define WRITE_REG(val, addr) __raw_writeq(val, addr)
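
/*
** Illustrative sketch of the posted-write idiom used throughout this
** file: a register write that must reach the HW (e.g. a PCOM purge)
** is followed by a read back from the IOC before the driver relies
** on its effect:
**
**      WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa + IOC_PCOM);
**      READ_REG(ioc->ioc_hpa + IOC_PCOM);      -- flush the posted write
*/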
#ifdef DEBUG_SBA_INIT

/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU PDIR.
 */
static void
sba_dump_tlb(char *hpa)
{
        DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
        DBG_INIT("IOC_IBASE    : %016lx\n", READ_REG(hpa+IOC_IBASE));
        DBG_INIT("IOC_IMASK    : %016lx\n", READ_REG(hpa+IOC_IMASK));
        DBG_INIT("IOC_TCNFG    : %016lx\n", READ_REG(hpa+IOC_TCNFG));
        DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
        DBG_INIT("\n");
}
#endif


#ifdef ASSERT_PDIR_SANITY

/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU PDIR in human readable form.
 */
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
        /* start printing from lowest pde in rval */
        u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)];
        unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >> 3) & -sizeof(unsigned long)];
        uint rcnt;

        printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
               msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);

        rcnt = 0;
        while (rcnt < BITS_PER_LONG) {
                printk(KERN_DEBUG "%s %2d %p %016Lx\n",
                       (rcnt == (pide & (BITS_PER_LONG - 1)))
                       ? "    -->" : "       ",
                       rcnt, ptr, (unsigned long long) *ptr );
                rcnt++;
                ptr++;
        }
        printk(KERN_DEBUG "%s", msg);
}

/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify that the resource map and pdir state are consistent.
 */
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
        u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
        u64 *rptr = (u64 *) ioc->res_map;       /* resource map ptr */
        u64 *pptr = ioc->pdir_base;             /* pdir ptr */
        uint pide = 0;

        while (rptr < rptr_end) {
                u64 rval;
                int rcnt;       /* number of bits we might check */

                rval = *rptr;
                rcnt = 64;

                while (rcnt) {
                        /* Get last byte and highest bit from that */
                        u32 pde = ((u32)((*pptr >> (63)) & 0x1));
                        if ((rval & 0x1) ^ pde)
                        {
                                /*
                                ** BUMMER!  -- res_map != pdir --
                                ** Dump rval and matching pdir entries
                                */
                                sba_dump_pdir_entry(ioc, msg, pide);
                                return(1);
                        }
                        rcnt--;
                        rval >>= 1;     /* try the next bit */
                        pptr++;
                        pide++;
                }
                rptr++; /* look at next word of res_map */
        }
        /* It'd be nice if we always got here :^) */
        return 0;
}

/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * print the SG list so we can verify it's correct by hand.
 */
static void
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
        while (nents-- > 0) {
                printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
                       startsg->dma_address, startsg->dma_length,
                       sba_sg_address(startsg));
                startsg++;
        }
}

static void
sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
        struct scatterlist *the_sg = startsg;
        int the_nents = nents;

        while (the_nents-- > 0) {
                if (sba_sg_address(the_sg) == 0x0UL)
                        sba_dump_sg(NULL, startsg, nents);
                the_sg++;
        }
}

#endif /* ASSERT_PDIR_SANITY */

/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1       /* could increase this to 4 or 8 if needed */

/* Convert from IOVP to IOVA and vice versa. */
#define SBA_IOVA(ioc,iovp,offset)       ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova)              ((iova) & ~(ioc->ibase))

#define PDIR_ENTRY_SIZE sizeof(u64)

#define PDIR_INDEX(iovp)        ((iovp)>>iovp_shift)

#define RESMAP_MASK(n)          ~(~0UL << (n))
#define RESMAP_IDX_MASK         (sizeof(unsigned long) - 1)
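
/*
** Worked example (illustrative, assuming ibase == 0x40000000 and
** iovp_shift == 12): a mapping at pdir index 5 with an in-page offset
** of 0x68 gives
**
**      iovp = 5 << 12                      == 0x5000
**      SBA_IOVA(ioc, 0x5000, 0x68)         == 0x40005068
**      SBA_IOVP(ioc, 0x40005068)           == 0x5068   (ibase stripped)
**      PDIR_INDEX(0x5000)                  == 5
**      RESMAP_MASK(3)                      == 0x7      (3 contiguous bits)
*/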

/**
 * For most cases the normal get_order is sufficient, however it limits us
 * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity.
 * It only incurs about 1 clock cycle to use this one with the static variable
 * and makes the code more intuitive.
 */
static SBA_INLINE int
get_iovp_order (unsigned long size)
{
        long double d = size - 1;
        long order;

        order = ia64_getf_exp(d);
        order = order - iovp_shift - 0xffff + 1;
        if (order < 0)
                order = 0;
        return order;
}
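
/*
** Worked example (illustrative, assuming iovp_shift == 12, i.e. 4KB IOV
** pages): for size == 16KB, (size - 1) == 16383 has binary exponent 13,
** and ia64_getf_exp() returns that exponent with the IEEE bias 0xffff
** added -- hence the "- 0xffff" above.  So order == 13 - 12 + 1 == 2,
** matching get_order(16KB) for 4KB pages (2^2 == 4 pages).  A size of
** one IOV page or less yields order 0.
*/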

/**
 * sba_search_bitmap - find free space in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @bits_wanted: number of entries we need.
 * @use_hint: use res_hint to indicate where to start looking
 *
 * Find consecutive free bits in resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Cool perf optimization: search for log2(size) bits at a time.
 */
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
{
        unsigned long *res_ptr;
        unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
        unsigned long flags, pide = ~0UL;

        ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
        ASSERT(res_ptr < res_end);

        spin_lock_irqsave(&ioc->res_lock, flags);

        /* Allow caller to force a search through the entire resource space */
        if (likely(use_hint)) {
                res_ptr = ioc->res_hint;
        } else {
                res_ptr = (ulong *)ioc->res_map;
                ioc->res_bitshift = 0;
        }

        /*
         * N.B.  REO/Grande defect AR2305 can cause TLB fetch timeouts
         * if a TLB entry is purged while in use.  sba_mark_invalid()
         * purges IOTLB entries in power-of-two sizes, so we also
         * allocate IOVA space in power-of-two sizes.
         */
        bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift);
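        /*
        ** e.g. (illustrative, iovp_shift == 12): a request for 5 entries
        ** becomes 1UL << get_iovp_order(5 << 12) == 1UL << 3 == 8 entries,
        ** so the allocation and the later PCOM purge both cover a single
        ** naturally aligned power-of-two range.
        */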

        if (likely(bits_wanted == 1)) {
                unsigned int bitshiftcnt;
                for(; res_ptr < res_end ; res_ptr++) {
                        if (likely(*res_ptr != ~0UL)) {
                                bitshiftcnt = ffz(*res_ptr);
                                *res_ptr |= (1UL << bitshiftcnt);
                                pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
                                pide <<= 3;     /* convert to bit address */
                                pide += bitshiftcnt;
                                ioc->res_bitshift = bitshiftcnt + bits_wanted;
                                goto found_it;
                        }
                }
                goto not_found;
        }

        if (likely(bits_wanted <= BITS_PER_LONG/2)) {
                /*
                ** Search the resource bit map on well-aligned values.
                ** "o" is the alignment.
                ** We need the alignment to invalidate I/O TLB using
                ** SBA HW features in the unmap path.
                */
                unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift);
                uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
                unsigned long mask, base_mask;

                base_mask = RESMAP_MASK(bits_wanted);
                mask = base_mask << bitshiftcnt;

                DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr);
                for(; res_ptr < res_end ; res_ptr++)
                {
                        DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
                        ASSERT(0 != mask);
                        for (; mask ; mask <<= o, bitshiftcnt += o) {
                                if(0 == ((*res_ptr) & mask)) {
                                        *res_ptr |= mask;       /* mark resources busy! */
                                        pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
                                        pide <<= 3;     /* convert to bit address */
                                        pide += bitshiftcnt;
                                        ioc->res_bitshift = bitshiftcnt + bits_wanted;
                                        goto found_it;
                                }
                        }

                        bitshiftcnt = 0;
                        mask = base_mask;
                }

        } else {
                int qwords, bits, i;
                unsigned long *end;

                qwords = bits_wanted >> 6; /* /64 */
                bits = bits_wanted - (qwords * BITS_PER_LONG);

                end = res_end - qwords;

                for (; res_ptr < end; res_ptr++) {
                        for (i = 0 ; i < qwords ; i++) {
                                if (res_ptr[i] != 0)
                                        goto next_ptr;
                        }
                        if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits))
                                continue;

                        /* Found it, mark it */
                        for (i = 0 ; i < qwords ; i++)
                                res_ptr[i] = ~0UL;
                        res_ptr[i] |= RESMAP_MASK(bits);

                        pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
                        pide <<= 3;     /* convert to bit address */
                        res_ptr += qwords;
                        ioc->res_bitshift = bits;
                        goto found_it;
next_ptr:
                        ;
                }
        }

not_found:
        prefetch(ioc->res_map);
        ioc->res_hint = (unsigned long *) ioc->res_map;
        ioc->res_bitshift = 0;
        spin_unlock_irqrestore(&ioc->res_lock, flags);
        return (pide);

found_it:
        ioc->res_hint = res_ptr;
        spin_unlock_irqrestore(&ioc->res_lock, flags);
        return (pide);
}

/**
 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked bits in the resource bitmap
 * and mark them.
 */
static int
sba_alloc_range(struct ioc *ioc, size_t size)
{
        unsigned int pages_needed = size >> iovp_shift;
#ifdef PDIR_SEARCH_TIMING
        unsigned long itc_start;
#endif
        unsigned long pide;

        ASSERT(pages_needed);
        ASSERT(0 == (size & ~iovp_mask));

#ifdef PDIR_SEARCH_TIMING
        itc_start = ia64_get_itc();
#endif
        /*
        ** "seek and ye shall find"...praying never hurts either...
        */
        pide = sba_search_bitmap(ioc, pages_needed, 1);
        if (unlikely(pide >= (ioc->res_size << 3))) {
                pide = sba_search_bitmap(ioc, pages_needed, 0);
                if (unlikely(pide >= (ioc->res_size << 3))) {
#if DELAYED_RESOURCE_CNT > 0
                        unsigned long flags;

                        /*
                        ** With delayed resource freeing, we can give this one more shot.  We're
                        ** getting close to being in trouble here, so do what we can to make this
                        ** one count.
                        */
                        spin_lock_irqsave(&ioc->saved_lock, flags);
                        if (ioc->saved_cnt > 0) {
                                struct sba_dma_pair *d;
                                int cnt = ioc->saved_cnt;

                                d = &(ioc->saved[ioc->saved_cnt - 1]);

                                spin_lock(&ioc->res_lock);
                                while (cnt--) {
                                        sba_mark_invalid(ioc, d->iova, d->size);
                                        sba_free_range(ioc, d->iova, d->size);
                                        d--;
                                }
                                ioc->saved_cnt = 0;
                                READ_REG(ioc->ioc_hpa+IOC_PCOM);        /* flush purges */
                                spin_unlock(&ioc->res_lock);
                        }
                        spin_unlock_irqrestore(&ioc->saved_lock, flags);

                        pide = sba_search_bitmap(ioc, pages_needed, 0);
                        if (unlikely(pide >= (ioc->res_size << 3)))
                                panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
                                      ioc->ioc_hpa);
#else
                        panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
                              ioc->ioc_hpa);
#endif
                }
        }

#ifdef PDIR_SEARCH_TIMING
        ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed;
        ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
#endif

        prefetchw(&(ioc->pdir_base[pide]));

#ifdef ASSERT_PDIR_SANITY
        /* verify the first enable bit is clear */
        if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) {
                sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
        }
#endif

        DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
                __FUNCTION__, size, pages_needed, pide,
                (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
                ioc->res_bitshift );

        return (pide);
}

/**
 * sba_free_range - unmark bits in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes of the mapping being freed.
 *
 * clear bits in the ioc's resource map
 */
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
        unsigned long iovp = SBA_IOVP(ioc, iova);
        unsigned int pide = PDIR_INDEX(iovp);
        unsigned int ridx = pide >> 3;  /* convert bit to byte address */
        unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
        int bits_not_wanted = size >> iovp_shift;
        unsigned long m;

        /* Round up to power-of-two size: see AR2305 note above */
        bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift);
        for (; bits_not_wanted > 0 ; res_ptr++) {

                if (unlikely(bits_not_wanted > BITS_PER_LONG)) {

                        /* these mappings start 64bit aligned */
                        *res_ptr = 0UL;
                        bits_not_wanted -= BITS_PER_LONG;
                        pide += BITS_PER_LONG;

                } else {

                        /* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
                        m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
                        bits_not_wanted = 0;

                        DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __FUNCTION__, (uint) iova, size,
                                bits_not_wanted, m, pide, res_ptr, *res_ptr);

                        ASSERT(m != 0);
                        ASSERT(bits_not_wanted);
                        ASSERT((*res_ptr & m) == m); /* verify same bits are set */
                        *res_ptr &= ~m;
                }
        }
}

/**************************************************************
*
* "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

/**
 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @pdir_ptr:  pointer to IO PDIR entry
 * @vba: Virtual CPU address of buffer to map
 *
 * SBA Mapping Routine
 *
 * Given a virtual address (vba, arg1) sba_io_pdir_entry()
 * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
 * Each IO Pdir entry consists of 8 bytes as shown below
 * (LSB == bit 0):
 *
 *  63                    40                                 11    7        0
 * +-+---------------------+----------------------------------+----+--------+
 * |V|         U           |            PPN[39:12]            | U  |   FF   |
 * +-+---------------------+----------------------------------+----+--------+
 *
 *  V  == Valid Bit
 *  U  == Unused
 * PPN == Physical Page Number
 *
 * The physical address fields are filled with the results of virt_to_phys()
 * on the vba.
 */
#if 1
#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL)    \
                                                      | 0x8000000000000000ULL)
#else
void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
{
        *pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
}
#endif
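
/*
** Worked example (illustrative): for a region-7 kernel virtual address
** vba == 0xe000000004345128, masking with ~0xE000000000000FFFULL clears
** the region bits and the low 12 bits, leaving the physical page bits
** 0x0000000004345000; OR'ing in bit 63 sets V, so the entry written is
** 0x8000000004345000.  The byte offset within the page never reaches
** the pdir -- it is carried in the IOVA itself (see SBA_IOVA above).
*/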

#ifdef ENABLE_MARK_CLEAN
/**
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
static void
mark_clean (void *addr, size_t size)
{
        unsigned long pg_addr, end;

        pg_addr = PAGE_ALIGN((unsigned long) addr);
        end = (unsigned long) addr + size;
        while (pg_addr + PAGE_SIZE <= end) {
                struct page *page = virt_to_page((void *)pg_addr);
                set_bit(PG_arch_1, &page->flags);
                pg_addr += PAGE_SIZE;
        }
}
#endif

/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova:  IO Virtual Address mapped earlier
 * @byte_cnt:  number of bytes this mapping covers.
 *
 * Mark the IO PDIR entry(ies) as Invalid and invalidate the
 * corresponding IO TLB entry. The PCOM (Purge Command Register)
 * is used to purge stale entries in the IO TLB when unmapping.
 *
 * The PCOM register supports purging of multiple pages, with a minimum
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
        u32 iovp = (u32) SBA_IOVP(ioc,iova);

        int off = PDIR_INDEX(iovp);

        /* Must be non-zero and rounded up */
        ASSERT(byte_cnt > 0);
        ASSERT(0 == (byte_cnt & ~iovp_mask));

#ifdef ASSERT_PDIR_SANITY
        /* Assert first pdir entry is set */
        if (!(ioc->pdir_base[off] >> 60)) {
                sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
        }
#endif

        if (byte_cnt <= iovp_size)
        {
                ASSERT(off < ioc->pdir_size);

                iovp |= iovp_shift;     /* set "size" field for PCOM */

#ifndef FULL_VALID_PDIR
                /*
                ** clear I/O PDIR entry "valid" bit
                ** Do NOT clear the rest - save it for debugging.
                ** We should only clear bits that have previously
                ** been enabled.
                */
                ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
                /*
                ** If we want to maintain the PDIR as valid, put in
                ** the spill page so devices prefetching won't
                ** cause a hard fail.
                */
                ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
        } else {
                u32 t = get_iovp_order(byte_cnt) + iovp_shift;

                iovp |= t;
                ASSERT(t <= 31);        /* 2GB! Max value of "size" field */

                do {
                        /* verify this pdir entry is enabled */
                        ASSERT(ioc->pdir_base[off] >> 63);
#ifndef FULL_VALID_PDIR
                        /* clear I/O Pdir entry "valid" bit first */
                        ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
                        ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
                        off++;
                        byte_cnt -= iovp_size;
                } while (byte_cnt > 0);
        }

        WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
}
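
/*
** PCOM encoding, worked example (illustrative, iovp_shift == 12):
** purging a 16KB mapping at iovp 0x4000 writes
**
**      ioc->ibase | 0x4000 | (get_iovp_order(16*1024) + 12)
**              == ioc->ibase | 0x4000 | 14
**
** i.e. the low bits of the purge address carry log2 of the purge size
** (here 2^14 == 16KB), which is why allocations are rounded to
** naturally aligned power-of-two ranges.
*/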

/**
 * sba_map_single - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @addr:  driver buffer to map.
 * @size:  number of bytes to map in driver buffer.
 * @dir:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
dma_addr_t
sba_map_single(struct device *dev, void *addr, size_t size, int dir)
{
        struct ioc *ioc;
        dma_addr_t iovp;
        dma_addr_t offset;
        u64 *pdir_start;
        int pide;
#ifdef ASSERT_PDIR_SANITY
        unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS
        unsigned long pci_addr = virt_to_phys(addr);
#endif

#ifdef ALLOW_IOV_BYPASS
        ASSERT(to_pci_dev(dev)->dma_mask);
        /*
        ** Check if the PCI device can DMA to ptr... if so, just return ptr
        */
        if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
                /*
                ** Device is capable of DMA'ing to the buffer...
                ** just return the PCI address of ptr
                */
                DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n",
                           to_pci_dev(dev)->dma_mask, pci_addr);
                return pci_addr;
        }
#endif

        ioc = GET_IOC(dev);
        ASSERT(ioc);

        prefetch(ioc->res_hint);

        ASSERT(size > 0);
        ASSERT(size <= DMA_CHUNK_SIZE);

        /* save offset bits */
        offset = ((dma_addr_t) (long) addr) & ~iovp_mask;

        /* round up to nearest iovp_size */
        size = (size + offset + ~iovp_mask) & iovp_mask;

#ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
        if (sba_check_pdir(ioc,"Check before sba_map_single()"))
                panic("Sanity check failed");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

        pide = sba_alloc_range(ioc, size);
        iovp = (dma_addr_t) pide << iovp_shift;

        DBG_RUN("%s() 0x%p -> 0x%lx\n",
                __FUNCTION__, addr, (long) iovp | offset);

        pdir_start = &(ioc->pdir_base[pide]);

        while (size > 0) {
                ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
                sba_io_pdir_entry(pdir_start, (unsigned long) addr);

                DBG_RUN("     pdir 0x%p %lx\n", pdir_start, *pdir_start);

                addr += iovp_size;
                size -= iovp_size;
                pdir_start++;
        }
        /* force pdir update */
        wmb();

        /* form complete address */
#ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
        sba_check_pdir(ioc,"Check after sba_map_single()");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
        return SBA_IOVA(ioc, iovp, offset);
}

#ifdef ENABLE_MARK_CLEAN
static SBA_INLINE void
sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
{
        u32 iovp = (u32) SBA_IOVP(ioc,iova);
        int off = PDIR_INDEX(iovp);
        void *addr;

        if (size <= iovp_size) {
                addr = phys_to_virt(ioc->pdir_base[off] &
                                    ~0xE000000000000FFFULL);
                mark_clean(addr, size);
        } else {
                do {
                        addr = phys_to_virt(ioc->pdir_base[off] &
                                            ~0xE000000000000FFFULL);
                        mark_clean(addr, min(size, iovp_size));
                        off++;
                        size -= iovp_size;
                } while (size > 0);
        }
}
#endif

/**
 * sba_unmap_single - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova:  IOVA of driver buffer previously mapped.
 * @size:  number of bytes mapped in driver buffer.
 * @dir:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
{
        struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
        struct sba_dma_pair *d;
#endif
        unsigned long flags;
        dma_addr_t offset;

        ioc = GET_IOC(dev);
        ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS
        if (likely((iova & ioc->imask) != ioc->ibase)) {
                /*
                ** Address does not fall w/in IOVA, must be bypassing
                */
                DBG_BYPASS("sba_unmap_single() bypass addr: 0x%lx\n", iova);

#ifdef ENABLE_MARK_CLEAN
                if (dir == DMA_FROM_DEVICE) {
                        mark_clean(phys_to_virt(iova), size);
                }
#endif
                return;
        }
#endif
        offset = iova & ~iovp_mask;

        DBG_RUN("%s() iovp 0x%lx/%x\n",
                __FUNCTION__, (long) iova, size);

        iova ^= offset;        /* clear offset bits */
        size += offset;
        size = ROUNDUP(size, iovp_size);

#ifdef ENABLE_MARK_CLEAN
        if (dir == DMA_FROM_DEVICE)
                sba_mark_clean(ioc, iova, size);
#endif

#if DELAYED_RESOURCE_CNT > 0
        spin_lock_irqsave(&ioc->saved_lock, flags);
        d = &(ioc->saved[ioc->saved_cnt]);
        d->iova = iova;
        d->size = size;
        if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) {
                int cnt = ioc->saved_cnt;
                spin_lock(&ioc->res_lock);
                while (cnt--) {
                        sba_mark_invalid(ioc, d->iova, d->size);
                        sba_free_range(ioc, d->iova, d->size);
                        d--;
                }
                ioc->saved_cnt = 0;
                READ_REG(ioc->ioc_hpa+IOC_PCOM);        /* flush purges */
                spin_unlock(&ioc->res_lock);
        }
        spin_unlock_irqrestore(&ioc->saved_lock, flags);
#else /* DELAYED_RESOURCE_CNT == 0 */
        spin_lock_irqsave(&ioc->res_lock, flags);
        sba_mark_invalid(ioc, iova, size);
        sba_free_range(ioc, iova, size);
        READ_REG(ioc->ioc_hpa+IOC_PCOM);        /* flush purges */
        spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif /* DELAYED_RESOURCE_CNT == 0 */
}

/**
 * sba_alloc_coherent - allocate/map shared mem for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @dma_handle:  IOVA of new buffer.
 *
 * See Documentation/DMA-mapping.txt
 */
void *
sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
{
        struct ioc *ioc;
        void *addr;

        ioc = GET_IOC(dev);
        ASSERT(ioc);

#ifdef CONFIG_NUMA
        {
                struct page *page;
                page = alloc_pages_node(ioc->node == MAX_NUMNODES ?
                                        numa_node_id() : ioc->node, flags,
                                        get_order(size));

                if (unlikely(!page))
                        return NULL;

                addr = page_address(page);
        }
#else
        addr = (void *) __get_free_pages(flags, get_order(size));
#endif
        if (unlikely(!addr))
                return NULL;

        memset(addr, 0, size);
        *dma_handle = virt_to_phys(addr);

#ifdef ALLOW_IOV_BYPASS
        ASSERT(dev->coherent_dma_mask);
        /*
        ** Check if the PCI device can DMA to ptr... if so, just return ptr
        */
        if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) {
                DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
                           dev->coherent_dma_mask, *dma_handle);

                return addr;
        }
#endif

        /*
         * If device can't bypass or bypass is disabled, pass the 32bit fake
         * device to map single to get an iova mapping.
         */
        *dma_handle = sba_map_single(&ioc->sac_only_dev->dev, addr, size, 0);

        return addr;
}


/**
 * sba_free_coherent - free/unmap shared mem for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @vaddr:  virtual address of the "consistent" buffer to free.
 * @dma_handle:  IO virtual address of the "consistent" buffer.
 *
 * See Documentation/DMA-mapping.txt
 */
void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
        sba_unmap_single(dev, dma_handle, size, 0);
        free_pages((unsigned long) vaddr, get_order(size));
}


/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x1UL
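
/*
** Illustrative flow: sba_coalesce_chunks() temporarily stores
**
**      dma_address = PIDE_FLAG | (pide << iovp_shift) | dma_offset
**
** in the head entry of each DMA stream, and sba_fill_pdir() later
** strips PIDE_FLAG and rewrites dma_address as the real IOVA
** (ioc->ibase | iovp | offset).  Since pide << iovp_shift is always
** at least page aligned, bit 0 is free to act as the marker.
*/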

#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif


/**
 * sba_fill_pdir - write allocated SG entries into IO PDIR
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg:  list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * Take preprocessed SG list and write corresponding entries
 * in the IO PDIR.
 */
static SBA_INLINE int
sba_fill_pdir(
        struct ioc *ioc,
        struct scatterlist *startsg,
        int nents)
{
        struct scatterlist *dma_sg = startsg;   /* pointer to current DMA */
        int n_mappings = 0;
        u64 *pdirp = NULL;
        unsigned long dma_offset = 0;

        dma_sg--;
        while (nents-- > 0) {
                int cnt = startsg->dma_length;
                startsg->dma_length = 0;

#ifdef DEBUG_LARGE_SG_ENTRIES
                if (dump_run_sg)
                        printk(" %2d : %08lx/%05x %p\n",
                               nents, startsg->dma_address, cnt,
                               sba_sg_address(startsg));
#else
                DBG_RUN_SG(" %d : %08lx/%05x %p\n",
                           nents, startsg->dma_address, cnt,
                           sba_sg_address(startsg));
#endif
                /*
                ** Look for the start of a new DMA stream
                */
                if (startsg->dma_address & PIDE_FLAG) {
                        u32 pide = startsg->dma_address & ~PIDE_FLAG;
                        dma_offset = (unsigned long) pide & ~iovp_mask;
                        startsg->dma_address = 0;
                        dma_sg++;
                        dma_sg->dma_address = pide | ioc->ibase;
                        pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
                        n_mappings++;
                }

                /*
                ** Look for a VCONTIG chunk
                */
                if (cnt) {
                        unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
                        ASSERT(pdirp);

                        /* Since multiple Vcontig blocks could make up
                        ** one DMA stream, *add* cnt to dma_len.
                        */
                        dma_sg->dma_length += cnt;
                        cnt += dma_offset;
                        dma_offset = 0; /* only want offset on first chunk */
                        cnt = ROUNDUP(cnt, iovp_size);
                        do {
                                sba_io_pdir_entry(pdirp, vaddr);
                                vaddr += iovp_size;
                                cnt -= iovp_size;
                                pdirp++;
                        } while (cnt > 0);
                }
                startsg++;
        }
        /* force pdir update */
        wmb();

#ifdef DEBUG_LARGE_SG_ENTRIES
        dump_run_sg = 0;
#endif
        return(n_mappings);
}

/*
** Two address ranges are DMA contiguous *iff* "end of prev" and
** "start of next" are both on an IOV page boundary.
**
** (shift left is a quick trick to mask off upper bits)
*/
#define DMA_CONTIG(__X, __Y) \
        (((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)
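
/*
** e.g. (illustrative, iovp_shift == 12, BITS_PER_LONG == 64): shifting
** the OR of both addresses left by 52 keeps only their low 12 bits, so
** DMA_CONTIG(0x...7000, 0x...8000) is true (both page aligned) while
** DMA_CONTIG(0x...7f80, 0x...8000) is false.
*/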

/**
 * sba_coalesce_chunks - preprocess the SG list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg:  list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * First pass is to walk the SG list and determine where the breaks are
 * in the DMA stream. Allocates PDIR entries but does not fill them.
 * Returns the number of DMA chunks.
 *
 * Doing the fill separate from the coalescing/allocation keeps the
 * code simpler. Future enhancement could make one pass through
 * the sglist do both.
 */
static SBA_INLINE int
sba_coalesce_chunks( struct ioc *ioc,
                     struct scatterlist *startsg,
                     int nents)
{
        struct scatterlist *vcontig_sg;    /* VCONTIG chunk head */
        unsigned long vcontig_len;         /* len of VCONTIG chunk */
        unsigned long vcontig_end;
        struct scatterlist *dma_sg;        /* next DMA stream head */
        unsigned long dma_offset, dma_len; /* start/len of DMA stream */
        int n_mappings = 0;

        while (nents > 0) {
                unsigned long vaddr = (unsigned long) sba_sg_address(startsg);

                /*
                ** Prepare for first/next DMA stream
                */
                dma_sg = vcontig_sg = startsg;
                dma_len = vcontig_len = vcontig_end = startsg->length;
                vcontig_end += vaddr;
                dma_offset = vaddr & ~iovp_mask;

                /* PARANOID: clear entries */
                startsg->dma_address = startsg->dma_length = 0;

                /*
                ** This loop terminates one iteration "early" since
                ** it's always looking one "ahead".
                */
                while (--nents > 0) {
                        unsigned long vaddr;    /* tmp */

                        startsg++;

                        /* PARANOID */
                        startsg->dma_address = startsg->dma_length = 0;

                        /* catch brokenness in SCSI layer */
                        ASSERT(startsg->length <= DMA_CHUNK_SIZE);

                        /*
                        ** First make sure current dma stream won't
                        ** exceed DMA_CHUNK_SIZE if we coalesce the
                        ** next entry.
                        */
                        if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask)
                            > DMA_CHUNK_SIZE)
                                break;

                        /*
                        ** Then look for virtually contiguous blocks.
                        **
                        ** append the next transaction?
                        */
                        vaddr = (unsigned long) sba_sg_address(startsg);
                        if (vcontig_end == vaddr)
                        {
                                vcontig_len += startsg->length;
                                vcontig_end += startsg->length;
                                dma_len     += startsg->length;
                                continue;
                        }

#ifdef DEBUG_LARGE_SG_ENTRIES
                        dump_run_sg = (vcontig_len > iovp_size);
#endif

                        /*
                        ** Not virtually contiguous.
                        ** Terminate prev chunk.
                        ** Start a new chunk.
                        **
                        ** Once we start a new VCONTIG chunk, dma_offset
                        ** can't change. And we need the offset from the first
                        ** chunk - not the last one. Ergo, successive chunks
                        ** must start on page boundaries and dovetail
                        ** with their predecessors.
                        */
                        vcontig_sg->dma_length = vcontig_len;

                        vcontig_sg = startsg;
                        vcontig_len = startsg->length;

                        /*
                        ** 3) do the entries end/start on page boundaries?
                        **    Don't update vcontig_end until we've checked.
                        */
                        if (DMA_CONTIG(vcontig_end, vaddr))
                        {
                                vcontig_end = vcontig_len + vaddr;
                                dma_len += vcontig_len;
                                continue;
                        } else {
                                break;
                        }
                }

                /*
                ** End of DMA Stream
                ** Terminate last VCONTIG block.
                ** Allocate space for DMA stream.
                */
                vcontig_sg->dma_length = vcontig_len;
                dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
                ASSERT(dma_len <= DMA_CHUNK_SIZE);
                dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
                        | (sba_alloc_range(ioc, dma_len) << iovp_shift)
                        | dma_offset);
                n_mappings++;
        }

        return n_mappings;
}

/**
 * sba_map_sg - map Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @dir:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int dir)
{
        struct ioc *ioc;
        int coalesced, filled = 0;
#ifdef ASSERT_PDIR_SANITY
        unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS_SG
        struct scatterlist *sg;
#endif

        DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
        ioc = GET_IOC(dev);
        ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS_SG
        ASSERT(to_pci_dev(dev)->dma_mask);
        if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
                for (sg = sglist ; filled < nents ; filled++, sg++){
                        sg->dma_length = sg->length;
                        sg->dma_address = virt_to_phys(sba_sg_address(sg));
                }
                return filled;
        }
#endif
        /* Fast path single entry scatterlists. */
        if (nents == 1) {
                sglist->dma_length = sglist->length;
                sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist), sglist->length, dir);
                return 1;
        }

#ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
        if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
        {
                sba_dump_sg(ioc, sglist, nents);
                panic("Check before sba_map_sg()");
        }
        spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

        prefetch(ioc->res_hint);

        /*
        ** First coalesce the chunks and allocate I/O pdir space
        **
        ** If this is one DMA stream, we can properly map using the
        ** correct virtual address associated with each DMA page.
        ** w/o this association, we wouldn't have coherent DMA!
        ** Access to the virtual address is what forces a two pass algorithm.
        */
        coalesced = sba_coalesce_chunks(ioc, sglist, nents);

        /*
        ** Program the I/O Pdir
        **
        ** map the virtual addresses to the I/O Pdir
        **   o dma_address will contain the pdir index
        **   o dma_len will contain the number of bytes to map
        **   o address contains the virtual address.
        */
        filled = sba_fill_pdir(ioc, sglist, nents);

#ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
        if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
        {
                sba_dump_sg(ioc, sglist, nents);
                panic("Check after sba_map_sg()\n");
        }
        spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

        ASSERT(coalesced == filled);
        DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);

        return filled;
}

/**
 * sba_unmap_sg - unmap Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @dir:  R/W or both.
 *
 * See Documentation/DMA-mapping.txt
 */
void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
{
#ifdef ASSERT_PDIR_SANITY
        struct ioc *ioc;
        unsigned long flags;
#endif

        DBG_RUN_SG("%s() START %d entries, %p,%x\n",
                   __FUNCTION__, nents, sba_sg_address(sglist), sglist->length);

#ifdef ASSERT_PDIR_SANITY
        ioc = GET_IOC(dev);
        ASSERT(ioc);

        spin_lock_irqsave(&ioc->res_lock, flags);
        sba_check_pdir(ioc,"Check before sba_unmap_sg()");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

        while (nents && sglist->dma_length) {
                sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
                sglist++;
                nents--;
        }

        DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);

#ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
        sba_check_pdir(ioc,"Check after sba_unmap_sg()");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
}

/**************************************************************
*
*   Initialization and claim
*
***************************************************************/
static void __init
ioc_iova_init(struct ioc *ioc)
{
        int tcnfg;
        int agp_found = 0;
        struct pci_dev *device = NULL;
#ifdef FULL_VALID_PDIR
        unsigned long index;
#endif

        /*
        ** Firmware programs the base and size of a "safe IOVA space"
        ** (one that doesn't overlap memory or LMMIO space) in the
        ** IBASE and IMASK registers.
        */
        ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
        ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL;

        ioc->iov_size = ~ioc->imask + 1;
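        /*
        ** e.g. (illustrative values): an IMASK of 0xFFFFFFFFC0000000
        ** gives iov_size == ~0xFFFFFFFFC0000000 + 1 == 0x40000000,
        ** a 1GB IOV window starting at ibase.
        */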

        DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
                 __FUNCTION__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
                 ioc->iov_size >> 20);

        switch (iovp_size) {
                case  4*1024: tcnfg = 0; break;
                case  8*1024: tcnfg = 1; break;
                case 16*1024: tcnfg = 2; break;
                case 64*1024: tcnfg = 3; break;
                default:
                        panic(PFX "Unsupported IOTLB page size %ldK",
                              iovp_size >> 10);
                        break;
        }
        WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

        ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE;
  1336. ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
  1337. get_order(ioc->pdir_size));
  1338. if (!ioc->pdir_base)
  1339. panic(PFX "Couldn't allocate I/O Page Table\n");
  1340. memset(ioc->pdir_base, 0, ioc->pdir_size);
  1341. DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __FUNCTION__,
  1342. iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);
  1343. ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
  1344. WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
  1345. /*
  1346. ** If an AGP device is present, only use half of the IOV space
  1347. ** for PCI DMA. Unfortunately we can't know ahead of time
  1348. ** whether GART support will actually be used, for now we
  1349. ** can just key on an AGP device found in the system.
  1350. ** We program the next pdir index after we stop w/ a key for
  1351. ** the GART code to handshake on.
  1352. */
  1353. for_each_pci_dev(device)
  1354. agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);
  1355. if (agp_found && reserve_sba_gart) {
  1356. printk(KERN_INFO PFX "reserving %dMb of IOVA space at 0x%lx for agpgart\n",
  1357. ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2);
  1358. ioc->pdir_size /= 2;
  1359. ((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
  1360. }
  1361. #ifdef FULL_VALID_PDIR
  1362. /*
  1363. ** Check to see if the spill page has been allocated, we don't need more than
  1364. ** one across multiple SBAs.
  1365. */
  1366. if (!prefetch_spill_page) {
  1367. char *spill_poison = "SBAIOMMU POISON";
  1368. int poison_size = 16;
  1369. void *poison_addr, *addr;
  1370. addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size));
  1371. if (!addr)
  1372. panic(PFX "Couldn't allocate PDIR spill page\n");
  1373. poison_addr = addr;
  1374. for ( ; (u64) poison_addr < addr + iovp_size; poison_addr += poison_size)
  1375. memcpy(poison_addr, spill_poison, poison_size);
  1376. prefetch_spill_page = virt_to_phys(addr);
  1377. DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__, prefetch_spill_page);
  1378. }
  1379. /*
  1380. ** Set all the PDIR entries valid w/ the spill page as the target
  1381. */
  1382. for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++)
  1383. ((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
  1384. #endif
  1385. /* Clear I/O TLB of any possible entries */
  1386. WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM);
  1387. READ_REG(ioc->ioc_hpa + IOC_PCOM);
  1388. /* Enable IOVA translation */
  1389. WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
  1390. READ_REG(ioc->ioc_hpa + IOC_IBASE);
  1391. }
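/*
 * A worked example of the register encodings above, assuming (purely
 * for illustration) a firmware-provided 1GB IOVA space and 4K I/O
 * pages (iovp_shift == 12):
 *
 *   TCNFG: the switch maps 4K/8K/16K/64K to 0/1/2/3, i.e.
 *          tcnfg == log2(iovp_size) - 12.
 *   PCOM:  the purge register takes an IOVA with log2(bytes to purge)
 *          encoded in the low bits; get_iovp_order(1GB) + iovp_shift
 *          is 18 + 12 == 30, so writing (ibase | 30) flushes the whole
 *          2^30-byte space in one shot.
 *   IBASE: bit 0 enables translation, which is why it is masked off
 *          when the base is read at the top of ioc_iova_init() and
 *          OR'd back in by the final write.
 */
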
static void __init
ioc_resource_init(struct ioc *ioc)
{
	spin_lock_init(&ioc->res_lock);
#if DELAYED_RESOURCE_CNT > 0
	spin_lock_init(&ioc->saved_lock);
#endif

	/* resource map size dictated by pdir_size */
	ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
	ioc->res_size >>= 3;	/* convert bit count to byte count */
	DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size);

	ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
						 get_order(ioc->res_size));
	if (!ioc->res_map)
		panic(PFX "Couldn't allocate resource map\n");

	memset(ioc->res_map, 0, ioc->res_size);
	/* next available IOVP - circular search */
	ioc->res_hint = (unsigned long *) ioc->res_map;

#ifdef ASSERT_PDIR_SANITY
	/* Mark first bit busy - ie no IOVA 0 */
	ioc->res_map[0] = 0x1;
	ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
#endif
#ifdef FULL_VALID_PDIR
	/* Mark the last resource used so we don't prefetch beyond IOVA space */
	ioc->res_map[ioc->res_size - 1] |= 0x80UL;	/* res_map is chars */
	ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF
							      | prefetch_spill_page);
#endif

	DBG_INIT("%s() res_map %x %p\n", __FUNCTION__,
		 ioc->res_size, (void *) ioc->res_map);
}

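/*
 * Sizing sanity check (illustrative numbers): with a 1GB IOVA space and
 * 4K I/O pages, pdir_size / PDIR_ENTRY_SIZE is 256K pdir entries; at
 * one res_map bit per entry that is a 32KB bitmap.  Each set bit marks
 * exactly one IOVA page as allocated.
 */
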
static void __init
ioc_sac_init(struct ioc *ioc)
{
	struct pci_dev *sac = NULL;
	struct pci_controller *controller = NULL;

	/*
	 * pci_alloc_coherent() must return a DMA address which is
	 * SAC (single address cycle) addressable, so allocate a
	 * pseudo-device to enforce that.
	 */
	sac = kzalloc(sizeof(*sac), GFP_KERNEL);
	if (!sac)
		panic(PFX "Couldn't allocate struct pci_dev");

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		panic(PFX "Couldn't allocate struct pci_controller");

	controller->iommu = ioc;
	sac->sysdata = controller;
	sac->dma_mask = 0xFFFFFFFFUL;
#ifdef CONFIG_PCI
	sac->dev.bus = &pci_bus_type;
#endif
	ioc->sac_only_dev = sac;
}

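/*
 * The 32-bit dma_mask above is what makes this pseudo-device "SAC only":
 * coherent allocations routed through it must land below 4GB, the limit
 * reachable with a single address cycle on PCI.
 */
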
static void __init
ioc_zx1_init(struct ioc *ioc)
{
	unsigned long rope_config;
	unsigned int i;

	if (ioc->rev < 0x20)
		panic(PFX "IOC 2.0 or later required for IOMMU support\n");

	/* 38 bit memory controller + extra bit for range displaced by MMIO */
	ioc->dma_mask = (0x1UL << 39) - 1;

	/*
	** Clear ROPE(N)_CONFIG AO bit.
	** Disables "NT Ordering" (~= !"Relaxed Ordering")
	** Overrides bit 1 in DMA Hint Sets.
	** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701.
	*/
	for (i = 0; i < (8*8); i += 8) {
		rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i);
		rope_config &= ~IOC_ROPE_AO;
		WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i);
	}
}

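/*
 * The loop above visits eight ROPE(N)_CONFIG registers laid out 8 bytes
 * apart starting at IOC_ROPE0_CFG; that spacing is the only reason for
 * the (8*8) bound and the i += 8 stride.
 */
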
typedef void (initfunc)(struct ioc *);

struct ioc_iommu {
	u32 func_id;
	char *name;
	initfunc *init;
};

static struct ioc_iommu ioc_iommu_info[] __initdata = {
	{ ZX1_IOC_ID, "zx1", ioc_zx1_init },
	{ ZX2_IOC_ID, "zx2", NULL },
	{ SX1000_IOC_ID, "sx1000", NULL },
	{ SX2000_IOC_ID, "sx2000", NULL },
};

static struct ioc * __init
ioc_init(u64 hpa, void *handle)
{
	struct ioc *ioc;
	struct ioc_iommu *info;

	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc)
		return NULL;

	ioc->next = ioc_list;
	ioc_list = ioc;

	ioc->handle = handle;
	ioc->ioc_hpa = ioremap(hpa, 0x1000);

	ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
	ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
	ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL;	/* conservative */

	for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
		if (ioc->func_id == info->func_id) {
			ioc->name = info->name;
			if (info->init)
				(info->init)(ioc);
		}
	}

	iovp_size = (1 << iovp_shift);
	iovp_mask = ~(iovp_size - 1);

	DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __FUNCTION__,
		PAGE_SIZE >> 10, iovp_size >> 10);

	if (!ioc->name) {
		ioc->name = kmalloc(24, GFP_KERNEL);
		if (ioc->name)
			sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
				ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
		else
			ioc->name = "Unknown";
	}

	ioc_iova_init(ioc);
	ioc_resource_init(ioc);
	ioc_sac_init(ioc);

	if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask)
		ia64_max_iommu_merge_mask = ~iovp_mask;

	printk(KERN_INFO PFX
		"%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
		ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
		hpa, ioc->iov_size >> 20, ioc->ibase);

	return ioc;
}

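/*
 * On success the printk above produces a line of the following shape
 * (all values hypothetical):
 *
 *   zx1 2.1 HPA 0xfed01000 IOVA space 1024Mb at 0x40000000
 *
 * i.e. the rev byte split into major/minor nibbles, then the CSR base,
 * the IOVA space size in MB and the IOVA base address.
 */
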
/**************************************************************************
**
**   SBA initialization code (HW and SW)
**
** o identify SBA chip itself
** o FIXME: initialize DMA hints for reasonable defaults
**
**************************************************************************/

#ifdef CONFIG_PROC_FS
static void *
ioc_start(struct seq_file *s, loff_t *pos)
{
	struct ioc *ioc;
	loff_t n = *pos;

	for (ioc = ioc_list; ioc; ioc = ioc->next)
		if (!n--)
			return ioc;

	return NULL;
}

static void *
ioc_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct ioc *ioc = v;

	++*pos;
	return ioc->next;
}

static void
ioc_stop(struct seq_file *s, void *v)
{
}

static int
ioc_show(struct seq_file *s, void *v)
{
	struct ioc *ioc = v;
	unsigned long *res_ptr = (unsigned long *)ioc->res_map;
	int i, used = 0;

	seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
		ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
#ifdef CONFIG_NUMA
	if (ioc->node != MAX_NUMNODES)
		seq_printf(s, "NUMA node : %d\n", ioc->node);
#endif
	seq_printf(s, "IOVA size : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024));
	seq_printf(s, "IOVA page size : %ld kb\n", iovp_size/1024);

	for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		used += hweight64(*res_ptr);

	seq_printf(s, "PDIR size : %d entries\n", ioc->pdir_size >> 3);
	seq_printf(s, "PDIR used : %d entries\n", used);

#ifdef PDIR_SEARCH_TIMING
	{
		unsigned long i = 0, avg = 0, min, max;

		min = max = ioc->avg_search[0];
		for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
			avg += ioc->avg_search[i];
			if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
			if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
		}
		avg /= SBA_SEARCH_SAMPLE;
		seq_printf(s, "Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
			   min, avg, max);
	}
#endif
#ifndef ALLOW_IOV_BYPASS
	seq_printf(s, "IOVA bypass disabled\n");
#endif
	return 0;
}

static struct seq_operations ioc_seq_ops = {
	.start = ioc_start,
	.next  = ioc_next,
	.stop  = ioc_stop,
	.show  = ioc_show
};

static int
ioc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &ioc_seq_ops);
}

static const struct file_operations ioc_fops = {
	.open    = ioc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release
};

static void __init
ioc_proc_init(void)
{
	struct proc_dir_entry *dir, *entry;

	dir = proc_mkdir("bus/mckinley", NULL);
	if (!dir)
		return;

	entry = create_proc_entry(ioc_list->name, 0, dir);
	if (entry)
		entry->proc_fops = &ioc_fops;
}
#endif

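/*
 * Given the seq_printf calls above, /proc/bus/mckinley/<ioc name> reads
 * roughly as follows (values hypothetical, optional lines omitted):
 *
 *   Hewlett Packard zx1 IOC rev 2.1
 *   IOVA size : 1024 MB
 *   IOVA page size : 4 kb
 *   PDIR size : 262144 entries
 *   PDIR used : 23 entries
 */
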
static void
sba_connect_bus(struct pci_bus *bus)
{
	acpi_handle handle, parent;
	acpi_status status;
	struct ioc *ioc;

	if (!PCI_CONTROLLER(bus))
		panic(PFX "no sysdata on bus %d!\n", bus->number);

	if (PCI_CONTROLLER(bus)->iommu)
		return;

	handle = PCI_CONTROLLER(bus)->acpi_handle;
	if (!handle)
		return;

	/*
	 * The IOC scope encloses PCI root bridges in the ACPI
	 * namespace, so work our way out until we find an IOC we
	 * claimed previously.
	 */
	do {
		for (ioc = ioc_list; ioc; ioc = ioc->next)
			if (ioc->handle == handle) {
				PCI_CONTROLLER(bus)->iommu = ioc;
				return;
			}

		status = acpi_get_parent(handle, &parent);
		handle = parent;
	} while (ACPI_SUCCESS(status));

	printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus), bus->number);
}

#ifdef CONFIG_NUMA
static void __init
sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
{
	unsigned int node;
	int pxm;

	ioc->node = MAX_NUMNODES;

	pxm = acpi_get_pxm(handle);
	if (pxm < 0)
		return;

	node = pxm_to_node(pxm);
	if (node >= MAX_NUMNODES || !node_online(node))
		return;

	ioc->node = node;
	return;
}
#else
#define sba_map_ioc_to_node(ioc, handle)
#endif

static int __init
acpi_sba_ioc_add(struct acpi_device *device)
{
	struct ioc *ioc;
	acpi_status status;
	u64 hpa, length;
	struct acpi_buffer buffer;
	struct acpi_device_info *dev_info;

	status = hp_acpi_csr_space(device->handle, &hpa, &length);
	if (ACPI_FAILURE(status))
		return 1;

	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
	status = acpi_get_object_info(device->handle, &buffer);
	if (ACPI_FAILURE(status))
		return 1;
	dev_info = buffer.pointer;

	/*
	 * For HWP0001, only SBA appears in ACPI namespace.  It encloses the PCI
	 * root bridges, and its CSR space includes the IOC function.
	 */
	if (strncmp("HWP0001", dev_info->hardware_id.value, 7) == 0) {
		hpa += ZX1_IOC_OFFSET;
		/* zx1 based systems default to kernel page size iommu pages */
		if (!iovp_shift)
			iovp_shift = min(PAGE_SHIFT, 16);
	}
	kfree(dev_info);

	/*
	 * default anything not caught above or specified on cmdline to 4k
	 * iommu page size
	 */
	if (!iovp_shift)
		iovp_shift = 12;

	ioc = ioc_init(hpa, device->handle);
	if (!ioc)
		return 1;

	/* setup NUMA node association */
	sba_map_ioc_to_node(ioc, device->handle);
	return 0;
}

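/*
 * iovp_shift resolution order, for reference: a "sbapagesize=" boot
 * option (see sba_page_override() below) runs during early parameter
 * parsing and wins by setting iovp_shift non-zero first; failing that,
 * HWP0001 (zx1) devices default to min(PAGE_SHIFT, 16) above, and
 * everything else falls back to 12 (4K I/O pages).
 */
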
static struct acpi_driver acpi_sba_ioc_driver = {
	.name	= "IOC IOMMU Driver",
	.ids	= "HWP0001,HWP0004",
	.ops	= {
		.add	= acpi_sba_ioc_add,
	},
};

static int __init
sba_init(void)
{
	if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
		return 0;

	acpi_bus_register_driver(&acpi_sba_ioc_driver);
	if (!ioc_list) {
#ifdef CONFIG_IA64_GENERIC
		extern int swiotlb_late_init_with_default_size (size_t size);

		/*
		 * If we didn't find something sba_iommu can claim, we
		 * need to setup the swiotlb and switch to the dig machvec.
		 */
		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
			panic("Unable to find SBA IOMMU or initialize "
			      "software I/O TLB: Try machvec=dig boot option");
		machvec_init("dig");
#else
		panic("Unable to find SBA IOMMU: Try a generic or DIG kernel");
#endif
		return 0;
	}

#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
	/*
	 * hpzx1_swiotlb needs to have a fairly small swiotlb bounce
	 * buffer setup to support devices with smaller DMA masks than
	 * sba_iommu can handle.
	 */
	if (ia64_platform_is("hpzx1_swiotlb")) {
		extern void hwsw_init(void);

		hwsw_init();
	}
#endif

#ifdef CONFIG_PCI
	{
		struct pci_bus *b = NULL;

		while ((b = pci_find_next_bus(b)) != NULL)
			sba_connect_bus(b);
	}
#endif

#ifdef CONFIG_PROC_FS
	ioc_proc_init();
#endif
	return 0;
}

subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */

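/*
 * The !ioc_list test in sba_init() relies on acpi_bus_register_driver()
 * probing matching devices synchronously: by the time it returns,
 * acpi_sba_ioc_add() has been called for every HWP0001/HWP0004 device
 * present, so an empty ioc_list really does mean "no SBA IOMMU found".
 */
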
static int __init
nosbagart(char *str)
{
	reserve_sba_gart = 0;
	return 1;
}

int
sba_dma_supported (struct device *dev, u64 mask)
{
	/* make sure it's at least 32bit capable */
	return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
}

int
sba_dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

__setup("nosbagart", nosbagart);

static int __init
sba_page_override(char *str)
{
	unsigned long page_size;

	page_size = memparse(str, &str);
	switch (page_size) {
		case 4096:
		case 8192:
		case 16384:
		case 65536:
			iovp_shift = ffs(page_size) - 1;
			break;
		default:
			printk("%s: unknown/unsupported iommu page size %ld\n",
			       __FUNCTION__, page_size);
	}

	return 1;
}

__setup("sbapagesize=", sba_page_override);

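/*
 * Boot-line usage example (hypothetical command line): memparse()
 * accepts size suffixes, so both forms below select 16K I/O pages,
 * while "nosbagart" disables the AGP GART reservation in
 * ioc_iova_init():
 *
 *   sbapagesize=16384 nosbagart
 *   sbapagesize=16k nosbagart
 */
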
EXPORT_SYMBOL(sba_dma_mapping_error);
EXPORT_SYMBOL(sba_map_single);
EXPORT_SYMBOL(sba_unmap_single);
EXPORT_SYMBOL(sba_map_sg);
EXPORT_SYMBOL(sba_unmap_sg);
EXPORT_SYMBOL(sba_dma_supported);
EXPORT_SYMBOL(sba_alloc_coherent);
EXPORT_SYMBOL(sba_free_coherent);